// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/kcov.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
					   unsigned int present_fcs_len,
					   unsigned int rtap_space)
{
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen;
	__le16 fc;

	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	__pskb_pull(skb, rtap_space);

	hdr = (void *)skb->data;
	fc = hdr->frame_control;

	/*
	 * Remove the HT-Control field (if present) on management
	 * frames after we've sent the frame to monitoring. We
	 * (currently) don't need it, and don't properly parse
	 * frames with it present, due to the assumption of a
	 * fixed management header length.
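	 *
	 * For example, a management frame with the order bit set has a
	 * 28 byte header (24 bytes plus IEEE80211_HT_CTL_LEN); the
	 * memmove()/__pskb_pull() pair below shifts the first 24 bytes
	 * over the HT Control field and then drops the duplicated
	 * leading bytes, leaving a plain 24 byte header in front of
	 * the frame body.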
	 */
	if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
		return skb;

	hdrlen = ieee80211_hdrlen(fc);
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);

	if (!pskb_may_pull(skb, hdrlen)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
		hdrlen - IEEE80211_HT_CTL_LEN);
	__pskb_pull(skb, IEEE80211_HT_CTL_LEN);

	return skb;
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);
	/* vendor presence bitmap */
	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
		len += 4;

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap;
		int vendor_data_offset = 0;

		/*
		 * The position to look at depends on the existence (or non-
		 * existence) of other elements, so take that into account...
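		 * In other words, vendor_data_offset accumulates the size of
		 * each radiotap element (HE, HE-MU, L-SIG) that the driver
		 * placed in front of the vendor TLV at the start of
		 * skb->data.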
		 */
		if (status->flag & RX_FLAG_RADIOTAP_HE)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he);
		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he_mu);
		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_lsig);

		rtap = (void *)&skb->data[vendor_data_offset];

		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed __aligned(2) action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
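 *
 * Note: rtap_len is expected to be the value that
 * ieee80211_rx_radiotap_hdrlen() computed for the same rx status, so the
 * fields written here always fit into the pushed header.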
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	/* TODO: frequency offset in KHz */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ ||
	    status->band == NL80211_BAND_6GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->encoding == RX_ENC_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->enc_flags & RX_ENC_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		switch (status->bw) {
		case RATE_INFO_BW_80:
			*pos++ = 4;
			break;
		case RATE_INFO_BW_160:
			*pos++ = 11;
			break;
		case RATE_INFO_BW_40:
			*pos++ = 1;
			break;
		default:
			*pos++ = 0;
		}
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->nss;
		pos += 4;
		/* coding field */
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
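		 * (A driver could, for instance, reserve a few dozen bytes of
		 * extra headroom when allocating its RX buffers to avoid the
		 * reallocation below; that is a driver-side choice, not
		 * something mac80211 enforces.)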
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;
	unsigned int min_head_len;

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
		rtap_space += sizeof(struct ieee80211_radiotap_lsig);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap =
			(void *)(origskb->data + rtap_space);

		rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
	}

	min_head_len = rtap_space;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
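	 *
	 * The two cases map to ieee80211_make_monitor_skb() below: the
	 * original skb is handed off directly only for the last monitor
	 * interface when no other RX handler needs it (only_monitor),
	 * otherwise a copy is made with skb_copy_expand().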
	 */

	if (!(status->flag & RX_FLAG_NO_PSDU)) {
		if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
			if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
				/* driver bug */
				WARN_ON(1);
				dev_kfree_skb(origskb);
				return NULL;
			}
			present_fcs_len = FCS_LEN;
		}

		/* also consider the hdr->frame_control */
		min_head_len += 2;
	}

	/* ensure that the expected data elements are in skb head */
	if (!pskb_may_pull(origskb, min_head_len)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (only_monitor) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return ieee80211_clean_skb(origskb, present_fcs_len,
					   rtap_space);
	}

	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);

	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
		bool last_monitor = list_is_last(&sdata->u.mntr.list,
						 &local->mon_list);

		if (!monskb)
			monskb = ieee80211_make_monitor_skb(local, &origskb,
							    rate, rtap_space,
							    only_monitor &&
							    last_monitor);

		if (monskb) {
			struct sk_buff *skb;

			if (last_monitor) {
				skb = monskb;
				monskb = NULL;
			} else {
				skb = skb_clone(monskb, GFP_ATOMIC);
			}

			if (skb) {
				skb->dev = sdata->dev;
				dev_sw_netstats_rx_add(skb->dev, skb->len);
				netif_receive_skb(skb);
			}
		}

		if (last_monitor)
			break;
	}

	/* this happens if last_monitor was erroneously false */
	dev_kfree_skb(monskb);

	/* ditto */
	if (!origskb)
		return NULL;

	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
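 *
 * As a (purely illustrative) example of what the following paragraphs
 * describe: a QoS data frame has a 26-byte 802.11 header, so if a driver
 * starts the header at a 4-byte aligned address, the IP header behind the
 * 8-byte RFC 1042/SNAP header ends up at offset 34, which is not a multiple
 * of four; starting the header at an address that is 2 modulo 4 fixes that.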
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that which Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header so that it is directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_keyid(struct sk_buff *skb,
			       const struct ieee80211_cipher_scheme *cs)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	int minlen;
	u8 key_idx_off;
	u8 key_idx_shift;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (cs) {
		minlen = hdrlen + cs->hdr_len;
		key_idx_off = hdrlen + cs->key_idx_off;
		key_idx_shift = cs->key_idx_shift;
	} else {
		/* WEP, TKIP, CCMP and GCMP */
		minlen = hdrlen + IEEE80211_WEP_IV_LEN;
		key_idx_off = hdrlen + 3;
		key_idx_shift = 6;
	}

	if (unlikely(skb->len < minlen))
		return -EINVAL;

	skb_copy_bits(skb, key_idx_off, &keyid, 1);

	if (cs)
		keyid &= cs->key_idx_mask;
	keyid >>= key_idx_shift;

	/* cs could use more than the usual two bits for the keyid */
	if (unlikely(keyid >= NUM_DEFAULT_KEYS))
		return -EINVAL;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
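		/* Mark the frame as released from the reorder buffer;
		 * later handlers (e.g. the PS state tracking) check this
		 * flag to avoid acting on frames that were held back.
		 */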
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
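	 * (For A-MSDUs the head sequence number is only advanced once the
	 * last subframe, i.e. one without RX_FLAG_AMSDU_MORE, arrives.)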
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that do not
 * need (or cannot use) reordering are appended to the 'frames' queue for
 * immediate processing; everything else is consumed by the reorder buffer.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = ieee80211_get_tid(hdr);

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_any_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from stations that are not associated. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
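	 *
	 * For example, an EAPOL frame (the usual control_port_protocol,
	 * ethertype 0x888E) from the AP is still accepted below even before
	 * the ASSOC flag is set, to avoid racing with association processing.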
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];
		struct txq_info *txqi = to_txq_info(txq);

		spin_lock(&local->active_txq_lock[txq->ac]);
		if (!list_empty(&txqi->schedule_order))
			list_del_init(&txqi->schedule_order);
		spin_unlock(&local->active_txq_lock[txq->ac]);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else
		set_sta_flag(sta, WLAN_STA_PSPOLL);
}
EXPORT_SYMBOL(ieee80211_sta_pspoll);

void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	int ac = ieee80211_ac_from_tid(tid);

	/*
	 * If this AC is not trigger-enabled do nothing unless the
	 * driver is calling us after it already checked.
	 *
	 * NB: This could/should check a separate bitmap of trigger-
	 * enabled queues, but for now we only implement uAPSD w/o
	 * TSPEC changes to the ACs, so they're always the same.
	 */
	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
	    tid != IEEE80211_NUM_TIDS)
		return;

	/* if we are in a service period, do nothing */
	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_uapsd(sta);
	else
		set_sta_flag(sta, WLAN_STA_UAPSD);
}
EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	if (ieee80211_is_s1g_beacon(hdr->frame_control))
		return RX_CONTINUE;

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
1841 */ 1842 sta->rx_stats.packets++; 1843 dev_kfree_skb(rx->skb); 1844 return RX_QUEUED; 1845 } 1846 1847 return RX_CONTINUE; 1848 } /* ieee80211_rx_h_sta_process */ 1849 1850 static struct ieee80211_key * 1851 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1852 { 1853 struct ieee80211_key *key = NULL; 1854 struct ieee80211_sub_if_data *sdata = rx->sdata; 1855 int idx2; 1856 1857 /* Make sure key gets set if either BIGTK key index is set so that 1858 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1859 * Beacon frames and Beacon frames that claim to use another BIGTK key 1860 * index (i.e., a key that we do not have). 1861 */ 1862 1863 if (idx < 0) { 1864 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1865 idx2 = idx + 1; 1866 } else { 1867 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1868 idx2 = idx + 1; 1869 else 1870 idx2 = idx - 1; 1871 } 1872 1873 if (rx->sta) 1874 key = rcu_dereference(rx->sta->gtk[idx]); 1875 if (!key) 1876 key = rcu_dereference(sdata->keys[idx]); 1877 if (!key && rx->sta) 1878 key = rcu_dereference(rx->sta->gtk[idx2]); 1879 if (!key) 1880 key = rcu_dereference(sdata->keys[idx2]); 1881 1882 return key; 1883 } 1884 1885 static ieee80211_rx_result debug_noinline 1886 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1887 { 1888 struct sk_buff *skb = rx->skb; 1889 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1890 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1891 int keyidx; 1892 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1893 struct ieee80211_key *sta_ptk = NULL; 1894 struct ieee80211_key *ptk_idx = NULL; 1895 int mmie_keyidx = -1; 1896 __le16 fc; 1897 const struct ieee80211_cipher_scheme *cs = NULL; 1898 1899 if (ieee80211_is_ext(hdr->frame_control)) 1900 return RX_CONTINUE; 1901 1902 /* 1903 * Key selection 101 1904 * 1905 * There are five types of keys: 1906 * - GTK (group keys) 1907 * - IGTK (group keys for management frames) 1908 * - BIGTK (group keys for Beacon frames) 1909 * - PTK (pairwise keys) 1910 * - STK (station-to-station pairwise keys) 1911 * 1912 * When selecting a key, we have to distinguish between multicast 1913 * (including broadcast) and unicast frames, the latter can only 1914 * use PTKs and STKs while the former always use GTKs, IGTKs, and 1915 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 1916 * then unicast frames can also use key indices like GTKs. Hence, if we 1917 * don't have a PTK/STK we check the key index for a WEP key. 1918 * 1919 * Note that in a regular BSS, multicast frames are sent by the 1920 * AP only, associated stations unicast the frame to the AP first 1921 * which then multicasts it on their behalf. 1922 * 1923 * There is also a slight problem in IBSS mode: GTKs are negotiated 1924 * with each station, that is something we don't currently handle. 1925 * The spec seems to expect that one negotiates the same key with 1926 * every station but there's no such requirement; VLANs could be 1927 * possible. 
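 *
 * As a rough decision table (a simplification of the code below,
 * not literal source):
 *
 *	unicast + PTK/STK known     -> use the pairwise key
 *	group-addressed + MMIE      -> IGTK, or BIGTK for beacons
 *	unprotected frame           -> only remember a candidate key so
 *	                               the frame can be dropped later if
 *	                               protection was expected
 *	anything else               -> look up GTK/WEP key by the key
 *	                               index carried in the frame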
1928 */ 1929 1930 /* start without a key */ 1931 rx->key = NULL; 1932 fc = hdr->frame_control; 1933 1934 if (rx->sta) { 1935 int keyid = rx->sta->ptk_idx; 1936 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1937 1938 if (ieee80211_has_protected(fc)) { 1939 cs = rx->sta->cipher_scheme; 1940 keyid = ieee80211_get_keyid(rx->skb, cs); 1941 1942 if (unlikely(keyid < 0)) 1943 return RX_DROP_UNUSABLE; 1944 1945 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 1946 } 1947 } 1948 1949 if (!ieee80211_has_protected(fc)) 1950 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1951 1952 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1953 rx->key = ptk_idx ? ptk_idx : sta_ptk; 1954 if ((status->flag & RX_FLAG_DECRYPTED) && 1955 (status->flag & RX_FLAG_IV_STRIPPED)) 1956 return RX_CONTINUE; 1957 /* Skip decryption if the frame is not protected. */ 1958 if (!ieee80211_has_protected(fc)) 1959 return RX_CONTINUE; 1960 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 1961 /* Broadcast/multicast robust management frame / BIP */ 1962 if ((status->flag & RX_FLAG_DECRYPTED) && 1963 (status->flag & RX_FLAG_IV_STRIPPED)) 1964 return RX_CONTINUE; 1965 1966 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 1967 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 1968 NUM_DEFAULT_BEACON_KEYS) { 1969 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1970 skb->data, 1971 skb->len); 1972 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1973 } 1974 1975 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 1976 if (!rx->key) 1977 return RX_CONTINUE; /* Beacon protection not in use */ 1978 } else if (mmie_keyidx >= 0) { 1979 /* Broadcast/multicast robust management frame / BIP */ 1980 if ((status->flag & RX_FLAG_DECRYPTED) && 1981 (status->flag & RX_FLAG_IV_STRIPPED)) 1982 return RX_CONTINUE; 1983 1984 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1985 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1986 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1987 if (rx->sta) { 1988 if (ieee80211_is_group_privacy_action(skb) && 1989 test_sta_flag(rx->sta, WLAN_STA_MFP)) 1990 return RX_DROP_MONITOR; 1991 1992 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 1993 } 1994 if (!rx->key) 1995 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 1996 } else if (!ieee80211_has_protected(fc)) { 1997 /* 1998 * The frame was not protected, so skip decryption. However, we 1999 * need to set rx->key if there is a key that could have been 2000 * used so that the frame may be dropped if encryption would 2001 * have been expected. 2002 */ 2003 struct ieee80211_key *key = NULL; 2004 struct ieee80211_sub_if_data *sdata = rx->sdata; 2005 int i; 2006 2007 if (ieee80211_is_beacon(fc)) { 2008 key = ieee80211_rx_get_bigtk(rx, -1); 2009 } else if (ieee80211_is_mgmt(fc) && 2010 is_multicast_ether_addr(hdr->addr1)) { 2011 key = rcu_dereference(rx->sdata->default_mgmt_key); 2012 } else { 2013 if (rx->sta) { 2014 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2015 key = rcu_dereference(rx->sta->gtk[i]); 2016 if (key) 2017 break; 2018 } 2019 } 2020 if (!key) { 2021 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2022 key = rcu_dereference(sdata->keys[i]); 2023 if (key) 2024 break; 2025 } 2026 } 2027 } 2028 if (key) 2029 rx->key = key; 2030 return RX_CONTINUE; 2031 } else { 2032 /* 2033 * The device doesn't give us the IV so we won't be 2034 * able to look up the key. That's ok though, we 2035 * don't need to decrypt the frame, we just won't 2036 * be able to keep statistics accurate. 
2037 * Except for key threshold notifications, should 2038 * we somehow allow the driver to tell us which key 2039 * the hardware used if this flag is set? 2040 */ 2041 if ((status->flag & RX_FLAG_DECRYPTED) && 2042 (status->flag & RX_FLAG_IV_STRIPPED)) 2043 return RX_CONTINUE; 2044 2045 keyidx = ieee80211_get_keyid(rx->skb, cs); 2046 2047 if (unlikely(keyidx < 0)) 2048 return RX_DROP_UNUSABLE; 2049 2050 /* check per-station GTK first, if multicast packet */ 2051 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 2052 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 2053 2054 /* if not found, try default key */ 2055 if (!rx->key) { 2056 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2057 2058 /* 2059 * RSNA-protected unicast frames should always be 2060 * sent with pairwise or station-to-station keys, 2061 * but for WEP we allow using a key index as well. 2062 */ 2063 if (rx->key && 2064 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2065 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2066 !is_multicast_ether_addr(hdr->addr1)) 2067 rx->key = NULL; 2068 } 2069 } 2070 2071 if (rx->key) { 2072 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2073 return RX_DROP_MONITOR; 2074 2075 /* TODO: add threshold stuff again */ 2076 } else { 2077 return RX_DROP_MONITOR; 2078 } 2079 2080 switch (rx->key->conf.cipher) { 2081 case WLAN_CIPHER_SUITE_WEP40: 2082 case WLAN_CIPHER_SUITE_WEP104: 2083 result = ieee80211_crypto_wep_decrypt(rx); 2084 break; 2085 case WLAN_CIPHER_SUITE_TKIP: 2086 result = ieee80211_crypto_tkip_decrypt(rx); 2087 break; 2088 case WLAN_CIPHER_SUITE_CCMP: 2089 result = ieee80211_crypto_ccmp_decrypt( 2090 rx, IEEE80211_CCMP_MIC_LEN); 2091 break; 2092 case WLAN_CIPHER_SUITE_CCMP_256: 2093 result = ieee80211_crypto_ccmp_decrypt( 2094 rx, IEEE80211_CCMP_256_MIC_LEN); 2095 break; 2096 case WLAN_CIPHER_SUITE_AES_CMAC: 2097 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2098 break; 2099 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2100 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2101 break; 2102 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2103 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2104 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2105 break; 2106 case WLAN_CIPHER_SUITE_GCMP: 2107 case WLAN_CIPHER_SUITE_GCMP_256: 2108 result = ieee80211_crypto_gcmp_decrypt(rx); 2109 break; 2110 default: 2111 result = ieee80211_crypto_hw_decrypt(rx); 2112 } 2113 2114 /* the hdr variable is invalid after the decrypt handlers */ 2115 2116 /* either the frame has been decrypted or will be dropped */ 2117 status->flag |= RX_FLAG_DECRYPTED; 2118 2119 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE)) 2120 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2121 skb->data, skb->len); 2122 2123 return result; 2124 } 2125 2126 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) 2127 { 2128 int i; 2129 2130 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2131 skb_queue_head_init(&cache->entries[i].skb_list); 2132 } 2133 2134 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) 2135 { 2136 int i; 2137 2138 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2139 __skb_queue_purge(&cache->entries[i].skb_list); 2140 } 2141 2142 static inline struct ieee80211_fragment_entry * 2143 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, 2144 unsigned int frag, unsigned int seq, int rx_queue, 2145 struct sk_buff **skb) 2146 { 2147 struct ieee80211_fragment_entry *entry; 2148 2149 entry = &cache->entries[cache->next++]; 2150 if (cache->next >= 
IEEE80211_FRAGMENT_MAX) 2151 cache->next = 0; 2152 2153 __skb_queue_purge(&entry->skb_list); 2154 2155 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2156 *skb = NULL; 2157 entry->first_frag_time = jiffies; 2158 entry->seq = seq; 2159 entry->rx_queue = rx_queue; 2160 entry->last_frag = frag; 2161 entry->check_sequential_pn = false; 2162 entry->extra_len = 0; 2163 2164 return entry; 2165 } 2166 2167 static inline struct ieee80211_fragment_entry * 2168 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, 2169 unsigned int frag, unsigned int seq, 2170 int rx_queue, struct ieee80211_hdr *hdr) 2171 { 2172 struct ieee80211_fragment_entry *entry; 2173 int i, idx; 2174 2175 idx = cache->next; 2176 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2177 struct ieee80211_hdr *f_hdr; 2178 struct sk_buff *f_skb; 2179 2180 idx--; 2181 if (idx < 0) 2182 idx = IEEE80211_FRAGMENT_MAX - 1; 2183 2184 entry = &cache->entries[idx]; 2185 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2186 entry->rx_queue != rx_queue || 2187 entry->last_frag + 1 != frag) 2188 continue; 2189 2190 f_skb = __skb_peek(&entry->skb_list); 2191 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2192 2193 /* 2194 * Check ftype and addresses are equal, else check next fragment 2195 */ 2196 if (((hdr->frame_control ^ f_hdr->frame_control) & 2197 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2198 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2199 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2200 continue; 2201 2202 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2203 __skb_queue_purge(&entry->skb_list); 2204 continue; 2205 } 2206 return entry; 2207 } 2208 2209 return NULL; 2210 } 2211 2212 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) 2213 { 2214 return rx->key && 2215 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2216 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2217 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2218 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2219 ieee80211_has_protected(fc); 2220 } 2221 2222 static ieee80211_rx_result debug_noinline 2223 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2224 { 2225 struct ieee80211_fragment_cache *cache = &rx->sdata->frags; 2226 struct ieee80211_hdr *hdr; 2227 u16 sc; 2228 __le16 fc; 2229 unsigned int frag, seq; 2230 struct ieee80211_fragment_entry *entry; 2231 struct sk_buff *skb; 2232 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2233 2234 hdr = (struct ieee80211_hdr *)rx->skb->data; 2235 fc = hdr->frame_control; 2236 2237 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2238 return RX_CONTINUE; 2239 2240 sc = le16_to_cpu(hdr->seq_ctrl); 2241 frag = sc & IEEE80211_SCTL_FRAG; 2242 2243 if (is_multicast_ether_addr(hdr->addr1)) { 2244 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); 2245 goto out_no_led; 2246 } 2247 2248 if (rx->sta) 2249 cache = &rx->sta->frags; 2250 2251 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2252 goto out; 2253 2254 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2255 2256 if (skb_linearize(rx->skb)) 2257 return RX_DROP_UNUSABLE; 2258 2259 /* 2260 * skb_linearize() might change the skb->data and 2261 * previously cached variables (in this case, hdr) need to 2262 * be refreshed with the new data. 2263 */ 2264 hdr = (struct ieee80211_hdr *)rx->skb->data; 2265 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2266 2267 if (frag == 0) { 2268 /* This is the first fragment of a new frame. 
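 * For CCMP/GCMP the PN of this first fragment is recorded below so
 * that every following fragment can be required to carry PN+1,
 * PN+2, ... The later increment check is a simple byte-wise carry
 * loop over the big-endian PN, in sketch form:
 *
 *	for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--)
 *		if (++pn[i])
 *			break;
 *
 * which implements the "PN values incrementing in steps of 1" rule
 * quoted further down.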
*/ 2269 entry = ieee80211_reassemble_add(cache, frag, seq, 2270 rx->seqno_idx, &(rx->skb)); 2271 if (requires_sequential_pn(rx, fc)) { 2272 int queue = rx->security_idx; 2273 2274 /* Store CCMP/GCMP PN so that we can verify that the 2275 * next fragment has a sequential PN value. 2276 */ 2277 entry->check_sequential_pn = true; 2278 entry->is_protected = true; 2279 entry->key_color = rx->key->color; 2280 memcpy(entry->last_pn, 2281 rx->key->u.ccmp.rx_pn[queue], 2282 IEEE80211_CCMP_PN_LEN); 2283 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2284 u.ccmp.rx_pn) != 2285 offsetof(struct ieee80211_key, 2286 u.gcmp.rx_pn)); 2287 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2288 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2289 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2290 IEEE80211_GCMP_PN_LEN); 2291 } else if (rx->key && 2292 (ieee80211_has_protected(fc) || 2293 (status->flag & RX_FLAG_DECRYPTED))) { 2294 entry->is_protected = true; 2295 entry->key_color = rx->key->color; 2296 } 2297 return RX_QUEUED; 2298 } 2299 2300 /* This is a fragment for a frame that should already be pending in 2301 * fragment cache. Add this fragment to the end of the pending entry. 2302 */ 2303 entry = ieee80211_reassemble_find(cache, frag, seq, 2304 rx->seqno_idx, hdr); 2305 if (!entry) { 2306 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2307 return RX_DROP_MONITOR; 2308 } 2309 2310 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2311 * MPDU PN values are not incrementing in steps of 1." 2312 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2313 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2314 */ 2315 if (entry->check_sequential_pn) { 2316 int i; 2317 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2318 2319 if (!requires_sequential_pn(rx, fc)) 2320 return RX_DROP_UNUSABLE; 2321 2322 /* Prevent mixed key and fragment cache attacks */ 2323 if (entry->key_color != rx->key->color) 2324 return RX_DROP_UNUSABLE; 2325 2326 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2327 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2328 pn[i]++; 2329 if (pn[i]) 2330 break; 2331 } 2332 2333 rpn = rx->ccm_gcm.pn; 2334 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2335 return RX_DROP_UNUSABLE; 2336 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2337 } else if (entry->is_protected && 2338 (!rx->key || 2339 (!ieee80211_has_protected(fc) && 2340 !(status->flag & RX_FLAG_DECRYPTED)) || 2341 rx->key->color != entry->key_color)) { 2342 /* Drop this as a mixed key or fragment cache attack, even 2343 * if for TKIP Michael MIC should protect us, and WEP is a 2344 * lost cause anyway. 
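 *
 * Put simply, once the first fragment was received protected, every
 * later fragment must be protected by the very same key; in
 * condensed form the check is roughly:
 *
 *	if (entry->is_protected &&
 *	    (!rx->key || rx->key->color != entry->key_color))
 *		-> drop
 *
 * since a changed key color means the key was replaced in between,
 * and mixing fragments across a rekey is exactly the attack being
 * prevented here.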
2345 */
2346 return RX_DROP_UNUSABLE;
2347 } else if (entry->is_protected && rx->key &&
2348 entry->key_color != rx->key->color &&
2349 (status->flag & RX_FLAG_DECRYPTED)) {
2350 return RX_DROP_UNUSABLE;
2351 }
2352
2353 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2354 __skb_queue_tail(&entry->skb_list, rx->skb);
2355 entry->last_frag = frag;
2356 entry->extra_len += rx->skb->len;
2357 if (ieee80211_has_morefrags(fc)) {
2358 rx->skb = NULL;
2359 return RX_QUEUED;
2360 }
2361
2362 rx->skb = __skb_dequeue(&entry->skb_list);
2363 if (skb_tailroom(rx->skb) < entry->extra_len) {
2364 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2365 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2366 GFP_ATOMIC))) {
2367 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2368 __skb_queue_purge(&entry->skb_list);
2369 return RX_DROP_UNUSABLE;
2370 }
2371 }
2372 while ((skb = __skb_dequeue(&entry->skb_list))) {
2373 skb_put_data(rx->skb, skb->data, skb->len);
2374 dev_kfree_skb(skb);
2375 }
2376
2377 out:
2378 ieee80211_led_rx(rx->local);
2379 out_no_led:
2380 if (rx->sta)
2381 rx->sta->rx_stats.packets++;
2382 return RX_CONTINUE;
2383 }
2384
2385 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2386 {
2387 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2388 return -EACCES;
2389
2390 return 0;
2391 }
2392
2393 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2394 {
2395 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2396 struct sk_buff *skb = rx->skb;
2397 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2398
2399 /*
2400 * Pass through unencrypted frames if the hardware has
2401 * decrypted them already.
2402 */
2403 if (status->flag & RX_FLAG_DECRYPTED)
2404 return 0;
2405
2406 /* check mesh EAPOL frames first */
2407 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2408 ieee80211_is_data(fc))) {
2409 struct ieee80211s_hdr *mesh_hdr;
2410 u16 hdr_len = ieee80211_hdrlen(fc);
2411 u16 ethertype_offset;
2412 __be16 ethertype;
2413
2414 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2415 goto drop_check;
2416
2417 /* make sure fixed part of mesh header is there, also checks skb len */
2418 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2419 goto drop_check;
2420
2421 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2422 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2423 sizeof(rfc1042_header);
2424
2425 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2426 ethertype == rx->sdata->control_port_protocol)
2427 return 0;
2428 }
2429
2430 drop_check:
2431 /* Drop unencrypted frames if key is set. */
2432 if (unlikely(!ieee80211_has_protected(fc) &&
2433 !ieee80211_is_any_nullfunc(fc) &&
2434 ieee80211_is_data(fc) && rx->key))
2435 return -EACCES;
2436
2437 return 0;
2438 }
2439
2440 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2441 {
2442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2443 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2444 __le16 fc = hdr->frame_control;
2445
2446 /*
2447 * Pass through unencrypted frames if the hardware has
2448 * decrypted them already.
2449 */ 2450 if (status->flag & RX_FLAG_DECRYPTED) 2451 return 0; 2452 2453 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2454 if (unlikely(!ieee80211_has_protected(fc) && 2455 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2456 rx->key)) { 2457 if (ieee80211_is_deauth(fc) || 2458 ieee80211_is_disassoc(fc)) 2459 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2460 rx->skb->data, 2461 rx->skb->len); 2462 return -EACCES; 2463 } 2464 /* BIP does not use Protected field, so need to check MMIE */ 2465 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2466 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2467 if (ieee80211_is_deauth(fc) || 2468 ieee80211_is_disassoc(fc)) 2469 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2470 rx->skb->data, 2471 rx->skb->len); 2472 return -EACCES; 2473 } 2474 if (unlikely(ieee80211_is_beacon(fc) && rx->key && 2475 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2476 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2477 rx->skb->data, 2478 rx->skb->len); 2479 return -EACCES; 2480 } 2481 /* 2482 * When using MFP, Action frames are not allowed prior to 2483 * having configured keys. 2484 */ 2485 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2486 ieee80211_is_robust_mgmt_frame(rx->skb))) 2487 return -EACCES; 2488 } 2489 2490 return 0; 2491 } 2492 2493 static int 2494 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2495 { 2496 struct ieee80211_sub_if_data *sdata = rx->sdata; 2497 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2498 bool check_port_control = false; 2499 struct ethhdr *ehdr; 2500 int ret; 2501 2502 *port_control = false; 2503 if (ieee80211_has_a4(hdr->frame_control) && 2504 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2505 return -1; 2506 2507 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2508 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2509 2510 if (!sdata->u.mgd.use_4addr) 2511 return -1; 2512 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2513 check_port_control = true; 2514 } 2515 2516 if (is_multicast_ether_addr(hdr->addr1) && 2517 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2518 return -1; 2519 2520 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2521 if (ret < 0) 2522 return ret; 2523 2524 ehdr = (struct ethhdr *) rx->skb->data; 2525 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2526 *port_control = true; 2527 else if (check_port_control) 2528 return -1; 2529 2530 return 0; 2531 } 2532 2533 /* 2534 * requires that rx->skb is a frame with ethernet header 2535 */ 2536 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2537 { 2538 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2539 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2540 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2541 2542 /* 2543 * Allow EAPOL frames to us/the PAE group address regardless of 2544 * whether the frame was encrypted or not, and always disallow 2545 * all other destination addresses for them. 
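 *
 * The group address used below, 01:80:C2:00:00:03, is the 802.1X
 * PAE group address; it falls into the 01:80:C2:00:00:0X link-local
 * block that bridges do not forward by default, which is what makes
 * accepting it here safe. In simplified form the check is:
 *
 *	if (proto == control_port_protocol)
 *		return to_us || to_pae_group_addr;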
2546 */ 2547 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) 2548 return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2549 ether_addr_equal(ehdr->h_dest, pae_group_addr); 2550 2551 if (ieee80211_802_1x_port_control(rx) || 2552 ieee80211_drop_unencrypted(rx, fc)) 2553 return false; 2554 2555 return true; 2556 } 2557 2558 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2559 struct ieee80211_rx_data *rx) 2560 { 2561 struct ieee80211_sub_if_data *sdata = rx->sdata; 2562 struct net_device *dev = sdata->dev; 2563 2564 if (unlikely((skb->protocol == sdata->control_port_protocol || 2565 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2566 !sdata->control_port_no_preauth)) && 2567 sdata->control_port_over_nl80211)) { 2568 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2569 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2570 2571 cfg80211_rx_control_port(dev, skb, noencrypt); 2572 dev_kfree_skb(skb); 2573 } else { 2574 struct ethhdr *ehdr = (void *)skb_mac_header(skb); 2575 2576 memset(skb->cb, 0, sizeof(skb->cb)); 2577 2578 /* 2579 * 802.1X over 802.11 requires that the authenticator address 2580 * be used for EAPOL frames. However, 802.1X allows the use of 2581 * the PAE group address instead. If the interface is part of 2582 * a bridge and we pass the frame with the PAE group address, 2583 * then the bridge will forward it to the network (even if the 2584 * client was not associated yet), which isn't supposed to 2585 * happen. 2586 * To avoid that, rewrite the destination address to our own 2587 * address, so that the authenticator (e.g. hostapd) will see 2588 * the frame, but bridge won't forward it anywhere else. Note 2589 * that due to earlier filtering, the only other address can 2590 * be the PAE group address. 2591 */ 2592 if (unlikely(skb->protocol == sdata->control_port_protocol && 2593 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) 2594 ether_addr_copy(ehdr->h_dest, sdata->vif.addr); 2595 2596 /* deliver to local stack */ 2597 if (rx->list) 2598 list_add_tail(&skb->list, rx->list); 2599 else 2600 netif_receive_skb(skb); 2601 } 2602 } 2603 2604 /* 2605 * requires that rx->skb is a frame with ethernet header 2606 */ 2607 static void 2608 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2609 { 2610 struct ieee80211_sub_if_data *sdata = rx->sdata; 2611 struct net_device *dev = sdata->dev; 2612 struct sk_buff *skb, *xmit_skb; 2613 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2614 struct sta_info *dsta; 2615 2616 skb = rx->skb; 2617 xmit_skb = NULL; 2618 2619 dev_sw_netstats_rx_add(dev, skb->len); 2620 2621 if (rx->sta) { 2622 /* The seqno index has the same property as needed 2623 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2624 * for non-QoS-data frames. Here we know it's a data 2625 * frame, so count MSDUs. 
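 *
 * The 64-bit MSDU counter is updated under the u64_stats sequence so
 * that 32-bit readers see a consistent value; the usual pattern, as
 * used right below, is:
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->msdu[tid]++;
 *	u64_stats_update_end(&stats->syncp);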
2626 */ 2627 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2628 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2629 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2630 } 2631 2632 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2633 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2634 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2635 ehdr->h_proto != rx->sdata->control_port_protocol && 2636 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2637 if (is_multicast_ether_addr(ehdr->h_dest) && 2638 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2639 /* 2640 * send multicast frames both to higher layers in 2641 * local net stack and back to the wireless medium 2642 */ 2643 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2644 if (!xmit_skb) 2645 net_info_ratelimited("%s: failed to clone multicast frame\n", 2646 dev->name); 2647 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2648 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2649 dsta = sta_info_get(sdata, ehdr->h_dest); 2650 if (dsta) { 2651 /* 2652 * The destination station is associated to 2653 * this AP (in this VLAN), so send the frame 2654 * directly to it and do not pass it to local 2655 * net stack. 2656 */ 2657 xmit_skb = skb; 2658 skb = NULL; 2659 } 2660 } 2661 } 2662 2663 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2664 if (skb) { 2665 /* 'align' will only take the values 0 or 2 here since all 2666 * frames are required to be aligned to 2-byte boundaries 2667 * when being passed to mac80211; the code here works just 2668 * as well if that isn't true, but mac80211 assumes it can 2669 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2670 */ 2671 int align; 2672 2673 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2674 if (align) { 2675 if (WARN_ON(skb_headroom(skb) < 3)) { 2676 dev_kfree_skb(skb); 2677 skb = NULL; 2678 } else { 2679 u8 *data = skb->data; 2680 size_t len = skb_headlen(skb); 2681 skb->data -= align; 2682 memmove(skb->data, data, len); 2683 skb_set_tail_pointer(skb, len); 2684 } 2685 } 2686 } 2687 #endif 2688 2689 if (skb) { 2690 skb->protocol = eth_type_trans(skb, dev); 2691 ieee80211_deliver_skb_to_local_stack(skb, rx); 2692 } 2693 2694 if (xmit_skb) { 2695 /* 2696 * Send to wireless media and increase priority by 256 to 2697 * keep the received priority instead of reclassifying 2698 * the frame (see cfg80211_classify8021d). 
2699 */
2700 xmit_skb->priority += 256;
2701 xmit_skb->protocol = htons(ETH_P_802_3);
2702 skb_reset_network_header(xmit_skb);
2703 skb_reset_mac_header(xmit_skb);
2704 dev_queue_xmit(xmit_skb);
2705 }
2706 }
2707
2708 static ieee80211_rx_result debug_noinline
2709 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2710 {
2711 struct net_device *dev = rx->sdata->dev;
2712 struct sk_buff *skb = rx->skb;
2713 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2714 __le16 fc = hdr->frame_control;
2715 struct sk_buff_head frame_list;
2716 struct ethhdr ethhdr;
2717 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2718
2719 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2720 check_da = NULL;
2721 check_sa = NULL;
2722 } else switch (rx->sdata->vif.type) {
2723 case NL80211_IFTYPE_AP:
2724 case NL80211_IFTYPE_AP_VLAN:
2725 check_da = NULL;
2726 break;
2727 case NL80211_IFTYPE_STATION:
2728 if (!rx->sta ||
2729 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2730 check_sa = NULL;
2731 break;
2732 case NL80211_IFTYPE_MESH_POINT:
2733 check_sa = NULL;
2734 break;
2735 default:
2736 break;
2737 }
2738
2739 skb->dev = dev;
2740 __skb_queue_head_init(&frame_list);
2741
2742 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2743 rx->sdata->vif.addr,
2744 rx->sdata->vif.type,
2745 data_offset, true))
2746 return RX_DROP_UNUSABLE;
2747
2748 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2749 rx->sdata->vif.type,
2750 rx->local->hw.extra_tx_headroom,
2751 check_da, check_sa);
2752
2753 while (!skb_queue_empty(&frame_list)) {
2754 rx->skb = __skb_dequeue(&frame_list);
2755
2756 if (!ieee80211_frame_allowed(rx, fc)) {
2757 dev_kfree_skb(rx->skb);
2758 continue;
2759 }
2760
2761 ieee80211_deliver_skb(rx);
2762 }
2763
2764 return RX_QUEUED;
2765 }
2766
2767 static ieee80211_rx_result debug_noinline
2768 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2769 {
2770 struct sk_buff *skb = rx->skb;
2771 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2772 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2773 __le16 fc = hdr->frame_control;
2774
2775 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2776 return RX_CONTINUE;
2777
2778 if (unlikely(!ieee80211_is_data(fc)))
2779 return RX_CONTINUE;
2780
2781 if (unlikely(!ieee80211_is_data_present(fc)))
2782 return RX_DROP_MONITOR;
2783
2784 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2785 switch (rx->sdata->vif.type) {
2786 case NL80211_IFTYPE_AP_VLAN:
2787 if (!rx->sdata->u.vlan.sta)
2788 return RX_DROP_UNUSABLE;
2789 break;
2790 case NL80211_IFTYPE_STATION:
2791 if (!rx->sdata->u.mgd.use_4addr)
2792 return RX_DROP_UNUSABLE;
2793 break;
2794 default:
2795 return RX_DROP_UNUSABLE;
2796 }
2797 }
2798
2799 if (is_multicast_ether_addr(hdr->addr1))
2800 return RX_DROP_UNUSABLE;
2801
2802 if (rx->key) {
2803 /*
2804 * We should not receive A-MSDUs on pre-HT connections,
2805 * and HT connections cannot use old ciphers. Thus drop
2806 * them, as in those cases we couldn't even have SPP
2807 * A-MSDUs or such.
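 *
 * In table form, the switch below amounts to:
 *
 *	WEP40 / WEP104 / TKIP   -> RX_DROP_UNUSABLE
 *	any newer cipher        -> fall through to A-MSDU deaggregation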
2808 */ 2809 switch (rx->key->conf.cipher) { 2810 case WLAN_CIPHER_SUITE_WEP40: 2811 case WLAN_CIPHER_SUITE_WEP104: 2812 case WLAN_CIPHER_SUITE_TKIP: 2813 return RX_DROP_UNUSABLE; 2814 default: 2815 break; 2816 } 2817 } 2818 2819 return __ieee80211_rx_h_amsdu(rx, 0); 2820 } 2821 2822 #ifdef CONFIG_MAC80211_MESH 2823 static ieee80211_rx_result 2824 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2825 { 2826 struct ieee80211_hdr *fwd_hdr, *hdr; 2827 struct ieee80211_tx_info *info; 2828 struct ieee80211s_hdr *mesh_hdr; 2829 struct sk_buff *skb = rx->skb, *fwd_skb; 2830 struct ieee80211_local *local = rx->local; 2831 struct ieee80211_sub_if_data *sdata = rx->sdata; 2832 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2833 u16 ac, q, hdrlen; 2834 int tailroom = 0; 2835 2836 hdr = (struct ieee80211_hdr *) skb->data; 2837 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2838 2839 /* make sure fixed part of mesh header is there, also checks skb len */ 2840 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2841 return RX_DROP_MONITOR; 2842 2843 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2844 2845 /* make sure full mesh header is there, also checks skb len */ 2846 if (!pskb_may_pull(rx->skb, 2847 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2848 return RX_DROP_MONITOR; 2849 2850 /* reload pointers */ 2851 hdr = (struct ieee80211_hdr *) skb->data; 2852 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2853 2854 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2855 return RX_DROP_MONITOR; 2856 2857 /* frame is in RMC, don't forward */ 2858 if (ieee80211_is_data(hdr->frame_control) && 2859 is_multicast_ether_addr(hdr->addr1) && 2860 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2861 return RX_DROP_MONITOR; 2862 2863 if (!ieee80211_is_data(hdr->frame_control)) 2864 return RX_CONTINUE; 2865 2866 if (!mesh_hdr->ttl) 2867 return RX_DROP_MONITOR; 2868 2869 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2870 struct mesh_path *mppath; 2871 char *proxied_addr; 2872 char *mpp_addr; 2873 2874 if (is_multicast_ether_addr(hdr->addr1)) { 2875 mpp_addr = hdr->addr3; 2876 proxied_addr = mesh_hdr->eaddr1; 2877 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2878 MESH_FLAGS_AE_A5_A6) { 2879 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2880 mpp_addr = hdr->addr4; 2881 proxied_addr = mesh_hdr->eaddr2; 2882 } else { 2883 return RX_DROP_MONITOR; 2884 } 2885 2886 rcu_read_lock(); 2887 mppath = mpp_path_lookup(sdata, proxied_addr); 2888 if (!mppath) { 2889 mpp_path_add(sdata, proxied_addr, mpp_addr); 2890 } else { 2891 spin_lock_bh(&mppath->state_lock); 2892 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2893 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2894 mppath->exp_time = jiffies; 2895 spin_unlock_bh(&mppath->state_lock); 2896 } 2897 rcu_read_unlock(); 2898 } 2899 2900 /* Frame has reached destination. 
Don't forward */ 2901 if (!is_multicast_ether_addr(hdr->addr1) && 2902 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2903 return RX_CONTINUE; 2904 2905 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2906 q = sdata->vif.hw_queue[ac]; 2907 if (ieee80211_queue_stopped(&local->hw, q)) { 2908 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2909 return RX_DROP_MONITOR; 2910 } 2911 skb_set_queue_mapping(skb, q); 2912 2913 if (!--mesh_hdr->ttl) { 2914 if (!is_multicast_ether_addr(hdr->addr1)) 2915 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, 2916 dropped_frames_ttl); 2917 goto out; 2918 } 2919 2920 if (!ifmsh->mshcfg.dot11MeshForwarding) 2921 goto out; 2922 2923 if (sdata->crypto_tx_tailroom_needed_cnt) 2924 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2925 2926 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2927 sdata->encrypt_headroom, 2928 tailroom, GFP_ATOMIC); 2929 if (!fwd_skb) 2930 goto out; 2931 2932 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2933 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2934 info = IEEE80211_SKB_CB(fwd_skb); 2935 memset(info, 0, sizeof(*info)); 2936 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 2937 info->control.vif = &rx->sdata->vif; 2938 info->control.jiffies = jiffies; 2939 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2940 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2941 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2942 /* update power mode indication when forwarding */ 2943 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2944 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2945 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2946 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2947 } else { 2948 /* unable to resolve next hop */ 2949 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2950 fwd_hdr->addr3, 0, 2951 WLAN_REASON_MESH_PATH_NOFORWARD, 2952 fwd_hdr->addr2); 2953 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2954 kfree_skb(fwd_skb); 2955 return RX_DROP_MONITOR; 2956 } 2957 2958 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2959 ieee80211_add_pending_skb(local, fwd_skb); 2960 out: 2961 if (is_multicast_ether_addr(hdr->addr1)) 2962 return RX_CONTINUE; 2963 return RX_DROP_MONITOR; 2964 } 2965 #endif 2966 2967 static ieee80211_rx_result debug_noinline 2968 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2969 { 2970 struct ieee80211_sub_if_data *sdata = rx->sdata; 2971 struct ieee80211_local *local = rx->local; 2972 struct net_device *dev = sdata->dev; 2973 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2974 __le16 fc = hdr->frame_control; 2975 bool port_control; 2976 int err; 2977 2978 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2979 return RX_CONTINUE; 2980 2981 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2982 return RX_DROP_MONITOR; 2983 2984 /* 2985 * Send unexpected-4addr-frame event to hostapd. For older versions, 2986 * also drop the frame to cooked monitor interfaces. 
2987 */ 2988 if (ieee80211_has_a4(hdr->frame_control) && 2989 sdata->vif.type == NL80211_IFTYPE_AP) { 2990 if (rx->sta && 2991 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2992 cfg80211_rx_unexpected_4addr_frame( 2993 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2994 return RX_DROP_MONITOR; 2995 } 2996 2997 err = __ieee80211_data_to_8023(rx, &port_control); 2998 if (unlikely(err)) 2999 return RX_DROP_UNUSABLE; 3000 3001 if (!ieee80211_frame_allowed(rx, fc)) 3002 return RX_DROP_MONITOR; 3003 3004 /* directly handle TDLS channel switch requests/responses */ 3005 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 3006 cpu_to_be16(ETH_P_TDLS))) { 3007 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 3008 3009 if (pskb_may_pull(rx->skb, 3010 offsetof(struct ieee80211_tdls_data, u)) && 3011 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 3012 tf->category == WLAN_CATEGORY_TDLS && 3013 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 3014 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 3015 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 3016 schedule_work(&local->tdls_chsw_work); 3017 if (rx->sta) 3018 rx->sta->rx_stats.packets++; 3019 3020 return RX_QUEUED; 3021 } 3022 } 3023 3024 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 3025 unlikely(port_control) && sdata->bss) { 3026 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 3027 u.ap); 3028 dev = sdata->dev; 3029 rx->sdata = sdata; 3030 } 3031 3032 rx->skb->dev = dev; 3033 3034 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 3035 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 3036 !is_multicast_ether_addr( 3037 ((struct ethhdr *)rx->skb->data)->h_dest) && 3038 (!local->scanning && 3039 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 3040 mod_timer(&local->dynamic_ps_timer, jiffies + 3041 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 3042 3043 ieee80211_deliver_skb(rx); 3044 3045 return RX_QUEUED; 3046 } 3047 3048 static ieee80211_rx_result debug_noinline 3049 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 3050 { 3051 struct sk_buff *skb = rx->skb; 3052 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 3053 struct tid_ampdu_rx *tid_agg_rx; 3054 u16 start_seq_num; 3055 u16 tid; 3056 3057 if (likely(!ieee80211_is_ctl(bar->frame_control))) 3058 return RX_CONTINUE; 3059 3060 if (ieee80211_is_back_req(bar->frame_control)) { 3061 struct { 3062 __le16 control, start_seq_num; 3063 } __packed bar_data; 3064 struct ieee80211_event event = { 3065 .type = BAR_RX_EVENT, 3066 }; 3067 3068 if (!rx->sta) 3069 return RX_DROP_MONITOR; 3070 3071 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 3072 &bar_data, sizeof(bar_data))) 3073 return RX_DROP_MONITOR; 3074 3075 tid = le16_to_cpu(bar_data.control) >> 12; 3076 3077 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3078 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3079 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3080 WLAN_BACK_RECIPIENT, 3081 WLAN_REASON_QSTA_REQUIRE_SETUP); 3082 3083 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3084 if (!tid_agg_rx) 3085 return RX_DROP_MONITOR; 3086 3087 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3088 event.u.ba.tid = tid; 3089 event.u.ba.ssn = start_seq_num; 3090 event.u.ba.sta = &rx->sta->sta; 3091 3092 /* reset session timer */ 3093 if (tid_agg_rx->timeout) 3094 mod_timer(&tid_agg_rx->session_timer, 3095 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3096 
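/*
 * With the session timer refreshed, release everything buffered in
 * the reorder window up to (but not including) the BAR's starting
 * sequence number, which was extracted above as
 *
 *	start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 *
 * i.e. with the fragment bits stripped from the sequence control
 * field.
 */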
3097 spin_lock(&tid_agg_rx->reorder_lock); 3098 /* release stored frames up to start of BAR */ 3099 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3100 start_seq_num, frames); 3101 spin_unlock(&tid_agg_rx->reorder_lock); 3102 3103 drv_event_callback(rx->local, rx->sdata, &event); 3104 3105 kfree_skb(skb); 3106 return RX_QUEUED; 3107 } 3108 3109 /* 3110 * After this point, we only want management frames, 3111 * so we can drop all remaining control frames to 3112 * cooked monitor interfaces. 3113 */ 3114 return RX_DROP_MONITOR; 3115 } 3116 3117 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3118 struct ieee80211_mgmt *mgmt, 3119 size_t len) 3120 { 3121 struct ieee80211_local *local = sdata->local; 3122 struct sk_buff *skb; 3123 struct ieee80211_mgmt *resp; 3124 3125 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3126 /* Not to own unicast address */ 3127 return; 3128 } 3129 3130 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 3131 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 3132 /* Not from the current AP or not associated yet. */ 3133 return; 3134 } 3135 3136 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3137 /* Too short SA Query request frame */ 3138 return; 3139 } 3140 3141 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3142 if (skb == NULL) 3143 return; 3144 3145 skb_reserve(skb, local->hw.extra_tx_headroom); 3146 resp = skb_put_zero(skb, 24); 3147 memcpy(resp->da, mgmt->sa, ETH_ALEN); 3148 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3149 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 3150 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3151 IEEE80211_STYPE_ACTION); 3152 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3153 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3154 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3155 memcpy(resp->u.action.u.sa_query.trans_id, 3156 mgmt->u.action.u.sa_query.trans_id, 3157 WLAN_SA_QUERY_TR_ID_LEN); 3158 3159 ieee80211_tx_skb(sdata, skb); 3160 } 3161 3162 static ieee80211_rx_result debug_noinline 3163 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3164 { 3165 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3166 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3167 3168 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3169 return RX_CONTINUE; 3170 3171 /* 3172 * From here on, look only at management frames. 3173 * Data and control frames are already handled, 3174 * and unknown (reserved) frames are useless. 
3175 */ 3176 if (rx->skb->len < 24) 3177 return RX_DROP_MONITOR; 3178 3179 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3180 return RX_DROP_MONITOR; 3181 3182 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3183 ieee80211_is_beacon(mgmt->frame_control) && 3184 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3185 int sig = 0; 3186 3187 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3188 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3189 sig = status->signal; 3190 3191 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, 3192 rx->skb->data, rx->skb->len, 3193 ieee80211_rx_status_to_khz(status), 3194 sig); 3195 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3196 } 3197 3198 if (ieee80211_drop_unencrypted_mgmt(rx)) 3199 return RX_DROP_UNUSABLE; 3200 3201 return RX_CONTINUE; 3202 } 3203 3204 static ieee80211_rx_result debug_noinline 3205 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3206 { 3207 struct ieee80211_local *local = rx->local; 3208 struct ieee80211_sub_if_data *sdata = rx->sdata; 3209 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3210 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3211 int len = rx->skb->len; 3212 3213 if (!ieee80211_is_action(mgmt->frame_control)) 3214 return RX_CONTINUE; 3215 3216 /* drop too small frames */ 3217 if (len < IEEE80211_MIN_ACTION_SIZE) 3218 return RX_DROP_UNUSABLE; 3219 3220 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3221 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3222 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3223 return RX_DROP_UNUSABLE; 3224 3225 switch (mgmt->u.action.category) { 3226 case WLAN_CATEGORY_HT: 3227 /* reject HT action frames from stations not supporting HT */ 3228 if (!rx->sta->sta.ht_cap.ht_supported) 3229 goto invalid; 3230 3231 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3232 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3233 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3234 sdata->vif.type != NL80211_IFTYPE_AP && 3235 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3236 break; 3237 3238 /* verify action & smps_control/chanwidth are present */ 3239 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3240 goto invalid; 3241 3242 switch (mgmt->u.action.u.ht_smps.action) { 3243 case WLAN_HT_ACTION_SMPS: { 3244 struct ieee80211_supported_band *sband; 3245 enum ieee80211_smps_mode smps_mode; 3246 struct sta_opmode_info sta_opmode = {}; 3247 3248 if (sdata->vif.type != NL80211_IFTYPE_AP && 3249 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3250 goto handled; 3251 3252 /* convert to HT capability */ 3253 switch (mgmt->u.action.u.ht_smps.smps_control) { 3254 case WLAN_HT_SMPS_CONTROL_DISABLED: 3255 smps_mode = IEEE80211_SMPS_OFF; 3256 break; 3257 case WLAN_HT_SMPS_CONTROL_STATIC: 3258 smps_mode = IEEE80211_SMPS_STATIC; 3259 break; 3260 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3261 smps_mode = IEEE80211_SMPS_DYNAMIC; 3262 break; 3263 default: 3264 goto invalid; 3265 } 3266 3267 /* if no change do nothing */ 3268 if (rx->sta->sta.smps_mode == smps_mode) 3269 goto handled; 3270 rx->sta->sta.smps_mode = smps_mode; 3271 sta_opmode.smps_mode = 3272 ieee80211_smps_mode_to_smps_mode(smps_mode); 3273 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3274 3275 sband = rx->local->hw.wiphy->bands[status->band]; 3276 3277 rate_control_rate_update(local, sband, rx->sta, 3278 IEEE80211_RC_SMPS_CHANGED); 3279 cfg80211_sta_opmode_change_notify(sdata->dev, 3280 rx->sta->addr, 3281 &sta_opmode, 3282 GFP_ATOMIC); 3283 goto handled; 3284 } 3285 case 
WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3286 struct ieee80211_supported_band *sband; 3287 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3288 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 3289 struct sta_opmode_info sta_opmode = {}; 3290 3291 /* If it doesn't support 40 MHz it can't change ... */ 3292 if (!(rx->sta->sta.ht_cap.cap & 3293 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3294 goto handled; 3295 3296 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3297 max_bw = IEEE80211_STA_RX_BW_20; 3298 else 3299 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3300 3301 /* set cur_max_bandwidth and recalc sta bw */ 3302 rx->sta->cur_max_bandwidth = max_bw; 3303 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3304 3305 if (rx->sta->sta.bandwidth == new_bw) 3306 goto handled; 3307 3308 rx->sta->sta.bandwidth = new_bw; 3309 sband = rx->local->hw.wiphy->bands[status->band]; 3310 sta_opmode.bw = 3311 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3312 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3313 3314 rate_control_rate_update(local, sband, rx->sta, 3315 IEEE80211_RC_BW_CHANGED); 3316 cfg80211_sta_opmode_change_notify(sdata->dev, 3317 rx->sta->addr, 3318 &sta_opmode, 3319 GFP_ATOMIC); 3320 goto handled; 3321 } 3322 default: 3323 goto invalid; 3324 } 3325 3326 break; 3327 case WLAN_CATEGORY_PUBLIC: 3328 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3329 goto invalid; 3330 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3331 break; 3332 if (!rx->sta) 3333 break; 3334 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3335 break; 3336 if (mgmt->u.action.u.ext_chan_switch.action_code != 3337 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3338 break; 3339 if (len < offsetof(struct ieee80211_mgmt, 3340 u.action.u.ext_chan_switch.variable)) 3341 goto invalid; 3342 goto queue; 3343 case WLAN_CATEGORY_VHT: 3344 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3345 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3346 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3347 sdata->vif.type != NL80211_IFTYPE_AP && 3348 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3349 break; 3350 3351 /* verify action code is present */ 3352 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3353 goto invalid; 3354 3355 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3356 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3357 /* verify opmode is present */ 3358 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3359 goto invalid; 3360 goto queue; 3361 } 3362 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3363 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3364 goto invalid; 3365 goto queue; 3366 } 3367 default: 3368 break; 3369 } 3370 break; 3371 case WLAN_CATEGORY_BACK: 3372 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3373 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3374 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3375 sdata->vif.type != NL80211_IFTYPE_AP && 3376 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3377 break; 3378 3379 /* verify action_code is present */ 3380 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3381 break; 3382 3383 switch (mgmt->u.action.u.addba_req.action_code) { 3384 case WLAN_ACTION_ADDBA_REQ: 3385 if (len < (IEEE80211_MIN_ACTION_SIZE + 3386 sizeof(mgmt->u.action.u.addba_req))) 3387 goto invalid; 3388 break; 3389 case WLAN_ACTION_ADDBA_RESP: 3390 if (len < (IEEE80211_MIN_ACTION_SIZE + 3391 sizeof(mgmt->u.action.u.addba_resp))) 3392 goto invalid; 3393 break; 3394 case WLAN_ACTION_DELBA: 3395 if (len < (IEEE80211_MIN_ACTION_SIZE + 3396 sizeof(mgmt->u.action.u.delba))) 3397 goto invalid; 3398 break; 3399 default: 3400 goto invalid; 3401 } 3402 3403 goto queue; 3404 case 
WLAN_CATEGORY_SPECTRUM_MGMT: 3405 /* verify action_code is present */ 3406 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3407 break; 3408 3409 switch (mgmt->u.action.u.measurement.action_code) { 3410 case WLAN_ACTION_SPCT_MSR_REQ: 3411 if (status->band != NL80211_BAND_5GHZ) 3412 break; 3413 3414 if (len < (IEEE80211_MIN_ACTION_SIZE + 3415 sizeof(mgmt->u.action.u.measurement))) 3416 break; 3417 3418 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3419 break; 3420 3421 ieee80211_process_measurement_req(sdata, mgmt, len); 3422 goto handled; 3423 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3424 u8 *bssid; 3425 if (len < (IEEE80211_MIN_ACTION_SIZE + 3426 sizeof(mgmt->u.action.u.chan_switch))) 3427 break; 3428 3429 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3430 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3431 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3432 break; 3433 3434 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3435 bssid = sdata->u.mgd.bssid; 3436 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3437 bssid = sdata->u.ibss.bssid; 3438 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3439 bssid = mgmt->sa; 3440 else 3441 break; 3442 3443 if (!ether_addr_equal(mgmt->bssid, bssid)) 3444 break; 3445 3446 goto queue; 3447 } 3448 } 3449 break; 3450 case WLAN_CATEGORY_SELF_PROTECTED: 3451 if (len < (IEEE80211_MIN_ACTION_SIZE + 3452 sizeof(mgmt->u.action.u.self_prot.action_code))) 3453 break; 3454 3455 switch (mgmt->u.action.u.self_prot.action_code) { 3456 case WLAN_SP_MESH_PEERING_OPEN: 3457 case WLAN_SP_MESH_PEERING_CLOSE: 3458 case WLAN_SP_MESH_PEERING_CONFIRM: 3459 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3460 goto invalid; 3461 if (sdata->u.mesh.user_mpm) 3462 /* userspace handles this frame */ 3463 break; 3464 goto queue; 3465 case WLAN_SP_MGK_INFORM: 3466 case WLAN_SP_MGK_ACK: 3467 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3468 goto invalid; 3469 break; 3470 } 3471 break; 3472 case WLAN_CATEGORY_MESH_ACTION: 3473 if (len < (IEEE80211_MIN_ACTION_SIZE + 3474 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3475 break; 3476 3477 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3478 break; 3479 if (mesh_action_is_path_sel(mgmt) && 3480 !mesh_path_sel_is_hwmp(sdata)) 3481 break; 3482 goto queue; 3483 } 3484 3485 return RX_CONTINUE; 3486 3487 invalid: 3488 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3489 /* will return in the next handlers */ 3490 return RX_CONTINUE; 3491 3492 handled: 3493 if (rx->sta) 3494 rx->sta->rx_stats.packets++; 3495 dev_kfree_skb(rx->skb); 3496 return RX_QUEUED; 3497 3498 queue: 3499 skb_queue_tail(&sdata->skb_queue, rx->skb); 3500 ieee80211_queue_work(&local->hw, &sdata->work); 3501 if (rx->sta) 3502 rx->sta->rx_stats.packets++; 3503 return RX_QUEUED; 3504 } 3505 3506 static ieee80211_rx_result debug_noinline 3507 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3508 { 3509 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3510 int sig = 0; 3511 3512 /* skip known-bad action frames and return them in the next handler */ 3513 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3514 return RX_CONTINUE; 3515 3516 /* 3517 * Getting here means the kernel doesn't know how to handle 3518 * it, but maybe userspace does ... include returned frames 3519 * so userspace can register for those to know whether ones 3520 * it transmitted were processed or returned. 
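 *
 * cfg80211_rx_mgmt_khz() returns true only if some userspace
 * registration matched this frame, so in sketch form the handler
 * below is:
 *
 *	if (cfg80211_rx_mgmt_khz(wdev, freq_khz, sig, data, len, 0)) {
 *		dev_kfree_skb(rx->skb);
 *		return RX_QUEUED;
 *	}
 *	return RX_CONTINUE;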
3521 */ 3522 3523 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3524 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3525 sig = status->signal; 3526 3527 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev, 3528 ieee80211_rx_status_to_khz(status), sig, 3529 rx->skb->data, rx->skb->len, 0)) { 3530 if (rx->sta) 3531 rx->sta->rx_stats.packets++; 3532 dev_kfree_skb(rx->skb); 3533 return RX_QUEUED; 3534 } 3535 3536 return RX_CONTINUE; 3537 } 3538 3539 static ieee80211_rx_result debug_noinline 3540 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 3541 { 3542 struct ieee80211_sub_if_data *sdata = rx->sdata; 3543 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3544 int len = rx->skb->len; 3545 3546 if (!ieee80211_is_action(mgmt->frame_control)) 3547 return RX_CONTINUE; 3548 3549 switch (mgmt->u.action.category) { 3550 case WLAN_CATEGORY_SA_QUERY: 3551 if (len < (IEEE80211_MIN_ACTION_SIZE + 3552 sizeof(mgmt->u.action.u.sa_query))) 3553 break; 3554 3555 switch (mgmt->u.action.u.sa_query.action) { 3556 case WLAN_ACTION_SA_QUERY_REQUEST: 3557 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3558 break; 3559 ieee80211_process_sa_query_req(sdata, mgmt, len); 3560 goto handled; 3561 } 3562 break; 3563 } 3564 3565 return RX_CONTINUE; 3566 3567 handled: 3568 if (rx->sta) 3569 rx->sta->rx_stats.packets++; 3570 dev_kfree_skb(rx->skb); 3571 return RX_QUEUED; 3572 } 3573 3574 static ieee80211_rx_result debug_noinline 3575 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3576 { 3577 struct ieee80211_local *local = rx->local; 3578 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3579 struct sk_buff *nskb; 3580 struct ieee80211_sub_if_data *sdata = rx->sdata; 3581 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3582 3583 if (!ieee80211_is_action(mgmt->frame_control)) 3584 return RX_CONTINUE; 3585 3586 /* 3587 * For AP mode, hostapd is responsible for handling any action 3588 * frames that we didn't handle, including returning unknown 3589 * ones. For all other modes we will return them to the sender, 3590 * setting the 0x80 bit in the action category, as required by 3591 * 802.11-2012 9.24.4. 3592 * Newer versions of hostapd shall also use the management frame 3593 * registration mechanisms, but older ones still use cooked 3594 * monitor interfaces so push all frames there. 
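 *
 * "Returning" a frame means copying it, swapping DA and SA, setting
 * bit 7 of the action category (the "not supported" indication) and
 * transmitting it back on the same band, i.e. essentially:
 *
 *	nmgmt->u.action.category |= 0x80;
 *	memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
 *	memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);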
3595 */ 3596 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3597 (sdata->vif.type == NL80211_IFTYPE_AP || 3598 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3599 return RX_DROP_MONITOR; 3600 3601 if (is_multicast_ether_addr(mgmt->da)) 3602 return RX_DROP_MONITOR; 3603 3604 /* do not return rejected action frames */ 3605 if (mgmt->u.action.category & 0x80) 3606 return RX_DROP_UNUSABLE; 3607 3608 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3609 GFP_ATOMIC); 3610 if (nskb) { 3611 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3612 3613 nmgmt->u.action.category |= 0x80; 3614 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3615 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3616 3617 memset(nskb->cb, 0, sizeof(nskb->cb)); 3618 3619 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3620 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3621 3622 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3623 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3624 IEEE80211_TX_CTL_NO_CCK_RATE; 3625 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3626 info->hw_queue = 3627 local->hw.offchannel_tx_hw_queue; 3628 } 3629 3630 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3631 status->band); 3632 } 3633 dev_kfree_skb(rx->skb); 3634 return RX_QUEUED; 3635 } 3636 3637 static ieee80211_rx_result debug_noinline 3638 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 3639 { 3640 struct ieee80211_sub_if_data *sdata = rx->sdata; 3641 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 3642 3643 if (!ieee80211_is_ext(hdr->frame_control)) 3644 return RX_CONTINUE; 3645 3646 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3647 return RX_DROP_MONITOR; 3648 3649 /* for now only beacons are ext, so queue them */ 3650 skb_queue_tail(&sdata->skb_queue, rx->skb); 3651 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3652 if (rx->sta) 3653 rx->sta->rx_stats.packets++; 3654 3655 return RX_QUEUED; 3656 } 3657 3658 static ieee80211_rx_result debug_noinline 3659 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3660 { 3661 struct ieee80211_sub_if_data *sdata = rx->sdata; 3662 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3663 __le16 stype; 3664 3665 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3666 3667 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3668 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3669 sdata->vif.type != NL80211_IFTYPE_OCB && 3670 sdata->vif.type != NL80211_IFTYPE_STATION) 3671 return RX_DROP_MONITOR; 3672 3673 switch (stype) { 3674 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3675 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3676 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3677 /* process for all: mesh, mlme, ibss */ 3678 break; 3679 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3680 if (is_multicast_ether_addr(mgmt->da) && 3681 !is_broadcast_ether_addr(mgmt->da)) 3682 return RX_DROP_MONITOR; 3683 3684 /* process only for station/IBSS */ 3685 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3686 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3687 return RX_DROP_MONITOR; 3688 break; 3689 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3690 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3691 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3692 if (is_multicast_ether_addr(mgmt->da) && 3693 !is_broadcast_ether_addr(mgmt->da)) 3694 return RX_DROP_MONITOR; 3695 3696 /* process only for station */ 3697 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3698 return RX_DROP_MONITOR; 3699 break; 3700 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3701 /* process only for ibss and mesh */ 3702 if 
(sdata->vif.type != NL80211_IFTYPE_ADHOC && 3703 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3704 return RX_DROP_MONITOR; 3705 break; 3706 default: 3707 return RX_DROP_MONITOR; 3708 } 3709 3710 /* queue up frame and kick off work to process it */ 3711 skb_queue_tail(&sdata->skb_queue, rx->skb); 3712 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3713 if (rx->sta) 3714 rx->sta->rx_stats.packets++; 3715 3716 return RX_QUEUED; 3717 } 3718 3719 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3720 struct ieee80211_rate *rate) 3721 { 3722 struct ieee80211_sub_if_data *sdata; 3723 struct ieee80211_local *local = rx->local; 3724 struct sk_buff *skb = rx->skb, *skb2; 3725 struct net_device *prev_dev = NULL; 3726 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3727 int needed_headroom; 3728 3729 /* 3730 * If cooked monitor has been processed already, then 3731 * don't do it again. If not, set the flag. 3732 */ 3733 if (rx->flags & IEEE80211_RX_CMNTR) 3734 goto out_free_skb; 3735 rx->flags |= IEEE80211_RX_CMNTR; 3736 3737 /* If there are no cooked monitor interfaces, just free the SKB */ 3738 if (!local->cooked_mntrs) 3739 goto out_free_skb; 3740 3741 /* vendor data is long removed here */ 3742 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3743 /* room for the radiotap header based on driver features */ 3744 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3745 3746 if (skb_headroom(skb) < needed_headroom && 3747 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3748 goto out_free_skb; 3749 3750 /* prepend radiotap information */ 3751 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3752 false); 3753 3754 skb_reset_mac_header(skb); 3755 skb->ip_summed = CHECKSUM_UNNECESSARY; 3756 skb->pkt_type = PACKET_OTHERHOST; 3757 skb->protocol = htons(ETH_P_802_2); 3758 3759 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3760 if (!ieee80211_sdata_running(sdata)) 3761 continue; 3762 3763 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3764 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3765 continue; 3766 3767 if (prev_dev) { 3768 skb2 = skb_clone(skb, GFP_ATOMIC); 3769 if (skb2) { 3770 skb2->dev = prev_dev; 3771 netif_receive_skb(skb2); 3772 } 3773 } 3774 3775 prev_dev = sdata->dev; 3776 dev_sw_netstats_rx_add(sdata->dev, skb->len); 3777 } 3778 3779 if (prev_dev) { 3780 skb->dev = prev_dev; 3781 netif_receive_skb(skb); 3782 return; 3783 } 3784 3785 out_free_skb: 3786 dev_kfree_skb(skb); 3787 } 3788 3789 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3790 ieee80211_rx_result res) 3791 { 3792 switch (res) { 3793 case RX_DROP_MONITOR: 3794 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3795 if (rx->sta) 3796 rx->sta->rx_stats.dropped++; 3797 fallthrough; 3798 case RX_CONTINUE: { 3799 struct ieee80211_rate *rate = NULL; 3800 struct ieee80211_supported_band *sband; 3801 struct ieee80211_rx_status *status; 3802 3803 status = IEEE80211_SKB_RXCB((rx->skb)); 3804 3805 sband = rx->local->hw.wiphy->bands[status->band]; 3806 if (status->encoding == RX_ENC_LEGACY) 3807 rate = &sband->bitrates[status->rate_idx]; 3808 3809 ieee80211_rx_cooked_monitor(rx, rate); 3810 break; 3811 } 3812 case RX_DROP_UNUSABLE: 3813 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3814 if (rx->sta) 3815 rx->sta->rx_stats.dropped++; 3816 dev_kfree_skb(rx->skb); 3817 break; 3818 case RX_QUEUED: 3819 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3820 break; 3821 } 3822 } 3823 3824 static void 
ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3825 struct sk_buff_head *frames) 3826 { 3827 ieee80211_rx_result res = RX_DROP_MONITOR; 3828 struct sk_buff *skb; 3829 3830 #define CALL_RXH(rxh) \ 3831 do { \ 3832 res = rxh(rx); \ 3833 if (res != RX_CONTINUE) \ 3834 goto rxh_next; \ 3835 } while (0) 3836 3837 /* Lock here to avoid hitting all of the data used in the RX 3838 * path (e.g. key data, station data, ...) concurrently when 3839 * a frame is released from the reorder buffer due to timeout 3840 * from the timer, potentially concurrently with RX from the 3841 * driver. 3842 */ 3843 spin_lock_bh(&rx->local->rx_path_lock); 3844 3845 while ((skb = __skb_dequeue(frames))) { 3846 /* 3847 * all the other fields are valid across frames 3848 * that belong to an aMPDU since they are on the 3849 * same TID from the same station 3850 */ 3851 rx->skb = skb; 3852 3853 CALL_RXH(ieee80211_rx_h_check_more_data); 3854 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3855 CALL_RXH(ieee80211_rx_h_sta_process); 3856 CALL_RXH(ieee80211_rx_h_decrypt); 3857 CALL_RXH(ieee80211_rx_h_defragment); 3858 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3859 /* must be after MMIC verify so header is counted in MPDU mic */ 3860 #ifdef CONFIG_MAC80211_MESH 3861 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3862 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3863 #endif 3864 CALL_RXH(ieee80211_rx_h_amsdu); 3865 CALL_RXH(ieee80211_rx_h_data); 3866 3867 /* special treatment -- needs the queue */ 3868 res = ieee80211_rx_h_ctrl(rx, frames); 3869 if (res != RX_CONTINUE) 3870 goto rxh_next; 3871 3872 CALL_RXH(ieee80211_rx_h_mgmt_check); 3873 CALL_RXH(ieee80211_rx_h_action); 3874 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3875 CALL_RXH(ieee80211_rx_h_action_post_userspace); 3876 CALL_RXH(ieee80211_rx_h_action_return); 3877 CALL_RXH(ieee80211_rx_h_ext); 3878 CALL_RXH(ieee80211_rx_h_mgmt); 3879 3880 rxh_next: 3881 ieee80211_rx_handlers_result(rx, res); 3882 3883 #undef CALL_RXH 3884 } 3885 3886 spin_unlock_bh(&rx->local->rx_path_lock); 3887 } 3888 3889 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3890 { 3891 struct sk_buff_head reorder_release; 3892 ieee80211_rx_result res = RX_DROP_MONITOR; 3893 3894 __skb_queue_head_init(&reorder_release); 3895 3896 #define CALL_RXH(rxh) \ 3897 do { \ 3898 res = rxh(rx); \ 3899 if (res != RX_CONTINUE) \ 3900 goto rxh_next; \ 3901 } while (0) 3902 3903 CALL_RXH(ieee80211_rx_h_check_dup); 3904 CALL_RXH(ieee80211_rx_h_check); 3905 3906 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3907 3908 ieee80211_rx_handlers(rx, &reorder_release); 3909 return; 3910 3911 rxh_next: 3912 ieee80211_rx_handlers_result(rx, res); 3913 3914 #undef CALL_RXH 3915 } 3916 3917 /* 3918 * This function makes calls into the RX path, therefore 3919 * it has to be invoked under RCU read lock. 
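 *
 * (As far as I can tell the only caller is the RX reorder timer
 * callback, sta_rx_agg_reorder_timer_expired(), which wraps this call
 * in rcu_read_lock()/rcu_read_unlock() for exactly that reason.)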
3920 */ 3921 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3922 { 3923 struct sk_buff_head frames; 3924 struct ieee80211_rx_data rx = { 3925 .sta = sta, 3926 .sdata = sta->sdata, 3927 .local = sta->local, 3928 /* This is OK -- must be QoS data frame */ 3929 .security_idx = tid, 3930 .seqno_idx = tid, 3931 }; 3932 struct tid_ampdu_rx *tid_agg_rx; 3933 3934 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3935 if (!tid_agg_rx) 3936 return; 3937 3938 __skb_queue_head_init(&frames); 3939 3940 spin_lock(&tid_agg_rx->reorder_lock); 3941 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3942 spin_unlock(&tid_agg_rx->reorder_lock); 3943 3944 if (!skb_queue_empty(&frames)) { 3945 struct ieee80211_event event = { 3946 .type = BA_FRAME_TIMEOUT, 3947 .u.ba.tid = tid, 3948 .u.ba.sta = &sta->sta, 3949 }; 3950 drv_event_callback(rx.local, rx.sdata, &event); 3951 } 3952 3953 ieee80211_rx_handlers(&rx, &frames); 3954 } 3955 3956 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3957 u16 ssn, u64 filtered, 3958 u16 received_mpdus) 3959 { 3960 struct sta_info *sta; 3961 struct tid_ampdu_rx *tid_agg_rx; 3962 struct sk_buff_head frames; 3963 struct ieee80211_rx_data rx = { 3964 /* This is OK -- must be QoS data frame */ 3965 .security_idx = tid, 3966 .seqno_idx = tid, 3967 }; 3968 int i, diff; 3969 3970 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3971 return; 3972 3973 __skb_queue_head_init(&frames); 3974 3975 sta = container_of(pubsta, struct sta_info, sta); 3976 3977 rx.sta = sta; 3978 rx.sdata = sta->sdata; 3979 rx.local = sta->local; 3980 3981 rcu_read_lock(); 3982 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3983 if (!tid_agg_rx) 3984 goto out; 3985 3986 spin_lock_bh(&tid_agg_rx->reorder_lock); 3987 3988 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3989 int release; 3990 3991 /* release all frames in the reorder buffer */ 3992 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3993 IEEE80211_SN_MODULO; 3994 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3995 release, &frames); 3996 /* update ssn to match received ssn */ 3997 tid_agg_rx->head_seq_num = ssn; 3998 } else { 3999 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 4000 &frames); 4001 } 4002 4003 /* handle the case that received ssn is behind the mac ssn. 
4004 * it can be tid_agg_rx->buf_size behind and still be valid */ 4005 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 4006 if (diff >= tid_agg_rx->buf_size) { 4007 tid_agg_rx->reorder_buf_filtered = 0; 4008 goto release; 4009 } 4010 filtered = filtered >> diff; 4011 ssn += diff; 4012 4013 /* update bitmap */ 4014 for (i = 0; i < tid_agg_rx->buf_size; i++) { 4015 int index = (ssn + i) % tid_agg_rx->buf_size; 4016 4017 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 4018 if (filtered & BIT_ULL(i)) 4019 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 4020 } 4021 4022 /* now process also frames that the filter marking released */ 4023 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4024 4025 release: 4026 spin_unlock_bh(&tid_agg_rx->reorder_lock); 4027 4028 ieee80211_rx_handlers(&rx, &frames); 4029 4030 out: 4031 rcu_read_unlock(); 4032 } 4033 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 4034 4035 /* main receive path */ 4036 4037 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 4038 { 4039 struct ieee80211_sub_if_data *sdata = rx->sdata; 4040 struct sk_buff *skb = rx->skb; 4041 struct ieee80211_hdr *hdr = (void *)skb->data; 4042 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4043 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 4044 bool multicast = is_multicast_ether_addr(hdr->addr1) || 4045 ieee80211_is_s1g_beacon(hdr->frame_control); 4046 4047 switch (sdata->vif.type) { 4048 case NL80211_IFTYPE_STATION: 4049 if (!bssid && !sdata->u.mgd.use_4addr) 4050 return false; 4051 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 4052 return false; 4053 if (multicast) 4054 return true; 4055 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4056 case NL80211_IFTYPE_ADHOC: 4057 if (!bssid) 4058 return false; 4059 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4060 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 4061 return false; 4062 if (ieee80211_is_beacon(hdr->frame_control)) 4063 return true; 4064 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 4065 return false; 4066 if (!multicast && 4067 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4068 return false; 4069 if (!rx->sta) { 4070 int rate_idx; 4071 if (status->encoding != RX_ENC_LEGACY) 4072 rate_idx = 0; /* TODO: HT/VHT rates */ 4073 else 4074 rate_idx = status->rate_idx; 4075 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 4076 BIT(rate_idx)); 4077 } 4078 return true; 4079 case NL80211_IFTYPE_OCB: 4080 if (!bssid) 4081 return false; 4082 if (!ieee80211_is_data_present(hdr->frame_control)) 4083 return false; 4084 if (!is_broadcast_ether_addr(bssid)) 4085 return false; 4086 if (!multicast && 4087 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4088 return false; 4089 if (!rx->sta) { 4090 int rate_idx; 4091 if (status->encoding != RX_ENC_LEGACY) 4092 rate_idx = 0; /* TODO: HT rates */ 4093 else 4094 rate_idx = status->rate_idx; 4095 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4096 BIT(rate_idx)); 4097 } 4098 return true; 4099 case NL80211_IFTYPE_MESH_POINT: 4100 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4101 return false; 4102 if (multicast) 4103 return true; 4104 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4105 case NL80211_IFTYPE_AP_VLAN: 4106 case NL80211_IFTYPE_AP: 4107 if (!bssid) 4108 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4109 4110 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 4111 /* 4112 * Accept public action frames even when the 4113 * BSSID doesn't match, this is 
used for P2P 4114 * and location updates. Note that mac80211 4115 * itself never looks at these frames. 4116 */ 4117 if (!multicast && 4118 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4119 return false; 4120 if (ieee80211_is_public_action(hdr, skb->len)) 4121 return true; 4122 return ieee80211_is_beacon(hdr->frame_control); 4123 } 4124 4125 if (!ieee80211_has_tods(hdr->frame_control)) { 4126 /* ignore data frames to TDLS-peers */ 4127 if (ieee80211_is_data(hdr->frame_control)) 4128 return false; 4129 /* ignore action frames to TDLS-peers */ 4130 if (ieee80211_is_action(hdr->frame_control) && 4131 !is_broadcast_ether_addr(bssid) && 4132 !ether_addr_equal(bssid, hdr->addr1)) 4133 return false; 4134 } 4135 4136 /* 4137 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4138 * the BSSID - we've checked that already but may have accepted 4139 * the wildcard (ff:ff:ff:ff:ff:ff). 4140 * 4141 * It also says: 4142 * The BSSID of the Data frame is determined as follows: 4143 * a) If the STA is contained within an AP or is associated 4144 * with an AP, the BSSID is the address currently in use 4145 * by the STA contained in the AP. 4146 * 4147 * So we should not accept data frames with an address that's 4148 * multicast. 4149 * 4150 * Accepting it also opens a security problem because stations 4151 * could encrypt it with the GTK and inject traffic that way. 4152 */ 4153 if (ieee80211_is_data(hdr->frame_control) && multicast) 4154 return false; 4155 4156 return true; 4157 case NL80211_IFTYPE_P2P_DEVICE: 4158 return ieee80211_is_public_action(hdr, skb->len) || 4159 ieee80211_is_probe_req(hdr->frame_control) || 4160 ieee80211_is_probe_resp(hdr->frame_control) || 4161 ieee80211_is_beacon(hdr->frame_control); 4162 case NL80211_IFTYPE_NAN: 4163 /* Currently no frames on NAN interface are allowed */ 4164 return false; 4165 default: 4166 break; 4167 } 4168 4169 WARN_ON_ONCE(1); 4170 return false; 4171 } 4172 4173 void ieee80211_check_fast_rx(struct sta_info *sta) 4174 { 4175 struct ieee80211_sub_if_data *sdata = sta->sdata; 4176 struct ieee80211_local *local = sdata->local; 4177 struct ieee80211_key *key; 4178 struct ieee80211_fast_rx fastrx = { 4179 .dev = sdata->dev, 4180 .vif_type = sdata->vif.type, 4181 .control_port_protocol = sdata->control_port_protocol, 4182 }, *old, *new = NULL; 4183 bool set_offload = false; 4184 bool assign = false; 4185 bool offload; 4186 4187 /* use sparse to check that we don't return without updating */ 4188 __acquire(check_fast_rx); 4189 4190 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4191 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4192 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4193 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4194 4195 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4196 4197 /* fast-rx doesn't do reordering */ 4198 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4199 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4200 goto clear; 4201 4202 switch (sdata->vif.type) { 4203 case NL80211_IFTYPE_STATION: 4204 if (sta->sta.tdls) { 4205 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4206 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4207 fastrx.expected_ds_bits = 0; 4208 } else { 4209 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4210 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4211 fastrx.expected_ds_bits = 4212 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4213 } 4214 4215 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4216 
fastrx.expected_ds_bits |= 4217 cpu_to_le16(IEEE80211_FCTL_TODS); 4218 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4219 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4220 } 4221 4222 if (!sdata->u.mgd.powersave) 4223 break; 4224 4225 /* software powersave is a huge mess, avoid all of it */ 4226 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4227 goto clear; 4228 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4229 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4230 goto clear; 4231 break; 4232 case NL80211_IFTYPE_AP_VLAN: 4233 case NL80211_IFTYPE_AP: 4234 /* parallel-rx requires this, at least with calls to 4235 * ieee80211_sta_ps_transition() 4236 */ 4237 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4238 goto clear; 4239 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4240 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4241 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4242 4243 fastrx.internal_forward = 4244 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4245 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4246 !sdata->u.vlan.sta); 4247 4248 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4249 sdata->u.vlan.sta) { 4250 fastrx.expected_ds_bits |= 4251 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4252 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4253 fastrx.internal_forward = 0; 4254 } 4255 4256 break; 4257 default: 4258 goto clear; 4259 } 4260 4261 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4262 goto clear; 4263 4264 rcu_read_lock(); 4265 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4266 if (!key) 4267 key = rcu_dereference(sdata->default_unicast_key); 4268 if (key) { 4269 switch (key->conf.cipher) { 4270 case WLAN_CIPHER_SUITE_TKIP: 4271 /* we don't want to deal with MMIC in fast-rx */ 4272 goto clear_rcu; 4273 case WLAN_CIPHER_SUITE_CCMP: 4274 case WLAN_CIPHER_SUITE_CCMP_256: 4275 case WLAN_CIPHER_SUITE_GCMP: 4276 case WLAN_CIPHER_SUITE_GCMP_256: 4277 break; 4278 default: 4279 /* We also don't want to deal with 4280 * WEP or cipher scheme. 
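			 * Put differently: when a key is present, fast-rx
			 * stays enabled only for the CCMP/GCMP ciphers
			 * listed above; TKIP, WEP and cipher-scheme keys
			 * all fall back to the regular rx handlers.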
4281 */ 4282 goto clear_rcu; 4283 } 4284 4285 fastrx.key = true; 4286 fastrx.icv_len = key->conf.icv_len; 4287 } 4288 4289 assign = true; 4290 clear_rcu: 4291 rcu_read_unlock(); 4292 clear: 4293 __release(check_fast_rx); 4294 4295 if (assign) 4296 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4297 4298 offload = assign && 4299 (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED); 4300 4301 if (offload) 4302 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4303 else 4304 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4305 4306 if (set_offload) 4307 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); 4308 4309 spin_lock_bh(&sta->lock); 4310 old = rcu_dereference_protected(sta->fast_rx, true); 4311 rcu_assign_pointer(sta->fast_rx, new); 4312 spin_unlock_bh(&sta->lock); 4313 4314 if (old) 4315 kfree_rcu(old, rcu_head); 4316 } 4317 4318 void ieee80211_clear_fast_rx(struct sta_info *sta) 4319 { 4320 struct ieee80211_fast_rx *old; 4321 4322 spin_lock_bh(&sta->lock); 4323 old = rcu_dereference_protected(sta->fast_rx, true); 4324 RCU_INIT_POINTER(sta->fast_rx, NULL); 4325 spin_unlock_bh(&sta->lock); 4326 4327 if (old) 4328 kfree_rcu(old, rcu_head); 4329 } 4330 4331 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4332 { 4333 struct ieee80211_local *local = sdata->local; 4334 struct sta_info *sta; 4335 4336 lockdep_assert_held(&local->sta_mtx); 4337 4338 list_for_each_entry(sta, &local->sta_list, list) { 4339 if (sdata != sta->sdata && 4340 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4341 continue; 4342 ieee80211_check_fast_rx(sta); 4343 } 4344 } 4345 4346 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4347 { 4348 struct ieee80211_local *local = sdata->local; 4349 4350 mutex_lock(&local->sta_mtx); 4351 __ieee80211_check_fast_rx_iface(sdata); 4352 mutex_unlock(&local->sta_mtx); 4353 } 4354 4355 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4356 struct ieee80211_fast_rx *fast_rx, 4357 int orig_len) 4358 { 4359 struct ieee80211_sta_rx_stats *stats; 4360 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4361 struct sta_info *sta = rx->sta; 4362 struct sk_buff *skb = rx->skb; 4363 void *sa = skb->data + ETH_ALEN; 4364 void *da = skb->data; 4365 4366 stats = &sta->rx_stats; 4367 if (fast_rx->uses_rss) 4368 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4369 4370 /* statistics part of ieee80211_rx_h_sta_process() */ 4371 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4372 stats->last_signal = status->signal; 4373 if (!fast_rx->uses_rss) 4374 ewma_signal_add(&sta->rx_stats_avg.signal, 4375 -status->signal); 4376 } 4377 4378 if (status->chains) { 4379 int i; 4380 4381 stats->chains = status->chains; 4382 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4383 int signal = status->chain_signal[i]; 4384 4385 if (!(status->chains & BIT(i))) 4386 continue; 4387 4388 stats->chain_signal_last[i] = signal; 4389 if (!fast_rx->uses_rss) 4390 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], 4391 -signal); 4392 } 4393 } 4394 /* end of statistics */ 4395 4396 stats->last_rx = jiffies; 4397 stats->last_rate = sta_stats_encode_rate(status); 4398 4399 stats->fragments++; 4400 stats->packets++; 4401 4402 skb->dev = fast_rx->dev; 4403 4404 dev_sw_netstats_rx_add(fast_rx->dev, skb->len); 4405 4406 /* The seqno index has the same property as needed 4407 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4408 * for non-QoS-data frames. 
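	 * (For example, an MSDU received on TID 5 is accounted in
	 * stats->msdu[5], while a non-QoS data frame ends up under
	 * index IEEE80211_NUM_TIDS.)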
Here we know it's a data 4409 * frame, so count MSDUs. 4410 */ 4411 u64_stats_update_begin(&stats->syncp); 4412 stats->msdu[rx->seqno_idx]++; 4413 stats->bytes += orig_len; 4414 u64_stats_update_end(&stats->syncp); 4415 4416 if (fast_rx->internal_forward) { 4417 struct sk_buff *xmit_skb = NULL; 4418 if (is_multicast_ether_addr(da)) { 4419 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4420 } else if (!ether_addr_equal(da, sa) && 4421 sta_info_get(rx->sdata, da)) { 4422 xmit_skb = skb; 4423 skb = NULL; 4424 } 4425 4426 if (xmit_skb) { 4427 /* 4428 * Send to wireless media and increase priority by 256 4429 * to keep the received priority instead of 4430 * reclassifying the frame (see cfg80211_classify8021d). 4431 */ 4432 xmit_skb->priority += 256; 4433 xmit_skb->protocol = htons(ETH_P_802_3); 4434 skb_reset_network_header(xmit_skb); 4435 skb_reset_mac_header(xmit_skb); 4436 dev_queue_xmit(xmit_skb); 4437 } 4438 4439 if (!skb) 4440 return; 4441 } 4442 4443 /* deliver to local stack */ 4444 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4445 memset(skb->cb, 0, sizeof(skb->cb)); 4446 if (rx->list) 4447 list_add_tail(&skb->list, rx->list); 4448 else 4449 netif_receive_skb(skb); 4450 4451 } 4452 4453 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4454 struct ieee80211_fast_rx *fast_rx) 4455 { 4456 struct sk_buff *skb = rx->skb; 4457 struct ieee80211_hdr *hdr = (void *)skb->data; 4458 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4459 struct sta_info *sta = rx->sta; 4460 int orig_len = skb->len; 4461 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4462 int snap_offs = hdrlen; 4463 struct { 4464 u8 snap[sizeof(rfc1042_header)]; 4465 __be16 proto; 4466 } *payload __aligned(2); 4467 struct { 4468 u8 da[ETH_ALEN]; 4469 u8 sa[ETH_ALEN]; 4470 } addrs __aligned(2); 4471 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 4472 4473 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4474 * to a common data structure; drivers can implement that per queue 4475 * but we don't have that information in mac80211 4476 */ 4477 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4478 return false; 4479 4480 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4481 4482 /* If using encryption, we also need to have: 4483 * - PN_VALIDATED: similar, but the implementation is tricky 4484 * - DECRYPTED: necessary for PN_VALIDATED 4485 */ 4486 if (fast_rx->key && 4487 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4488 return false; 4489 4490 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4491 return false; 4492 4493 if (unlikely(ieee80211_is_frag(hdr))) 4494 return false; 4495 4496 /* Since our interface address cannot be multicast, this 4497 * implicitly also rejects multicast frames without the 4498 * explicit check. 4499 * 4500 * We shouldn't get any *data* frames not addressed to us 4501 * (AP mode will accept multicast *management* frames), but 4502 * punting here will make it go through the full checks in 4503 * ieee80211_accept_frame(). 
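	 *
	 * (Clarifying note: returning false from this function is always
	 * safe -- the caller simply falls back to ieee80211_accept_frame()
	 * and the regular rx handlers -- whereas returning true means the
	 * skb was consumed, either delivered or dropped.)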
4504 */ 4505 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4506 return false; 4507 4508 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4509 IEEE80211_FCTL_TODS)) != 4510 fast_rx->expected_ds_bits) 4511 return false; 4512 4513 /* assign the key to drop unencrypted frames (later) 4514 * and strip the IV/MIC if necessary 4515 */ 4516 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4517 /* GCMP header length is the same */ 4518 snap_offs += IEEE80211_CCMP_HDR_LEN; 4519 } 4520 4521 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { 4522 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4523 goto drop; 4524 4525 payload = (void *)(skb->data + snap_offs); 4526 4527 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4528 return false; 4529 4530 /* Don't handle these here since they require special code. 4531 * Accept AARP and IPX even though they should come with a 4532 * bridge-tunnel header - but if we get them this way then 4533 * there's little point in discarding them. 4534 */ 4535 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4536 payload->proto == fast_rx->control_port_protocol)) 4537 return false; 4538 } 4539 4540 /* after this point, don't punt to the slowpath! */ 4541 4542 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 4543 pskb_trim(skb, skb->len - fast_rx->icv_len)) 4544 goto drop; 4545 4546 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 4547 goto drop; 4548 4549 if (status->rx_flags & IEEE80211_RX_AMSDU) { 4550 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 4551 RX_QUEUED) 4552 goto drop; 4553 4554 return true; 4555 } 4556 4557 /* do the header conversion - first grab the addresses */ 4558 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 4559 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 4560 /* remove the SNAP but leave the ethertype */ 4561 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 4562 /* push the addresses in front */ 4563 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 4564 4565 ieee80211_rx_8023(rx, fast_rx, orig_len); 4566 4567 return true; 4568 drop: 4569 dev_kfree_skb(skb); 4570 if (fast_rx->uses_rss) 4571 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4572 4573 stats->dropped++; 4574 return true; 4575 } 4576 4577 /* 4578 * This function returns whether or not the SKB 4579 * was destined for RX processing or not, which, 4580 * if consume is true, is equivalent to whether 4581 * or not the skb was consumed. 4582 */ 4583 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 4584 struct sk_buff *skb, bool consume) 4585 { 4586 struct ieee80211_local *local = rx->local; 4587 struct ieee80211_sub_if_data *sdata = rx->sdata; 4588 4589 rx->skb = skb; 4590 4591 /* See if we can do fast-rx; if we have to copy we already lost, 4592 * so punt in that case. We should never have to deliver a data 4593 * frame to multiple interfaces anyway. 4594 * 4595 * We skip the ieee80211_accept_frame() call and do the necessary 4596 * checking inside ieee80211_invoke_fast_rx(). 
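	 *
	 * (Clarifying note: "consume" is true only for the last candidate
	 * station/interface for this frame, so the skb can be handed off
	 * directly; with consume == false a copy is taken further down
	 * before the rx handlers run.)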
4597 */ 4598 if (consume && rx->sta) { 4599 struct ieee80211_fast_rx *fast_rx; 4600 4601 fast_rx = rcu_dereference(rx->sta->fast_rx); 4602 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 4603 return true; 4604 } 4605 4606 if (!ieee80211_accept_frame(rx)) 4607 return false; 4608 4609 if (!consume) { 4610 skb = skb_copy(skb, GFP_ATOMIC); 4611 if (!skb) { 4612 if (net_ratelimit()) 4613 wiphy_debug(local->hw.wiphy, 4614 "failed to copy skb for %s\n", 4615 sdata->name); 4616 return true; 4617 } 4618 4619 rx->skb = skb; 4620 } 4621 4622 ieee80211_invoke_rx_handlers(rx); 4623 return true; 4624 } 4625 4626 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, 4627 struct ieee80211_sta *pubsta, 4628 struct sk_buff *skb, 4629 struct list_head *list) 4630 { 4631 struct ieee80211_local *local = hw_to_local(hw); 4632 struct ieee80211_fast_rx *fast_rx; 4633 struct ieee80211_rx_data rx; 4634 4635 memset(&rx, 0, sizeof(rx)); 4636 rx.skb = skb; 4637 rx.local = local; 4638 rx.list = list; 4639 4640 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4641 4642 /* drop frame if too short for header */ 4643 if (skb->len < sizeof(struct ethhdr)) 4644 goto drop; 4645 4646 if (!pubsta) 4647 goto drop; 4648 4649 rx.sta = container_of(pubsta, struct sta_info, sta); 4650 rx.sdata = rx.sta->sdata; 4651 4652 fast_rx = rcu_dereference(rx.sta->fast_rx); 4653 if (!fast_rx) 4654 goto drop; 4655 4656 ieee80211_rx_8023(&rx, fast_rx, skb->len); 4657 return; 4658 4659 drop: 4660 dev_kfree_skb(skb); 4661 } 4662 4663 /* 4664 * This is the actual Rx frames handler. as it belongs to Rx path it must 4665 * be called with rcu_read_lock protection. 4666 */ 4667 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 4668 struct ieee80211_sta *pubsta, 4669 struct sk_buff *skb, 4670 struct list_head *list) 4671 { 4672 struct ieee80211_local *local = hw_to_local(hw); 4673 struct ieee80211_sub_if_data *sdata; 4674 struct ieee80211_hdr *hdr; 4675 __le16 fc; 4676 struct ieee80211_rx_data rx; 4677 struct ieee80211_sub_if_data *prev; 4678 struct rhlist_head *tmp; 4679 int err = 0; 4680 4681 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 4682 memset(&rx, 0, sizeof(rx)); 4683 rx.skb = skb; 4684 rx.local = local; 4685 rx.list = list; 4686 4687 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 4688 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4689 4690 if (ieee80211_is_mgmt(fc)) { 4691 /* drop frame if too short for header */ 4692 if (skb->len < ieee80211_hdrlen(fc)) 4693 err = -ENOBUFS; 4694 else 4695 err = skb_linearize(skb); 4696 } else { 4697 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 4698 } 4699 4700 if (err) { 4701 dev_kfree_skb(skb); 4702 return; 4703 } 4704 4705 hdr = (struct ieee80211_hdr *)skb->data; 4706 ieee80211_parse_qos(&rx); 4707 ieee80211_verify_alignment(&rx); 4708 4709 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 4710 ieee80211_is_beacon(hdr->frame_control) || 4711 ieee80211_is_s1g_beacon(hdr->frame_control))) 4712 ieee80211_scan_rx(local, skb); 4713 4714 if (ieee80211_is_data(fc)) { 4715 struct sta_info *sta, *prev_sta; 4716 4717 if (pubsta) { 4718 rx.sta = container_of(pubsta, struct sta_info, sta); 4719 rx.sdata = rx.sta->sdata; 4720 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4721 return; 4722 goto out; 4723 } 4724 4725 prev_sta = NULL; 4726 4727 for_each_sta_info(local, hdr->addr2, sta, tmp) { 4728 if (!prev_sta) { 4729 prev_sta = sta; 4730 continue; 4731 } 4732 4733 rx.sta = prev_sta; 4734 rx.sdata = prev_sta->sdata; 4735 
ieee80211_prepare_and_rx_handle(&rx, skb, false); 4736 4737 prev_sta = sta; 4738 } 4739 4740 if (prev_sta) { 4741 rx.sta = prev_sta; 4742 rx.sdata = prev_sta->sdata; 4743 4744 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4745 return; 4746 goto out; 4747 } 4748 } 4749 4750 prev = NULL; 4751 4752 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 4753 if (!ieee80211_sdata_running(sdata)) 4754 continue; 4755 4756 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 4757 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 4758 continue; 4759 4760 /* 4761 * frame is destined for this interface, but if it's 4762 * not also for the previous one we handle that after 4763 * the loop to avoid copying the SKB once too much 4764 */ 4765 4766 if (!prev) { 4767 prev = sdata; 4768 continue; 4769 } 4770 4771 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4772 rx.sdata = prev; 4773 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4774 4775 prev = sdata; 4776 } 4777 4778 if (prev) { 4779 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4780 rx.sdata = prev; 4781 4782 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4783 return; 4784 } 4785 4786 out: 4787 dev_kfree_skb(skb); 4788 } 4789 4790 /* 4791 * This is the receive path handler. It is called by a low level driver when an 4792 * 802.11 MPDU is received from the hardware. 4793 */ 4794 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4795 struct sk_buff *skb, struct list_head *list) 4796 { 4797 struct ieee80211_local *local = hw_to_local(hw); 4798 struct ieee80211_rate *rate = NULL; 4799 struct ieee80211_supported_band *sband; 4800 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4801 4802 WARN_ON_ONCE(softirq_count() == 0); 4803 4804 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 4805 goto drop; 4806 4807 sband = local->hw.wiphy->bands[status->band]; 4808 if (WARN_ON(!sband)) 4809 goto drop; 4810 4811 /* 4812 * If we're suspending, it is possible although not too likely 4813 * that we'd be receiving frames after having already partially 4814 * quiesced the stack. We can't process such frames then since 4815 * that might, for example, cause stations to be added or other 4816 * driver callbacks be invoked. 4817 */ 4818 if (unlikely(local->quiescing || local->suspended)) 4819 goto drop; 4820 4821 /* We might be during a HW reconfig, prevent Rx for the same reason */ 4822 if (unlikely(local->in_reconfig)) 4823 goto drop; 4824 4825 /* 4826 * The same happens when we're not even started, 4827 * but that's worth a warning. 4828 */ 4829 if (WARN_ON(!local->started)) 4830 goto drop; 4831 4832 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 4833 /* 4834 * Validate the rate, unless a PLCP error means that 4835 * we probably can't have a valid rate here anyway. 4836 */ 4837 4838 switch (status->encoding) { 4839 case RX_ENC_HT: 4840 /* 4841 * rate_idx is MCS index, which can be [0-76] 4842 * as documented on: 4843 * 4844 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n 4845 * 4846 * Anything else would be some sort of driver or 4847 * hardware error. The driver should catch hardware 4848 * errors. 
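			 *
			 * (Background note: MCS 0-31 are the equal-modulation
			 * rates for one to four spatial streams, MCS 32 is the
			 * 40 MHz HT-duplicate rate and MCS 33-76 are the
			 * unequal-modulation combinations, hence the upper
			 * bound of 76 checked below.)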
4849 */ 4850 if (WARN(status->rate_idx > 76, 4851 "Rate marked as an HT rate but passed " 4852 "status->rate_idx is not " 4853 "an MCS index [0-76]: %d (0x%02x)\n", 4854 status->rate_idx, 4855 status->rate_idx)) 4856 goto drop; 4857 break; 4858 case RX_ENC_VHT: 4859 if (WARN_ONCE(status->rate_idx > 9 || 4860 !status->nss || 4861 status->nss > 8, 4862 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 4863 status->rate_idx, status->nss)) 4864 goto drop; 4865 break; 4866 case RX_ENC_HE: 4867 if (WARN_ONCE(status->rate_idx > 11 || 4868 !status->nss || 4869 status->nss > 8, 4870 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 4871 status->rate_idx, status->nss)) 4872 goto drop; 4873 break; 4874 default: 4875 WARN_ON_ONCE(1); 4876 fallthrough; 4877 case RX_ENC_LEGACY: 4878 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 4879 goto drop; 4880 rate = &sband->bitrates[status->rate_idx]; 4881 } 4882 } 4883 4884 status->rx_flags = 0; 4885 4886 kcov_remote_start_common(skb_get_kcov_handle(skb)); 4887 4888 /* 4889 * Frames with failed FCS/PLCP checksum are not returned, 4890 * all other frames are returned without radiotap header 4891 * if it was previously present. 4892 * Also, frames with less than 16 bytes are dropped. 4893 */ 4894 if (!(status->flag & RX_FLAG_8023)) 4895 skb = ieee80211_rx_monitor(local, skb, rate); 4896 if (skb) { 4897 ieee80211_tpt_led_trig_rx(local, 4898 ((struct ieee80211_hdr *)skb->data)->frame_control, 4899 skb->len); 4900 4901 if (status->flag & RX_FLAG_8023) 4902 __ieee80211_rx_handle_8023(hw, pubsta, skb, list); 4903 else 4904 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 4905 } 4906 4907 kcov_remote_stop(); 4908 return; 4909 drop: 4910 kfree_skb(skb); 4911 } 4912 EXPORT_SYMBOL(ieee80211_rx_list); 4913 4914 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4915 struct sk_buff *skb, struct napi_struct *napi) 4916 { 4917 struct sk_buff *tmp; 4918 LIST_HEAD(list); 4919 4920 4921 /* 4922 * key references and virtual interfaces are protected using RCU 4923 * and this requires that we are in a read-side RCU section during 4924 * receive processing 4925 */ 4926 rcu_read_lock(); 4927 ieee80211_rx_list(hw, pubsta, skb, &list); 4928 rcu_read_unlock(); 4929 4930 if (!napi) { 4931 netif_receive_skb_list(&list); 4932 return; 4933 } 4934 4935 list_for_each_entry_safe(skb, tmp, &list, list) { 4936 skb_list_del_init(skb); 4937 napi_gro_receive(napi, skb); 4938 } 4939 } 4940 EXPORT_SYMBOL(ieee80211_rx_napi); 4941 4942 /* This is a version of the rx handler that can be called from hard irq 4943 * context. Post the skb on the queue and schedule the tasklet */ 4944 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 4945 { 4946 struct ieee80211_local *local = hw_to_local(hw); 4947 4948 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 4949 4950 skb->pkt_type = IEEE80211_RX_MSG; 4951 skb_queue_tail(&local->skb_queue, skb); 4952 tasklet_schedule(&local->tasklet); 4953 } 4954 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 4955
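
/*
 * Editorial usage sketch (not part of mac80211 itself; the values below
 * are purely illustrative): a driver's receive path fills in the
 * mac80211 RX status that lives in skb->cb and then hands the frame to
 * exactly one of the entry points above, e.g.:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->freq = 2437;
 *	status->signal = -42;			// dBm, if SIGNAL_DBM is set
 *	status->rate_idx = 0;			// index into sband->bitrates
 *
 *	ieee80211_rx_napi(hw, NULL, skb, napi);	// softirq/NAPI context
 *
 * or, from hard interrupt context, ieee80211_rx_irqsafe(hw, skb).
 * Passing NULL for the station is allowed; drivers that already know
 * the ieee80211_sta pass it so that the fast-rx/802.3 paths can be used.
 */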