/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			       enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
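 * That is, the trailing FCS (when the driver reports RX_INCLUDES_FCS)
 * and any vendor radiotap data the driver placed in front of the frame,
 * as handled by the function body below.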
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb,
					   unsigned int rtap_vendor_space)
{
	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	__pskb_pull(skb, rtap_vendor_space);

	return skb;
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_vendor_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_vendor_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->flag & RX_FLAG_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->flag & RX_FLAG_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;

		/* vendor presence bitmap */
		len += 4;
		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
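 *
 * As a rough sketch, the fields written below appear in this order (each
 * only when the corresponding information is available): TSFT, flags,
 * rate, channel, dBm antenna signal, antenna, RX flags, MCS, A-MPDU
 * status, VHT, per-chain signal/antenna and finally any vendor
 * namespace data.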
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
		/*
		 * If we don't have rate information, don't add the field.
		 * For HT/VHT frames the MCS information is a separate
		 * radiotap field, added below. The byte here is still
		 * needed as padding for the channel field, so initialise
		 * it to 0.
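		 * (In the else branch the rate byte is in 500 kbps units;
		 * mac80211 bitrates are in 100 kbps units, hence the
		 * division by 5, scaled further for 5/10 MHz channels.)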
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->flag & RX_FLAG_10MHZ)
			shift = 1;
		else if (status->flag & RX_FLAG_5MHZ)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->flag & RX_FLAG_10MHZ)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->flag & RX_FLAG_5MHZ)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == IEEE80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->flag & RX_FLAG_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->flag & RX_FLAG_40MHZ)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->flag & RX_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->flag & RX_FLAG_VHT) {
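		/* The 12 bytes reserved for the VHT field in
		 * ieee80211_rx_radiotap_hdrlen() are consumed here as:
		 * known (2), flags (1), bandwidth (1), MCS/NSS (4),
		 * coding (1), group ID (1) and partial AID (2).
		 */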
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->flag & RX_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->vht_flag & RX_VHT_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		if (status->vht_flag & RX_VHT_FLAG_80MHZ)
			*pos++ = 4;
		else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
			*pos++ = 11;
		else if (status->flag & RX_FLAG_40MHZ)
			*pos++ = 1;
		else /* 20 MHz */
			*pos++ = 0;
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->vht_nss;
		pos += 4;
		/* coding field */
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_vendor_space = 0;

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;

		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
	}

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
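	 *
	 * In short: with no monitor interfaces up the frame is either
	 * dropped or merely stripped of FCS/vendor data; otherwise a
	 * radiotap header is prepended and a copy is delivered to each
	 * monitor interface below.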
	 */

	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		present_fcs_len = FCS_LEN;

	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors) {
		if (should_drop_frame(origskb, present_fcs_len,
				      rtap_vendor_space)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_vendor_space);
	}

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
	needed_headroom = rt_hdrlen - rtap_vendor_space;

	if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb,
					      rtap_vendor_space);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		ieee80211_rx_stats(sdata->dev, skb->len);
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must be two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding such as some Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header so that it is directly in front of the payload in that case.
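 *
 * As an illustrative example: a 24-byte 802.11 data header followed by an
 * 8-byte RFC 1042/SNAP header places the IP header at offset 32, so a frame
 * that starts on a four-byte boundary keeps the IP header four-byte aligned
 * as well.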
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ONCE((unsigned long)rx->skb->data & 1,
		  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(skb_list)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
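		 * (HT_RX_REORDER_BUF_TIMEOUT is HZ / 10, i.e. roughly
		 * 100 ms of waiting before a gap is treated as lost.)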
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(
					&tid_agg_rx->reorder_buf[j])) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(
				&tid_agg_rx->reorder_buf[index])) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(
					&tid_agg_rx->reorder_buf[j]))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
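	 * (For example, with head_seq_num 100 and buf_size 64, an MPDU
	 * with sequence number 170 moves the head to 107, releasing the
	 * stored frames for 100..106 to the stack first.)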
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
 * need no reordering are put on the @frames queue for immediate
 * processing.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
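	 *
	 * One exception, handled below: frames matching the configured
	 * control port protocol (e.g. EAPOL) from the AP are accepted on
	 * a station interface even before the ASSOC flag is set.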
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
		struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);

		if (!skb_queue_len(&txqi->queue))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, ac;

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
				ieee80211_sta_ps_deliver_poll_response(rx->sta);
			else
				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
		}

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
		ac = ieee802_1d_to_ac[tid & 7];

		/*
		 * If this AC is not trigger-enabled do nothing.
		 *
		 * NB: This could/should check a separate bitmap of trigger-
		 * enabled queues, but for now we only implement uAPSD w/o
		 * TSPEC changes to the ACs, so they're always the same.
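		 * (ieee802_1d_to_ac[] above maps the TID to its WMM access
		 * category; e.g. TIDs 6 and 7 map to the voice AC.)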
		 */
		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
			return RX_CONTINUE;

		/* if we are in a service period, do nothing */
		if (test_sta_flag(rx->sta, WLAN_STA_SP))
			return RX_CONTINUE;

		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
			ieee80211_sta_ps_deliver_uapsd(rx->sta);
		else
			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1)) {
				sta->rx_stats.last_rate_idx =
					status->rate_idx;
				sta->rx_stats.last_rate_flag =
					status->flag;
				sta->rx_stats.last_rate_vht_flag =
					status->vht_flag;
				sta->rx_stats.last_rate_vht_nss =
					status->vht_nss;
			}
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control)) {
			sta->rx_stats.last_rate_idx = status->rate_idx;
			sta->rx_stats.last_rate_flag = status->flag;
			sta->rx_stats.last_rate_vht_flag = status->vht_flag;
			sta->rx_stats.last_rate_vht_nss = status->vht_nss;
		}
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;
	sta->rx_stats.bytes += rx->skb->len;
	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    /* PM bit is only checked in frames where it isn't reserved,
	     * in AP mode it's reserved in non-bufferable management frames
	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
	     */
	    (!ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;
	const struct ieee80211_cipher_scheme *cs = NULL;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
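	 *
	 * In rough terms, the code below therefore picks, in order: the
	 * station's PTK for protected unicast frames, the IGTK indicated
	 * by the MMIE for BIP-protected group management frames, and
	 * otherwise the GTK/WEP key addressed by the key index found in
	 * the IV.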
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/* start without a key */
	rx->key = NULL;
	fc = hdr->frame_control;

	if (rx->sta) {
		int keyid = rx->sta->ptk_idx;

		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
			cs = rx->sta->cipher_scheme;
			keyid = iwl80211_get_cs_keyid(cs, rx->skb);
			if (unlikely(keyid < 0))
				return RX_DROP_UNUSABLE;
		}
		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
	}

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;

		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (cs) {
			keyidx = iwl80211_get_cs_keyid(cs, rx->skb);

			if (unlikely(keyidx < 0))
				return RX_DROP_UNUSABLE;
		} else {
			if (rx->skb->len < 8 + hdrlen)
				return RX_DROP_UNUSABLE; /* TODO: count this? */
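			/* The key index is carried in bits 6-7 of the last
			 * IV octet, hence the read from hdrlen + 3 and the
			 * shift by 6 below.
			 */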
			/*
			 * no need to call ieee80211_wep_get_keyidx,
			 * it verifies a bunch of things we've done already
			 */
			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
			keyidx = keyid >> 6;
		}

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
			return RX_DROP_MONITOR;

		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_256_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		result = ieee80211_crypto_aes_gmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		result = ieee80211_crypto_gcmp_decrypt(rx);
		break;
	default:
		result = ieee80211_crypto_hw_decrypt(rx);
	}

	/* the hdr variable is invalid after the decrypt handlers */

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;

	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list))
		__skb_queue_purge(&entry->skb_list);

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_ctl(fc))
		return RX_CONTINUE;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (is_multicast_ether_addr(hdr->addr1)) {
		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
		goto out_no_led;
	}

	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
		goto out;

	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->seqno_idx, &(rx->skb));
		if (rx->key &&
		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
		    ieee80211_has_protected(fc)) {
			int queue = rx->security_idx;
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       IEEE80211_CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
					  rx->seqno_idx, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
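	 * The check below rebuilds the expected PN by incrementing the stored
	 * 6-byte counter (carrying from its last octet) and requires the
	 * key's current receive PN to match it exactly.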
1877 * (IEEE 802.11i, 8.3.3.4.5) */ 1878 if (entry->ccmp) { 1879 int i; 1880 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 1881 int queue; 1882 if (!rx->key || 1883 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 1884 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256)) 1885 return RX_DROP_UNUSABLE; 1886 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 1887 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 1888 pn[i]++; 1889 if (pn[i]) 1890 break; 1891 } 1892 queue = rx->security_idx; 1893 rpn = rx->key->u.ccmp.rx_pn[queue]; 1894 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 1895 return RX_DROP_UNUSABLE; 1896 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 1897 } 1898 1899 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1900 __skb_queue_tail(&entry->skb_list, rx->skb); 1901 entry->last_frag = frag; 1902 entry->extra_len += rx->skb->len; 1903 if (ieee80211_has_morefrags(fc)) { 1904 rx->skb = NULL; 1905 return RX_QUEUED; 1906 } 1907 1908 rx->skb = __skb_dequeue(&entry->skb_list); 1909 if (skb_tailroom(rx->skb) < entry->extra_len) { 1910 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 1911 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1912 GFP_ATOMIC))) { 1913 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1914 __skb_queue_purge(&entry->skb_list); 1915 return RX_DROP_UNUSABLE; 1916 } 1917 } 1918 while ((skb = __skb_dequeue(&entry->skb_list))) { 1919 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1920 dev_kfree_skb(skb); 1921 } 1922 1923 /* Complete frame has been reassembled - process it now */ 1924 status = IEEE80211_SKB_RXCB(rx->skb); 1925 1926 out: 1927 ieee80211_led_rx(rx->local); 1928 out_no_led: 1929 if (rx->sta) 1930 rx->sta->rx_stats.packets++; 1931 return RX_CONTINUE; 1932 } 1933 1934 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1935 { 1936 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 1937 return -EACCES; 1938 1939 return 0; 1940 } 1941 1942 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1943 { 1944 struct sk_buff *skb = rx->skb; 1945 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1946 1947 /* 1948 * Pass through unencrypted frames if the hardware has 1949 * decrypted them already. 1950 */ 1951 if (status->flag & RX_FLAG_DECRYPTED) 1952 return 0; 1953 1954 /* Drop unencrypted frames if key is set. */ 1955 if (unlikely(!ieee80211_has_protected(fc) && 1956 !ieee80211_is_nullfunc(fc) && 1957 ieee80211_is_data(fc) && rx->key)) 1958 return -EACCES; 1959 1960 return 0; 1961 } 1962 1963 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1964 { 1965 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1966 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1967 __le16 fc = hdr->frame_control; 1968 1969 /* 1970 * Pass through unencrypted frames if the hardware has 1971 * decrypted them already. 
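 * (drivers report hardware decryption by setting RX_FLAG_DECRYPTED
 * in the RX status).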
1972 */ 1973 if (status->flag & RX_FLAG_DECRYPTED) 1974 return 0; 1975 1976 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 1977 if (unlikely(!ieee80211_has_protected(fc) && 1978 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1979 rx->key)) { 1980 if (ieee80211_is_deauth(fc) || 1981 ieee80211_is_disassoc(fc)) 1982 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1983 rx->skb->data, 1984 rx->skb->len); 1985 return -EACCES; 1986 } 1987 /* BIP does not use Protected field, so need to check MMIE */ 1988 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1989 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 1990 if (ieee80211_is_deauth(fc) || 1991 ieee80211_is_disassoc(fc)) 1992 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1993 rx->skb->data, 1994 rx->skb->len); 1995 return -EACCES; 1996 } 1997 /* 1998 * When using MFP, Action frames are not allowed prior to 1999 * having configured keys. 2000 */ 2001 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2002 ieee80211_is_robust_mgmt_frame(rx->skb))) 2003 return -EACCES; 2004 } 2005 2006 return 0; 2007 } 2008 2009 static int 2010 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2011 { 2012 struct ieee80211_sub_if_data *sdata = rx->sdata; 2013 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2014 bool check_port_control = false; 2015 struct ethhdr *ehdr; 2016 int ret; 2017 2018 *port_control = false; 2019 if (ieee80211_has_a4(hdr->frame_control) && 2020 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2021 return -1; 2022 2023 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2024 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2025 2026 if (!sdata->u.mgd.use_4addr) 2027 return -1; 2028 else 2029 check_port_control = true; 2030 } 2031 2032 if (is_multicast_ether_addr(hdr->addr1) && 2033 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2034 return -1; 2035 2036 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2037 if (ret < 0) 2038 return ret; 2039 2040 ehdr = (struct ethhdr *) rx->skb->data; 2041 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2042 *port_control = true; 2043 else if (check_port_control) 2044 return -1; 2045 2046 return 0; 2047 } 2048 2049 /* 2050 * requires that rx->skb is a frame with ethernet header 2051 */ 2052 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2053 { 2054 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2055 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2056 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2057 2058 /* 2059 * Allow EAPOL frames to us/the PAE group address regardless 2060 * of whether the frame was encrypted or not. 
2061 */ 2062 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2063 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2064 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2065 return true; 2066 2067 if (ieee80211_802_1x_port_control(rx) || 2068 ieee80211_drop_unencrypted(rx, fc)) 2069 return false; 2070 2071 return true; 2072 } 2073 2074 /* 2075 * requires that rx->skb is a frame with ethernet header 2076 */ 2077 static void 2078 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2079 { 2080 struct ieee80211_sub_if_data *sdata = rx->sdata; 2081 struct net_device *dev = sdata->dev; 2082 struct sk_buff *skb, *xmit_skb; 2083 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2084 struct sta_info *dsta; 2085 2086 skb = rx->skb; 2087 xmit_skb = NULL; 2088 2089 ieee80211_rx_stats(dev, skb->len); 2090 2091 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2092 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2093 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2094 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2095 if (is_multicast_ether_addr(ehdr->h_dest)) { 2096 /* 2097 * send multicast frames both to higher layers in 2098 * local net stack and back to the wireless medium 2099 */ 2100 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2101 if (!xmit_skb) 2102 net_info_ratelimited("%s: failed to clone multicast frame\n", 2103 dev->name); 2104 } else { 2105 dsta = sta_info_get(sdata, skb->data); 2106 if (dsta) { 2107 /* 2108 * The destination station is associated to 2109 * this AP (in this VLAN), so send the frame 2110 * directly to it and do not pass it to local 2111 * net stack. 2112 */ 2113 xmit_skb = skb; 2114 skb = NULL; 2115 } 2116 } 2117 } 2118 2119 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2120 if (skb) { 2121 /* 'align' will only take the values 0 or 2 here since all 2122 * frames are required to be aligned to 2-byte boundaries 2123 * when being passed to mac80211; the code here works just 2124 * as well if that isn't true, but mac80211 assumes it can 2125 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2126 */ 2127 int align; 2128 2129 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2130 if (align) { 2131 if (WARN_ON(skb_headroom(skb) < 3)) { 2132 dev_kfree_skb(skb); 2133 skb = NULL; 2134 } else { 2135 u8 *data = skb->data; 2136 size_t len = skb_headlen(skb); 2137 skb->data -= align; 2138 memmove(skb->data, data, len); 2139 skb_set_tail_pointer(skb, len); 2140 } 2141 } 2142 } 2143 #endif 2144 2145 if (skb) { 2146 /* deliver to local stack */ 2147 skb->protocol = eth_type_trans(skb, dev); 2148 memset(skb->cb, 0, sizeof(skb->cb)); 2149 if (rx->napi) 2150 napi_gro_receive(rx->napi, skb); 2151 else 2152 netif_receive_skb(skb); 2153 } 2154 2155 if (xmit_skb) { 2156 /* 2157 * Send to wireless media and increase priority by 256 to 2158 * keep the received priority instead of reclassifying 2159 * the frame (see cfg80211_classify8021d). 
2160 */ 2161 xmit_skb->priority += 256; 2162 xmit_skb->protocol = htons(ETH_P_802_3); 2163 skb_reset_network_header(xmit_skb); 2164 skb_reset_mac_header(xmit_skb); 2165 dev_queue_xmit(xmit_skb); 2166 } 2167 } 2168 2169 static ieee80211_rx_result debug_noinline 2170 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2171 { 2172 struct net_device *dev = rx->sdata->dev; 2173 struct sk_buff *skb = rx->skb; 2174 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2175 __le16 fc = hdr->frame_control; 2176 struct sk_buff_head frame_list; 2177 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2178 2179 if (unlikely(!ieee80211_is_data(fc))) 2180 return RX_CONTINUE; 2181 2182 if (unlikely(!ieee80211_is_data_present(fc))) 2183 return RX_DROP_MONITOR; 2184 2185 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2186 return RX_CONTINUE; 2187 2188 if (ieee80211_has_a4(hdr->frame_control) && 2189 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2190 !rx->sdata->u.vlan.sta) 2191 return RX_DROP_UNUSABLE; 2192 2193 if (is_multicast_ether_addr(hdr->addr1) && 2194 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2195 rx->sdata->u.vlan.sta) || 2196 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 2197 rx->sdata->u.mgd.use_4addr))) 2198 return RX_DROP_UNUSABLE; 2199 2200 skb->dev = dev; 2201 __skb_queue_head_init(&frame_list); 2202 2203 if (skb_linearize(skb)) 2204 return RX_DROP_UNUSABLE; 2205 2206 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2207 rx->sdata->vif.type, 2208 rx->local->hw.extra_tx_headroom, true); 2209 2210 while (!skb_queue_empty(&frame_list)) { 2211 rx->skb = __skb_dequeue(&frame_list); 2212 2213 if (!ieee80211_frame_allowed(rx, fc)) { 2214 dev_kfree_skb(rx->skb); 2215 continue; 2216 } 2217 2218 ieee80211_deliver_skb(rx); 2219 } 2220 2221 return RX_QUEUED; 2222 } 2223 2224 #ifdef CONFIG_MAC80211_MESH 2225 static ieee80211_rx_result 2226 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2227 { 2228 struct ieee80211_hdr *fwd_hdr, *hdr; 2229 struct ieee80211_tx_info *info; 2230 struct ieee80211s_hdr *mesh_hdr; 2231 struct sk_buff *skb = rx->skb, *fwd_skb; 2232 struct ieee80211_local *local = rx->local; 2233 struct ieee80211_sub_if_data *sdata = rx->sdata; 2234 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2235 u16 q, hdrlen; 2236 2237 hdr = (struct ieee80211_hdr *) skb->data; 2238 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2239 2240 /* make sure fixed part of mesh header is there, also checks skb len */ 2241 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2242 return RX_DROP_MONITOR; 2243 2244 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2245 2246 /* make sure full mesh header is there, also checks skb len */ 2247 if (!pskb_may_pull(rx->skb, 2248 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2249 return RX_DROP_MONITOR; 2250 2251 /* reload pointers */ 2252 hdr = (struct ieee80211_hdr *) skb->data; 2253 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2254 2255 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2256 return RX_DROP_MONITOR; 2257 2258 /* frame is in RMC, don't forward */ 2259 if (ieee80211_is_data(hdr->frame_control) && 2260 is_multicast_ether_addr(hdr->addr1) && 2261 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2262 return RX_DROP_MONITOR; 2263 2264 if (!ieee80211_is_data(hdr->frame_control)) 2265 return RX_CONTINUE; 2266 2267 if (!mesh_hdr->ttl) 2268 return RX_DROP_MONITOR; 2269 2270 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2271 struct mesh_path *mppath; 2272 char *proxied_addr; 2273 char *mpp_addr; 2274 
2275 if (is_multicast_ether_addr(hdr->addr1)) { 2276 mpp_addr = hdr->addr3; 2277 proxied_addr = mesh_hdr->eaddr1; 2278 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2279 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2280 mpp_addr = hdr->addr4; 2281 proxied_addr = mesh_hdr->eaddr2; 2282 } else { 2283 return RX_DROP_MONITOR; 2284 } 2285 2286 rcu_read_lock(); 2287 mppath = mpp_path_lookup(sdata, proxied_addr); 2288 if (!mppath) { 2289 mpp_path_add(sdata, proxied_addr, mpp_addr); 2290 } else { 2291 spin_lock_bh(&mppath->state_lock); 2292 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2293 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2294 spin_unlock_bh(&mppath->state_lock); 2295 } 2296 rcu_read_unlock(); 2297 } 2298 2299 /* Frame has reached destination. Don't forward */ 2300 if (!is_multicast_ether_addr(hdr->addr1) && 2301 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2302 return RX_CONTINUE; 2303 2304 q = ieee80211_select_queue_80211(sdata, skb, hdr); 2305 if (ieee80211_queue_stopped(&local->hw, q)) { 2306 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2307 return RX_DROP_MONITOR; 2308 } 2309 skb_set_queue_mapping(skb, q); 2310 2311 if (!--mesh_hdr->ttl) { 2312 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2313 goto out; 2314 } 2315 2316 if (!ifmsh->mshcfg.dot11MeshForwarding) 2317 goto out; 2318 2319 fwd_skb = skb_copy(skb, GFP_ATOMIC); 2320 if (!fwd_skb) { 2321 net_info_ratelimited("%s: failed to clone mesh frame\n", 2322 sdata->name); 2323 goto out; 2324 } 2325 2326 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2327 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2328 info = IEEE80211_SKB_CB(fwd_skb); 2329 memset(info, 0, sizeof(*info)); 2330 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2331 info->control.vif = &rx->sdata->vif; 2332 info->control.jiffies = jiffies; 2333 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2334 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2335 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2336 /* update power mode indication when forwarding */ 2337 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2338 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2339 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2340 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2341 } else { 2342 /* unable to resolve next hop */ 2343 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2344 fwd_hdr->addr3, 0, 2345 WLAN_REASON_MESH_PATH_NOFORWARD, 2346 fwd_hdr->addr2); 2347 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2348 kfree_skb(fwd_skb); 2349 return RX_DROP_MONITOR; 2350 } 2351 2352 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2353 ieee80211_add_pending_skb(local, fwd_skb); 2354 out: 2355 if (is_multicast_ether_addr(hdr->addr1)) 2356 return RX_CONTINUE; 2357 return RX_DROP_MONITOR; 2358 } 2359 #endif 2360 2361 static ieee80211_rx_result debug_noinline 2362 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2363 { 2364 struct ieee80211_sub_if_data *sdata = rx->sdata; 2365 struct ieee80211_local *local = rx->local; 2366 struct net_device *dev = sdata->dev; 2367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2368 __le16 fc = hdr->frame_control; 2369 bool port_control; 2370 int err; 2371 2372 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2373 return RX_CONTINUE; 2374 2375 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2376 return RX_DROP_MONITOR; 2377 2378 if (rx->sta) { 2379 /* The seqno index has the same property as needed 
2380 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2381 * for non-QoS-data frames. Here we know it's a data 2382 * frame, so count MSDUs. 2383 */ 2384 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2385 } 2386 2387 /* 2388 * Send unexpected-4addr-frame event to hostapd. For older versions, 2389 * also drop the frame to cooked monitor interfaces. 2390 */ 2391 if (ieee80211_has_a4(hdr->frame_control) && 2392 sdata->vif.type == NL80211_IFTYPE_AP) { 2393 if (rx->sta && 2394 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2395 cfg80211_rx_unexpected_4addr_frame( 2396 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2397 return RX_DROP_MONITOR; 2398 } 2399 2400 err = __ieee80211_data_to_8023(rx, &port_control); 2401 if (unlikely(err)) 2402 return RX_DROP_UNUSABLE; 2403 2404 if (!ieee80211_frame_allowed(rx, fc)) 2405 return RX_DROP_MONITOR; 2406 2407 /* directly handle TDLS channel switch requests/responses */ 2408 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2409 cpu_to_be16(ETH_P_TDLS))) { 2410 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2411 2412 if (pskb_may_pull(rx->skb, 2413 offsetof(struct ieee80211_tdls_data, u)) && 2414 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2415 tf->category == WLAN_CATEGORY_TDLS && 2416 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2417 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2418 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2419 schedule_work(&local->tdls_chsw_work); 2420 if (rx->sta) 2421 rx->sta->rx_stats.packets++; 2422 2423 return RX_QUEUED; 2424 } 2425 } 2426 2427 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2428 unlikely(port_control) && sdata->bss) { 2429 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2430 u.ap); 2431 dev = sdata->dev; 2432 rx->sdata = sdata; 2433 } 2434 2435 rx->skb->dev = dev; 2436 2437 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2438 !is_multicast_ether_addr( 2439 ((struct ethhdr *)rx->skb->data)->h_dest) && 2440 (!local->scanning && 2441 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2442 mod_timer(&local->dynamic_ps_timer, jiffies + 2443 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2444 } 2445 2446 ieee80211_deliver_skb(rx); 2447 2448 return RX_QUEUED; 2449 } 2450 2451 static ieee80211_rx_result debug_noinline 2452 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2453 { 2454 struct sk_buff *skb = rx->skb; 2455 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2456 struct tid_ampdu_rx *tid_agg_rx; 2457 u16 start_seq_num; 2458 u16 tid; 2459 2460 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2461 return RX_CONTINUE; 2462 2463 if (ieee80211_is_back_req(bar->frame_control)) { 2464 struct { 2465 __le16 control, start_seq_num; 2466 } __packed bar_data; 2467 struct ieee80211_event event = { 2468 .type = BAR_RX_EVENT, 2469 }; 2470 2471 if (!rx->sta) 2472 return RX_DROP_MONITOR; 2473 2474 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2475 &bar_data, sizeof(bar_data))) 2476 return RX_DROP_MONITOR; 2477 2478 tid = le16_to_cpu(bar_data.control) >> 12; 2479 2480 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2481 if (!tid_agg_rx) 2482 return RX_DROP_MONITOR; 2483 2484 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2485 event.u.ba.tid = tid; 2486 event.u.ba.ssn = start_seq_num; 2487 event.u.ba.sta = &rx->sta->sta; 2488 2489 /* reset session timer */ 2490 if (tid_agg_rx->timeout) 2491 mod_timer(&tid_agg_rx->session_timer, 2492 
TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2493 2494 spin_lock(&tid_agg_rx->reorder_lock); 2495 /* release stored frames up to start of BAR */ 2496 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2497 start_seq_num, frames); 2498 spin_unlock(&tid_agg_rx->reorder_lock); 2499 2500 drv_event_callback(rx->local, rx->sdata, &event); 2501 2502 kfree_skb(skb); 2503 return RX_QUEUED; 2504 } 2505 2506 /* 2507 * After this point, we only want management frames, 2508 * so we can drop all remaining control frames to 2509 * cooked monitor interfaces. 2510 */ 2511 return RX_DROP_MONITOR; 2512 } 2513 2514 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2515 struct ieee80211_mgmt *mgmt, 2516 size_t len) 2517 { 2518 struct ieee80211_local *local = sdata->local; 2519 struct sk_buff *skb; 2520 struct ieee80211_mgmt *resp; 2521 2522 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2523 /* Not to own unicast address */ 2524 return; 2525 } 2526 2527 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2528 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2529 /* Not from the current AP or not associated yet. */ 2530 return; 2531 } 2532 2533 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2534 /* Too short SA Query request frame */ 2535 return; 2536 } 2537 2538 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2539 if (skb == NULL) 2540 return; 2541 2542 skb_reserve(skb, local->hw.extra_tx_headroom); 2543 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2544 memset(resp, 0, 24); 2545 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2546 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2547 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2548 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2549 IEEE80211_STYPE_ACTION); 2550 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2551 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2552 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2553 memcpy(resp->u.action.u.sa_query.trans_id, 2554 mgmt->u.action.u.sa_query.trans_id, 2555 WLAN_SA_QUERY_TR_ID_LEN); 2556 2557 ieee80211_tx_skb(sdata, skb); 2558 } 2559 2560 static ieee80211_rx_result debug_noinline 2561 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2562 { 2563 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2564 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2565 2566 /* 2567 * From here on, look only at management frames. 2568 * Data and control frames are already handled, 2569 * and unknown (reserved) frames are useless. 
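 * The 24 byte check below also ensures that the fixed three-address
 * management header is complete before the later handlers use it.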
2570 */ 2571 if (rx->skb->len < 24) 2572 return RX_DROP_MONITOR; 2573 2574 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2575 return RX_DROP_MONITOR; 2576 2577 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2578 ieee80211_is_beacon(mgmt->frame_control) && 2579 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2580 int sig = 0; 2581 2582 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2583 sig = status->signal; 2584 2585 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2586 rx->skb->data, rx->skb->len, 2587 status->freq, sig); 2588 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2589 } 2590 2591 if (ieee80211_drop_unencrypted_mgmt(rx)) 2592 return RX_DROP_UNUSABLE; 2593 2594 return RX_CONTINUE; 2595 } 2596 2597 static ieee80211_rx_result debug_noinline 2598 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2599 { 2600 struct ieee80211_local *local = rx->local; 2601 struct ieee80211_sub_if_data *sdata = rx->sdata; 2602 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2603 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2604 int len = rx->skb->len; 2605 2606 if (!ieee80211_is_action(mgmt->frame_control)) 2607 return RX_CONTINUE; 2608 2609 /* drop too small frames */ 2610 if (len < IEEE80211_MIN_ACTION_SIZE) 2611 return RX_DROP_UNUSABLE; 2612 2613 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 2614 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 2615 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 2616 return RX_DROP_UNUSABLE; 2617 2618 switch (mgmt->u.action.category) { 2619 case WLAN_CATEGORY_HT: 2620 /* reject HT action frames from stations not supporting HT */ 2621 if (!rx->sta->sta.ht_cap.ht_supported) 2622 goto invalid; 2623 2624 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2625 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2626 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2627 sdata->vif.type != NL80211_IFTYPE_AP && 2628 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2629 break; 2630 2631 /* verify action & smps_control/chanwidth are present */ 2632 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2633 goto invalid; 2634 2635 switch (mgmt->u.action.u.ht_smps.action) { 2636 case WLAN_HT_ACTION_SMPS: { 2637 struct ieee80211_supported_band *sband; 2638 enum ieee80211_smps_mode smps_mode; 2639 2640 /* convert to HT capability */ 2641 switch (mgmt->u.action.u.ht_smps.smps_control) { 2642 case WLAN_HT_SMPS_CONTROL_DISABLED: 2643 smps_mode = IEEE80211_SMPS_OFF; 2644 break; 2645 case WLAN_HT_SMPS_CONTROL_STATIC: 2646 smps_mode = IEEE80211_SMPS_STATIC; 2647 break; 2648 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2649 smps_mode = IEEE80211_SMPS_DYNAMIC; 2650 break; 2651 default: 2652 goto invalid; 2653 } 2654 2655 /* if no change do nothing */ 2656 if (rx->sta->sta.smps_mode == smps_mode) 2657 goto handled; 2658 rx->sta->sta.smps_mode = smps_mode; 2659 2660 sband = rx->local->hw.wiphy->bands[status->band]; 2661 2662 rate_control_rate_update(local, sband, rx->sta, 2663 IEEE80211_RC_SMPS_CHANGED); 2664 goto handled; 2665 } 2666 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 2667 struct ieee80211_supported_band *sband; 2668 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 2669 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 2670 2671 /* If it doesn't support 40 MHz it can't change ... 
*/ 2672 if (!(rx->sta->sta.ht_cap.cap & 2673 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 2674 goto handled; 2675 2676 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 2677 max_bw = IEEE80211_STA_RX_BW_20; 2678 else 2679 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 2680 2681 /* set cur_max_bandwidth and recalc sta bw */ 2682 rx->sta->cur_max_bandwidth = max_bw; 2683 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 2684 2685 if (rx->sta->sta.bandwidth == new_bw) 2686 goto handled; 2687 2688 rx->sta->sta.bandwidth = new_bw; 2689 sband = rx->local->hw.wiphy->bands[status->band]; 2690 2691 rate_control_rate_update(local, sband, rx->sta, 2692 IEEE80211_RC_BW_CHANGED); 2693 goto handled; 2694 } 2695 default: 2696 goto invalid; 2697 } 2698 2699 break; 2700 case WLAN_CATEGORY_PUBLIC: 2701 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2702 goto invalid; 2703 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2704 break; 2705 if (!rx->sta) 2706 break; 2707 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 2708 break; 2709 if (mgmt->u.action.u.ext_chan_switch.action_code != 2710 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 2711 break; 2712 if (len < offsetof(struct ieee80211_mgmt, 2713 u.action.u.ext_chan_switch.variable)) 2714 goto invalid; 2715 goto queue; 2716 case WLAN_CATEGORY_VHT: 2717 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2718 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2719 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2720 sdata->vif.type != NL80211_IFTYPE_AP && 2721 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2722 break; 2723 2724 /* verify action code is present */ 2725 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2726 goto invalid; 2727 2728 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2729 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2730 u8 opmode; 2731 2732 /* verify opmode is present */ 2733 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2734 goto invalid; 2735 2736 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2737 2738 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2739 opmode, status->band); 2740 goto handled; 2741 } 2742 default: 2743 break; 2744 } 2745 break; 2746 case WLAN_CATEGORY_BACK: 2747 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2748 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2749 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2750 sdata->vif.type != NL80211_IFTYPE_AP && 2751 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2752 break; 2753 2754 /* verify action_code is present */ 2755 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2756 break; 2757 2758 switch (mgmt->u.action.u.addba_req.action_code) { 2759 case WLAN_ACTION_ADDBA_REQ: 2760 if (len < (IEEE80211_MIN_ACTION_SIZE + 2761 sizeof(mgmt->u.action.u.addba_req))) 2762 goto invalid; 2763 break; 2764 case WLAN_ACTION_ADDBA_RESP: 2765 if (len < (IEEE80211_MIN_ACTION_SIZE + 2766 sizeof(mgmt->u.action.u.addba_resp))) 2767 goto invalid; 2768 break; 2769 case WLAN_ACTION_DELBA: 2770 if (len < (IEEE80211_MIN_ACTION_SIZE + 2771 sizeof(mgmt->u.action.u.delba))) 2772 goto invalid; 2773 break; 2774 default: 2775 goto invalid; 2776 } 2777 2778 goto queue; 2779 case WLAN_CATEGORY_SPECTRUM_MGMT: 2780 /* verify action_code is present */ 2781 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2782 break; 2783 2784 switch (mgmt->u.action.u.measurement.action_code) { 2785 case WLAN_ACTION_SPCT_MSR_REQ: 2786 if (status->band != IEEE80211_BAND_5GHZ) 2787 break; 2788 2789 if (len < (IEEE80211_MIN_ACTION_SIZE + 2790 sizeof(mgmt->u.action.u.measurement))) 2791 break; 2792 2793 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2794 break; 2795 2796 
ieee80211_process_measurement_req(sdata, mgmt, len); 2797 goto handled; 2798 case WLAN_ACTION_SPCT_CHL_SWITCH: { 2799 u8 *bssid; 2800 if (len < (IEEE80211_MIN_ACTION_SIZE + 2801 sizeof(mgmt->u.action.u.chan_switch))) 2802 break; 2803 2804 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2805 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2806 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 2807 break; 2808 2809 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2810 bssid = sdata->u.mgd.bssid; 2811 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 2812 bssid = sdata->u.ibss.bssid; 2813 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 2814 bssid = mgmt->sa; 2815 else 2816 break; 2817 2818 if (!ether_addr_equal(mgmt->bssid, bssid)) 2819 break; 2820 2821 goto queue; 2822 } 2823 } 2824 break; 2825 case WLAN_CATEGORY_SA_QUERY: 2826 if (len < (IEEE80211_MIN_ACTION_SIZE + 2827 sizeof(mgmt->u.action.u.sa_query))) 2828 break; 2829 2830 switch (mgmt->u.action.u.sa_query.action) { 2831 case WLAN_ACTION_SA_QUERY_REQUEST: 2832 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2833 break; 2834 ieee80211_process_sa_query_req(sdata, mgmt, len); 2835 goto handled; 2836 } 2837 break; 2838 case WLAN_CATEGORY_SELF_PROTECTED: 2839 if (len < (IEEE80211_MIN_ACTION_SIZE + 2840 sizeof(mgmt->u.action.u.self_prot.action_code))) 2841 break; 2842 2843 switch (mgmt->u.action.u.self_prot.action_code) { 2844 case WLAN_SP_MESH_PEERING_OPEN: 2845 case WLAN_SP_MESH_PEERING_CLOSE: 2846 case WLAN_SP_MESH_PEERING_CONFIRM: 2847 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2848 goto invalid; 2849 if (sdata->u.mesh.user_mpm) 2850 /* userspace handles this frame */ 2851 break; 2852 goto queue; 2853 case WLAN_SP_MGK_INFORM: 2854 case WLAN_SP_MGK_ACK: 2855 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2856 goto invalid; 2857 break; 2858 } 2859 break; 2860 case WLAN_CATEGORY_MESH_ACTION: 2861 if (len < (IEEE80211_MIN_ACTION_SIZE + 2862 sizeof(mgmt->u.action.u.mesh_action.action_code))) 2863 break; 2864 2865 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2866 break; 2867 if (mesh_action_is_path_sel(mgmt) && 2868 !mesh_path_sel_is_hwmp(sdata)) 2869 break; 2870 goto queue; 2871 } 2872 2873 return RX_CONTINUE; 2874 2875 invalid: 2876 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2877 /* will return in the next handlers */ 2878 return RX_CONTINUE; 2879 2880 handled: 2881 if (rx->sta) 2882 rx->sta->rx_stats.packets++; 2883 dev_kfree_skb(rx->skb); 2884 return RX_QUEUED; 2885 2886 queue: 2887 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2888 skb_queue_tail(&sdata->skb_queue, rx->skb); 2889 ieee80211_queue_work(&local->hw, &sdata->work); 2890 if (rx->sta) 2891 rx->sta->rx_stats.packets++; 2892 return RX_QUEUED; 2893 } 2894 2895 static ieee80211_rx_result debug_noinline 2896 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2897 { 2898 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2899 int sig = 0; 2900 2901 /* skip known-bad action frames and return them in the next handler */ 2902 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2903 return RX_CONTINUE; 2904 2905 /* 2906 * Getting here means the kernel doesn't know how to handle 2907 * it, but maybe userspace does ... include returned frames 2908 * so userspace can register for those to know whether ones 2909 * it transmitted were processed or returned. 
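 * cfg80211_rx_mgmt() returns true only if at least one userspace
 * listener had registered for this frame type and was sent the frame.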
2910 */ 2911 2912 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2913 sig = status->signal; 2914 2915 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 2916 rx->skb->data, rx->skb->len, 0)) { 2917 if (rx->sta) 2918 rx->sta->rx_stats.packets++; 2919 dev_kfree_skb(rx->skb); 2920 return RX_QUEUED; 2921 } 2922 2923 return RX_CONTINUE; 2924 } 2925 2926 static ieee80211_rx_result debug_noinline 2927 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2928 { 2929 struct ieee80211_local *local = rx->local; 2930 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2931 struct sk_buff *nskb; 2932 struct ieee80211_sub_if_data *sdata = rx->sdata; 2933 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2934 2935 if (!ieee80211_is_action(mgmt->frame_control)) 2936 return RX_CONTINUE; 2937 2938 /* 2939 * For AP mode, hostapd is responsible for handling any action 2940 * frames that we didn't handle, including returning unknown 2941 * ones. For all other modes we will return them to the sender, 2942 * setting the 0x80 bit in the action category, as required by 2943 * 802.11-2012 9.24.4. 2944 * Newer versions of hostapd shall also use the management frame 2945 * registration mechanisms, but older ones still use cooked 2946 * monitor interfaces so push all frames there. 2947 */ 2948 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2949 (sdata->vif.type == NL80211_IFTYPE_AP || 2950 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2951 return RX_DROP_MONITOR; 2952 2953 if (is_multicast_ether_addr(mgmt->da)) 2954 return RX_DROP_MONITOR; 2955 2956 /* do not return rejected action frames */ 2957 if (mgmt->u.action.category & 0x80) 2958 return RX_DROP_UNUSABLE; 2959 2960 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2961 GFP_ATOMIC); 2962 if (nskb) { 2963 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2964 2965 nmgmt->u.action.category |= 0x80; 2966 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2967 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2968 2969 memset(nskb->cb, 0, sizeof(nskb->cb)); 2970 2971 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 2972 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 2973 2974 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 2975 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 2976 IEEE80211_TX_CTL_NO_CCK_RATE; 2977 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 2978 info->hw_queue = 2979 local->hw.offchannel_tx_hw_queue; 2980 } 2981 2982 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 2983 status->band); 2984 } 2985 dev_kfree_skb(rx->skb); 2986 return RX_QUEUED; 2987 } 2988 2989 static ieee80211_rx_result debug_noinline 2990 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2991 { 2992 struct ieee80211_sub_if_data *sdata = rx->sdata; 2993 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2994 __le16 stype; 2995 2996 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2997 2998 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2999 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3000 sdata->vif.type != NL80211_IFTYPE_OCB && 3001 sdata->vif.type != NL80211_IFTYPE_STATION) 3002 return RX_DROP_MONITOR; 3003 3004 switch (stype) { 3005 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3006 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3007 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3008 /* process for all: mesh, mlme, ibss */ 3009 break; 3010 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3011 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3012 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3013 case 
cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3014 if (is_multicast_ether_addr(mgmt->da) && 3015 !is_broadcast_ether_addr(mgmt->da)) 3016 return RX_DROP_MONITOR; 3017 3018 /* process only for station */ 3019 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3020 return RX_DROP_MONITOR; 3021 break; 3022 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3023 /* process only for ibss and mesh */ 3024 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3025 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3026 return RX_DROP_MONITOR; 3027 break; 3028 default: 3029 return RX_DROP_MONITOR; 3030 } 3031 3032 /* queue up frame and kick off work to process it */ 3033 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 3034 skb_queue_tail(&sdata->skb_queue, rx->skb); 3035 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3036 if (rx->sta) 3037 rx->sta->rx_stats.packets++; 3038 3039 return RX_QUEUED; 3040 } 3041 3042 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3043 struct ieee80211_rate *rate) 3044 { 3045 struct ieee80211_sub_if_data *sdata; 3046 struct ieee80211_local *local = rx->local; 3047 struct sk_buff *skb = rx->skb, *skb2; 3048 struct net_device *prev_dev = NULL; 3049 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3050 int needed_headroom; 3051 3052 /* 3053 * If cooked monitor has been processed already, then 3054 * don't do it again. If not, set the flag. 3055 */ 3056 if (rx->flags & IEEE80211_RX_CMNTR) 3057 goto out_free_skb; 3058 rx->flags |= IEEE80211_RX_CMNTR; 3059 3060 /* If there are no cooked monitor interfaces, just free the SKB */ 3061 if (!local->cooked_mntrs) 3062 goto out_free_skb; 3063 3064 /* vendor data is long removed here */ 3065 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3066 /* room for the radiotap header based on driver features */ 3067 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3068 3069 if (skb_headroom(skb) < needed_headroom && 3070 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3071 goto out_free_skb; 3072 3073 /* prepend radiotap information */ 3074 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3075 false); 3076 3077 skb_set_mac_header(skb, 0); 3078 skb->ip_summed = CHECKSUM_UNNECESSARY; 3079 skb->pkt_type = PACKET_OTHERHOST; 3080 skb->protocol = htons(ETH_P_802_2); 3081 3082 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3083 if (!ieee80211_sdata_running(sdata)) 3084 continue; 3085 3086 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3087 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 3088 continue; 3089 3090 if (prev_dev) { 3091 skb2 = skb_clone(skb, GFP_ATOMIC); 3092 if (skb2) { 3093 skb2->dev = prev_dev; 3094 netif_receive_skb(skb2); 3095 } 3096 } 3097 3098 prev_dev = sdata->dev; 3099 ieee80211_rx_stats(sdata->dev, skb->len); 3100 } 3101 3102 if (prev_dev) { 3103 skb->dev = prev_dev; 3104 netif_receive_skb(skb); 3105 return; 3106 } 3107 3108 out_free_skb: 3109 dev_kfree_skb(skb); 3110 } 3111 3112 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3113 ieee80211_rx_result res) 3114 { 3115 switch (res) { 3116 case RX_DROP_MONITOR: 3117 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3118 if (rx->sta) 3119 rx->sta->rx_stats.dropped++; 3120 /* fall through */ 3121 case RX_CONTINUE: { 3122 struct ieee80211_rate *rate = NULL; 3123 struct ieee80211_supported_band *sband; 3124 struct ieee80211_rx_status *status; 3125 3126 status = IEEE80211_SKB_RXCB((rx->skb)); 3127 3128 sband = rx->local->hw.wiphy->bands[status->band]; 3129 if (!(status->flag & 
RX_FLAG_HT) && 3130 !(status->flag & RX_FLAG_VHT)) 3131 rate = &sband->bitrates[status->rate_idx]; 3132 3133 ieee80211_rx_cooked_monitor(rx, rate); 3134 break; 3135 } 3136 case RX_DROP_UNUSABLE: 3137 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3138 if (rx->sta) 3139 rx->sta->rx_stats.dropped++; 3140 dev_kfree_skb(rx->skb); 3141 break; 3142 case RX_QUEUED: 3143 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3144 break; 3145 } 3146 } 3147 3148 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3149 struct sk_buff_head *frames) 3150 { 3151 ieee80211_rx_result res = RX_DROP_MONITOR; 3152 struct sk_buff *skb; 3153 3154 #define CALL_RXH(rxh) \ 3155 do { \ 3156 res = rxh(rx); \ 3157 if (res != RX_CONTINUE) \ 3158 goto rxh_next; \ 3159 } while (0); 3160 3161 /* Lock here to avoid hitting all of the data used in the RX 3162 * path (e.g. key data, station data, ...) concurrently when 3163 * a frame is released from the reorder buffer due to timeout 3164 * from the timer, potentially concurrently with RX from the 3165 * driver. 3166 */ 3167 spin_lock_bh(&rx->local->rx_path_lock); 3168 3169 while ((skb = __skb_dequeue(frames))) { 3170 /* 3171 * all the other fields are valid across frames 3172 * that belong to an aMPDU since they are on the 3173 * same TID from the same station 3174 */ 3175 rx->skb = skb; 3176 3177 CALL_RXH(ieee80211_rx_h_check_more_data) 3178 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 3179 CALL_RXH(ieee80211_rx_h_sta_process) 3180 CALL_RXH(ieee80211_rx_h_decrypt) 3181 CALL_RXH(ieee80211_rx_h_defragment) 3182 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 3183 /* must be after MMIC verify so header is counted in MPDU mic */ 3184 #ifdef CONFIG_MAC80211_MESH 3185 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3186 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3187 #endif 3188 CALL_RXH(ieee80211_rx_h_amsdu) 3189 CALL_RXH(ieee80211_rx_h_data) 3190 3191 /* special treatment -- needs the queue */ 3192 res = ieee80211_rx_h_ctrl(rx, frames); 3193 if (res != RX_CONTINUE) 3194 goto rxh_next; 3195 3196 CALL_RXH(ieee80211_rx_h_mgmt_check) 3197 CALL_RXH(ieee80211_rx_h_action) 3198 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 3199 CALL_RXH(ieee80211_rx_h_action_return) 3200 CALL_RXH(ieee80211_rx_h_mgmt) 3201 3202 rxh_next: 3203 ieee80211_rx_handlers_result(rx, res); 3204 3205 #undef CALL_RXH 3206 } 3207 3208 spin_unlock_bh(&rx->local->rx_path_lock); 3209 } 3210 3211 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3212 { 3213 struct sk_buff_head reorder_release; 3214 ieee80211_rx_result res = RX_DROP_MONITOR; 3215 3216 __skb_queue_head_init(&reorder_release); 3217 3218 #define CALL_RXH(rxh) \ 3219 do { \ 3220 res = rxh(rx); \ 3221 if (res != RX_CONTINUE) \ 3222 goto rxh_next; \ 3223 } while (0); 3224 3225 CALL_RXH(ieee80211_rx_h_check_dup) 3226 CALL_RXH(ieee80211_rx_h_check) 3227 3228 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3229 3230 ieee80211_rx_handlers(rx, &reorder_release); 3231 return; 3232 3233 rxh_next: 3234 ieee80211_rx_handlers_result(rx, res); 3235 3236 #undef CALL_RXH 3237 } 3238 3239 /* 3240 * This function makes calls into the RX path, therefore 3241 * it has to be invoked under RCU read lock. 
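 * (the RX BA session's reorder timer takes the RCU read lock around
 * this call).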
3242 */ 3243 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3244 { 3245 struct sk_buff_head frames; 3246 struct ieee80211_rx_data rx = { 3247 .sta = sta, 3248 .sdata = sta->sdata, 3249 .local = sta->local, 3250 /* This is OK -- must be QoS data frame */ 3251 .security_idx = tid, 3252 .seqno_idx = tid, 3253 .napi = NULL, /* must be NULL to not have races */ 3254 }; 3255 struct tid_ampdu_rx *tid_agg_rx; 3256 3257 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3258 if (!tid_agg_rx) 3259 return; 3260 3261 __skb_queue_head_init(&frames); 3262 3263 spin_lock(&tid_agg_rx->reorder_lock); 3264 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3265 spin_unlock(&tid_agg_rx->reorder_lock); 3266 3267 if (!skb_queue_empty(&frames)) { 3268 struct ieee80211_event event = { 3269 .type = BA_FRAME_TIMEOUT, 3270 .u.ba.tid = tid, 3271 .u.ba.sta = &sta->sta, 3272 }; 3273 drv_event_callback(rx.local, rx.sdata, &event); 3274 } 3275 3276 ieee80211_rx_handlers(&rx, &frames); 3277 } 3278 3279 /* main receive path */ 3280 3281 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3282 { 3283 struct ieee80211_sub_if_data *sdata = rx->sdata; 3284 struct sk_buff *skb = rx->skb; 3285 struct ieee80211_hdr *hdr = (void *)skb->data; 3286 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3287 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3288 int multicast = is_multicast_ether_addr(hdr->addr1); 3289 3290 switch (sdata->vif.type) { 3291 case NL80211_IFTYPE_STATION: 3292 if (!bssid && !sdata->u.mgd.use_4addr) 3293 return false; 3294 if (multicast) 3295 return true; 3296 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3297 case NL80211_IFTYPE_ADHOC: 3298 if (!bssid) 3299 return false; 3300 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3301 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3302 return false; 3303 if (ieee80211_is_beacon(hdr->frame_control)) 3304 return true; 3305 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3306 return false; 3307 if (!multicast && 3308 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3309 return false; 3310 if (!rx->sta) { 3311 int rate_idx; 3312 if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) 3313 rate_idx = 0; /* TODO: HT/VHT rates */ 3314 else 3315 rate_idx = status->rate_idx; 3316 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3317 BIT(rate_idx)); 3318 } 3319 return true; 3320 case NL80211_IFTYPE_OCB: 3321 if (!bssid) 3322 return false; 3323 if (!ieee80211_is_data_present(hdr->frame_control)) 3324 return false; 3325 if (!is_broadcast_ether_addr(bssid)) 3326 return false; 3327 if (!multicast && 3328 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 3329 return false; 3330 if (!rx->sta) { 3331 int rate_idx; 3332 if (status->flag & RX_FLAG_HT) 3333 rate_idx = 0; /* TODO: HT rates */ 3334 else 3335 rate_idx = status->rate_idx; 3336 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3337 BIT(rate_idx)); 3338 } 3339 return true; 3340 case NL80211_IFTYPE_MESH_POINT: 3341 if (multicast) 3342 return true; 3343 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3344 case NL80211_IFTYPE_AP_VLAN: 3345 case NL80211_IFTYPE_AP: 3346 if (!bssid) 3347 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3348 3349 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3350 /* 3351 * Accept public action frames even when the 3352 * BSSID doesn't match, this is used for P2P 3353 * and location updates. Note that mac80211 3354 * itself never looks at these frames. 
3355 */ 3356 if (!multicast && 3357 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3358 return false; 3359 if (ieee80211_is_public_action(hdr, skb->len)) 3360 return true; 3361 return ieee80211_is_beacon(hdr->frame_control); 3362 } 3363 3364 if (!ieee80211_has_tods(hdr->frame_control)) { 3365 /* ignore data frames to TDLS-peers */ 3366 if (ieee80211_is_data(hdr->frame_control)) 3367 return false; 3368 /* ignore action frames to TDLS-peers */ 3369 if (ieee80211_is_action(hdr->frame_control) && 3370 !ether_addr_equal(bssid, hdr->addr1)) 3371 return false; 3372 } 3373 return true; 3374 case NL80211_IFTYPE_WDS: 3375 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3376 return false; 3377 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); 3378 case NL80211_IFTYPE_P2P_DEVICE: 3379 return ieee80211_is_public_action(hdr, skb->len) || 3380 ieee80211_is_probe_req(hdr->frame_control) || 3381 ieee80211_is_probe_resp(hdr->frame_control) || 3382 ieee80211_is_beacon(hdr->frame_control); 3383 default: 3384 break; 3385 } 3386 3387 WARN_ON_ONCE(1); 3388 return false; 3389 } 3390 3391 /* 3392 * This function returns whether or not the SKB 3393 * was destined for RX processing or not, which, 3394 * if consume is true, is equivalent to whether 3395 * or not the skb was consumed. 3396 */ 3397 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 3398 struct sk_buff *skb, bool consume) 3399 { 3400 struct ieee80211_local *local = rx->local; 3401 struct ieee80211_sub_if_data *sdata = rx->sdata; 3402 3403 rx->skb = skb; 3404 3405 if (!ieee80211_accept_frame(rx)) 3406 return false; 3407 3408 if (!consume) { 3409 skb = skb_copy(skb, GFP_ATOMIC); 3410 if (!skb) { 3411 if (net_ratelimit()) 3412 wiphy_debug(local->hw.wiphy, 3413 "failed to copy skb for %s\n", 3414 sdata->name); 3415 return true; 3416 } 3417 3418 rx->skb = skb; 3419 } 3420 3421 ieee80211_invoke_rx_handlers(rx); 3422 return true; 3423 } 3424 3425 /* 3426 * This is the actual Rx frames handler. as it belongs to Rx path it must 3427 * be called with rcu_read_lock protection. 
3428 */ 3429 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3430 struct sk_buff *skb, 3431 struct napi_struct *napi) 3432 { 3433 struct ieee80211_local *local = hw_to_local(hw); 3434 struct ieee80211_sub_if_data *sdata; 3435 struct ieee80211_hdr *hdr; 3436 __le16 fc; 3437 struct ieee80211_rx_data rx; 3438 struct ieee80211_sub_if_data *prev; 3439 struct sta_info *sta, *prev_sta; 3440 struct rhash_head *tmp; 3441 int err = 0; 3442 3443 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 3444 memset(&rx, 0, sizeof(rx)); 3445 rx.skb = skb; 3446 rx.local = local; 3447 rx.napi = napi; 3448 3449 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 3450 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 3451 3452 if (ieee80211_is_mgmt(fc)) { 3453 /* drop frame if too short for header */ 3454 if (skb->len < ieee80211_hdrlen(fc)) 3455 err = -ENOBUFS; 3456 else 3457 err = skb_linearize(skb); 3458 } else { 3459 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 3460 } 3461 3462 if (err) { 3463 dev_kfree_skb(skb); 3464 return; 3465 } 3466 3467 hdr = (struct ieee80211_hdr *)skb->data; 3468 ieee80211_parse_qos(&rx); 3469 ieee80211_verify_alignment(&rx); 3470 3471 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 3472 ieee80211_is_beacon(hdr->frame_control))) 3473 ieee80211_scan_rx(local, skb); 3474 3475 if (ieee80211_is_data(fc)) { 3476 const struct bucket_table *tbl; 3477 3478 prev_sta = NULL; 3479 3480 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); 3481 3482 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { 3483 if (!prev_sta) { 3484 prev_sta = sta; 3485 continue; 3486 } 3487 3488 rx.sta = prev_sta; 3489 rx.sdata = prev_sta->sdata; 3490 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3491 3492 prev_sta = sta; 3493 } 3494 3495 if (prev_sta) { 3496 rx.sta = prev_sta; 3497 rx.sdata = prev_sta->sdata; 3498 3499 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3500 return; 3501 goto out; 3502 } 3503 } 3504 3505 prev = NULL; 3506 3507 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3508 if (!ieee80211_sdata_running(sdata)) 3509 continue; 3510 3511 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 3512 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 3513 continue; 3514 3515 /* 3516 * frame is destined for this interface, but if it's 3517 * not also for the previous one we handle that after 3518 * the loop to avoid copying the SKB once too much 3519 */ 3520 3521 if (!prev) { 3522 prev = sdata; 3523 continue; 3524 } 3525 3526 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3527 rx.sdata = prev; 3528 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3529 3530 prev = sdata; 3531 } 3532 3533 if (prev) { 3534 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3535 rx.sdata = prev; 3536 3537 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3538 return; 3539 } 3540 3541 out: 3542 dev_kfree_skb(skb); 3543 } 3544 3545 /* 3546 * This is the receive path handler. It is called by a low level driver when an 3547 * 802.11 MPDU is received from the hardware. 
3548 */ 3549 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, 3550 struct napi_struct *napi) 3551 { 3552 struct ieee80211_local *local = hw_to_local(hw); 3553 struct ieee80211_rate *rate = NULL; 3554 struct ieee80211_supported_band *sband; 3555 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3556 3557 WARN_ON_ONCE(softirq_count() == 0); 3558 3559 if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) 3560 goto drop; 3561 3562 sband = local->hw.wiphy->bands[status->band]; 3563 if (WARN_ON(!sband)) 3564 goto drop; 3565 3566 /* 3567 * If we're suspending, it is possible although not too likely 3568 * that we'd be receiving frames after having already partially 3569 * quiesced the stack. We can't process such frames then since 3570 * that might, for example, cause stations to be added or other 3571 * driver callbacks be invoked. 3572 */ 3573 if (unlikely(local->quiescing || local->suspended)) 3574 goto drop; 3575 3576 /* We might be during a HW reconfig, prevent Rx for the same reason */ 3577 if (unlikely(local->in_reconfig)) 3578 goto drop; 3579 3580 /* 3581 * The same happens when we're not even started, 3582 * but that's worth a warning. 3583 */ 3584 if (WARN_ON(!local->started)) 3585 goto drop; 3586 3587 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3588 /* 3589 * Validate the rate, unless a PLCP error means that 3590 * we probably can't have a valid rate here anyway. 3591 */ 3592 3593 if (status->flag & RX_FLAG_HT) { 3594 /* 3595 * rate_idx is MCS index, which can be [0-76] 3596 * as documented on: 3597 * 3598 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3599 * 3600 * Anything else would be some sort of driver or 3601 * hardware error. The driver should catch hardware 3602 * errors. 3603 */ 3604 if (WARN(status->rate_idx > 76, 3605 "Rate marked as an HT rate but passed " 3606 "status->rate_idx is not " 3607 "an MCS index [0-76]: %d (0x%02x)\n", 3608 status->rate_idx, 3609 status->rate_idx)) 3610 goto drop; 3611 } else if (status->flag & RX_FLAG_VHT) { 3612 if (WARN_ONCE(status->rate_idx > 9 || 3613 !status->vht_nss || 3614 status->vht_nss > 8, 3615 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 3616 status->rate_idx, status->vht_nss)) 3617 goto drop; 3618 } else { 3619 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 3620 goto drop; 3621 rate = &sband->bitrates[status->rate_idx]; 3622 } 3623 } 3624 3625 status->rx_flags = 0; 3626 3627 /* 3628 * key references and virtual interfaces are protected using RCU 3629 * and this requires that we are in a read-side RCU section during 3630 * receive processing 3631 */ 3632 rcu_read_lock(); 3633 3634 /* 3635 * Frames with failed FCS/PLCP checksum are not returned, 3636 * all other frames are returned without radiotap header 3637 * if it was previously present. 3638 * Also, frames with less than 16 bytes are dropped. 3639 */ 3640 skb = ieee80211_rx_monitor(local, skb, rate); 3641 if (!skb) { 3642 rcu_read_unlock(); 3643 return; 3644 } 3645 3646 ieee80211_tpt_led_trig_rx(local, 3647 ((struct ieee80211_hdr *)skb->data)->frame_control, 3648 skb->len); 3649 __ieee80211_rx_handle_packet(hw, skb, napi); 3650 3651 rcu_read_unlock(); 3652 3653 return; 3654 drop: 3655 kfree_skb(skb); 3656 } 3657 EXPORT_SYMBOL(ieee80211_rx_napi); 3658 3659 /* This is a version of the rx handler that can be called from hard irq 3660 * context. 
Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);
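/*
 * Usage sketch (not part of this file): a driver's RX completion path
 * typically fills in struct ieee80211_rx_status in skb->cb and hands
 * the frame to one of the two entry points above.  ieee80211_rx_napi()
 * must be called with bottom halves disabled (e.g. from the driver's
 * NAPI poll loop), while code running in hard interrupt context has to
 * use ieee80211_rx_irqsafe() instead.  The foo_* names below are
 * hypothetical and only illustrate the calling convention:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = IEEE80211_BAND_2GHZ;
 *	status->freq = foo->channel_mhz;
 *	status->signal = foo->rssi_dbm;
 *	status->rate_idx = foo->rate_idx;
 *	ieee80211_rx_napi(foo->hw, skb, &foo->napi);
 */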