/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb,
					   unsigned int rtap_vendor_space)
{
	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	__pskb_pull(skb, rtap_vendor_space);

	return skb;
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_vendor_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_vendor_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_AMPDU_IS_ZEROLEN))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->flag & RX_FLAG_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->flag & RX_FLAG_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;

		/* vendor presence bitmap */
		len += 4;
		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->flag & RX_FLAG_10MHZ)
			shift = 1;
		else if (status->flag & RX_FLAG_5MHZ)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->flag & RX_FLAG_10MHZ)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->flag & RX_FLAG_5MHZ)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == IEEE80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->flag & RX_FLAG_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->flag & RX_FLAG_40MHZ)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->flag & RX_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
			flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
		if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->flag & RX_FLAG_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->flag & RX_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->vht_flag & RX_VHT_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		if (status->vht_flag & RX_VHT_FLAG_80MHZ)
			*pos++ = 4;
		else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
			*pos++ = 11;
		else if (status->flag & RX_FLAG_40MHZ)
			*pos++ = 1;
		else /* 20 MHz */
			*pos++ = 0;
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->vht_nss;
		pos += 4;
		/* coding field */
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_vendor_space = 0;

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;

		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
	}

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		present_fcs_len = FCS_LEN;

	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors) {
		if (should_drop_frame(origskb, present_fcs_len,
				      rtap_vendor_space)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_vendor_space);
	}

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
	needed_headroom = rt_hdrlen - rtap_vendor_space;

	if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb,
					      rtap_vendor_space);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		ieee80211_rx_stats(sdata->dev, skb->len);
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that which Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ONCE((unsigned long)rx->skb->data & 1,
		  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(skb_list)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

no_frame:
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(
					&tid_agg_rx->reorder_buf[j])) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(
				&tid_agg_rx->reorder_buf[index])) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(
					&tid_agg_rx->reorder_buf[j]))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. MPDUs that are
 * ready to be processed are moved to the given 'frames' queue.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (rx->sta) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
			     hdr->seq_ctrl)) {
			I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
			rx->sta->num_duplicates++;
			return RX_DROP_UNUSABLE;
		} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
		}
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
		struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);

		if (!skb_queue_len(&txqi->queue))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
{
	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta_inf);
	else
		sta_ps_end(sta_inf);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, ac;

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
				ieee80211_sta_ps_deliver_poll_response(rx->sta);
			else
				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
		}

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
		ac = ieee802_1d_to_ac[tid & 7];

		/*
		 * If this AC is not trigger-enabled do nothing.
		 *
		 * NB: This could/should check a separate bitmap of trigger-
		 * enabled queues, but for now we only implement uAPSD w/o
		 * TSPEC changes to the ACs, so they're always the same.
		 */
		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
			return RX_CONTINUE;

		/* if we are in a service period, do nothing */
		if (test_sta_flag(rx->sta, WLAN_STA_SP))
			return RX_CONTINUE;

		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
			ieee80211_sta_ps_deliver_uapsd(rx->sta);
		else
			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for stations already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1)) {
				sta->last_rx_rate_idx = status->rate_idx;
				sta->last_rx_rate_flag = status->flag;
				sta->last_rx_rate_vht_flag = status->vht_flag;
				sta->last_rx_rate_vht_nss = status->vht_nss;
			}
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control)) {
			sta->last_rx_rate_idx = status->rate_idx;
			sta->last_rx_rate_flag = status->flag;
			sta->last_rx_rate_vht_flag = status->vht_flag;
			sta->last_rx_rate_vht_nss = status->vht_nss;
		}
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->last_signal = status->signal;
		ewma_add(&sta->avg_signal, -status->signal);
	}

	if (status->chains) {
		sta->chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->chain_signal_last[i] = signal;
			ewma_add(&sta->chain_signal_avg[i], -signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    /* PM bit is only checked in frames where it isn't reserved,
	     * in AP mode it's reserved in non-bufferable management frames
	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
	     */
	    (!ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;
	const struct ieee80211_cipher_scheme *cs = NULL;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/* start without a key */
	rx->key = NULL;
	fc = hdr->frame_control;

	if (rx->sta) {
		int keyid = rx->sta->ptk_idx;

		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
			cs = rx->sta->cipher_scheme;
			keyid = iwl80211_get_cs_keyid(cs, rx->skb);
			if (unlikely(keyid < 0))
				return RX_DROP_UNUSABLE;
		}
		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
	}

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;

		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (cs) {
			keyidx = iwl80211_get_cs_keyid(cs, rx->skb);

			if (unlikely(keyidx < 0))
				return RX_DROP_UNUSABLE;
		} else {
			if (rx->skb->len < 8 + hdrlen)
				return RX_DROP_UNUSABLE; /* TODO: count this? */
			/*
			 * no need to call ieee80211_wep_get_keyidx,
			 * it verifies a bunch of things we've done already
			 */
			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
			keyidx = keyid >> 6;
		}

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
			return RX_DROP_MONITOR;

		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_256_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		result = ieee80211_crypto_aes_gmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		result = ieee80211_crypto_gcmp_decrypt(rx);
		break;
	default:
		result = ieee80211_crypto_hw_decrypt(rx);
	}

	/* the hdr variable is invalid after the decrypt handlers */

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;

	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list))
		__skb_queue_purge(&entry->skb_list);

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_ctl(fc))
		return RX_CONTINUE;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (is_multicast_ether_addr(hdr->addr1)) {
		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
		goto out_no_led;
	}

	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
		goto out;

	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->seqno_idx, &(rx->skb));
		if (rx->key &&
		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
		    ieee80211_has_protected(fc)) {
			int queue = rx->security_idx;
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       IEEE80211_CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
					  rx->seqno_idx, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
1838 * (IEEE 802.11i, 8.3.3.4.5) */ 1839 if (entry->ccmp) { 1840 int i; 1841 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 1842 int queue; 1843 if (!rx->key || 1844 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 1845 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256)) 1846 return RX_DROP_UNUSABLE; 1847 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 1848 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 1849 pn[i]++; 1850 if (pn[i]) 1851 break; 1852 } 1853 queue = rx->security_idx; 1854 rpn = rx->key->u.ccmp.rx_pn[queue]; 1855 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 1856 return RX_DROP_UNUSABLE; 1857 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 1858 } 1859 1860 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1861 __skb_queue_tail(&entry->skb_list, rx->skb); 1862 entry->last_frag = frag; 1863 entry->extra_len += rx->skb->len; 1864 if (ieee80211_has_morefrags(fc)) { 1865 rx->skb = NULL; 1866 return RX_QUEUED; 1867 } 1868 1869 rx->skb = __skb_dequeue(&entry->skb_list); 1870 if (skb_tailroom(rx->skb) < entry->extra_len) { 1871 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 1872 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1873 GFP_ATOMIC))) { 1874 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1875 __skb_queue_purge(&entry->skb_list); 1876 return RX_DROP_UNUSABLE; 1877 } 1878 } 1879 while ((skb = __skb_dequeue(&entry->skb_list))) { 1880 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1881 dev_kfree_skb(skb); 1882 } 1883 1884 /* Complete frame has been reassembled - process it now */ 1885 status = IEEE80211_SKB_RXCB(rx->skb); 1886 status->rx_flags |= IEEE80211_RX_FRAGMENTED; 1887 1888 out: 1889 ieee80211_led_rx(rx->local); 1890 out_no_led: 1891 if (rx->sta) 1892 rx->sta->rx_packets++; 1893 return RX_CONTINUE; 1894 } 1895 1896 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1897 { 1898 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 1899 return -EACCES; 1900 1901 return 0; 1902 } 1903 1904 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1905 { 1906 struct sk_buff *skb = rx->skb; 1907 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1908 1909 /* 1910 * Pass through unencrypted frames if the hardware has 1911 * decrypted them already. 1912 */ 1913 if (status->flag & RX_FLAG_DECRYPTED) 1914 return 0; 1915 1916 /* Drop unencrypted frames if key is set. */ 1917 if (unlikely(!ieee80211_has_protected(fc) && 1918 !ieee80211_is_nullfunc(fc) && 1919 ieee80211_is_data(fc) && rx->key)) 1920 return -EACCES; 1921 1922 return 0; 1923 } 1924 1925 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1926 { 1927 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1928 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1929 __le16 fc = hdr->frame_control; 1930 1931 /* 1932 * Pass through unencrypted frames if the hardware has 1933 * decrypted them already. 
1934 */ 1935 if (status->flag & RX_FLAG_DECRYPTED) 1936 return 0; 1937 1938 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 1939 if (unlikely(!ieee80211_has_protected(fc) && 1940 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1941 rx->key)) { 1942 if (ieee80211_is_deauth(fc) || 1943 ieee80211_is_disassoc(fc)) 1944 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1945 rx->skb->data, 1946 rx->skb->len); 1947 return -EACCES; 1948 } 1949 /* BIP does not use Protected field, so need to check MMIE */ 1950 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1951 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 1952 if (ieee80211_is_deauth(fc) || 1953 ieee80211_is_disassoc(fc)) 1954 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1955 rx->skb->data, 1956 rx->skb->len); 1957 return -EACCES; 1958 } 1959 /* 1960 * When using MFP, Action frames are not allowed prior to 1961 * having configured keys. 1962 */ 1963 if (unlikely(ieee80211_is_action(fc) && !rx->key && 1964 ieee80211_is_robust_mgmt_frame(rx->skb))) 1965 return -EACCES; 1966 } 1967 1968 return 0; 1969 } 1970 1971 static int 1972 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 1973 { 1974 struct ieee80211_sub_if_data *sdata = rx->sdata; 1975 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1976 bool check_port_control = false; 1977 struct ethhdr *ehdr; 1978 int ret; 1979 1980 *port_control = false; 1981 if (ieee80211_has_a4(hdr->frame_control) && 1982 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1983 return -1; 1984 1985 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1986 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 1987 1988 if (!sdata->u.mgd.use_4addr) 1989 return -1; 1990 else 1991 check_port_control = true; 1992 } 1993 1994 if (is_multicast_ether_addr(hdr->addr1) && 1995 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 1996 return -1; 1997 1998 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1999 if (ret < 0) 2000 return ret; 2001 2002 ehdr = (struct ethhdr *) rx->skb->data; 2003 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2004 *port_control = true; 2005 else if (check_port_control) 2006 return -1; 2007 2008 return 0; 2009 } 2010 2011 /* 2012 * requires that rx->skb is a frame with ethernet header 2013 */ 2014 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2015 { 2016 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2017 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2018 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2019 2020 /* 2021 * Allow EAPOL frames to us/the PAE group address regardless 2022 * of whether the frame was encrypted or not. 
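 * This lets the 802.1X handshake complete even though the controlled
 * port is not yet authorized and no pairwise key has been installed.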
2023 */ 2024 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2025 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2026 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2027 return true; 2028 2029 if (ieee80211_802_1x_port_control(rx) || 2030 ieee80211_drop_unencrypted(rx, fc)) 2031 return false; 2032 2033 return true; 2034 } 2035 2036 /* 2037 * requires that rx->skb is a frame with ethernet header 2038 */ 2039 static void 2040 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2041 { 2042 struct ieee80211_sub_if_data *sdata = rx->sdata; 2043 struct net_device *dev = sdata->dev; 2044 struct sk_buff *skb, *xmit_skb; 2045 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2046 struct sta_info *dsta; 2047 2048 skb = rx->skb; 2049 xmit_skb = NULL; 2050 2051 ieee80211_rx_stats(dev, skb->len); 2052 2053 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2054 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2055 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2056 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2057 if (is_multicast_ether_addr(ehdr->h_dest)) { 2058 /* 2059 * send multicast frames both to higher layers in 2060 * local net stack and back to the wireless medium 2061 */ 2062 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2063 if (!xmit_skb) 2064 net_info_ratelimited("%s: failed to clone multicast frame\n", 2065 dev->name); 2066 } else { 2067 dsta = sta_info_get(sdata, skb->data); 2068 if (dsta) { 2069 /* 2070 * The destination station is associated to 2071 * this AP (in this VLAN), so send the frame 2072 * directly to it and do not pass it to local 2073 * net stack. 2074 */ 2075 xmit_skb = skb; 2076 skb = NULL; 2077 } 2078 } 2079 } 2080 2081 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2082 if (skb) { 2083 /* 'align' will only take the values 0 or 2 here since all 2084 * frames are required to be aligned to 2-byte boundaries 2085 * when being passed to mac80211; the code here works just 2086 * as well if that isn't true, but mac80211 assumes it can 2087 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2088 */ 2089 int align; 2090 2091 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2092 if (align) { 2093 if (WARN_ON(skb_headroom(skb) < 3)) { 2094 dev_kfree_skb(skb); 2095 skb = NULL; 2096 } else { 2097 u8 *data = skb->data; 2098 size_t len = skb_headlen(skb); 2099 skb->data -= align; 2100 memmove(skb->data, data, len); 2101 skb_set_tail_pointer(skb, len); 2102 } 2103 } 2104 } 2105 #endif 2106 2107 if (skb) { 2108 /* deliver to local stack */ 2109 skb->protocol = eth_type_trans(skb, dev); 2110 memset(skb->cb, 0, sizeof(skb->cb)); 2111 if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && 2112 rx->local->napi) 2113 napi_gro_receive(rx->local->napi, skb); 2114 else 2115 netif_receive_skb(skb); 2116 } 2117 2118 if (xmit_skb) { 2119 /* 2120 * Send to wireless media and increase priority by 256 to 2121 * keep the received priority instead of reclassifying 2122 * the frame (see cfg80211_classify8021d). 
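 * (cfg80211_classify8021d() treats priorities in the 256..263 range as an
 * explicit 802.1d priority and simply returns priority - 256)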
2123 */ 2124 xmit_skb->priority += 256; 2125 xmit_skb->protocol = htons(ETH_P_802_3); 2126 skb_reset_network_header(xmit_skb); 2127 skb_reset_mac_header(xmit_skb); 2128 dev_queue_xmit(xmit_skb); 2129 } 2130 } 2131 2132 static ieee80211_rx_result debug_noinline 2133 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2134 { 2135 struct net_device *dev = rx->sdata->dev; 2136 struct sk_buff *skb = rx->skb; 2137 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2138 __le16 fc = hdr->frame_control; 2139 struct sk_buff_head frame_list; 2140 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2141 2142 if (unlikely(!ieee80211_is_data(fc))) 2143 return RX_CONTINUE; 2144 2145 if (unlikely(!ieee80211_is_data_present(fc))) 2146 return RX_DROP_MONITOR; 2147 2148 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2149 return RX_CONTINUE; 2150 2151 if (ieee80211_has_a4(hdr->frame_control) && 2152 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2153 !rx->sdata->u.vlan.sta) 2154 return RX_DROP_UNUSABLE; 2155 2156 if (is_multicast_ether_addr(hdr->addr1) && 2157 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2158 rx->sdata->u.vlan.sta) || 2159 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 2160 rx->sdata->u.mgd.use_4addr))) 2161 return RX_DROP_UNUSABLE; 2162 2163 skb->dev = dev; 2164 __skb_queue_head_init(&frame_list); 2165 2166 if (skb_linearize(skb)) 2167 return RX_DROP_UNUSABLE; 2168 2169 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2170 rx->sdata->vif.type, 2171 rx->local->hw.extra_tx_headroom, true); 2172 2173 while (!skb_queue_empty(&frame_list)) { 2174 rx->skb = __skb_dequeue(&frame_list); 2175 2176 if (!ieee80211_frame_allowed(rx, fc)) { 2177 dev_kfree_skb(rx->skb); 2178 continue; 2179 } 2180 2181 ieee80211_deliver_skb(rx); 2182 } 2183 2184 return RX_QUEUED; 2185 } 2186 2187 #ifdef CONFIG_MAC80211_MESH 2188 static ieee80211_rx_result 2189 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2190 { 2191 struct ieee80211_hdr *fwd_hdr, *hdr; 2192 struct ieee80211_tx_info *info; 2193 struct ieee80211s_hdr *mesh_hdr; 2194 struct sk_buff *skb = rx->skb, *fwd_skb; 2195 struct ieee80211_local *local = rx->local; 2196 struct ieee80211_sub_if_data *sdata = rx->sdata; 2197 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2198 u16 q, hdrlen; 2199 2200 hdr = (struct ieee80211_hdr *) skb->data; 2201 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2202 2203 /* make sure fixed part of mesh header is there, also checks skb len */ 2204 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2205 return RX_DROP_MONITOR; 2206 2207 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2208 2209 /* make sure full mesh header is there, also checks skb len */ 2210 if (!pskb_may_pull(rx->skb, 2211 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2212 return RX_DROP_MONITOR; 2213 2214 /* reload pointers */ 2215 hdr = (struct ieee80211_hdr *) skb->data; 2216 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2217 2218 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2219 return RX_DROP_MONITOR; 2220 2221 /* frame is in RMC, don't forward */ 2222 if (ieee80211_is_data(hdr->frame_control) && 2223 is_multicast_ether_addr(hdr->addr1) && 2224 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2225 return RX_DROP_MONITOR; 2226 2227 if (!ieee80211_is_data(hdr->frame_control)) 2228 return RX_CONTINUE; 2229 2230 if (!mesh_hdr->ttl) 2231 return RX_DROP_MONITOR; 2232 2233 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2234 struct mesh_path *mppath; 2235 char *proxied_addr; 2236 char *mpp_addr; 2237 
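/*
 * Address Extension present: learn or refresh the mapping from the
 * proxied (external) address to the mesh proxy (MPP) that introduced
 * it, so later transmissions to that address can be resolved.
 */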
2238 if (is_multicast_ether_addr(hdr->addr1)) { 2239 mpp_addr = hdr->addr3; 2240 proxied_addr = mesh_hdr->eaddr1; 2241 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2242 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2243 mpp_addr = hdr->addr4; 2244 proxied_addr = mesh_hdr->eaddr2; 2245 } else { 2246 return RX_DROP_MONITOR; 2247 } 2248 2249 rcu_read_lock(); 2250 mppath = mpp_path_lookup(sdata, proxied_addr); 2251 if (!mppath) { 2252 mpp_path_add(sdata, proxied_addr, mpp_addr); 2253 } else { 2254 spin_lock_bh(&mppath->state_lock); 2255 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2256 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2257 spin_unlock_bh(&mppath->state_lock); 2258 } 2259 rcu_read_unlock(); 2260 } 2261 2262 /* Frame has reached destination. Don't forward */ 2263 if (!is_multicast_ether_addr(hdr->addr1) && 2264 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2265 return RX_CONTINUE; 2266 2267 q = ieee80211_select_queue_80211(sdata, skb, hdr); 2268 if (ieee80211_queue_stopped(&local->hw, q)) { 2269 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2270 return RX_DROP_MONITOR; 2271 } 2272 skb_set_queue_mapping(skb, q); 2273 2274 if (!--mesh_hdr->ttl) { 2275 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2276 goto out; 2277 } 2278 2279 if (!ifmsh->mshcfg.dot11MeshForwarding) 2280 goto out; 2281 2282 fwd_skb = skb_copy(skb, GFP_ATOMIC); 2283 if (!fwd_skb) { 2284 net_info_ratelimited("%s: failed to clone mesh frame\n", 2285 sdata->name); 2286 goto out; 2287 } 2288 2289 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2290 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2291 info = IEEE80211_SKB_CB(fwd_skb); 2292 memset(info, 0, sizeof(*info)); 2293 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2294 info->control.vif = &rx->sdata->vif; 2295 info->control.jiffies = jiffies; 2296 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2297 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2298 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2299 /* update power mode indication when forwarding */ 2300 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2301 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2302 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2303 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2304 } else { 2305 /* unable to resolve next hop */ 2306 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2307 fwd_hdr->addr3, 0, 2308 WLAN_REASON_MESH_PATH_NOFORWARD, 2309 fwd_hdr->addr2); 2310 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2311 kfree_skb(fwd_skb); 2312 return RX_DROP_MONITOR; 2313 } 2314 2315 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2316 ieee80211_add_pending_skb(local, fwd_skb); 2317 out: 2318 if (is_multicast_ether_addr(hdr->addr1)) 2319 return RX_CONTINUE; 2320 return RX_DROP_MONITOR; 2321 } 2322 #endif 2323 2324 static ieee80211_rx_result debug_noinline 2325 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2326 { 2327 struct ieee80211_sub_if_data *sdata = rx->sdata; 2328 struct ieee80211_local *local = rx->local; 2329 struct net_device *dev = sdata->dev; 2330 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2331 __le16 fc = hdr->frame_control; 2332 bool port_control; 2333 int err; 2334 2335 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2336 return RX_CONTINUE; 2337 2338 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2339 return RX_DROP_MONITOR; 2340 2341 if (rx->sta) { 2342 /* The seqno index has the same property as needed 
2343 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2344 * for non-QoS-data frames. Here we know it's a data 2345 * frame, so count MSDUs. 2346 */ 2347 rx->sta->rx_msdu[rx->seqno_idx]++; 2348 } 2349 2350 /* 2351 * Send unexpected-4addr-frame event to hostapd. For older versions, 2352 * also drop the frame to cooked monitor interfaces. 2353 */ 2354 if (ieee80211_has_a4(hdr->frame_control) && 2355 sdata->vif.type == NL80211_IFTYPE_AP) { 2356 if (rx->sta && 2357 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2358 cfg80211_rx_unexpected_4addr_frame( 2359 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2360 return RX_DROP_MONITOR; 2361 } 2362 2363 err = __ieee80211_data_to_8023(rx, &port_control); 2364 if (unlikely(err)) 2365 return RX_DROP_UNUSABLE; 2366 2367 if (!ieee80211_frame_allowed(rx, fc)) 2368 return RX_DROP_MONITOR; 2369 2370 /* directly handle TDLS channel switch requests/responses */ 2371 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2372 cpu_to_be16(ETH_P_TDLS))) { 2373 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2374 2375 if (pskb_may_pull(rx->skb, 2376 offsetof(struct ieee80211_tdls_data, u)) && 2377 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2378 tf->category == WLAN_CATEGORY_TDLS && 2379 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2380 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2381 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW; 2382 skb_queue_tail(&sdata->skb_queue, rx->skb); 2383 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2384 if (rx->sta) 2385 rx->sta->rx_packets++; 2386 2387 return RX_QUEUED; 2388 } 2389 } 2390 2391 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2392 unlikely(port_control) && sdata->bss) { 2393 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2394 u.ap); 2395 dev = sdata->dev; 2396 rx->sdata = sdata; 2397 } 2398 2399 rx->skb->dev = dev; 2400 2401 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2402 !is_multicast_ether_addr( 2403 ((struct ethhdr *)rx->skb->data)->h_dest) && 2404 (!local->scanning && 2405 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2406 mod_timer(&local->dynamic_ps_timer, jiffies + 2407 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2408 } 2409 2410 ieee80211_deliver_skb(rx); 2411 2412 return RX_QUEUED; 2413 } 2414 2415 static ieee80211_rx_result debug_noinline 2416 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2417 { 2418 struct sk_buff *skb = rx->skb; 2419 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2420 struct tid_ampdu_rx *tid_agg_rx; 2421 u16 start_seq_num; 2422 u16 tid; 2423 2424 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2425 return RX_CONTINUE; 2426 2427 if (ieee80211_is_back_req(bar->frame_control)) { 2428 struct { 2429 __le16 control, start_seq_num; 2430 } __packed bar_data; 2431 struct ieee80211_event event = { 2432 .type = BAR_RX_EVENT, 2433 }; 2434 2435 if (!rx->sta) 2436 return RX_DROP_MONITOR; 2437 2438 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2439 &bar_data, sizeof(bar_data))) 2440 return RX_DROP_MONITOR; 2441 2442 tid = le16_to_cpu(bar_data.control) >> 12; 2443 2444 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2445 if (!tid_agg_rx) 2446 return RX_DROP_MONITOR; 2447 2448 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2449 event.u.ba.tid = tid; 2450 event.u.ba.ssn = start_seq_num; 2451 event.u.ba.sta = &rx->sta->sta; 2452 2453 /* reset session timer */ 2454 if (tid_agg_rx->timeout) 
2455 mod_timer(&tid_agg_rx->session_timer, 2456 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2457 2458 spin_lock(&tid_agg_rx->reorder_lock); 2459 /* release stored frames up to start of BAR */ 2460 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2461 start_seq_num, frames); 2462 spin_unlock(&tid_agg_rx->reorder_lock); 2463 2464 drv_event_callback(rx->local, rx->sdata, &event); 2465 2466 kfree_skb(skb); 2467 return RX_QUEUED; 2468 } 2469 2470 /* 2471 * After this point, we only want management frames, 2472 * so we can drop all remaining control frames to 2473 * cooked monitor interfaces. 2474 */ 2475 return RX_DROP_MONITOR; 2476 } 2477 2478 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2479 struct ieee80211_mgmt *mgmt, 2480 size_t len) 2481 { 2482 struct ieee80211_local *local = sdata->local; 2483 struct sk_buff *skb; 2484 struct ieee80211_mgmt *resp; 2485 2486 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2487 /* Not to own unicast address */ 2488 return; 2489 } 2490 2491 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2492 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2493 /* Not from the current AP or not associated yet. */ 2494 return; 2495 } 2496 2497 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2498 /* Too short SA Query request frame */ 2499 return; 2500 } 2501 2502 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2503 if (skb == NULL) 2504 return; 2505 2506 skb_reserve(skb, local->hw.extra_tx_headroom); 2507 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2508 memset(resp, 0, 24); 2509 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2510 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2511 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2512 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2513 IEEE80211_STYPE_ACTION); 2514 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2515 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2516 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2517 memcpy(resp->u.action.u.sa_query.trans_id, 2518 mgmt->u.action.u.sa_query.trans_id, 2519 WLAN_SA_QUERY_TR_ID_LEN); 2520 2521 ieee80211_tx_skb(sdata, skb); 2522 } 2523 2524 static ieee80211_rx_result debug_noinline 2525 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2526 { 2527 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2528 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2529 2530 /* 2531 * From here on, look only at management frames. 2532 * Data and control frames are already handled, 2533 * and unknown (reserved) frames are useless. 
2534 */ 2535 if (rx->skb->len < 24) 2536 return RX_DROP_MONITOR; 2537 2538 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2539 return RX_DROP_MONITOR; 2540 2541 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2542 ieee80211_is_beacon(mgmt->frame_control) && 2543 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2544 int sig = 0; 2545 2546 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2547 sig = status->signal; 2548 2549 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2550 rx->skb->data, rx->skb->len, 2551 status->freq, sig); 2552 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2553 } 2554 2555 if (ieee80211_drop_unencrypted_mgmt(rx)) 2556 return RX_DROP_UNUSABLE; 2557 2558 return RX_CONTINUE; 2559 } 2560 2561 static ieee80211_rx_result debug_noinline 2562 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2563 { 2564 struct ieee80211_local *local = rx->local; 2565 struct ieee80211_sub_if_data *sdata = rx->sdata; 2566 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2567 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2568 int len = rx->skb->len; 2569 2570 if (!ieee80211_is_action(mgmt->frame_control)) 2571 return RX_CONTINUE; 2572 2573 /* drop too small frames */ 2574 if (len < IEEE80211_MIN_ACTION_SIZE) 2575 return RX_DROP_UNUSABLE; 2576 2577 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 2578 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 2579 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 2580 return RX_DROP_UNUSABLE; 2581 2582 switch (mgmt->u.action.category) { 2583 case WLAN_CATEGORY_HT: 2584 /* reject HT action frames from stations not supporting HT */ 2585 if (!rx->sta->sta.ht_cap.ht_supported) 2586 goto invalid; 2587 2588 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2589 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2590 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2591 sdata->vif.type != NL80211_IFTYPE_AP && 2592 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2593 break; 2594 2595 /* verify action & smps_control/chanwidth are present */ 2596 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2597 goto invalid; 2598 2599 switch (mgmt->u.action.u.ht_smps.action) { 2600 case WLAN_HT_ACTION_SMPS: { 2601 struct ieee80211_supported_band *sband; 2602 enum ieee80211_smps_mode smps_mode; 2603 2604 /* convert to HT capability */ 2605 switch (mgmt->u.action.u.ht_smps.smps_control) { 2606 case WLAN_HT_SMPS_CONTROL_DISABLED: 2607 smps_mode = IEEE80211_SMPS_OFF; 2608 break; 2609 case WLAN_HT_SMPS_CONTROL_STATIC: 2610 smps_mode = IEEE80211_SMPS_STATIC; 2611 break; 2612 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2613 smps_mode = IEEE80211_SMPS_DYNAMIC; 2614 break; 2615 default: 2616 goto invalid; 2617 } 2618 2619 /* if no change do nothing */ 2620 if (rx->sta->sta.smps_mode == smps_mode) 2621 goto handled; 2622 rx->sta->sta.smps_mode = smps_mode; 2623 2624 sband = rx->local->hw.wiphy->bands[status->band]; 2625 2626 rate_control_rate_update(local, sband, rx->sta, 2627 IEEE80211_RC_SMPS_CHANGED); 2628 goto handled; 2629 } 2630 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 2631 struct ieee80211_supported_band *sband; 2632 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 2633 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 2634 2635 /* If it doesn't support 40 MHz it can't change ... 
*/ 2636 if (!(rx->sta->sta.ht_cap.cap & 2637 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 2638 goto handled; 2639 2640 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 2641 max_bw = IEEE80211_STA_RX_BW_20; 2642 else 2643 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 2644 2645 /* set cur_max_bandwidth and recalc sta bw */ 2646 rx->sta->cur_max_bandwidth = max_bw; 2647 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 2648 2649 if (rx->sta->sta.bandwidth == new_bw) 2650 goto handled; 2651 2652 rx->sta->sta.bandwidth = new_bw; 2653 sband = rx->local->hw.wiphy->bands[status->band]; 2654 2655 rate_control_rate_update(local, sband, rx->sta, 2656 IEEE80211_RC_BW_CHANGED); 2657 goto handled; 2658 } 2659 default: 2660 goto invalid; 2661 } 2662 2663 break; 2664 case WLAN_CATEGORY_PUBLIC: 2665 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2666 goto invalid; 2667 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2668 break; 2669 if (!rx->sta) 2670 break; 2671 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 2672 break; 2673 if (mgmt->u.action.u.ext_chan_switch.action_code != 2674 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 2675 break; 2676 if (len < offsetof(struct ieee80211_mgmt, 2677 u.action.u.ext_chan_switch.variable)) 2678 goto invalid; 2679 goto queue; 2680 case WLAN_CATEGORY_VHT: 2681 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2682 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2683 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2684 sdata->vif.type != NL80211_IFTYPE_AP && 2685 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2686 break; 2687 2688 /* verify action code is present */ 2689 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2690 goto invalid; 2691 2692 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2693 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2694 u8 opmode; 2695 2696 /* verify opmode is present */ 2697 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2698 goto invalid; 2699 2700 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2701 2702 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2703 opmode, status->band, 2704 false); 2705 goto handled; 2706 } 2707 default: 2708 break; 2709 } 2710 break; 2711 case WLAN_CATEGORY_BACK: 2712 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2713 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2714 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2715 sdata->vif.type != NL80211_IFTYPE_AP && 2716 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2717 break; 2718 2719 /* verify action_code is present */ 2720 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2721 break; 2722 2723 switch (mgmt->u.action.u.addba_req.action_code) { 2724 case WLAN_ACTION_ADDBA_REQ: 2725 if (len < (IEEE80211_MIN_ACTION_SIZE + 2726 sizeof(mgmt->u.action.u.addba_req))) 2727 goto invalid; 2728 break; 2729 case WLAN_ACTION_ADDBA_RESP: 2730 if (len < (IEEE80211_MIN_ACTION_SIZE + 2731 sizeof(mgmt->u.action.u.addba_resp))) 2732 goto invalid; 2733 break; 2734 case WLAN_ACTION_DELBA: 2735 if (len < (IEEE80211_MIN_ACTION_SIZE + 2736 sizeof(mgmt->u.action.u.delba))) 2737 goto invalid; 2738 break; 2739 default: 2740 goto invalid; 2741 } 2742 2743 goto queue; 2744 case WLAN_CATEGORY_SPECTRUM_MGMT: 2745 /* verify action_code is present */ 2746 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2747 break; 2748 2749 switch (mgmt->u.action.u.measurement.action_code) { 2750 case WLAN_ACTION_SPCT_MSR_REQ: 2751 if (status->band != IEEE80211_BAND_5GHZ) 2752 break; 2753 2754 if (len < (IEEE80211_MIN_ACTION_SIZE + 2755 sizeof(mgmt->u.action.u.measurement))) 2756 break; 2757 2758 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2759 break; 2760 2761 
ieee80211_process_measurement_req(sdata, mgmt, len); 2762 goto handled; 2763 case WLAN_ACTION_SPCT_CHL_SWITCH: { 2764 u8 *bssid; 2765 if (len < (IEEE80211_MIN_ACTION_SIZE + 2766 sizeof(mgmt->u.action.u.chan_switch))) 2767 break; 2768 2769 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2770 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2771 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 2772 break; 2773 2774 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2775 bssid = sdata->u.mgd.bssid; 2776 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 2777 bssid = sdata->u.ibss.bssid; 2778 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 2779 bssid = mgmt->sa; 2780 else 2781 break; 2782 2783 if (!ether_addr_equal(mgmt->bssid, bssid)) 2784 break; 2785 2786 goto queue; 2787 } 2788 } 2789 break; 2790 case WLAN_CATEGORY_SA_QUERY: 2791 if (len < (IEEE80211_MIN_ACTION_SIZE + 2792 sizeof(mgmt->u.action.u.sa_query))) 2793 break; 2794 2795 switch (mgmt->u.action.u.sa_query.action) { 2796 case WLAN_ACTION_SA_QUERY_REQUEST: 2797 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2798 break; 2799 ieee80211_process_sa_query_req(sdata, mgmt, len); 2800 goto handled; 2801 } 2802 break; 2803 case WLAN_CATEGORY_SELF_PROTECTED: 2804 if (len < (IEEE80211_MIN_ACTION_SIZE + 2805 sizeof(mgmt->u.action.u.self_prot.action_code))) 2806 break; 2807 2808 switch (mgmt->u.action.u.self_prot.action_code) { 2809 case WLAN_SP_MESH_PEERING_OPEN: 2810 case WLAN_SP_MESH_PEERING_CLOSE: 2811 case WLAN_SP_MESH_PEERING_CONFIRM: 2812 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2813 goto invalid; 2814 if (sdata->u.mesh.user_mpm) 2815 /* userspace handles this frame */ 2816 break; 2817 goto queue; 2818 case WLAN_SP_MGK_INFORM: 2819 case WLAN_SP_MGK_ACK: 2820 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2821 goto invalid; 2822 break; 2823 } 2824 break; 2825 case WLAN_CATEGORY_MESH_ACTION: 2826 if (len < (IEEE80211_MIN_ACTION_SIZE + 2827 sizeof(mgmt->u.action.u.mesh_action.action_code))) 2828 break; 2829 2830 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2831 break; 2832 if (mesh_action_is_path_sel(mgmt) && 2833 !mesh_path_sel_is_hwmp(sdata)) 2834 break; 2835 goto queue; 2836 } 2837 2838 return RX_CONTINUE; 2839 2840 invalid: 2841 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2842 /* will return in the next handlers */ 2843 return RX_CONTINUE; 2844 2845 handled: 2846 if (rx->sta) 2847 rx->sta->rx_packets++; 2848 dev_kfree_skb(rx->skb); 2849 return RX_QUEUED; 2850 2851 queue: 2852 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2853 skb_queue_tail(&sdata->skb_queue, rx->skb); 2854 ieee80211_queue_work(&local->hw, &sdata->work); 2855 if (rx->sta) 2856 rx->sta->rx_packets++; 2857 return RX_QUEUED; 2858 } 2859 2860 static ieee80211_rx_result debug_noinline 2861 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2862 { 2863 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2864 int sig = 0; 2865 2866 /* skip known-bad action frames and return them in the next handler */ 2867 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2868 return RX_CONTINUE; 2869 2870 /* 2871 * Getting here means the kernel doesn't know how to handle 2872 * it, but maybe userspace does ... include returned frames 2873 * so userspace can register for those to know whether ones 2874 * it transmitted were processed or returned. 
2875 */ 2876 2877 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2878 sig = status->signal; 2879 2880 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 2881 rx->skb->data, rx->skb->len, 0)) { 2882 if (rx->sta) 2883 rx->sta->rx_packets++; 2884 dev_kfree_skb(rx->skb); 2885 return RX_QUEUED; 2886 } 2887 2888 return RX_CONTINUE; 2889 } 2890 2891 static ieee80211_rx_result debug_noinline 2892 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2893 { 2894 struct ieee80211_local *local = rx->local; 2895 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2896 struct sk_buff *nskb; 2897 struct ieee80211_sub_if_data *sdata = rx->sdata; 2898 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2899 2900 if (!ieee80211_is_action(mgmt->frame_control)) 2901 return RX_CONTINUE; 2902 2903 /* 2904 * For AP mode, hostapd is responsible for handling any action 2905 * frames that we didn't handle, including returning unknown 2906 * ones. For all other modes we will return them to the sender, 2907 * setting the 0x80 bit in the action category, as required by 2908 * 802.11-2012 9.24.4. 2909 * Newer versions of hostapd shall also use the management frame 2910 * registration mechanisms, but older ones still use cooked 2911 * monitor interfaces so push all frames there. 2912 */ 2913 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2914 (sdata->vif.type == NL80211_IFTYPE_AP || 2915 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2916 return RX_DROP_MONITOR; 2917 2918 if (is_multicast_ether_addr(mgmt->da)) 2919 return RX_DROP_MONITOR; 2920 2921 /* do not return rejected action frames */ 2922 if (mgmt->u.action.category & 0x80) 2923 return RX_DROP_UNUSABLE; 2924 2925 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2926 GFP_ATOMIC); 2927 if (nskb) { 2928 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2929 2930 nmgmt->u.action.category |= 0x80; 2931 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2932 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2933 2934 memset(nskb->cb, 0, sizeof(nskb->cb)); 2935 2936 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 2937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 2938 2939 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 2940 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 2941 IEEE80211_TX_CTL_NO_CCK_RATE; 2942 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 2943 info->hw_queue = 2944 local->hw.offchannel_tx_hw_queue; 2945 } 2946 2947 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 2948 status->band); 2949 } 2950 dev_kfree_skb(rx->skb); 2951 return RX_QUEUED; 2952 } 2953 2954 static ieee80211_rx_result debug_noinline 2955 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2956 { 2957 struct ieee80211_sub_if_data *sdata = rx->sdata; 2958 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2959 __le16 stype; 2960 2961 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2962 2963 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2964 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2965 sdata->vif.type != NL80211_IFTYPE_OCB && 2966 sdata->vif.type != NL80211_IFTYPE_STATION) 2967 return RX_DROP_MONITOR; 2968 2969 switch (stype) { 2970 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2971 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2972 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2973 /* process for all: mesh, mlme, ibss */ 2974 break; 2975 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 2976 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 2977 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2978 case 
cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2979 if (is_multicast_ether_addr(mgmt->da) && 2980 !is_broadcast_ether_addr(mgmt->da)) 2981 return RX_DROP_MONITOR; 2982 2983 /* process only for station */ 2984 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2985 return RX_DROP_MONITOR; 2986 break; 2987 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2988 /* process only for ibss and mesh */ 2989 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 2990 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 2991 return RX_DROP_MONITOR; 2992 break; 2993 default: 2994 return RX_DROP_MONITOR; 2995 } 2996 2997 /* queue up frame and kick off work to process it */ 2998 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2999 skb_queue_tail(&sdata->skb_queue, rx->skb); 3000 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3001 if (rx->sta) 3002 rx->sta->rx_packets++; 3003 3004 return RX_QUEUED; 3005 } 3006 3007 /* TODO: use IEEE80211_RX_FRAGMENTED */ 3008 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3009 struct ieee80211_rate *rate) 3010 { 3011 struct ieee80211_sub_if_data *sdata; 3012 struct ieee80211_local *local = rx->local; 3013 struct sk_buff *skb = rx->skb, *skb2; 3014 struct net_device *prev_dev = NULL; 3015 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3016 int needed_headroom; 3017 3018 /* 3019 * If cooked monitor has been processed already, then 3020 * don't do it again. If not, set the flag. 3021 */ 3022 if (rx->flags & IEEE80211_RX_CMNTR) 3023 goto out_free_skb; 3024 rx->flags |= IEEE80211_RX_CMNTR; 3025 3026 /* If there are no cooked monitor interfaces, just free the SKB */ 3027 if (!local->cooked_mntrs) 3028 goto out_free_skb; 3029 3030 /* vendor data is long removed here */ 3031 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3032 /* room for the radiotap header based on driver features */ 3033 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3034 3035 if (skb_headroom(skb) < needed_headroom && 3036 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3037 goto out_free_skb; 3038 3039 /* prepend radiotap information */ 3040 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3041 false); 3042 3043 skb_set_mac_header(skb, 0); 3044 skb->ip_summed = CHECKSUM_UNNECESSARY; 3045 skb->pkt_type = PACKET_OTHERHOST; 3046 skb->protocol = htons(ETH_P_802_2); 3047 3048 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3049 if (!ieee80211_sdata_running(sdata)) 3050 continue; 3051 3052 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3053 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 3054 continue; 3055 3056 if (prev_dev) { 3057 skb2 = skb_clone(skb, GFP_ATOMIC); 3058 if (skb2) { 3059 skb2->dev = prev_dev; 3060 netif_receive_skb(skb2); 3061 } 3062 } 3063 3064 prev_dev = sdata->dev; 3065 ieee80211_rx_stats(sdata->dev, skb->len); 3066 } 3067 3068 if (prev_dev) { 3069 skb->dev = prev_dev; 3070 netif_receive_skb(skb); 3071 return; 3072 } 3073 3074 out_free_skb: 3075 dev_kfree_skb(skb); 3076 } 3077 3078 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3079 ieee80211_rx_result res) 3080 { 3081 switch (res) { 3082 case RX_DROP_MONITOR: 3083 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3084 if (rx->sta) 3085 rx->sta->rx_dropped++; 3086 /* fall through */ 3087 case RX_CONTINUE: { 3088 struct ieee80211_rate *rate = NULL; 3089 struct ieee80211_supported_band *sband; 3090 struct ieee80211_rx_status *status; 3091 3092 status = IEEE80211_SKB_RXCB((rx->skb)); 3093 3094 sband = 
rx->local->hw.wiphy->bands[status->band]; 3095 if (!(status->flag & RX_FLAG_HT) && 3096 !(status->flag & RX_FLAG_VHT)) 3097 rate = &sband->bitrates[status->rate_idx]; 3098 3099 ieee80211_rx_cooked_monitor(rx, rate); 3100 break; 3101 } 3102 case RX_DROP_UNUSABLE: 3103 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3104 if (rx->sta) 3105 rx->sta->rx_dropped++; 3106 dev_kfree_skb(rx->skb); 3107 break; 3108 case RX_QUEUED: 3109 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3110 break; 3111 } 3112 } 3113 3114 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3115 struct sk_buff_head *frames) 3116 { 3117 ieee80211_rx_result res = RX_DROP_MONITOR; 3118 struct sk_buff *skb; 3119 3120 #define CALL_RXH(rxh) \ 3121 do { \ 3122 res = rxh(rx); \ 3123 if (res != RX_CONTINUE) \ 3124 goto rxh_next; \ 3125 } while (0); 3126 3127 /* Lock here to avoid hitting all of the data used in the RX 3128 * path (e.g. key data, station data, ...) concurrently when 3129 * a frame is released from the reorder buffer due to timeout 3130 * from the timer, potentially concurrently with RX from the 3131 * driver. 3132 */ 3133 spin_lock_bh(&rx->local->rx_path_lock); 3134 3135 while ((skb = __skb_dequeue(frames))) { 3136 /* 3137 * all the other fields are valid across frames 3138 * that belong to an aMPDU since they are on the 3139 * same TID from the same station 3140 */ 3141 rx->skb = skb; 3142 3143 CALL_RXH(ieee80211_rx_h_check_more_data) 3144 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 3145 CALL_RXH(ieee80211_rx_h_sta_process) 3146 CALL_RXH(ieee80211_rx_h_decrypt) 3147 CALL_RXH(ieee80211_rx_h_defragment) 3148 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 3149 /* must be after MMIC verify so header is counted in MPDU mic */ 3150 #ifdef CONFIG_MAC80211_MESH 3151 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3152 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3153 #endif 3154 CALL_RXH(ieee80211_rx_h_amsdu) 3155 CALL_RXH(ieee80211_rx_h_data) 3156 3157 /* special treatment -- needs the queue */ 3158 res = ieee80211_rx_h_ctrl(rx, frames); 3159 if (res != RX_CONTINUE) 3160 goto rxh_next; 3161 3162 CALL_RXH(ieee80211_rx_h_mgmt_check) 3163 CALL_RXH(ieee80211_rx_h_action) 3164 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 3165 CALL_RXH(ieee80211_rx_h_action_return) 3166 CALL_RXH(ieee80211_rx_h_mgmt) 3167 3168 rxh_next: 3169 ieee80211_rx_handlers_result(rx, res); 3170 3171 #undef CALL_RXH 3172 } 3173 3174 spin_unlock_bh(&rx->local->rx_path_lock); 3175 } 3176 3177 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3178 { 3179 struct sk_buff_head reorder_release; 3180 ieee80211_rx_result res = RX_DROP_MONITOR; 3181 3182 __skb_queue_head_init(&reorder_release); 3183 3184 #define CALL_RXH(rxh) \ 3185 do { \ 3186 res = rxh(rx); \ 3187 if (res != RX_CONTINUE) \ 3188 goto rxh_next; \ 3189 } while (0); 3190 3191 CALL_RXH(ieee80211_rx_h_check_dup) 3192 CALL_RXH(ieee80211_rx_h_check) 3193 3194 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3195 3196 ieee80211_rx_handlers(rx, &reorder_release); 3197 return; 3198 3199 rxh_next: 3200 ieee80211_rx_handlers_result(rx, res); 3201 3202 #undef CALL_RXH 3203 } 3204 3205 /* 3206 * This function makes calls into the RX path, therefore 3207 * it has to be invoked under RCU read lock. 
3208 */ 3209 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3210 { 3211 struct sk_buff_head frames; 3212 struct ieee80211_rx_data rx = { 3213 .sta = sta, 3214 .sdata = sta->sdata, 3215 .local = sta->local, 3216 /* This is OK -- must be QoS data frame */ 3217 .security_idx = tid, 3218 .seqno_idx = tid, 3219 .flags = IEEE80211_RX_REORDER_TIMER, 3220 }; 3221 struct tid_ampdu_rx *tid_agg_rx; 3222 3223 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3224 if (!tid_agg_rx) 3225 return; 3226 3227 __skb_queue_head_init(&frames); 3228 3229 spin_lock(&tid_agg_rx->reorder_lock); 3230 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3231 spin_unlock(&tid_agg_rx->reorder_lock); 3232 3233 if (!skb_queue_empty(&frames)) { 3234 struct ieee80211_event event = { 3235 .type = BA_FRAME_TIMEOUT, 3236 .u.ba.tid = tid, 3237 .u.ba.sta = &sta->sta, 3238 }; 3239 drv_event_callback(rx.local, rx.sdata, &event); 3240 } 3241 3242 ieee80211_rx_handlers(&rx, &frames); 3243 } 3244 3245 /* main receive path */ 3246 3247 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3248 { 3249 struct ieee80211_sub_if_data *sdata = rx->sdata; 3250 struct sk_buff *skb = rx->skb; 3251 struct ieee80211_hdr *hdr = (void *)skb->data; 3252 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3253 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3254 int multicast = is_multicast_ether_addr(hdr->addr1); 3255 3256 switch (sdata->vif.type) { 3257 case NL80211_IFTYPE_STATION: 3258 if (!bssid && !sdata->u.mgd.use_4addr) 3259 return false; 3260 if (multicast) 3261 return true; 3262 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3263 case NL80211_IFTYPE_ADHOC: 3264 if (!bssid) 3265 return false; 3266 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3267 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3268 return false; 3269 if (ieee80211_is_beacon(hdr->frame_control)) 3270 return true; 3271 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3272 return false; 3273 if (!multicast && 3274 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3275 return false; 3276 if (!rx->sta) { 3277 int rate_idx; 3278 if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) 3279 rate_idx = 0; /* TODO: HT/VHT rates */ 3280 else 3281 rate_idx = status->rate_idx; 3282 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3283 BIT(rate_idx)); 3284 } 3285 return true; 3286 case NL80211_IFTYPE_OCB: 3287 if (!bssid) 3288 return false; 3289 if (ieee80211_is_beacon(hdr->frame_control)) 3290 return false; 3291 if (!is_broadcast_ether_addr(bssid)) 3292 return false; 3293 if (!multicast && 3294 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 3295 return false; 3296 if (!rx->sta) { 3297 int rate_idx; 3298 if (status->flag & RX_FLAG_HT) 3299 rate_idx = 0; /* TODO: HT rates */ 3300 else 3301 rate_idx = status->rate_idx; 3302 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3303 BIT(rate_idx)); 3304 } 3305 return true; 3306 case NL80211_IFTYPE_MESH_POINT: 3307 if (multicast) 3308 return true; 3309 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3310 case NL80211_IFTYPE_AP_VLAN: 3311 case NL80211_IFTYPE_AP: 3312 if (!bssid) 3313 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3314 3315 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3316 /* 3317 * Accept public action frames even when the 3318 * BSSID doesn't match, this is used for P2P 3319 * and location updates. Note that mac80211 3320 * itself never looks at these frames. 
3321 */ 3322 if (!multicast && 3323 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3324 return false; 3325 if (ieee80211_is_public_action(hdr, skb->len)) 3326 return true; 3327 return ieee80211_is_beacon(hdr->frame_control); 3328 } 3329 3330 if (!ieee80211_has_tods(hdr->frame_control)) { 3331 /* ignore data frames to TDLS-peers */ 3332 if (ieee80211_is_data(hdr->frame_control)) 3333 return false; 3334 /* ignore action frames to TDLS-peers */ 3335 if (ieee80211_is_action(hdr->frame_control) && 3336 !ether_addr_equal(bssid, hdr->addr1)) 3337 return false; 3338 } 3339 return true; 3340 case NL80211_IFTYPE_WDS: 3341 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3342 return false; 3343 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); 3344 case NL80211_IFTYPE_P2P_DEVICE: 3345 return ieee80211_is_public_action(hdr, skb->len) || 3346 ieee80211_is_probe_req(hdr->frame_control) || 3347 ieee80211_is_probe_resp(hdr->frame_control) || 3348 ieee80211_is_beacon(hdr->frame_control); 3349 default: 3350 break; 3351 } 3352 3353 WARN_ON_ONCE(1); 3354 return false; 3355 } 3356 3357 /* 3358 * This function returns whether the SKB 3359 * was destined for RX processing, which, 3360 * if consume is true, is equivalent to whether 3361 * the skb was consumed. 3362 */ 3363 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 3364 struct sk_buff *skb, bool consume) 3365 { 3366 struct ieee80211_local *local = rx->local; 3367 struct ieee80211_sub_if_data *sdata = rx->sdata; 3368 3369 rx->skb = skb; 3370 3371 if (!ieee80211_accept_frame(rx)) 3372 return false; 3373 3374 if (!consume) { 3375 skb = skb_copy(skb, GFP_ATOMIC); 3376 if (!skb) { 3377 if (net_ratelimit()) 3378 wiphy_debug(local->hw.wiphy, 3379 "failed to copy skb for %s\n", 3380 sdata->name); 3381 return true; 3382 } 3383 3384 rx->skb = skb; 3385 } 3386 3387 ieee80211_invoke_rx_handlers(rx); 3388 return true; 3389 } 3390 3391 /* 3392 * This is the actual Rx frames handler. As it belongs to the Rx path, it must 3393 * be called with rcu_read_lock protection.
3394 */ 3395 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3396 struct sk_buff *skb) 3397 { 3398 struct ieee80211_local *local = hw_to_local(hw); 3399 struct ieee80211_sub_if_data *sdata; 3400 struct ieee80211_hdr *hdr; 3401 __le16 fc; 3402 struct ieee80211_rx_data rx; 3403 struct ieee80211_sub_if_data *prev; 3404 struct sta_info *sta, *prev_sta; 3405 struct rhash_head *tmp; 3406 int err = 0; 3407 3408 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 3409 memset(&rx, 0, sizeof(rx)); 3410 rx.skb = skb; 3411 rx.local = local; 3412 3413 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 3414 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 3415 3416 if (ieee80211_is_mgmt(fc)) { 3417 /* drop frame if too short for header */ 3418 if (skb->len < ieee80211_hdrlen(fc)) 3419 err = -ENOBUFS; 3420 else 3421 err = skb_linearize(skb); 3422 } else { 3423 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 3424 } 3425 3426 if (err) { 3427 dev_kfree_skb(skb); 3428 return; 3429 } 3430 3431 hdr = (struct ieee80211_hdr *)skb->data; 3432 ieee80211_parse_qos(&rx); 3433 ieee80211_verify_alignment(&rx); 3434 3435 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 3436 ieee80211_is_beacon(hdr->frame_control))) 3437 ieee80211_scan_rx(local, skb); 3438 3439 if (ieee80211_is_data(fc)) { 3440 const struct bucket_table *tbl; 3441 3442 prev_sta = NULL; 3443 3444 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); 3445 3446 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { 3447 if (!prev_sta) { 3448 prev_sta = sta; 3449 continue; 3450 } 3451 3452 rx.sta = prev_sta; 3453 rx.sdata = prev_sta->sdata; 3454 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3455 3456 prev_sta = sta; 3457 } 3458 3459 if (prev_sta) { 3460 rx.sta = prev_sta; 3461 rx.sdata = prev_sta->sdata; 3462 3463 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3464 return; 3465 goto out; 3466 } 3467 } 3468 3469 prev = NULL; 3470 3471 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3472 if (!ieee80211_sdata_running(sdata)) 3473 continue; 3474 3475 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 3476 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 3477 continue; 3478 3479 /* 3480 * frame is destined for this interface, but if it's 3481 * not also for the previous one we handle that after 3482 * the loop to avoid copying the SKB once too much 3483 */ 3484 3485 if (!prev) { 3486 prev = sdata; 3487 continue; 3488 } 3489 3490 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3491 rx.sdata = prev; 3492 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3493 3494 prev = sdata; 3495 } 3496 3497 if (prev) { 3498 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3499 rx.sdata = prev; 3500 3501 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3502 return; 3503 } 3504 3505 out: 3506 dev_kfree_skb(skb); 3507 } 3508 3509 /* 3510 * This is the receive path handler. It is called by a low level driver when an 3511 * 802.11 MPDU is received from the hardware. 
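 * It must run in softirq context or with bottom halves disabled (note the
 * softirq_count() check below); from hard interrupt context, drivers should
 * use ieee80211_rx_irqsafe() instead.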
3512 */ 3513 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 3514 { 3515 struct ieee80211_local *local = hw_to_local(hw); 3516 struct ieee80211_rate *rate = NULL; 3517 struct ieee80211_supported_band *sband; 3518 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3519 3520 WARN_ON_ONCE(softirq_count() == 0); 3521 3522 if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) 3523 goto drop; 3524 3525 sband = local->hw.wiphy->bands[status->band]; 3526 if (WARN_ON(!sband)) 3527 goto drop; 3528 3529 /* 3530 * If we're suspending, it is possible although not too likely 3531 * that we'd be receiving frames after having already partially 3532 * quiesced the stack. We can't process such frames then since 3533 * that might, for example, cause stations to be added or other 3534 * driver callbacks be invoked. 3535 */ 3536 if (unlikely(local->quiescing || local->suspended)) 3537 goto drop; 3538 3539 /* We might be during a HW reconfig, prevent Rx for the same reason */ 3540 if (unlikely(local->in_reconfig)) 3541 goto drop; 3542 3543 /* 3544 * The same happens when we're not even started, 3545 * but that's worth a warning. 3546 */ 3547 if (WARN_ON(!local->started)) 3548 goto drop; 3549 3550 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3551 /* 3552 * Validate the rate, unless a PLCP error means that 3553 * we probably can't have a valid rate here anyway. 3554 */ 3555 3556 if (status->flag & RX_FLAG_HT) { 3557 /* 3558 * rate_idx is MCS index, which can be [0-76] 3559 * as documented on: 3560 * 3561 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3562 * 3563 * Anything else would be some sort of driver or 3564 * hardware error. The driver should catch hardware 3565 * errors. 3566 */ 3567 if (WARN(status->rate_idx > 76, 3568 "Rate marked as an HT rate but passed " 3569 "status->rate_idx is not " 3570 "an MCS index [0-76]: %d (0x%02x)\n", 3571 status->rate_idx, 3572 status->rate_idx)) 3573 goto drop; 3574 } else if (status->flag & RX_FLAG_VHT) { 3575 if (WARN_ONCE(status->rate_idx > 9 || 3576 !status->vht_nss || 3577 status->vht_nss > 8, 3578 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 3579 status->rate_idx, status->vht_nss)) 3580 goto drop; 3581 } else { 3582 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 3583 goto drop; 3584 rate = &sband->bitrates[status->rate_idx]; 3585 } 3586 } 3587 3588 status->rx_flags = 0; 3589 3590 /* 3591 * key references and virtual interfaces are protected using RCU 3592 * and this requires that we are in a read-side RCU section during 3593 * receive processing 3594 */ 3595 rcu_read_lock(); 3596 3597 /* 3598 * Frames with failed FCS/PLCP checksum are not returned, 3599 * all other frames are returned without radiotap header 3600 * if it was previously present. 3601 * Also, frames with less than 16 bytes are dropped. 3602 */ 3603 skb = ieee80211_rx_monitor(local, skb, rate); 3604 if (!skb) { 3605 rcu_read_unlock(); 3606 return; 3607 } 3608 3609 ieee80211_tpt_led_trig_rx(local, 3610 ((struct ieee80211_hdr *)skb->data)->frame_control, 3611 skb->len); 3612 __ieee80211_rx_handle_packet(hw, skb); 3613 3614 rcu_read_unlock(); 3615 3616 return; 3617 drop: 3618 kfree_skb(skb); 3619 } 3620 EXPORT_SYMBOL(ieee80211_rx); 3621 3622 /* This is a version of the rx handler that can be called from hard irq 3623 * context. 
Post the skb on the queue and schedule the tasklet */ 3624 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 3625 { 3626 struct ieee80211_local *local = hw_to_local(hw); 3627 3628 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 3629 3630 skb->pkt_type = IEEE80211_RX_MSG; 3631 skb_queue_tail(&local->skb_queue, skb); 3632 tasklet_schedule(&local->tasklet); 3633 } 3634 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 3635
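/*
 * Illustrative sketch (not part of mac80211): how a low-level driver's
 * receive interrupt handler might feed frames into the entry points above.
 * The mac80211 pieces (struct ieee80211_rx_status, IEEE80211_SKB_RXCB(),
 * ieee80211_rx_irqsafe()) are the real API; the driver-side names
 * (struct foo_dev, foo->hw, foo_read_rssi()) are hypothetical placeholders.
 *
 *	static void foo_rx_irq(struct foo_dev *foo, struct sk_buff *skb)
 *	{
 *		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;			// channel 1
 *		status->signal = foo_read_rssi(foo);	// dBm, with SIGNAL_DBM
 *		status->rate_idx = 0;			// lowest legacy bitrate
 *
 *		// Safe from hard-IRQ context: the frame is queued and the
 *		// RX tasklet later calls ieee80211_rx() for it.
 *		ieee80211_rx_irqsafe(foo->hw, skb);
 *	}
 */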