/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb)
{
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			skb = NULL;
		}
	}

	return skb;
}

static inline int should_drop_frame(struct sk_buff *skb,
				    int present_fcs_len)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_AMPDU_IS_ZEROLEN))
		return 1;
	if (unlikely(skb->len < 16 + present_fcs_len))
		return 1;
	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return 1;
	return 0;
}

static int
ieee80211_rx_radiotap_len(struct ieee80211_local *local,
			  struct ieee80211_rx_status *status)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 9;

	if (status->flag & RX_FLAG_MACTIME_MPDU)
		len += 8;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		len += 1;

	if (len & 1) /* padding for RX_FLAGS if necessary */
		len++;

	if (status->flag & RX_FLAG_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		/* padding */
		while (len & 3)
			len++;
		len += 8;
	}

	return len;
}
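/*
 * Worked example (illustrative numbers only): for a non-HT frame with
 * RX_FLAG_MACTIME_MPDU set on hardware that reports IEEE80211_HW_SIGNAL_DBM,
 * the length works out as 8 (radiotap header) + 9 (always-present flags,
 * rate, channel, antenna and RX flags fields) + 8 (TSFT) + 1 (dBm antenna
 * signal) = 26 bytes, which is already even, so no RX_FLAGS padding byte is
 * needed. This matches the layout built by
 * ieee80211_add_rx_radiotap_header() below.
 */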
/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 rx_flags = 0;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len);

	/* radiotap header, set always present flags */
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
	rthdr->it_len = cpu_to_le16(rtap_len);

	pos = (unsigned char *)(rthdr+1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (status->flag & RX_FLAG_MACTIME_MPDU) {
		put_unaligned_le64(status->mactime, pos);
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->flag & RX_FLAG_HT) {
		/*
		 * Without rate information don't add it. For HT frames,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		*pos = rate->bitrate / 5;
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->band == IEEE80211_BAND_5GHZ)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
				   pos);
	else if (status->flag & RX_FLAG_HT)
		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
				   pos);
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
				   pos);
	else if (rate)
		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
				   pos);
	else
		put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	/* IEEE80211_RADIOTAP_ANTENNA */
	*pos = status->antenna;
	pos++;

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		pos++;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->flag & RX_FLAG_HT) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->flag & RX_FLAG_40MHZ)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->flag & RX_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
			flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
		if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	/* room for the radiotap header based on driver features */
	needed_headroom = ieee80211_rx_radiotap_len(local, status);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	/* make sure hdr->frame_control is on the linear part */
	if (!pskb_may_pull(origskb, 2)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors) {
		if (should_drop_frame(origskb, present_fcs_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb);
	}

	if (should_drop_frame(origskb, present_fcs_len)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
					 true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}


static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = NUM_RX_DATA_QUEUES;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = NUM_RX_DATA_QUEUES;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}
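/*
 * Illustrative note: for a QoS data frame carrying TID 5, the code above
 * selects per-TID state (seqno_idx = security_idx = 5) and passes the TID
 * through as skb->priority. Non-QoS data and management frames instead share
 * the extra modulo-4096 counter slot (NUM_RX_DATA_QUEUES), which is why
 * seqno_idx can exceed the number of real TIDs.
 */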
/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, they should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must be two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that added by Atheros hardware between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ONCE((unsigned long)rx->skb->data & 1,
		  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;

	if (skb->len < 24 + sizeof(*mmie) ||
	    !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id != WLAN_EID_MMIE ||
	    mmie->length != sizeof(*mmie) - 2)
		return -1;

	return le16_to_cpu(mmie->key_id);
}


static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}


static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	tid_agg_rx->reorder_buf[index] = NULL;
	status = IEEE80211_SKB_RXCB(skb);
	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
	skb_queue_tail(&local->rx_skb_queue, skb);

 no_frame:
	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
	}
}

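/*
 * Illustrative note on the sequence number helpers above: all arithmetic is
 * done modulo 4096, so comparisons wrap correctly around the end of the
 * sequence space. For example, seq_less(4094, 2) is true because
 * ((4094 - 2) & SEQ_MASK) == 4092, which is larger than SEQ_MODULO / 2,
 * i.e. 4094 is treated as coming "before" 2 once the counter has wrapped.
 */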
/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx)
{
	int index, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
						tid_agg_rx->buf_size;
	if (!tid_agg_rx->reorder_buf[index] &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!tid_agg_rx->reorder_buf[j]) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (tid_agg_rx->reorder_buf[index]) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = seq_sub(tid_agg_rx->head_seq_num,
				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (tid_agg_rx->reorder_buf[j])
				break;
		}

 set_release_timer:

		mod_timer(&tid_agg_rx->reorder_timer,
			  tid_agg_rx->reorder_time[j] + 1 +
			  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

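	/*
	 * Illustrative example of the window handling below (numbers are an
	 * example only): with head_seq_num == 100 and buf_size == 64, an
	 * incoming MPDU with sequence number 170 lies outside the window, so
	 * the head is advanced to seq_inc(seq_sub(170, 64)) == 107 and any
	 * frames buffered for sequence numbers 100..106 are released first.
	 */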
	/*
	 * If the frame sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->reorder_time[index] = jiffies;
	tid_agg_rx->stored_mpdu_num++;
	ieee80211_sta_reorder_release(sdata, tid_agg_rx);

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that are
 * buffered are consumed here; all others are queued for immediate
 * processing.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* not actually part of this BA session */
	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
		return;

 dont_reorder:
	skb_queue_tail(&local->rx_skb_queue, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
			     hdr->seq_ctrl)) {
			if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_UNUSABLE;
		} else
			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	/* start without a key */
	rx->key = NULL;

	if (rx->sta)
		sta_ptk = rcu_dereference(rx->sta->ptk);

	fc = hdr->frame_control;

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
		keyidx = keyid >> 6;

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
			return RX_DROP_MONITOR;

		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	default:
		/*
		 * We can reach here only with HW-only algorithms
		 * but why didn't it decrypt the frame?!
		 */
		return RX_DROP_UNUSABLE;
	}

	/* the hdr variable is invalid after the decrypt handlers */

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void ap_sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	atomic_inc(&sdata->bss->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);
}

static void ap_sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
{
	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		ap_sta_ps_start(sta_inf);
	else
		ap_sta_ps_end(sta_inf);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, ac;

	if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up to
	 * mac80211 from the device since they're handled there.)
	 */
	if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
				ieee80211_sta_ps_deliver_poll_response(rx->sta);
			else
				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
		}

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
		ac = ieee802_1d_to_ac[tid & 7];

		/*
		 * If this AC is not trigger-enabled do nothing.
		 *
		 * NB: This could/should check a separate bitmap of trigger-
		 * enabled queues, but for now we only implement uAPSD w/o
		 * TSPEC changes to the ACs, so they're always the same.
		 */
		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
			return RX_CONTINUE;

		/* if we are in a service period, do nothing */
		if (test_sta_flag(rx->sta, WLAN_STA_SP))
			return RX_CONTINUE;

		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
			ieee80211_sta_ps_deliver_uapsd(rx->sta);
		else
			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases
	 * where other STAs start using different BSSID.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
			sta->last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control)) {
				sta->last_rx_rate_idx = status->rate_idx;
				sta->last_rx_rate_flag = status->flag;
			}
		}
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control)) {
			sta->last_rx_rate_idx = status->rate_idx;
			sta->last_rx_rate_flag = status->flag;
		}
	}

	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->last_signal = status->signal;
		ewma_add(&sta->avg_signal, -status->signal);
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet, send
		 * the event to userspace; for older hostapd, drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;
	int idx;

	idx = sdata->fragment_next;
	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list))
		__skb_queue_purge(&entry->skb_list);

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_ctl(fc))
		return RX_CONTINUE;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

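	/*
	 * Illustrative note on the logic below: the first fragment opens a
	 * reassembly entry and, for CCMP-protected frames, records the packet
	 * number (PN) of that fragment. Each later fragment must then carry
	 * the immediately following PN (previous PN + 1, computed with the
	 * byte-wise increment below), which prevents fragments of different
	 * frames from being spliced together.
	 */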
	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->seqno_idx, &(rx->skb));
		if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
		    ieee80211_has_protected(fc)) {
			int queue = rx->security_idx;
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
					  rx->seqno_idx, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		int queue;
		if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
			return RX_DROP_UNUSABLE;
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		queue = rx->security_idx;
		rpn = rx->key->u.ccmp.rx_pn[queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		rx->skb = NULL;
		return RX_QUEUED;
	}

	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	status = IEEE80211_SKB_RXCB(rx->skb);
	status->rx_flags |= IEEE80211_RX_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}

static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta ||
		     !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_nullfunc(fc) &&
		     ieee80211_is_data(fc) &&
		     (rx->key || rx->sdata->drop_unencrypted)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
		if (unlikely(!ieee80211_has_protected(fc) &&
			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/* BIP does not use Protected field, so need to check MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
		 */
		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
			     ieee80211_is_robust_mgmt_frame(
				     (struct ieee80211_hdr *) rx->skb->data)))
			return -EACCES;
	}

	return 0;
}

static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	bool check_port_control = false;
	struct ethhdr *ehdr;
	int ret;

	*port_control = false;
	if (ieee80211_has_a4(hdr->frame_control) &&
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
		return -1;

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {

		if (!sdata->u.mgd.use_4addr)
			return -1;
		else
			check_port_control = true;
	}

	if (is_multicast_ether_addr(hdr->addr1) &&
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
		return -1;

	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
	if (ret < 0)
		return ret;

	ehdr = (struct ethhdr *) rx->skb->data;
	if (ehdr->h_proto == rx->sdata->control_port_protocol)
		*port_control = true;
	else if (check_port_control)
		return -1;

	return 0;
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
{
	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;

	/*
	 * Allow EAPOL frames to us/the PAE group address regardless
	 * of whether the frame was encrypted or not.
	 */
	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
		return true;

	if (ieee80211_802_1x_port_control(rx) ||
	    ieee80211_drop_unencrypted(rx, fc))
		return false;

	return true;
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct net_device *dev = sdata->dev;
	struct sk_buff *skb, *xmit_skb;
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	skb = rx->skb;
	xmit_skb = NULL;

	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
	    (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb)
				net_info_ratelimited("%s: failed to clone multicast frame\n",
						     dev->name);
		} else {
			dsta = sta_info_get(sdata, skb->data);
			if (dsta) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}

	if (skb) {
		int align __maybe_unused;

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		/*
		 * 'align' will only take the values 0 or 2 here
		 * since all frames are required to be aligned
		 * to 2-byte boundaries when being passed to
		 * mac80211. That also explains the __skb_push()
		 * below.
		 */
		align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
		if (align) {
			if (WARN_ON(skb_headroom(skb) < 3)) {
				dev_kfree_skb(skb);
				skb = NULL;
			} else {
				u8 *data = skb->data;
				size_t len = skb_headlen(skb);
				skb->data -= align;
				memmove(skb->data, data, len);
				skb_set_tail_pointer(skb, len);
			}
		}
#endif

		if (skb) {
			/* deliver to local stack */
			skb->protocol = eth_type_trans(skb, dev);
			memset(skb->cb, 0, sizeof(skb->cb));
			netif_receive_skb(skb);
		}
	}

	if (xmit_skb) {
		/*
		 * Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
1819 */ 1820 xmit_skb->priority += 256; 1821 xmit_skb->protocol = htons(ETH_P_802_3); 1822 skb_reset_network_header(xmit_skb); 1823 skb_reset_mac_header(xmit_skb); 1824 dev_queue_xmit(xmit_skb); 1825 } 1826 } 1827 1828 static ieee80211_rx_result debug_noinline 1829 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1830 { 1831 struct net_device *dev = rx->sdata->dev; 1832 struct sk_buff *skb = rx->skb; 1833 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1834 __le16 fc = hdr->frame_control; 1835 struct sk_buff_head frame_list; 1836 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1837 1838 if (unlikely(!ieee80211_is_data(fc))) 1839 return RX_CONTINUE; 1840 1841 if (unlikely(!ieee80211_is_data_present(fc))) 1842 return RX_DROP_MONITOR; 1843 1844 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 1845 return RX_CONTINUE; 1846 1847 if (ieee80211_has_a4(hdr->frame_control) && 1848 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1849 !rx->sdata->u.vlan.sta) 1850 return RX_DROP_UNUSABLE; 1851 1852 if (is_multicast_ether_addr(hdr->addr1) && 1853 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1854 rx->sdata->u.vlan.sta) || 1855 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1856 rx->sdata->u.mgd.use_4addr))) 1857 return RX_DROP_UNUSABLE; 1858 1859 skb->dev = dev; 1860 __skb_queue_head_init(&frame_list); 1861 1862 if (skb_linearize(skb)) 1863 return RX_DROP_UNUSABLE; 1864 1865 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1866 rx->sdata->vif.type, 1867 rx->local->hw.extra_tx_headroom, true); 1868 1869 while (!skb_queue_empty(&frame_list)) { 1870 rx->skb = __skb_dequeue(&frame_list); 1871 1872 if (!ieee80211_frame_allowed(rx, fc)) { 1873 dev_kfree_skb(rx->skb); 1874 continue; 1875 } 1876 dev->stats.rx_packets++; 1877 dev->stats.rx_bytes += rx->skb->len; 1878 1879 ieee80211_deliver_skb(rx); 1880 } 1881 1882 return RX_QUEUED; 1883 } 1884 1885 #ifdef CONFIG_MAC80211_MESH 1886 static ieee80211_rx_result 1887 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 1888 { 1889 struct ieee80211_hdr *fwd_hdr, *hdr; 1890 struct ieee80211_tx_info *info; 1891 struct ieee80211s_hdr *mesh_hdr; 1892 struct sk_buff *skb = rx->skb, *fwd_skb; 1893 struct ieee80211_local *local = rx->local; 1894 struct ieee80211_sub_if_data *sdata = rx->sdata; 1895 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1896 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1897 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); 1898 u16 q, hdrlen; 1899 1900 hdr = (struct ieee80211_hdr *) skb->data; 1901 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1902 1903 /* make sure fixed part of mesh header is there, also checks skb len */ 1904 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 1905 return RX_DROP_MONITOR; 1906 1907 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1908 1909 /* make sure full mesh header is there, also checks skb len */ 1910 if (!pskb_may_pull(rx->skb, 1911 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 1912 return RX_DROP_MONITOR; 1913 1914 /* reload pointers */ 1915 hdr = (struct ieee80211_hdr *) skb->data; 1916 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1917 1918 /* frame is in RMC, don't forward */ 1919 if (ieee80211_is_data(hdr->frame_control) && 1920 is_multicast_ether_addr(hdr->addr1) && 1921 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) 1922 return RX_DROP_MONITOR; 1923 1924 if (!ieee80211_is_data(hdr->frame_control) || 1925 !(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1926 return RX_CONTINUE; 1927 1928 if 
(!mesh_hdr->ttl) 1929 return RX_DROP_MONITOR; 1930 1931 if (mesh_hdr->flags & MESH_FLAGS_AE) { 1932 struct mesh_path *mppath; 1933 char *proxied_addr; 1934 char *mpp_addr; 1935 1936 if (is_multicast_ether_addr(hdr->addr1)) { 1937 mpp_addr = hdr->addr3; 1938 proxied_addr = mesh_hdr->eaddr1; 1939 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 1940 /* has_a4 already checked in ieee80211_rx_mesh_check */ 1941 mpp_addr = hdr->addr4; 1942 proxied_addr = mesh_hdr->eaddr2; 1943 } else { 1944 return RX_DROP_MONITOR; 1945 } 1946 1947 rcu_read_lock(); 1948 mppath = mpp_path_lookup(proxied_addr, sdata); 1949 if (!mppath) { 1950 mpp_path_add(proxied_addr, mpp_addr, sdata); 1951 } else { 1952 spin_lock_bh(&mppath->state_lock); 1953 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 1954 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1955 spin_unlock_bh(&mppath->state_lock); 1956 } 1957 rcu_read_unlock(); 1958 } 1959 1960 /* Frame has reached destination. Don't forward */ 1961 if (!is_multicast_ether_addr(hdr->addr1) && 1962 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 1963 return RX_CONTINUE; 1964 1965 q = ieee80211_select_queue_80211(sdata, skb, hdr); 1966 if (ieee80211_queue_stopped(&local->hw, q)) { 1967 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 1968 return RX_DROP_MONITOR; 1969 } 1970 skb_set_queue_mapping(skb, q); 1971 1972 if (!--mesh_hdr->ttl) { 1973 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 1974 goto out; 1975 } 1976 1977 if (!ifmsh->mshcfg.dot11MeshForwarding) 1978 goto out; 1979 1980 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1981 if (!fwd_skb) { 1982 net_info_ratelimited("%s: failed to clone mesh frame\n", 1983 sdata->name); 1984 goto out; 1985 } 1986 1987 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1988 info = IEEE80211_SKB_CB(fwd_skb); 1989 memset(info, 0, sizeof(*info)); 1990 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1991 info->control.vif = &rx->sdata->vif; 1992 info->control.jiffies = jiffies; 1993 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 1994 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 1995 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 1996 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) { 1997 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 1998 } else { 1999 /* unable to resolve next hop */ 2000 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, 2001 0, reason, fwd_hdr->addr2, sdata); 2002 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2003 kfree_skb(fwd_skb); 2004 return RX_DROP_MONITOR; 2005 } 2006 2007 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2008 ieee80211_add_pending_skb(local, fwd_skb); 2009 out: 2010 if (is_multicast_ether_addr(hdr->addr1) || 2011 sdata->dev->flags & IFF_PROMISC) 2012 return RX_CONTINUE; 2013 else 2014 return RX_DROP_MONITOR; 2015 } 2016 #endif 2017 2018 static ieee80211_rx_result debug_noinline 2019 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2020 { 2021 struct ieee80211_sub_if_data *sdata = rx->sdata; 2022 struct ieee80211_local *local = rx->local; 2023 struct net_device *dev = sdata->dev; 2024 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2025 __le16 fc = hdr->frame_control; 2026 bool port_control; 2027 int err; 2028 2029 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2030 return RX_CONTINUE; 2031 2032 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2033 return RX_DROP_MONITOR; 2034 2035 /* 2036 * Send unexpected-4addr-frame event to hostapd. 
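 * (cfg80211_rx_unexpected_4addr_frame() raises an nl80211 event that hostapd can use to set up an AP_VLAN interface for the 4-address station.)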
For older versions, 2037 * also drop the frame to cooked monitor interfaces. 2038 */ 2039 if (ieee80211_has_a4(hdr->frame_control) && 2040 sdata->vif.type == NL80211_IFTYPE_AP) { 2041 if (rx->sta && 2042 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2043 cfg80211_rx_unexpected_4addr_frame( 2044 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2045 return RX_DROP_MONITOR; 2046 } 2047 2048 err = __ieee80211_data_to_8023(rx, &port_control); 2049 if (unlikely(err)) 2050 return RX_DROP_UNUSABLE; 2051 2052 if (!ieee80211_frame_allowed(rx, fc)) 2053 return RX_DROP_MONITOR; 2054 2055 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2056 unlikely(port_control) && sdata->bss) { 2057 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2058 u.ap); 2059 dev = sdata->dev; 2060 rx->sdata = sdata; 2061 } 2062 2063 rx->skb->dev = dev; 2064 2065 dev->stats.rx_packets++; 2066 dev->stats.rx_bytes += rx->skb->len; 2067 2068 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2069 !is_multicast_ether_addr( 2070 ((struct ethhdr *)rx->skb->data)->h_dest) && 2071 (!local->scanning && 2072 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2073 mod_timer(&local->dynamic_ps_timer, jiffies + 2074 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2075 } 2076 2077 ieee80211_deliver_skb(rx); 2078 2079 return RX_QUEUED; 2080 } 2081 2082 static ieee80211_rx_result debug_noinline 2083 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 2084 { 2085 struct sk_buff *skb = rx->skb; 2086 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2087 struct tid_ampdu_rx *tid_agg_rx; 2088 u16 start_seq_num; 2089 u16 tid; 2090 2091 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2092 return RX_CONTINUE; 2093 2094 if (ieee80211_is_back_req(bar->frame_control)) { 2095 struct { 2096 __le16 control, start_seq_num; 2097 } __packed bar_data; 2098 2099 if (!rx->sta) 2100 return RX_DROP_MONITOR; 2101 2102 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2103 &bar_data, sizeof(bar_data))) 2104 return RX_DROP_MONITOR; 2105 2106 tid = le16_to_cpu(bar_data.control) >> 12; 2107 2108 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2109 if (!tid_agg_rx) 2110 return RX_DROP_MONITOR; 2111 2112 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2113 2114 /* reset session timer */ 2115 if (tid_agg_rx->timeout) 2116 mod_timer(&tid_agg_rx->session_timer, 2117 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2118 2119 spin_lock(&tid_agg_rx->reorder_lock); 2120 /* release stored frames up to start of BAR */ 2121 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2122 start_seq_num); 2123 spin_unlock(&tid_agg_rx->reorder_lock); 2124 2125 kfree_skb(skb); 2126 return RX_QUEUED; 2127 } 2128 2129 /* 2130 * After this point, we only want management frames, 2131 * so we can drop all remaining control frames to 2132 * cooked monitor interfaces. 2133 */ 2134 return RX_DROP_MONITOR; 2135 } 2136 2137 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2138 struct ieee80211_mgmt *mgmt, 2139 size_t len) 2140 { 2141 struct ieee80211_local *local = sdata->local; 2142 struct sk_buff *skb; 2143 struct ieee80211_mgmt *resp; 2144 2145 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2146 /* Not to own unicast address */ 2147 return; 2148 } 2149 2150 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2151 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2152 /* Not from the current AP or not associated yet. 
*/ 2153 return; 2154 } 2155 2156 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2157 /* Too short SA Query request frame */ 2158 return; 2159 } 2160 2161 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2162 if (skb == NULL) 2163 return; 2164 2165 skb_reserve(skb, local->hw.extra_tx_headroom); 2166 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2167 memset(resp, 0, 24); 2168 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2169 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2170 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2171 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2172 IEEE80211_STYPE_ACTION); 2173 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2174 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2175 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2176 memcpy(resp->u.action.u.sa_query.trans_id, 2177 mgmt->u.action.u.sa_query.trans_id, 2178 WLAN_SA_QUERY_TR_ID_LEN); 2179 2180 ieee80211_tx_skb(sdata, skb); 2181 } 2182 2183 static ieee80211_rx_result debug_noinline 2184 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2185 { 2186 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2187 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2188 2189 /* 2190 * From here on, look only at management frames. 2191 * Data and control frames are already handled, 2192 * and unknown (reserved) frames are useless. 2193 */ 2194 if (rx->skb->len < 24) 2195 return RX_DROP_MONITOR; 2196 2197 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2198 return RX_DROP_MONITOR; 2199 2200 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2201 ieee80211_is_beacon(mgmt->frame_control) && 2202 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2203 int sig = 0; 2204 2205 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2206 sig = status->signal; 2207 2208 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2209 rx->skb->data, rx->skb->len, 2210 status->freq, sig, GFP_ATOMIC); 2211 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2212 } 2213 2214 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2215 return RX_DROP_MONITOR; 2216 2217 if (ieee80211_drop_unencrypted_mgmt(rx)) 2218 return RX_DROP_UNUSABLE; 2219 2220 return RX_CONTINUE; 2221 } 2222 2223 static ieee80211_rx_result debug_noinline 2224 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2225 { 2226 struct ieee80211_local *local = rx->local; 2227 struct ieee80211_sub_if_data *sdata = rx->sdata; 2228 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2230 int len = rx->skb->len; 2231 2232 if (!ieee80211_is_action(mgmt->frame_control)) 2233 return RX_CONTINUE; 2234 2235 /* drop too small frames */ 2236 if (len < IEEE80211_MIN_ACTION_SIZE) 2237 return RX_DROP_UNUSABLE; 2238 2239 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 2240 return RX_DROP_UNUSABLE; 2241 2242 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2243 return RX_DROP_UNUSABLE; 2244 2245 switch (mgmt->u.action.category) { 2246 case WLAN_CATEGORY_HT: 2247 /* reject HT action frames from stations not supporting HT */ 2248 if (!rx->sta->sta.ht_cap.ht_supported) 2249 goto invalid; 2250 2251 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2252 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2253 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2254 sdata->vif.type != NL80211_IFTYPE_AP && 2255 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2256 break; 2257 2258 /* verify action & smps_control are present */ 2259 if (len < 
IEEE80211_MIN_ACTION_SIZE + 2) 2260 goto invalid; 2261 2262 switch (mgmt->u.action.u.ht_smps.action) { 2263 case WLAN_HT_ACTION_SMPS: { 2264 struct ieee80211_supported_band *sband; 2265 u8 smps; 2266 2267 /* convert to HT capability */ 2268 switch (mgmt->u.action.u.ht_smps.smps_control) { 2269 case WLAN_HT_SMPS_CONTROL_DISABLED: 2270 smps = WLAN_HT_CAP_SM_PS_DISABLED; 2271 break; 2272 case WLAN_HT_SMPS_CONTROL_STATIC: 2273 smps = WLAN_HT_CAP_SM_PS_STATIC; 2274 break; 2275 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2276 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 2277 break; 2278 default: 2279 goto invalid; 2280 } 2281 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 2282 2283 /* if no change do nothing */ 2284 if ((rx->sta->sta.ht_cap.cap & 2285 IEEE80211_HT_CAP_SM_PS) == smps) 2286 goto handled; 2287 2288 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS; 2289 rx->sta->sta.ht_cap.cap |= smps; 2290 2291 sband = rx->local->hw.wiphy->bands[status->band]; 2292 2293 rate_control_rate_update(local, sband, rx->sta, 2294 IEEE80211_RC_SMPS_CHANGED); 2295 goto handled; 2296 } 2297 default: 2298 goto invalid; 2299 } 2300 2301 break; 2302 case WLAN_CATEGORY_BACK: 2303 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2304 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2305 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2306 sdata->vif.type != NL80211_IFTYPE_AP && 2307 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2308 break; 2309 2310 /* verify action_code is present */ 2311 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2312 break; 2313 2314 switch (mgmt->u.action.u.addba_req.action_code) { 2315 case WLAN_ACTION_ADDBA_REQ: 2316 if (len < (IEEE80211_MIN_ACTION_SIZE + 2317 sizeof(mgmt->u.action.u.addba_req))) 2318 goto invalid; 2319 break; 2320 case WLAN_ACTION_ADDBA_RESP: 2321 if (len < (IEEE80211_MIN_ACTION_SIZE + 2322 sizeof(mgmt->u.action.u.addba_resp))) 2323 goto invalid; 2324 break; 2325 case WLAN_ACTION_DELBA: 2326 if (len < (IEEE80211_MIN_ACTION_SIZE + 2327 sizeof(mgmt->u.action.u.delba))) 2328 goto invalid; 2329 break; 2330 default: 2331 goto invalid; 2332 } 2333 2334 goto queue; 2335 case WLAN_CATEGORY_SPECTRUM_MGMT: 2336 if (status->band != IEEE80211_BAND_5GHZ) 2337 break; 2338 2339 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2340 break; 2341 2342 /* verify action_code is present */ 2343 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2344 break; 2345 2346 switch (mgmt->u.action.u.measurement.action_code) { 2347 case WLAN_ACTION_SPCT_MSR_REQ: 2348 if (len < (IEEE80211_MIN_ACTION_SIZE + 2349 sizeof(mgmt->u.action.u.measurement))) 2350 break; 2351 ieee80211_process_measurement_req(sdata, mgmt, len); 2352 goto handled; 2353 case WLAN_ACTION_SPCT_CHL_SWITCH: 2354 if (len < (IEEE80211_MIN_ACTION_SIZE + 2355 sizeof(mgmt->u.action.u.chan_switch))) 2356 break; 2357 2358 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2359 break; 2360 2361 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 2362 break; 2363 2364 goto queue; 2365 } 2366 break; 2367 case WLAN_CATEGORY_SA_QUERY: 2368 if (len < (IEEE80211_MIN_ACTION_SIZE + 2369 sizeof(mgmt->u.action.u.sa_query))) 2370 break; 2371 2372 switch (mgmt->u.action.u.sa_query.action) { 2373 case WLAN_ACTION_SA_QUERY_REQUEST: 2374 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2375 break; 2376 ieee80211_process_sa_query_req(sdata, mgmt, len); 2377 goto handled; 2378 } 2379 break; 2380 case WLAN_CATEGORY_SELF_PROTECTED: 2381 if (len < (IEEE80211_MIN_ACTION_SIZE + 2382 sizeof(mgmt->u.action.u.self_prot.action_code))) 2383 break; 2384 2385 switch (mgmt->u.action.u.self_prot.action_code) { 2386 case 
WLAN_SP_MESH_PEERING_OPEN: 2387 case WLAN_SP_MESH_PEERING_CLOSE: 2388 case WLAN_SP_MESH_PEERING_CONFIRM: 2389 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2390 goto invalid; 2391 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) 2392 /* userspace handles this frame */ 2393 break; 2394 goto queue; 2395 case WLAN_SP_MGK_INFORM: 2396 case WLAN_SP_MGK_ACK: 2397 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2398 goto invalid; 2399 break; 2400 } 2401 break; 2402 case WLAN_CATEGORY_MESH_ACTION: 2403 if (len < (IEEE80211_MIN_ACTION_SIZE + 2404 sizeof(mgmt->u.action.u.mesh_action.action_code))) 2405 break; 2406 2407 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2408 break; 2409 if (mesh_action_is_path_sel(mgmt) && 2410 (!mesh_path_sel_is_hwmp(sdata))) 2411 break; 2412 goto queue; 2413 } 2414 2415 return RX_CONTINUE; 2416 2417 invalid: 2418 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2419 /* will return in the next handlers */ 2420 return RX_CONTINUE; 2421 2422 handled: 2423 if (rx->sta) 2424 rx->sta->rx_packets++; 2425 dev_kfree_skb(rx->skb); 2426 return RX_QUEUED; 2427 2428 queue: 2429 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2430 skb_queue_tail(&sdata->skb_queue, rx->skb); 2431 ieee80211_queue_work(&local->hw, &sdata->work); 2432 if (rx->sta) 2433 rx->sta->rx_packets++; 2434 return RX_QUEUED; 2435 } 2436 2437 static ieee80211_rx_result debug_noinline 2438 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2439 { 2440 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2441 int sig = 0; 2442 2443 /* skip known-bad action frames and return them in the next handler */ 2444 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2445 return RX_CONTINUE; 2446 2447 /* 2448 * Getting here means the kernel doesn't know how to handle 2449 * it, but maybe userspace does ... include returned frames 2450 * so userspace can register for those to know whether ones 2451 * it transmitted were processed or returned. 2452 */ 2453 2454 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2455 sig = status->signal; 2456 2457 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 2458 rx->skb->data, rx->skb->len, 2459 GFP_ATOMIC)) { 2460 if (rx->sta) 2461 rx->sta->rx_packets++; 2462 dev_kfree_skb(rx->skb); 2463 return RX_QUEUED; 2464 } 2465 2466 2467 return RX_CONTINUE; 2468 } 2469 2470 static ieee80211_rx_result debug_noinline 2471 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2472 { 2473 struct ieee80211_local *local = rx->local; 2474 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2475 struct sk_buff *nskb; 2476 struct ieee80211_sub_if_data *sdata = rx->sdata; 2477 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2478 2479 if (!ieee80211_is_action(mgmt->frame_control)) 2480 return RX_CONTINUE; 2481 2482 /* 2483 * For AP mode, hostapd is responsible for handling any action 2484 * frames that we didn't handle, including returning unknown 2485 * ones. For all other modes we will return them to the sender, 2486 * setting the 0x80 bit in the action category, as required by 2487 * 802.11-2012 9.24.4. 2488 * Newer versions of hostapd shall also use the management frame 2489 * registration mechanisms, but older ones still use cooked 2490 * monitor interfaces so push all frames there. 
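 * (For those, the RX_DROP_MONITOR result below ends up in ieee80211_rx_cooked_monitor().)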
2491 */ 2492 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2493 (sdata->vif.type == NL80211_IFTYPE_AP || 2494 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2495 return RX_DROP_MONITOR; 2496 2497 if (is_multicast_ether_addr(mgmt->da)) 2498 return RX_DROP_MONITOR; 2499 2500 /* do not return rejected action frames */ 2501 if (mgmt->u.action.category & 0x80) 2502 return RX_DROP_UNUSABLE; 2503 2504 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2505 GFP_ATOMIC); 2506 if (nskb) { 2507 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2508 2509 nmgmt->u.action.category |= 0x80; 2510 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2511 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2512 2513 memset(nskb->cb, 0, sizeof(nskb->cb)); 2514 2515 ieee80211_tx_skb(rx->sdata, nskb); 2516 } 2517 dev_kfree_skb(rx->skb); 2518 return RX_QUEUED; 2519 } 2520 2521 static ieee80211_rx_result debug_noinline 2522 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2523 { 2524 struct ieee80211_sub_if_data *sdata = rx->sdata; 2525 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2526 __le16 stype; 2527 2528 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2529 2530 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2531 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2532 sdata->vif.type != NL80211_IFTYPE_STATION) 2533 return RX_DROP_MONITOR; 2534 2535 switch (stype) { 2536 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2537 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2538 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2539 /* process for all: mesh, mlme, ibss */ 2540 break; 2541 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 2542 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 2543 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2544 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2545 if (is_multicast_ether_addr(mgmt->da) && 2546 !is_broadcast_ether_addr(mgmt->da)) 2547 return RX_DROP_MONITOR; 2548 2549 /* process only for station */ 2550 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2551 return RX_DROP_MONITOR; 2552 break; 2553 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2554 /* process only for ibss */ 2555 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2556 return RX_DROP_MONITOR; 2557 break; 2558 default: 2559 return RX_DROP_MONITOR; 2560 } 2561 2562 /* queue up frame and kick off work to process it */ 2563 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2564 skb_queue_tail(&sdata->skb_queue, rx->skb); 2565 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2566 if (rx->sta) 2567 rx->sta->rx_packets++; 2568 2569 return RX_QUEUED; 2570 } 2571 2572 /* TODO: use IEEE80211_RX_FRAGMENTED */ 2573 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2574 struct ieee80211_rate *rate) 2575 { 2576 struct ieee80211_sub_if_data *sdata; 2577 struct ieee80211_local *local = rx->local; 2578 struct sk_buff *skb = rx->skb, *skb2; 2579 struct net_device *prev_dev = NULL; 2580 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2581 int needed_headroom; 2582 2583 /* 2584 * If cooked monitor has been processed already, then 2585 * don't do it again. If not, set the flag. 
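 * The flag lives in rx->flags and is kept while the same frame is offered to several stations/interfaces, so the cooked-monitor copy is made at most once per frame.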
2586 */ 2587 if (rx->flags & IEEE80211_RX_CMNTR) 2588 goto out_free_skb; 2589 rx->flags |= IEEE80211_RX_CMNTR; 2590 2591 /* If there are no cooked monitor interfaces, just free the SKB */ 2592 if (!local->cooked_mntrs) 2593 goto out_free_skb; 2594 2595 /* room for the radiotap header based on driver features */ 2596 needed_headroom = ieee80211_rx_radiotap_len(local, status); 2597 2598 if (skb_headroom(skb) < needed_headroom && 2599 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 2600 goto out_free_skb; 2601 2602 /* prepend radiotap information */ 2603 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 2604 false); 2605 2606 skb_set_mac_header(skb, 0); 2607 skb->ip_summed = CHECKSUM_UNNECESSARY; 2608 skb->pkt_type = PACKET_OTHERHOST; 2609 skb->protocol = htons(ETH_P_802_2); 2610 2611 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2612 if (!ieee80211_sdata_running(sdata)) 2613 continue; 2614 2615 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2616 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 2617 continue; 2618 2619 if (prev_dev) { 2620 skb2 = skb_clone(skb, GFP_ATOMIC); 2621 if (skb2) { 2622 skb2->dev = prev_dev; 2623 netif_receive_skb(skb2); 2624 } 2625 } 2626 2627 prev_dev = sdata->dev; 2628 sdata->dev->stats.rx_packets++; 2629 sdata->dev->stats.rx_bytes += skb->len; 2630 } 2631 2632 if (prev_dev) { 2633 skb->dev = prev_dev; 2634 netif_receive_skb(skb); 2635 return; 2636 } 2637 2638 out_free_skb: 2639 dev_kfree_skb(skb); 2640 } 2641 2642 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 2643 ieee80211_rx_result res) 2644 { 2645 switch (res) { 2646 case RX_DROP_MONITOR: 2647 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2648 if (rx->sta) 2649 rx->sta->rx_dropped++; 2650 /* fall through */ 2651 case RX_CONTINUE: { 2652 struct ieee80211_rate *rate = NULL; 2653 struct ieee80211_supported_band *sband; 2654 struct ieee80211_rx_status *status; 2655 2656 status = IEEE80211_SKB_RXCB((rx->skb)); 2657 2658 sband = rx->local->hw.wiphy->bands[status->band]; 2659 if (!(status->flag & RX_FLAG_HT)) 2660 rate = &sband->bitrates[status->rate_idx]; 2661 2662 ieee80211_rx_cooked_monitor(rx, rate); 2663 break; 2664 } 2665 case RX_DROP_UNUSABLE: 2666 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2667 if (rx->sta) 2668 rx->sta->rx_dropped++; 2669 dev_kfree_skb(rx->skb); 2670 break; 2671 case RX_QUEUED: 2672 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 2673 break; 2674 } 2675 } 2676 2677 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) 2678 { 2679 ieee80211_rx_result res = RX_DROP_MONITOR; 2680 struct sk_buff *skb; 2681 2682 #define CALL_RXH(rxh) \ 2683 do { \ 2684 res = rxh(rx); \ 2685 if (res != RX_CONTINUE) \ 2686 goto rxh_next; \ 2687 } while (0); 2688 2689 spin_lock(&rx->local->rx_skb_queue.lock); 2690 if (rx->local->running_rx_handler) 2691 goto unlock; 2692 2693 rx->local->running_rx_handler = true; 2694 2695 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) { 2696 spin_unlock(&rx->local->rx_skb_queue.lock); 2697 2698 /* 2699 * all the other fields are valid across frames 2700 * that belong to an aMPDU since they are on the 2701 * same TID from the same station 2702 */ 2703 rx->skb = skb; 2704 2705 CALL_RXH(ieee80211_rx_h_decrypt) 2706 CALL_RXH(ieee80211_rx_h_check_more_data) 2707 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 2708 CALL_RXH(ieee80211_rx_h_sta_process) 2709 CALL_RXH(ieee80211_rx_h_defragment) 2710 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2711 /* must be after MMIC verify so header is 
counted in MPDU mic */ 2712 #ifdef CONFIG_MAC80211_MESH 2713 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 2714 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2715 #endif 2716 CALL_RXH(ieee80211_rx_h_amsdu) 2717 CALL_RXH(ieee80211_rx_h_data) 2718 CALL_RXH(ieee80211_rx_h_ctrl); 2719 CALL_RXH(ieee80211_rx_h_mgmt_check) 2720 CALL_RXH(ieee80211_rx_h_action) 2721 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 2722 CALL_RXH(ieee80211_rx_h_action_return) 2723 CALL_RXH(ieee80211_rx_h_mgmt) 2724 2725 rxh_next: 2726 ieee80211_rx_handlers_result(rx, res); 2727 spin_lock(&rx->local->rx_skb_queue.lock); 2728 #undef CALL_RXH 2729 } 2730 2731 rx->local->running_rx_handler = false; 2732 2733 unlock: 2734 spin_unlock(&rx->local->rx_skb_queue.lock); 2735 } 2736 2737 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 2738 { 2739 ieee80211_rx_result res = RX_DROP_MONITOR; 2740 2741 #define CALL_RXH(rxh) \ 2742 do { \ 2743 res = rxh(rx); \ 2744 if (res != RX_CONTINUE) \ 2745 goto rxh_next; \ 2746 } while (0); 2747 2748 CALL_RXH(ieee80211_rx_h_check) 2749 2750 ieee80211_rx_reorder_ampdu(rx); 2751 2752 ieee80211_rx_handlers(rx); 2753 return; 2754 2755 rxh_next: 2756 ieee80211_rx_handlers_result(rx, res); 2757 2758 #undef CALL_RXH 2759 } 2760 2761 /* 2762 * This function makes calls into the RX path, therefore 2763 * it has to be invoked under RCU read lock. 2764 */ 2765 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 2766 { 2767 struct ieee80211_rx_data rx = { 2768 .sta = sta, 2769 .sdata = sta->sdata, 2770 .local = sta->local, 2771 /* This is OK -- must be QoS data frame */ 2772 .security_idx = tid, 2773 .seqno_idx = tid, 2774 .flags = 0, 2775 }; 2776 struct tid_ampdu_rx *tid_agg_rx; 2777 2778 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 2779 if (!tid_agg_rx) 2780 return; 2781 2782 spin_lock(&tid_agg_rx->reorder_lock); 2783 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx); 2784 spin_unlock(&tid_agg_rx->reorder_lock); 2785 2786 ieee80211_rx_handlers(&rx); 2787 } 2788 2789 /* main receive path */ 2790 2791 static int prepare_for_handlers(struct ieee80211_rx_data *rx, 2792 struct ieee80211_hdr *hdr) 2793 { 2794 struct ieee80211_sub_if_data *sdata = rx->sdata; 2795 struct sk_buff *skb = rx->skb; 2796 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2797 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 2798 int multicast = is_multicast_ether_addr(hdr->addr1); 2799 2800 switch (sdata->vif.type) { 2801 case NL80211_IFTYPE_STATION: 2802 if (!bssid && !sdata->u.mgd.use_4addr) 2803 return 0; 2804 if (!multicast && 2805 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { 2806 if (!(sdata->dev->flags & IFF_PROMISC) || 2807 sdata->u.mgd.use_4addr) 2808 return 0; 2809 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2810 } 2811 break; 2812 case NL80211_IFTYPE_ADHOC: 2813 if (!bssid) 2814 return 0; 2815 if (ieee80211_is_beacon(hdr->frame_control)) { 2816 return 1; 2817 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 2818 return 0; 2819 } else if (!multicast && 2820 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { 2821 if (!(sdata->dev->flags & IFF_PROMISC)) 2822 return 0; 2823 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2824 } else if (!rx->sta) { 2825 int rate_idx; 2826 if (status->flag & RX_FLAG_HT) 2827 rate_idx = 0; /* TODO: HT rates */ 2828 else 2829 rate_idx = status->rate_idx; 2830 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 2831 BIT(rate_idx)); 2832 } 2833 break; 2834 case NL80211_IFTYPE_MESH_POINT: 2835 if (!multicast && 2836 
                    !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
                        if (!(sdata->dev->flags & IFF_PROMISC))
                                return 0;

                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
                break;
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_AP:
                if (!bssid) {
                        if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
                                return 0;
                } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
                        /*
                         * Accept public action frames even when the
                         * BSSID doesn't match, this is used for P2P
                         * and location updates. Note that mac80211
                         * itself never looks at these frames.
                         */
                        if (ieee80211_is_public_action(hdr, skb->len))
                                return 1;
                        if (!ieee80211_is_beacon(hdr->frame_control))
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
                break;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
                        return 0;
                if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
                        return 0;
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                if (!ieee80211_is_public_action(hdr, skb->len) &&
                    !ieee80211_is_probe_req(hdr->frame_control) &&
                    !ieee80211_is_probe_resp(hdr->frame_control) &&
                    !ieee80211_is_beacon(hdr->frame_control))
                        return 0;
                if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                break;
        default:
                /* should never get here */
                WARN_ON_ONCE(1);
                break;
        }

        return 1;
}

/*
 * This function returns whether the SKB was destined
 * for RX processing, which, if consume is true, is
 * equivalent to whether the skb was consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
                                            struct sk_buff *skb, bool consume)
{
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        int prepares;

        rx->skb = skb;
        status->rx_flags |= IEEE80211_RX_RA_MATCH;
        prepares = prepare_for_handlers(rx, hdr);

        if (!prepares)
                return false;

        if (!consume) {
                skb = skb_copy(skb, GFP_ATOMIC);
                if (!skb) {
                        if (net_ratelimit())
                                wiphy_debug(local->hw.wiphy,
                                            "failed to copy skb for %s\n",
                                            sdata->name);
                        return true;
                }

                rx->skb = skb;
        }

        ieee80211_invoke_rx_handlers(rx);
        return true;
}

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it
 * must be called with rcu_read_lock protection.
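 * Its only caller, ieee80211_rx() below, takes rcu_read_lock() around
 * the call.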
2928 */ 2929 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2930 struct sk_buff *skb) 2931 { 2932 struct ieee80211_local *local = hw_to_local(hw); 2933 struct ieee80211_sub_if_data *sdata; 2934 struct ieee80211_hdr *hdr; 2935 __le16 fc; 2936 struct ieee80211_rx_data rx; 2937 struct ieee80211_sub_if_data *prev; 2938 struct sta_info *sta, *tmp, *prev_sta; 2939 int err = 0; 2940 2941 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 2942 memset(&rx, 0, sizeof(rx)); 2943 rx.skb = skb; 2944 rx.local = local; 2945 2946 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2947 local->dot11ReceivedFragmentCount++; 2948 2949 if (ieee80211_is_mgmt(fc)) { 2950 /* drop frame if too short for header */ 2951 if (skb->len < ieee80211_hdrlen(fc)) 2952 err = -ENOBUFS; 2953 else 2954 err = skb_linearize(skb); 2955 } else { 2956 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2957 } 2958 2959 if (err) { 2960 dev_kfree_skb(skb); 2961 return; 2962 } 2963 2964 hdr = (struct ieee80211_hdr *)skb->data; 2965 ieee80211_parse_qos(&rx); 2966 ieee80211_verify_alignment(&rx); 2967 2968 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 2969 ieee80211_is_beacon(hdr->frame_control))) 2970 ieee80211_scan_rx(local, skb); 2971 2972 if (ieee80211_is_data(fc)) { 2973 prev_sta = NULL; 2974 2975 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2976 if (!prev_sta) { 2977 prev_sta = sta; 2978 continue; 2979 } 2980 2981 rx.sta = prev_sta; 2982 rx.sdata = prev_sta->sdata; 2983 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2984 2985 prev_sta = sta; 2986 } 2987 2988 if (prev_sta) { 2989 rx.sta = prev_sta; 2990 rx.sdata = prev_sta->sdata; 2991 2992 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2993 return; 2994 goto out; 2995 } 2996 } 2997 2998 prev = NULL; 2999 3000 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3001 if (!ieee80211_sdata_running(sdata)) 3002 continue; 3003 3004 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 3005 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 3006 continue; 3007 3008 /* 3009 * frame is destined for this interface, but if it's 3010 * not also for the previous one we handle that after 3011 * the loop to avoid copying the SKB once too much 3012 */ 3013 3014 if (!prev) { 3015 prev = sdata; 3016 continue; 3017 } 3018 3019 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3020 rx.sdata = prev; 3021 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3022 3023 prev = sdata; 3024 } 3025 3026 if (prev) { 3027 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3028 rx.sdata = prev; 3029 3030 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3031 return; 3032 } 3033 3034 out: 3035 dev_kfree_skb(skb); 3036 } 3037 3038 /* 3039 * This is the receive path handler. It is called by a low level driver when an 3040 * 802.11 MPDU is received from the hardware. 3041 */ 3042 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 3043 { 3044 struct ieee80211_local *local = hw_to_local(hw); 3045 struct ieee80211_rate *rate = NULL; 3046 struct ieee80211_supported_band *sband; 3047 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3048 3049 WARN_ON_ONCE(softirq_count() == 0); 3050 3051 if (WARN_ON(status->band < 0 || 3052 status->band >= IEEE80211_NUM_BANDS)) 3053 goto drop; 3054 3055 sband = local->hw.wiphy->bands[status->band]; 3056 if (WARN_ON(!sband)) 3057 goto drop; 3058 3059 /* 3060 * If we're suspending, it is possible although not too likely 3061 * that we'd be receiving frames after having already partially 3062 * quiesced the stack. 
We can't process such frames then since 3063 * that might, for example, cause stations to be added or other 3064 * driver callbacks be invoked. 3065 */ 3066 if (unlikely(local->quiescing || local->suspended)) 3067 goto drop; 3068 3069 /* We might be during a HW reconfig, prevent Rx for the same reason */ 3070 if (unlikely(local->in_reconfig)) 3071 goto drop; 3072 3073 /* 3074 * The same happens when we're not even started, 3075 * but that's worth a warning. 3076 */ 3077 if (WARN_ON(!local->started)) 3078 goto drop; 3079 3080 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3081 /* 3082 * Validate the rate, unless a PLCP error means that 3083 * we probably can't have a valid rate here anyway. 3084 */ 3085 3086 if (status->flag & RX_FLAG_HT) { 3087 /* 3088 * rate_idx is MCS index, which can be [0-76] 3089 * as documented on: 3090 * 3091 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3092 * 3093 * Anything else would be some sort of driver or 3094 * hardware error. The driver should catch hardware 3095 * errors. 3096 */ 3097 if (WARN((status->rate_idx < 0 || 3098 status->rate_idx > 76), 3099 "Rate marked as an HT rate but passed " 3100 "status->rate_idx is not " 3101 "an MCS index [0-76]: %d (0x%02x)\n", 3102 status->rate_idx, 3103 status->rate_idx)) 3104 goto drop; 3105 } else { 3106 if (WARN_ON(status->rate_idx < 0 || 3107 status->rate_idx >= sband->n_bitrates)) 3108 goto drop; 3109 rate = &sband->bitrates[status->rate_idx]; 3110 } 3111 } 3112 3113 status->rx_flags = 0; 3114 3115 /* 3116 * key references and virtual interfaces are protected using RCU 3117 * and this requires that we are in a read-side RCU section during 3118 * receive processing 3119 */ 3120 rcu_read_lock(); 3121 3122 /* 3123 * Frames with failed FCS/PLCP checksum are not returned, 3124 * all other frames are returned without radiotap header 3125 * if it was previously present. 3126 * Also, frames with less than 16 bytes are dropped. 3127 */ 3128 skb = ieee80211_rx_monitor(local, skb, rate); 3129 if (!skb) { 3130 rcu_read_unlock(); 3131 return; 3132 } 3133 3134 ieee80211_tpt_led_trig_rx(local, 3135 ((struct ieee80211_hdr *)skb->data)->frame_control, 3136 skb->len); 3137 __ieee80211_rx_handle_packet(hw, skb); 3138 3139 rcu_read_unlock(); 3140 3141 return; 3142 drop: 3143 kfree_skb(skb); 3144 } 3145 EXPORT_SYMBOL(ieee80211_rx); 3146 3147 /* This is a version of the rx handler that can be called from hard irq 3148 * context. Post the skb on the queue and schedule the tasklet */ 3149 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 3150 { 3151 struct ieee80211_local *local = hw_to_local(hw); 3152 3153 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 3154 3155 skb->pkt_type = IEEE80211_RX_MSG; 3156 skb_queue_tail(&local->skb_queue, skb); 3157 tasklet_schedule(&local->tasklet); 3158 } 3159 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 3160
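
/*
 * Illustrative usage sketch (not part of mac80211 itself): roughly how a
 * low-level driver hands a received MPDU to the entry points exported
 * above.  The my_dev_* names are hypothetical; IEEE80211_SKB_RXCB(),
 * struct ieee80211_rx_status and ieee80211_rx_irqsafe() are the real
 * interfaces used in this file.
 *
 *	static irqreturn_t my_dev_rx_interrupt(int irq, void *data)
 *	{
 *		struct my_dev *mydev = data;
 *		struct ieee80211_rx_status *status;
 *		struct sk_buff *skb;
 *
 *		skb = my_dev_dequeue_rx_frame(mydev);
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		status = IEEE80211_SKB_RXCB(skb);
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;
 *		status->signal = -42;
 *		status->rate_idx = 0;
 *
 *		ieee80211_rx_irqsafe(mydev->hw, skb);
 *		return IRQ_HANDLED;
 *	}
 *
 * The rx status is written into skb->cb via IEEE80211_SKB_RXCB() before the
 * frame is handed over.  From hard-IRQ context only ieee80211_rx_irqsafe()
 * may be used; ieee80211_rx() itself must be called with softirqs disabled,
 * as the WARN_ON_ONCE(softirq_count() == 0) above checks, and signal is
 * only interpreted as dBm when the driver sets IEEE80211_HW_SIGNAL_DBM.
 */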