/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb)
{
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			skb = NULL;
		}
	}

	return skb;
}

static inline int should_drop_frame(struct sk_buff *skb,
				    int present_fcs_len)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		return 1;
	if (unlikely(skb->len < 16 + present_fcs_len))
		return 1;
	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return 1;
	return 0;
}

static int
ieee80211_rx_radiotap_len(struct ieee80211_local *local,
			  struct ieee80211_rx_status *status)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 9;

	if (status->flag & RX_FLAG_TSFT)
		len += 8;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		len += 1;

	if (len & 1) /* padding for RX_FLAGS if necessary */
		len++;

	return len;
}
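
/*
 * Note on the length computed above: the 9 fixed bytes cover the
 * always-present fields that ieee80211_add_rx_radiotap_header() emits
 * below -- flags (1), rate (1), channel frequency + channel flags
 * (2 + 2), antenna (1) and RX flags (2).  The optional TSFT field adds
 * 8 bytes and the dBm antenna signal adds 1; the final pad byte keeps
 * the 2-byte RX flags field on a 2-byte boundary.
 */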

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 rx_flags = 0;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len);

	/* radiotap header, set always present flags */
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
	rthdr->it_len = cpu_to_le16(rtap_len);

	pos = (unsigned char *)(rthdr+1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (status->flag & RX_FLAG_TSFT) {
		put_unaligned_le64(status->mactime, pos);
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (status->flag & RX_FLAG_HT) {
		/*
		 * TODO: add following information into radiotap header once
		 * suitable fields are defined for it:
		 * - MCS index (status->rate_idx)
		 * - HT40 (status->flag & RX_FLAG_40MHZ)
		 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
		 */
		*pos = 0;
	} else {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		*pos = rate->bitrate / 5;
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->band == IEEE80211_BAND_5GHZ)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
				   pos);
	else if (status->flag & RX_FLAG_HT)
		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
				   pos);
	else if (rate->flags & IEEE80211_RATE_ERP_G)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
				   pos);
	else
		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
				   pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	/* IEEE80211_RADIOTAP_ANTENNA */
	*pos = status->antenna;
	pos++;

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		pos++;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;
}
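
/*
 * Delivery to monitor interfaces below uses the usual "prev_dev"
 * pattern: every monitor interface except the last one gets its own
 * clone of the skb, and the last one receives the skb itself, so at
 * most one copy per extra interface is made.  Note that the monitor
 * copy keeps the FCS if the hardware reported one (the radiotap F_FCS
 * flag tells userspace about it), while the skb returned to the normal
 * RX path has it stripped by remove_monitor_info().
 */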

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom = 0;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	/* room for the radiotap header based on driver features */
	needed_headroom = ieee80211_rx_radiotap_len(local, status);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	/* make sure hdr->frame_control is on the linear part */
	if (!pskb_may_pull(origskb, 2)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors) {
		if (should_drop_frame(origskb, present_fcs_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb);
	}

	if (should_drop_frame(origskb, present_fcs_len)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}
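
/*
 * RX queue selection: QoS data frames carry their TID in the low four
 * bits of the QoS control field and are handled per TID; everything
 * else (management, non-QoS data) shares the last RX queue,
 * NUM_RX_DATA_QUEUES - 1, which corresponds to the single modulo-4096
 * sequence counter quoted in the comment below.
 */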

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		tid = NUM_RX_DATA_QUEUES - 1;
	}

	rx->queue = tid;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like Atheros hardware adds which is in between the 802.11 header and
 * the payload is not supported, the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
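
/*
 * Worked example for the A-MSDU rule above: if the first subframe
 * starts at an address with data % 4 == 2, its 14-byte 802.3 header
 * ends on a 4-byte boundary, so the IP header that follows is
 * naturally aligned.  Since every subframe is padded to a multiple of
 * four bytes, the same holds for all later subframes.
 */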

static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ONCE((unsigned long)rx->skb->data & 1,
		  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	struct sk_buff *skb = rx->skb;

	if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
		return RX_CONTINUE;

	if (test_bit(SCAN_HW_SCANNING, &local->scanning))
		return ieee80211_scan_rx(rx->sdata, skb);

	if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
		/* drop all the other packets during a software scan anyway */
		if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
			dev_kfree_skb(skb);
		return RX_QUEUED;
	}

	/* scanning finished during invoking of handlers */
	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
	return RX_DROP_UNUSABLE;
}


static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;

	if (skb->len < 24 + sizeof(*mmie) ||
	    !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id != WLAN_EID_MMIE ||
	    mmie->length != sizeof(*mmie) - 2)
		return -1;

	return le16_to_cpu(mmie->key_id);
}


static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			mgmt = (struct ieee80211_mgmt *)hdr;
			if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;

	}

#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))

	if (ieee80211_is_data(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr1) &&
	    mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
		return RX_DROP_MONITOR;
#undef msh_h_get

	return RX_CONTINUE;
}

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}


static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	tid_agg_rx->reorder_buf[index] = NULL;
	status = IEEE80211_SKB_RXCB(skb);
	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
	skb_queue_tail(&local->rx_skb_queue, skb);

 no_frame:
	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
	}
}
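
/*
 * The sequence number helpers above work modulo 4096, e.g.
 * seq_less(0xffe, 0x001) is true because (0xffe - 0x001) & 0xfff is
 * larger than half the sequence space, i.e. 0xffe comes "before" 0x001
 * across the wrap-around.  A frame's slot in the reorder buffer is
 * simply its distance from the starting sequence number:
 * index = seq_sub(seq, ssn) % buf_size.
 */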

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
					  struct tid_ampdu_rx *tid_agg_rx)
{
	int index, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
						tid_agg_rx->buf_size;
	if (!tid_agg_rx->reorder_buf[index] &&
	    tid_agg_rx->stored_mpdu_num > 1) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!tid_agg_rx->reorder_buf[j]) {
				skipped++;
				continue;
			}
			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				wiphy_debug(hw->wiphy,
					    "release an RX reorder frame due to timeout on earlier frames\n");
#endif
			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (tid_agg_rx->reorder_buf[index]) {
		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
		index =	seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = seq_sub(tid_agg_rx->head_seq_num,
				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (tid_agg_rx->reorder_buf[j])
				break;
		}

 set_release_timer:

		mod_timer(&tid_agg_rx->reorder_timer,
			  tid_agg_rx->reorder_time[j] +
			  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}
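
/*
 * The release logic above has two modes: normally buffered frames are
 * handed up in order until the first hole; if the head of the buffer
 * is a hole but frames are stored behind it, any stored frame older
 * than HT_RX_REORDER_BUF_TIMEOUT is released anyway and head_seq_num
 * is advanced past the skipped (presumed lost) slots, otherwise the
 * reorder_timer is armed to retry later.
 */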

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->reorder_time[index] = jiffies;
	tid_agg_rx->stored_mpdu_num++;
	ieee80211_sta_reorder_release(hw, tid_agg_rx);

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer until they can
 * be released in order; frames that need no reordering are queued for
 * processing right away.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		mod_timer(&tid_agg_rx->session_timer,
			  TU_TO_EXP_TIME(tid_agg_rx->timeout));

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
		return;

 dont_reorder:
	skb_queue_tail(&local->rx_skb_queue, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->queue] ==
			     hdr->seq_ctrl)) {
			if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_MONITOR;
		} else
			rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
		if ((!ieee80211_has_fromds(hdr->frame_control) &&
		     !ieee80211_has_tods(hdr->frame_control) &&
		     ieee80211_is_data(hdr->frame_control)) ||
		    !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
			/* Drop IBSS frames and frames for other hosts
			 * silently. */
			return RX_DROP_MONITOR;
		}

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 * - GTK (group keys)
	 * - IGTK (group keys for management frames)
	 * - PTK (pairwise keys)
	 * - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	/* start without a key */
	rx->key = NULL;

	if (rx->sta)
		sta_ptk = rcu_dereference(rx->sta->ptk);

	fc = hdr->frame_control;

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */
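
		/*
		 * For WEP, TKIP and CCMP the Key ID octet is the last byte
		 * of the 4-byte IV/header portion that follows the 802.11
		 * header, hence the read at offset hdrlen + 3; the key
		 * index occupies its two top bits, which is why it is
		 * extracted with "keyid >> 6" below.
		 */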

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
		keyidx = keyid >> 6;

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;
	/* the hdr variable is invalid now! */

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* Check for weak IVs if possible */
		if (rx->sta && ieee80211_is_data(fc) &&
		    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
		     !(status->flag & RX_FLAG_DECRYPTED)) &&
		    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
			rx->sta->wep_weak_iv_count++;

		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	default:
		/*
		 * We can reach here only with HW-only algorithms
		 * but why didn't it decrypt the frame?!
		 */
		return RX_DROP_UNUSABLE;
	}

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void ap_sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	atomic_inc(&sdata->bss->num_sta_ps);
	set_sta_flags(sta, WLAN_STA_PS_STA);
	drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
	       sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}

static void ap_sta_ps_end(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	atomic_dec(&sdata->bss->num_sta_ps);

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
	       sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
		       sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
		return;
	}

	ieee80211_sta_ps_deliver_wakeup(sta);
}
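
/*
 * The two helpers above are driven by the power management bit of
 * frames received from an associated station: ieee80211_rx_h_sta_process()
 * below calls ap_sta_ps_start() when a frame arrives with the PM bit
 * set and ap_sta_ps_end() when a data frame arrives without it.  On
 * wakeup the frames buffered for the station are delivered, unless the
 * driver still has the station blocked (WLAN_STA_PS_DRIVER).
 */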

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases
	 * where other STAs start using different BSSID.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
			sta->last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
	}

	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_signal = status->signal;
	ewma_add(&sta->avg_signal, -status->signal);

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet, drop
		 * the frame to the monitor interface, to make sure
		 * that hostapd sees it
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta)))
			return RX_DROP_MONITOR;
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;
	int idx;

	idx = sdata->fragment_next;
	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list)) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) entry->skb_list.next->data;
		printk(KERN_DEBUG "%s: RX reassembly removed oldest "
		       "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
		       "addr1=%pM addr2=%pM\n",
		       sdata->name, idx,
		       jiffies - entry->first_frag_time, entry->seq,
		       entry->last_frag, hdr->addr1, hdr->addr2);
#endif
		__skb_queue_purge(&entry->skb_list);
	}

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}
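
/*
 * The fragment cache above is a small ring of IEEE80211_FRAGMENT_MAX
 * entries: a new first fragment recycles the oldest slot, and a
 * follow-up fragment is matched on sequence number, RX queue, expected
 * fragment number (last_frag + 1), frame type and the addr1/addr2
 * pair.  Entries older than two seconds are purged when encountered.
 */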

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;
	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
		   (rx->skb)->len < 24 ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->queue, &(rx->skb));
		if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
		    ieee80211_has_protected(fc)) {
			int queue = ieee80211_is_mgmt(fc) ?
				NUM_RX_DATA_QUEUES : rx->queue;
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		int queue;
		if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
			return RX_DROP_UNUSABLE;
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		queue = ieee80211_is_mgmt(fc) ?
			NUM_RX_DATA_QUEUES : rx->queue;
		rpn = rx->key->u.ccmp.rx_pn[queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		rx->skb = NULL;
		return RX_QUEUED;
	}

	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	status = IEEE80211_SKB_RXCB(rx->skb);
	status->rx_flags |= IEEE80211_RX_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}
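
/*
 * PS-Poll handling: a PS-Poll from one of our stations (AP/AP_VLAN
 * only) makes us release one buffered frame via
 * ieee80211_sta_ps_deliver_poll_response(), unless the driver has the
 * station blocked, in which case only the WLAN_STA_PSPOLL flag is
 * recorded; the PS-Poll frame itself is then freed and counted as
 * handled rather than dropped.
 */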

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
		   !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
		return RX_CONTINUE;

	if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return RX_DROP_UNUSABLE;

	if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(rx->sta);
	else
		set_sta_flags(rx->sta, WLAN_STA_PSPOLL);

	/* Free PS Poll skb here instead of returning RX_DROP that would
	 * count as a dropped frame. */
	dev_kfree_skb(rx->skb);

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
	u8 *data = rx->skb->data;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return RX_CONTINUE;

	/* remove the qos control field, update frame type and meta-data */
	memmove(data + IEEE80211_QOS_CTL_LEN, data,
		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
	/* change frame type to non QOS */
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	return RX_CONTINUE;
}

static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta ||
		     !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_nullfunc(fc) &&
		     ieee80211_is_data(fc) &&
		     (rx->key || rx->sdata->drop_unencrypted)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
		if (unlikely(!ieee80211_has_protected(fc) &&
			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/* BIP does not use Protected field, so need to check MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
		 */
		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
			     ieee80211_is_robust_mgmt_frame(
				     (struct ieee80211_hdr *) rx->skb->data)))
			return -EACCES;
	}

	return 0;
}

static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	if (ieee80211_has_a4(hdr->frame_control) &&
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
		return -1;

	if (is_multicast_ether_addr(hdr->addr1) &&
	    ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
	     (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
		return -1;

	return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
{
	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;

	/*
	 * Allow EAPOL frames to us/the PAE group address regardless
	 * of whether the frame was encrypted or not.
	 */
	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
	    (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
	     compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
		return true;

	if (ieee80211_802_1x_port_control(rx) ||
	    ieee80211_drop_unencrypted(rx, fc))
		return false;

	return true;
}

/*
 * requires that rx->skb is a frame with ethernet header
 */
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct net_device *dev = sdata->dev;
	struct sk_buff *skb, *xmit_skb;
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	skb = rx->skb;
	xmit_skb = NULL;

	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
	    (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb && net_ratelimit())
				printk(KERN_DEBUG "%s: failed to clone "
				       "multicast frame\n", dev->name);
		} else {
			dsta = sta_info_get(sdata, skb->data);
			if (dsta) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}

	if (skb) {
		int align __maybe_unused;

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		/*
		 * 'align' will only take the values 0 or 2 here
		 * since all frames are required to be aligned
		 * to 2-byte boundaries when being passed to
		 * mac80211. That also explains the __skb_push()
		 * below.
		 */
		align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
		if (align) {
			if (WARN_ON(skb_headroom(skb) < 3)) {
				dev_kfree_skb(skb);
				skb = NULL;
			} else {
				u8 *data = skb->data;
				size_t len = skb_headlen(skb);
				skb->data -= align;
				memmove(skb->data, data, len);
				skb_set_tail_pointer(skb, len);
			}
		}
#endif

		if (skb) {
			/* deliver to local stack */
			skb->protocol = eth_type_trans(skb, dev);
			memset(skb->cb, 0, sizeof(skb->cb));
			netif_receive_skb(skb);
		}
	}

	if (xmit_skb) {
		/* send to wireless media */
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->sdata->dev;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	struct sk_buff_head frame_list;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	if (ieee80211_has_a4(hdr->frame_control) &&
	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    !rx->sdata->u.vlan.sta)
		return RX_DROP_UNUSABLE;

	if (is_multicast_ether_addr(hdr->addr1) &&
	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	      rx->sdata->u.vlan.sta) ||
	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
	      rx->sdata->u.mgd.use_4addr)))
		return RX_DROP_UNUSABLE;

	skb->dev = dev;
	__skb_queue_head_init(&frame_list);

	if (skb_linearize(skb))
		return RX_DROP_UNUSABLE;

	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
				 rx->sdata->vif.type,
				 rx->local->hw.extra_tx_headroom);

	while (!skb_queue_empty(&frame_list)) {
		rx->skb = __skb_dequeue(&frame_list);

		if (!ieee80211_frame_allowed(rx, fc)) {
			dev_kfree_skb(rx->skb);
			continue;
		}
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += rx->skb->len;

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}
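
/*
 * A-MSDU handling above: once the aggregate is linearized,
 * ieee80211_amsdu_to_8023s() splits it into a list of regular 802.3
 * frames; each subframe is then run through the same
 * ieee80211_frame_allowed() port/encryption checks and delivered
 * individually, so a single A-MSDU can update the interface statistics
 * several times.
 */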

#ifdef CONFIG_MAC80211_MESH
static ieee80211_rx_result
ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mesh_hdr;
	unsigned int hdrlen;
	struct sk_buff *skb = rx->skb, *fwd_skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!mesh_hdr->ttl)
		/* illegal frame */
		return RX_DROP_MONITOR;

	if (mesh_hdr->flags & MESH_FLAGS_AE) {
		struct mesh_path *mppath;
		char *proxied_addr;
		char *mpp_addr;

		if (is_multicast_ether_addr(hdr->addr1)) {
			mpp_addr = hdr->addr3;
			proxied_addr = mesh_hdr->eaddr1;
		} else {
			mpp_addr = hdr->addr4;
			proxied_addr = mesh_hdr->eaddr2;
		}

		rcu_read_lock();
		mppath = mpp_path_lookup(proxied_addr, sdata);
		if (!mppath) {
			mpp_path_add(proxied_addr, mpp_addr, sdata);
		} else {
			spin_lock_bh(&mppath->state_lock);
			if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
			spin_unlock_bh(&mppath->state_lock);
		}
		rcu_read_unlock();
	}

	/* Frame has reached destination.  Don't forward */
	if (!is_multicast_ether_addr(hdr->addr1) &&
	    compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
		return RX_CONTINUE;

	mesh_hdr->ttl--;

	if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
		if (!mesh_hdr->ttl)
			IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
						     dropped_frames_ttl);
		else {
			struct ieee80211_hdr *fwd_hdr;
			struct ieee80211_tx_info *info;

			fwd_skb = skb_copy(skb, GFP_ATOMIC);

			if (!fwd_skb && net_ratelimit())
				printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
				       sdata->name);
			if (!fwd_skb)
				goto out;

			fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
			memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
			info = IEEE80211_SKB_CB(fwd_skb);
			memset(info, 0, sizeof(*info));
			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
			info->control.vif = &rx->sdata->vif;
			skb_set_queue_mapping(skb,
				ieee80211_select_queue(rx->sdata, fwd_skb));
			ieee80211_set_qos_hdr(local, skb);
			if (is_multicast_ether_addr(fwd_hdr->addr1))
				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
							     fwded_mcast);
			else {
				int err;
				/*
				 * Save TA to addr1 to send TA a path error if a
				 * suitable next hop is not found
				 */
				memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
				       ETH_ALEN);
				err = mesh_nexthop_lookup(fwd_skb, sdata);
				/* Failed to immediately resolve next hop:
				 * fwded frame was dropped or will be added
				 * later to the pending skb queue.
*/ 1838 if (err) 1839 return RX_DROP_MONITOR; 1840 1841 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1842 fwded_unicast); 1843 } 1844 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1845 fwded_frames); 1846 ieee80211_add_pending_skb(local, fwd_skb); 1847 } 1848 } 1849 1850 out: 1851 if (is_multicast_ether_addr(hdr->addr1) || 1852 sdata->dev->flags & IFF_PROMISC) 1853 return RX_CONTINUE; 1854 else 1855 return RX_DROP_MONITOR; 1856 } 1857 #endif 1858 1859 static ieee80211_rx_result debug_noinline 1860 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1861 { 1862 struct ieee80211_sub_if_data *sdata = rx->sdata; 1863 struct ieee80211_local *local = rx->local; 1864 struct net_device *dev = sdata->dev; 1865 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1866 __le16 fc = hdr->frame_control; 1867 int err; 1868 1869 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1870 return RX_CONTINUE; 1871 1872 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1873 return RX_DROP_MONITOR; 1874 1875 /* 1876 * Allow the cooked monitor interface of an AP to see 4-addr frames so 1877 * that a 4-addr station can be detected and moved into a separate VLAN 1878 */ 1879 if (ieee80211_has_a4(hdr->frame_control) && 1880 sdata->vif.type == NL80211_IFTYPE_AP) 1881 return RX_DROP_MONITOR; 1882 1883 err = __ieee80211_data_to_8023(rx); 1884 if (unlikely(err)) 1885 return RX_DROP_UNUSABLE; 1886 1887 if (!ieee80211_frame_allowed(rx, fc)) 1888 return RX_DROP_MONITOR; 1889 1890 rx->skb->dev = dev; 1891 1892 dev->stats.rx_packets++; 1893 dev->stats.rx_bytes += rx->skb->len; 1894 1895 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 1896 !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) { 1897 mod_timer(&local->dynamic_ps_timer, jiffies + 1898 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 1899 } 1900 1901 ieee80211_deliver_skb(rx); 1902 1903 return RX_QUEUED; 1904 } 1905 1906 static ieee80211_rx_result debug_noinline 1907 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 1908 { 1909 struct ieee80211_local *local = rx->local; 1910 struct ieee80211_hw *hw = &local->hw; 1911 struct sk_buff *skb = rx->skb; 1912 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 1913 struct tid_ampdu_rx *tid_agg_rx; 1914 u16 start_seq_num; 1915 u16 tid; 1916 1917 if (likely(!ieee80211_is_ctl(bar->frame_control))) 1918 return RX_CONTINUE; 1919 1920 if (ieee80211_is_back_req(bar->frame_control)) { 1921 struct { 1922 __le16 control, start_seq_num; 1923 } __packed bar_data; 1924 1925 if (!rx->sta) 1926 return RX_DROP_MONITOR; 1927 1928 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 1929 &bar_data, sizeof(bar_data))) 1930 return RX_DROP_MONITOR; 1931 1932 tid = le16_to_cpu(bar_data.control) >> 12; 1933 1934 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 1935 if (!tid_agg_rx) 1936 return RX_DROP_MONITOR; 1937 1938 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 1939 1940 /* reset session timer */ 1941 if (tid_agg_rx->timeout) 1942 mod_timer(&tid_agg_rx->session_timer, 1943 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 1944 1945 spin_lock(&tid_agg_rx->reorder_lock); 1946 /* release stored frames up to start of BAR */ 1947 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num); 1948 spin_unlock(&tid_agg_rx->reorder_lock); 1949 1950 kfree_skb(skb); 1951 return RX_QUEUED; 1952 } 1953 1954 /* 1955 * After this point, we only want management frames, 1956 * so we can drop all remaining control frames to 1957 * cooked monitor interfaces. 
1958 */ 1959 return RX_DROP_MONITOR; 1960 } 1961 1962 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 1963 struct ieee80211_mgmt *mgmt, 1964 size_t len) 1965 { 1966 struct ieee80211_local *local = sdata->local; 1967 struct sk_buff *skb; 1968 struct ieee80211_mgmt *resp; 1969 1970 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { 1971 /* Not to own unicast address */ 1972 return; 1973 } 1974 1975 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || 1976 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { 1977 /* Not from the current AP or not associated yet. */ 1978 return; 1979 } 1980 1981 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 1982 /* Too short SA Query request frame */ 1983 return; 1984 } 1985 1986 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 1987 if (skb == NULL) 1988 return; 1989 1990 skb_reserve(skb, local->hw.extra_tx_headroom); 1991 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1992 memset(resp, 0, 24); 1993 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1994 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 1995 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1996 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1997 IEEE80211_STYPE_ACTION); 1998 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 1999 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2000 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2001 memcpy(resp->u.action.u.sa_query.trans_id, 2002 mgmt->u.action.u.sa_query.trans_id, 2003 WLAN_SA_QUERY_TR_ID_LEN); 2004 2005 ieee80211_tx_skb(sdata, skb); 2006 } 2007 2008 static ieee80211_rx_result debug_noinline 2009 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2010 { 2011 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2012 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2013 2014 /* 2015 * From here on, look only at management frames. 2016 * Data and control frames are already handled, 2017 * and unknown (reserved) frames are useless. 2018 */ 2019 if (rx->skb->len < 24) 2020 return RX_DROP_MONITOR; 2021 2022 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2023 return RX_DROP_MONITOR; 2024 2025 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2026 return RX_DROP_MONITOR; 2027 2028 if (ieee80211_drop_unencrypted_mgmt(rx)) 2029 return RX_DROP_UNUSABLE; 2030 2031 return RX_CONTINUE; 2032 } 2033 2034 static ieee80211_rx_result debug_noinline 2035 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2036 { 2037 struct ieee80211_local *local = rx->local; 2038 struct ieee80211_sub_if_data *sdata = rx->sdata; 2039 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2040 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2041 int len = rx->skb->len; 2042 2043 if (!ieee80211_is_action(mgmt->frame_control)) 2044 return RX_CONTINUE; 2045 2046 /* drop too small frames */ 2047 if (len < IEEE80211_MIN_ACTION_SIZE) 2048 return RX_DROP_UNUSABLE; 2049 2050 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 2051 return RX_DROP_UNUSABLE; 2052 2053 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2054 return RX_DROP_UNUSABLE; 2055 2056 switch (mgmt->u.action.category) { 2057 case WLAN_CATEGORY_BACK: 2058 /* 2059 * The aggregation code is not prepared to handle 2060 * anything but STA/AP due to the BSSID handling; 2061 * IBSS could work in the code but isn't supported 2062 * by drivers or the standard. 
2063 */ 2064 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2065 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2066 sdata->vif.type != NL80211_IFTYPE_AP) 2067 break; 2068 2069 /* verify action_code is present */ 2070 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2071 break; 2072 2073 switch (mgmt->u.action.u.addba_req.action_code) { 2074 case WLAN_ACTION_ADDBA_REQ: 2075 if (len < (IEEE80211_MIN_ACTION_SIZE + 2076 sizeof(mgmt->u.action.u.addba_req))) 2077 goto invalid; 2078 break; 2079 case WLAN_ACTION_ADDBA_RESP: 2080 if (len < (IEEE80211_MIN_ACTION_SIZE + 2081 sizeof(mgmt->u.action.u.addba_resp))) 2082 goto invalid; 2083 break; 2084 case WLAN_ACTION_DELBA: 2085 if (len < (IEEE80211_MIN_ACTION_SIZE + 2086 sizeof(mgmt->u.action.u.delba))) 2087 goto invalid; 2088 break; 2089 default: 2090 goto invalid; 2091 } 2092 2093 goto queue; 2094 case WLAN_CATEGORY_SPECTRUM_MGMT: 2095 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 2096 break; 2097 2098 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2099 break; 2100 2101 /* verify action_code is present */ 2102 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2103 break; 2104 2105 switch (mgmt->u.action.u.measurement.action_code) { 2106 case WLAN_ACTION_SPCT_MSR_REQ: 2107 if (len < (IEEE80211_MIN_ACTION_SIZE + 2108 sizeof(mgmt->u.action.u.measurement))) 2109 break; 2110 ieee80211_process_measurement_req(sdata, mgmt, len); 2111 goto handled; 2112 case WLAN_ACTION_SPCT_CHL_SWITCH: 2113 if (len < (IEEE80211_MIN_ACTION_SIZE + 2114 sizeof(mgmt->u.action.u.chan_switch))) 2115 break; 2116 2117 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2118 break; 2119 2120 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2121 break; 2122 2123 goto queue; 2124 } 2125 break; 2126 case WLAN_CATEGORY_SA_QUERY: 2127 if (len < (IEEE80211_MIN_ACTION_SIZE + 2128 sizeof(mgmt->u.action.u.sa_query))) 2129 break; 2130 2131 switch (mgmt->u.action.u.sa_query.action) { 2132 case WLAN_ACTION_SA_QUERY_REQUEST: 2133 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2134 break; 2135 ieee80211_process_sa_query_req(sdata, mgmt, len); 2136 goto handled; 2137 } 2138 break; 2139 case WLAN_CATEGORY_MESH_PLINK: 2140 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2141 break; 2142 goto queue; 2143 case WLAN_CATEGORY_MESH_PATH_SEL: 2144 if (!mesh_path_sel_is_hwmp(sdata)) 2145 break; 2146 goto queue; 2147 } 2148 2149 return RX_CONTINUE; 2150 2151 invalid: 2152 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2153 /* will return in the next handlers */ 2154 return RX_CONTINUE; 2155 2156 handled: 2157 if (rx->sta) 2158 rx->sta->rx_packets++; 2159 dev_kfree_skb(rx->skb); 2160 return RX_QUEUED; 2161 2162 queue: 2163 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2164 skb_queue_tail(&sdata->skb_queue, rx->skb); 2165 ieee80211_queue_work(&local->hw, &sdata->work); 2166 if (rx->sta) 2167 rx->sta->rx_packets++; 2168 return RX_QUEUED; 2169 } 2170 2171 static ieee80211_rx_result debug_noinline 2172 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2173 { 2174 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2175 2176 /* skip known-bad action frames and return them in the next handler */ 2177 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2178 return RX_CONTINUE; 2179 2180 /* 2181 * Getting here means the kernel doesn't know how to handle 2182 * it, but maybe userspace does ... include returned frames 2183 * so userspace can register for those to know whether ones 2184 * it transmitted were processed or returned. 
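 *
 * cfg80211_rx_mgmt() reports whether some userspace listener that
 * registered for this frame type accepted it; only in that case is the
 * skb consumed here, otherwise the frame falls through to the next
 * handler.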
2185 */ 2186 2187 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, 2188 rx->skb->data, rx->skb->len, 2189 GFP_ATOMIC)) { 2190 if (rx->sta) 2191 rx->sta->rx_packets++; 2192 dev_kfree_skb(rx->skb); 2193 return RX_QUEUED; 2194 } 2195 2196 2197 return RX_CONTINUE; 2198 } 2199 2200 static ieee80211_rx_result debug_noinline 2201 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2202 { 2203 struct ieee80211_local *local = rx->local; 2204 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2205 struct sk_buff *nskb; 2206 struct ieee80211_sub_if_data *sdata = rx->sdata; 2207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2208 2209 if (!ieee80211_is_action(mgmt->frame_control)) 2210 return RX_CONTINUE; 2211 2212 /* 2213 * For AP mode, hostapd is responsible for handling any action 2214 * frames that we didn't handle, including returning unknown 2215 * ones. For all other modes we will return them to the sender, 2216 * setting the 0x80 bit in the action category, as required by 2217 * 802.11-2007 7.3.1.11. 2218 * Newer versions of hostapd shall also use the management frame 2219 * registration mechanisms, but older ones still use cooked 2220 * monitor interfaces so push all frames there. 2221 */ 2222 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2223 (sdata->vif.type == NL80211_IFTYPE_AP || 2224 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2225 return RX_DROP_MONITOR; 2226 2227 /* do not return rejected action frames */ 2228 if (mgmt->u.action.category & 0x80) 2229 return RX_DROP_UNUSABLE; 2230 2231 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2232 GFP_ATOMIC); 2233 if (nskb) { 2234 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2235 2236 nmgmt->u.action.category |= 0x80; 2237 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2238 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2239 2240 memset(nskb->cb, 0, sizeof(nskb->cb)); 2241 2242 ieee80211_tx_skb(rx->sdata, nskb); 2243 } 2244 dev_kfree_skb(rx->skb); 2245 return RX_QUEUED; 2246 } 2247 2248 static ieee80211_rx_result debug_noinline 2249 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2250 { 2251 struct ieee80211_sub_if_data *sdata = rx->sdata; 2252 ieee80211_rx_result rxs; 2253 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2254 __le16 stype; 2255 2256 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); 2257 if (rxs != RX_CONTINUE) 2258 return rxs; 2259 2260 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2261 2262 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2263 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2264 sdata->vif.type != NL80211_IFTYPE_STATION) 2265 return RX_DROP_MONITOR; 2266 2267 switch (stype) { 2268 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2269 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2270 /* process for all: mesh, mlme, ibss */ 2271 break; 2272 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2273 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2274 if (is_multicast_ether_addr(mgmt->da) && 2275 !is_broadcast_ether_addr(mgmt->da)) 2276 return RX_DROP_MONITOR; 2277 2278 /* process only for station */ 2279 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2280 return RX_DROP_MONITOR; 2281 break; 2282 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2283 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2284 /* process only for ibss */ 2285 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2286 return RX_DROP_MONITOR; 2287 break; 2288 default: 2289 return RX_DROP_MONITOR; 2290 } 2291 2292 /* queue up frame and kick off work to process it */ 2293 rx->skb->pkt_type 
= IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2294 skb_queue_tail(&sdata->skb_queue, rx->skb); 2295 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2296 if (rx->sta) 2297 rx->sta->rx_packets++; 2298 2299 return RX_QUEUED; 2300 } 2301 2302 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, 2303 struct ieee80211_rx_data *rx) 2304 { 2305 int keyidx; 2306 unsigned int hdrlen; 2307 2308 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2309 if (rx->skb->len >= hdrlen + 4) 2310 keyidx = rx->skb->data[hdrlen + 3] >> 6; 2311 else 2312 keyidx = -1; 2313 2314 if (!rx->sta) { 2315 /* 2316 * Some hardware seem to generate incorrect Michael MIC 2317 * reports; ignore them to avoid triggering countermeasures. 2318 */ 2319 return; 2320 } 2321 2322 if (!ieee80211_has_protected(hdr->frame_control)) 2323 return; 2324 2325 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { 2326 /* 2327 * APs with pairwise keys should never receive Michael MIC 2328 * errors for non-zero keyidx because these are reserved for 2329 * group keys and only the AP is sending real multicast 2330 * frames in the BSS. 2331 */ 2332 return; 2333 } 2334 2335 if (!ieee80211_is_data(hdr->frame_control) && 2336 !ieee80211_is_auth(hdr->frame_control)) 2337 return; 2338 2339 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, 2340 GFP_ATOMIC); 2341 } 2342 2343 /* TODO: use IEEE80211_RX_FRAGMENTED */ 2344 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2345 struct ieee80211_rate *rate) 2346 { 2347 struct ieee80211_sub_if_data *sdata; 2348 struct ieee80211_local *local = rx->local; 2349 struct ieee80211_rtap_hdr { 2350 struct ieee80211_radiotap_header hdr; 2351 u8 flags; 2352 u8 rate_or_pad; 2353 __le16 chan_freq; 2354 __le16 chan_flags; 2355 } __packed *rthdr; 2356 struct sk_buff *skb = rx->skb, *skb2; 2357 struct net_device *prev_dev = NULL; 2358 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2359 2360 /* 2361 * If cooked monitor has been processed already, then 2362 * don't do it again. If not, set the flag. 
2363 */ 2364 if (rx->flags & IEEE80211_RX_CMNTR) 2365 goto out_free_skb; 2366 rx->flags |= IEEE80211_RX_CMNTR; 2367 2368 if (skb_headroom(skb) < sizeof(*rthdr) && 2369 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2370 goto out_free_skb; 2371 2372 rthdr = (void *)skb_push(skb, sizeof(*rthdr)); 2373 memset(rthdr, 0, sizeof(*rthdr)); 2374 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); 2375 rthdr->hdr.it_present = 2376 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 2377 (1 << IEEE80211_RADIOTAP_CHANNEL)); 2378 2379 if (rate) { 2380 rthdr->rate_or_pad = rate->bitrate / 5; 2381 rthdr->hdr.it_present |= 2382 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 2383 } 2384 rthdr->chan_freq = cpu_to_le16(status->freq); 2385 2386 if (status->band == IEEE80211_BAND_5GHZ) 2387 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | 2388 IEEE80211_CHAN_5GHZ); 2389 else 2390 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | 2391 IEEE80211_CHAN_2GHZ); 2392 2393 skb_set_mac_header(skb, 0); 2394 skb->ip_summed = CHECKSUM_UNNECESSARY; 2395 skb->pkt_type = PACKET_OTHERHOST; 2396 skb->protocol = htons(ETH_P_802_2); 2397 2398 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2399 if (!ieee80211_sdata_running(sdata)) 2400 continue; 2401 2402 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2403 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 2404 continue; 2405 2406 if (prev_dev) { 2407 skb2 = skb_clone(skb, GFP_ATOMIC); 2408 if (skb2) { 2409 skb2->dev = prev_dev; 2410 netif_receive_skb(skb2); 2411 } 2412 } 2413 2414 prev_dev = sdata->dev; 2415 sdata->dev->stats.rx_packets++; 2416 sdata->dev->stats.rx_bytes += skb->len; 2417 } 2418 2419 if (prev_dev) { 2420 skb->dev = prev_dev; 2421 netif_receive_skb(skb); 2422 return; 2423 } 2424 2425 out_free_skb: 2426 dev_kfree_skb(skb); 2427 } 2428 2429 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 2430 ieee80211_rx_result res) 2431 { 2432 switch (res) { 2433 case RX_DROP_MONITOR: 2434 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2435 if (rx->sta) 2436 rx->sta->rx_dropped++; 2437 /* fall through */ 2438 case RX_CONTINUE: { 2439 struct ieee80211_rate *rate = NULL; 2440 struct ieee80211_supported_band *sband; 2441 struct ieee80211_rx_status *status; 2442 2443 status = IEEE80211_SKB_RXCB((rx->skb)); 2444 2445 sband = rx->local->hw.wiphy->bands[status->band]; 2446 if (!(status->flag & RX_FLAG_HT)) 2447 rate = &sband->bitrates[status->rate_idx]; 2448 2449 ieee80211_rx_cooked_monitor(rx, rate); 2450 break; 2451 } 2452 case RX_DROP_UNUSABLE: 2453 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2454 if (rx->sta) 2455 rx->sta->rx_dropped++; 2456 dev_kfree_skb(rx->skb); 2457 break; 2458 case RX_QUEUED: 2459 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 2460 break; 2461 } 2462 } 2463 2464 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) 2465 { 2466 ieee80211_rx_result res = RX_DROP_MONITOR; 2467 struct sk_buff *skb; 2468 2469 #define CALL_RXH(rxh) \ 2470 do { \ 2471 res = rxh(rx); \ 2472 if (res != RX_CONTINUE) \ 2473 goto rxh_next; \ 2474 } while (0); 2475 2476 spin_lock(&rx->local->rx_skb_queue.lock); 2477 if (rx->local->running_rx_handler) 2478 goto unlock; 2479 2480 rx->local->running_rx_handler = true; 2481 2482 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) { 2483 spin_unlock(&rx->local->rx_skb_queue.lock); 2484 2485 /* 2486 * all the other fields are valid across frames 2487 * that belong to an aMPDU since they are on the 2488 * same TID from the same station 2489 */ 2490 rx->skb = skb; 2491 
rx->flags = 0; 2492 2493 CALL_RXH(ieee80211_rx_h_decrypt) 2494 CALL_RXH(ieee80211_rx_h_check_more_data) 2495 CALL_RXH(ieee80211_rx_h_sta_process) 2496 CALL_RXH(ieee80211_rx_h_defragment) 2497 CALL_RXH(ieee80211_rx_h_ps_poll) 2498 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2499 /* must be after MMIC verify so header is counted in MPDU mic */ 2500 CALL_RXH(ieee80211_rx_h_remove_qos_control) 2501 CALL_RXH(ieee80211_rx_h_amsdu) 2502 #ifdef CONFIG_MAC80211_MESH 2503 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 2504 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2505 #endif 2506 CALL_RXH(ieee80211_rx_h_data) 2507 CALL_RXH(ieee80211_rx_h_ctrl); 2508 CALL_RXH(ieee80211_rx_h_mgmt_check) 2509 CALL_RXH(ieee80211_rx_h_action) 2510 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 2511 CALL_RXH(ieee80211_rx_h_action_return) 2512 CALL_RXH(ieee80211_rx_h_mgmt) 2513 2514 rxh_next: 2515 ieee80211_rx_handlers_result(rx, res); 2516 spin_lock(&rx->local->rx_skb_queue.lock); 2517 #undef CALL_RXH 2518 } 2519 2520 rx->local->running_rx_handler = false; 2521 2522 unlock: 2523 spin_unlock(&rx->local->rx_skb_queue.lock); 2524 } 2525 2526 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 2527 { 2528 ieee80211_rx_result res = RX_DROP_MONITOR; 2529 2530 #define CALL_RXH(rxh) \ 2531 do { \ 2532 res = rxh(rx); \ 2533 if (res != RX_CONTINUE) \ 2534 goto rxh_next; \ 2535 } while (0); 2536 2537 CALL_RXH(ieee80211_rx_h_passive_scan) 2538 CALL_RXH(ieee80211_rx_h_check) 2539 2540 ieee80211_rx_reorder_ampdu(rx); 2541 2542 ieee80211_rx_handlers(rx); 2543 return; 2544 2545 rxh_next: 2546 ieee80211_rx_handlers_result(rx, res); 2547 2548 #undef CALL_RXH 2549 } 2550 2551 /* 2552 * This function makes calls into the RX path, therefore 2553 * it has to be invoked under RCU read lock. 2554 */ 2555 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 2556 { 2557 struct ieee80211_rx_data rx = { 2558 .sta = sta, 2559 .sdata = sta->sdata, 2560 .local = sta->local, 2561 .queue = tid, 2562 }; 2563 struct tid_ampdu_rx *tid_agg_rx; 2564 2565 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 2566 if (!tid_agg_rx) 2567 return; 2568 2569 spin_lock(&tid_agg_rx->reorder_lock); 2570 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx); 2571 spin_unlock(&tid_agg_rx->reorder_lock); 2572 2573 ieee80211_rx_handlers(&rx); 2574 } 2575 2576 /* main receive path */ 2577 2578 static int prepare_for_handlers(struct ieee80211_rx_data *rx, 2579 struct ieee80211_hdr *hdr) 2580 { 2581 struct ieee80211_sub_if_data *sdata = rx->sdata; 2582 struct sk_buff *skb = rx->skb; 2583 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2584 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 2585 int multicast = is_multicast_ether_addr(hdr->addr1); 2586 2587 switch (sdata->vif.type) { 2588 case NL80211_IFTYPE_STATION: 2589 if (!bssid && !sdata->u.mgd.use_4addr) 2590 return 0; 2591 if (!multicast && 2592 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { 2593 if (!(sdata->dev->flags & IFF_PROMISC)) 2594 return 0; 2595 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2596 } 2597 break; 2598 case NL80211_IFTYPE_ADHOC: 2599 if (!bssid) 2600 return 0; 2601 if (ieee80211_is_beacon(hdr->frame_control)) { 2602 return 1; 2603 } 2604 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 2605 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) 2606 return 0; 2607 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2608 } else if (!multicast && 2609 compare_ether_addr(sdata->vif.addr, 2610 hdr->addr1) != 0) { 2611 if 
(!(sdata->dev->flags & IFF_PROMISC))
2612 				return 0;
2613 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2614 		} else if (!rx->sta) {
2615 			int rate_idx;
2616 			if (status->flag & RX_FLAG_HT)
2617 				rate_idx = 0; /* TODO: HT rates */
2618 			else
2619 				rate_idx = status->rate_idx;
2620 			rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2621 					hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2622 		}
2623 		break;
2624 	case NL80211_IFTYPE_MESH_POINT:
2625 		if (!multicast &&
2626 		    compare_ether_addr(sdata->vif.addr,
2627 				       hdr->addr1) != 0) {
2628 			if (!(sdata->dev->flags & IFF_PROMISC))
2629 				return 0;
2630 
2631 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2632 		}
2633 		break;
2634 	case NL80211_IFTYPE_AP_VLAN:
2635 	case NL80211_IFTYPE_AP:
2636 		if (!bssid) {
2637 			if (compare_ether_addr(sdata->vif.addr,
2638 					       hdr->addr1))
2639 				return 0;
2640 		} else if (!ieee80211_bssid_match(bssid,
2641 						  sdata->vif.addr)) {
2642 			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2643 				return 0;
2644 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2645 		}
2646 		break;
2647 	case NL80211_IFTYPE_WDS:
2648 		if (bssid || !ieee80211_is_data(hdr->frame_control))
2649 			return 0;
2650 		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2651 			return 0;
2652 		break;
2653 	default:
2654 		/* should never get here */
2655 		WARN_ON(1);
2656 		break;
2657 	}
2658 
2659 	return 1;
2660 }
2661 
2662 /*
2663  * This function returns whether the SKB was
2664  * destined for RX processing, which, if consume
2665  * is true, is equivalent to whether the skb
2666  * was consumed.
2667  */
2668 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2669 					    struct sk_buff *skb, bool consume)
2670 {
2671 	struct ieee80211_local *local = rx->local;
2672 	struct ieee80211_sub_if_data *sdata = rx->sdata;
2673 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2674 	struct ieee80211_hdr *hdr = (void *)skb->data;
2675 	int prepares;
2676 
2677 	rx->skb = skb;
2678 	status->rx_flags |= IEEE80211_RX_RA_MATCH;
2679 	prepares = prepare_for_handlers(rx, hdr);
2680 
2681 	if (!prepares)
2682 		return false;
2683 
2684 	if (status->flag & RX_FLAG_MMIC_ERROR) {
2685 		if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2686 			ieee80211_rx_michael_mic_report(hdr, rx);
2687 		return false;
2688 	}
2689 
2690 	if (!consume) {
2691 		skb = skb_copy(skb, GFP_ATOMIC);
2692 		if (!skb) {
2693 			if (net_ratelimit())
2694 				wiphy_debug(local->hw.wiphy,
2695 					"failed to copy multicast frame for %s\n",
2696 					sdata->name);
2697 			return true;
2698 		}
2699 
2700 		rx->skb = skb;
2701 	}
2702 
2703 	ieee80211_invoke_rx_handlers(rx);
2704 	return true;
2705 }
2706 
2707 /*
2708  * This is the actual Rx frames handler. As it belongs to the Rx path it must
2709  * be called with rcu_read_lock protection.
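 *
 * It matches the frame against every suitable station/interface through
 * ieee80211_prepare_and_rx_handle(): all but the last match are handed a
 * copy of the skb, the last match consumes the original, and the skb is
 * freed if nothing matched.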
2710 */ 2711 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2712 struct sk_buff *skb) 2713 { 2714 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2715 struct ieee80211_local *local = hw_to_local(hw); 2716 struct ieee80211_sub_if_data *sdata; 2717 struct ieee80211_hdr *hdr; 2718 __le16 fc; 2719 struct ieee80211_rx_data rx; 2720 struct ieee80211_sub_if_data *prev; 2721 struct sta_info *sta, *tmp, *prev_sta; 2722 int err = 0; 2723 2724 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 2725 memset(&rx, 0, sizeof(rx)); 2726 rx.skb = skb; 2727 rx.local = local; 2728 2729 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2730 local->dot11ReceivedFragmentCount++; 2731 2732 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2733 test_bit(SCAN_OFF_CHANNEL, &local->scanning))) 2734 status->rx_flags |= IEEE80211_RX_IN_SCAN; 2735 2736 if (ieee80211_is_mgmt(fc)) 2737 err = skb_linearize(skb); 2738 else 2739 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2740 2741 if (err) { 2742 dev_kfree_skb(skb); 2743 return; 2744 } 2745 2746 hdr = (struct ieee80211_hdr *)skb->data; 2747 ieee80211_parse_qos(&rx); 2748 ieee80211_verify_alignment(&rx); 2749 2750 if (ieee80211_is_data(fc)) { 2751 prev_sta = NULL; 2752 2753 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2754 if (!prev_sta) { 2755 prev_sta = sta; 2756 continue; 2757 } 2758 2759 rx.sta = prev_sta; 2760 rx.sdata = prev_sta->sdata; 2761 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2762 2763 prev_sta = sta; 2764 } 2765 2766 if (prev_sta) { 2767 rx.sta = prev_sta; 2768 rx.sdata = prev_sta->sdata; 2769 2770 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2771 return; 2772 goto out; 2773 } 2774 } 2775 2776 prev = NULL; 2777 2778 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2779 if (!ieee80211_sdata_running(sdata)) 2780 continue; 2781 2782 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2783 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2784 continue; 2785 2786 /* 2787 * frame is destined for this interface, but if it's 2788 * not also for the previous one we handle that after 2789 * the loop to avoid copying the SKB once too much 2790 */ 2791 2792 if (!prev) { 2793 prev = sdata; 2794 continue; 2795 } 2796 2797 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2798 rx.sdata = prev; 2799 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2800 2801 prev = sdata; 2802 } 2803 2804 if (prev) { 2805 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2806 rx.sdata = prev; 2807 2808 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2809 return; 2810 } 2811 2812 out: 2813 dev_kfree_skb(skb); 2814 } 2815 2816 /* 2817 * This is the receive path handler. It is called by a low level driver when an 2818 * 802.11 MPDU is received from the hardware. 2819 */ 2820 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 2821 { 2822 struct ieee80211_local *local = hw_to_local(hw); 2823 struct ieee80211_rate *rate = NULL; 2824 struct ieee80211_supported_band *sband; 2825 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2826 2827 WARN_ON_ONCE(softirq_count() == 0); 2828 2829 if (WARN_ON(status->band < 0 || 2830 status->band >= IEEE80211_NUM_BANDS)) 2831 goto drop; 2832 2833 sband = local->hw.wiphy->bands[status->band]; 2834 if (WARN_ON(!sband)) 2835 goto drop; 2836 2837 /* 2838 * If we're suspending, it is possible although not too likely 2839 * that we'd be receiving frames after having already partially 2840 * quiesced the stack. 
We can't process such frames then since 2841 * that might, for example, cause stations to be added or other 2842 * driver callbacks be invoked. 2843 */ 2844 if (unlikely(local->quiescing || local->suspended)) 2845 goto drop; 2846 2847 /* 2848 * The same happens when we're not even started, 2849 * but that's worth a warning. 2850 */ 2851 if (WARN_ON(!local->started)) 2852 goto drop; 2853 2854 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 2855 /* 2856 * Validate the rate, unless a PLCP error means that 2857 * we probably can't have a valid rate here anyway. 2858 */ 2859 2860 if (status->flag & RX_FLAG_HT) { 2861 /* 2862 * rate_idx is MCS index, which can be [0-76] 2863 * as documented on: 2864 * 2865 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 2866 * 2867 * Anything else would be some sort of driver or 2868 * hardware error. The driver should catch hardware 2869 * errors. 2870 */ 2871 if (WARN((status->rate_idx < 0 || 2872 status->rate_idx > 76), 2873 "Rate marked as an HT rate but passed " 2874 "status->rate_idx is not " 2875 "an MCS index [0-76]: %d (0x%02x)\n", 2876 status->rate_idx, 2877 status->rate_idx)) 2878 goto drop; 2879 } else { 2880 if (WARN_ON(status->rate_idx < 0 || 2881 status->rate_idx >= sband->n_bitrates)) 2882 goto drop; 2883 rate = &sband->bitrates[status->rate_idx]; 2884 } 2885 } 2886 2887 status->rx_flags = 0; 2888 2889 /* 2890 * key references and virtual interfaces are protected using RCU 2891 * and this requires that we are in a read-side RCU section during 2892 * receive processing 2893 */ 2894 rcu_read_lock(); 2895 2896 /* 2897 * Frames with failed FCS/PLCP checksum are not returned, 2898 * all other frames are returned without radiotap header 2899 * if it was previously present. 2900 * Also, frames with less than 16 bytes are dropped. 2901 */ 2902 skb = ieee80211_rx_monitor(local, skb, rate); 2903 if (!skb) { 2904 rcu_read_unlock(); 2905 return; 2906 } 2907 2908 ieee80211_tpt_led_trig_rx(local, 2909 ((struct ieee80211_hdr *)skb->data)->frame_control, 2910 skb->len); 2911 __ieee80211_rx_handle_packet(hw, skb); 2912 2913 rcu_read_unlock(); 2914 2915 return; 2916 drop: 2917 kfree_skb(skb); 2918 } 2919 EXPORT_SYMBOL(ieee80211_rx); 2920 2921 /* This is a version of the rx handler that can be called from hard irq 2922 * context. Post the skb on the queue and schedule the tasklet */ 2923 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 2924 { 2925 struct ieee80211_local *local = hw_to_local(hw); 2926 2927 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 2928 2929 skb->pkt_type = IEEE80211_RX_MSG; 2930 skb_queue_tail(&local->skb_queue, skb); 2931 tasklet_schedule(&local->tasklet); 2932 } 2933 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 2934
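
/*
 * Illustrative usage sketch (not part of mac80211; all mydrv_* names are
 * hypothetical): a driver's interrupt handler typically fills in the
 * ieee80211_rx_status control block of the received skb and hands it to
 * mac80211 with ieee80211_rx_irqsafe(), which is safe from hard irq
 * context because it only queues the skb and schedules the RX tasklet.
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct ieee80211_hw *hw = dev_id;
 *		struct sk_buff *skb = mydrv_fetch_rx_frame(hw);
 *		struct ieee80211_rx_status *status;
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		status = IEEE80211_SKB_RXCB(skb);
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;		// channel 1
 *		status->rate_idx = 0;		// index into the band's bitrates
 *
 *		ieee80211_rx_irqsafe(hw, skb);
 *		return IRQ_HANDLED;
 *	}
 */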