/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Transmit and frame generation functions.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* misc utils */

static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
				 int next_frag_len)
{
	int rate, mrate, erp, dur, i;
	struct ieee80211_rate *txrate;
	struct ieee80211_local *local = tx->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	/* assume HW handles this */
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
		return 0;

	/* uh huh? */
	if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
		return 0;

	sband = local->hw.wiphy->bands[tx->channel->band];
	txrate = &sband->bitrates[info->control.rates[0].idx];

	erp = txrate->flags & IEEE80211_RATE_ERP_G;

	/*
	 * data and mgmt (except PS Poll):
	 * - during CFP: 32768
	 * - during contention period:
	 *   if addr1 is group address: 0
	 *   if more fragments = 0 and addr1 is individual address: time to
	 *      transmit one ACK plus SIFS
	 *   if more fragments = 1 and addr1 is individual address: time to
	 *      transmit next fragment plus 2 x ACK plus 3 x SIFS
	 *
	 * IEEE 802.11, 9.6:
	 * - control response frame (CTS or ACK) shall be transmitted using the
	 *   same rate as the immediately previous frame in the frame exchange
	 *   sequence, if this rate belongs to the PHY mandatory rates, or else
	 *   at the highest possible rate belonging to the PHY rates in the
	 *   BSSBasicRateSet
	 */
	hdr = (struct ieee80211_hdr *)tx->skb->data;
	if (ieee80211_is_ctl(hdr->frame_control)) {
		/* TODO: These control frames are not currently sent by
		 * mac80211, but should they be implemented, this function
		 * needs to be updated to support duration field calculation.
		 *
		 * RTS: time needed to transmit pending data/mgmt frame plus
		 *	one CTS frame plus one ACK frame plus 3 x SIFS
		 * CTS: duration of immediately previous RTS minus time
		 *	required to transmit CTS and its SIFS
		 * ACK: 0 if immediately previous directed data/mgmt had
		 *	more=0, with more=1 duration in ACK frame is duration
		 *	from previous frame minus time needed to transmit ACK
		 *	and its SIFS
		 * PS Poll: BIT(15) | BIT(14) | aid
		 */
		return 0;
	}

	/* data/mgmt */
	if (0 /* FIX: data/mgmt during CFP */)
		return cpu_to_le16(32768);

	if (group_addr) /* Group address as the destination - no ACK */
		return 0;

	/* Individual destination address:
	 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
	 * CTS and ACK frames shall be transmitted using the highest rate in
	 * basic rate set that is less than or equal to the rate of the
	 * immediately previous frame and that is using the same modulation
	 * (CCK or OFDM). If no basic rate set matches with these requirements,
	 * the highest mandatory rate of the PHY that is less than or equal to
	 * the rate of the previous frame is used.
	 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
	 */
	rate = -1;
	/* use lowest available if everything fails */
	mrate = sband->bitrates[0].bitrate;
	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *r = &sband->bitrates[i];

		if (r->bitrate > txrate->bitrate)
			break;

		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
			rate = r->bitrate;

		switch (sband->band) {
		case IEEE80211_BAND_2GHZ: {
			u32 flag;
			if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
				flag = IEEE80211_RATE_MANDATORY_G;
			else
				flag = IEEE80211_RATE_MANDATORY_B;
			if (r->flags & flag)
				mrate = r->bitrate;
			break;
		}
		case IEEE80211_BAND_5GHZ:
			if (r->flags & IEEE80211_RATE_MANDATORY_A)
				mrate = r->bitrate;
			break;
		case IEEE80211_NUM_BANDS:
			WARN_ON(1);
			break;
		}
	}
	if (rate == -1) {
		/* No matching basic rate found; use highest suitable mandatory
		 * PHY rate */
		rate = mrate;
	}

	/* Time needed to transmit ACK
	 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
	 * to closest integer */

	dur = ieee80211_frame_duration(local, 10, rate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);

	if (next_frag_len) {
		/* Frame is fragmented: duration increases with time needed to
		 * transmit next fragment plus ACK and 2 x SIFS. */
		dur *= 2; /* ACK + SIFS */
		/* next fragment */
		dur += ieee80211_frame_duration(local, next_frag_len,
				txrate->bitrate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);
	}

	return cpu_to_le16(dur);
}

static inline int is_ieee80211_device(struct ieee80211_local *local,
				      struct net_device *dev)
{
	return local == wdev_priv(dev->ieee80211_ptr);
}

/* tx handlers */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_if_managed *ifmgd;

	/* driver doesn't support power save */
	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
		return TX_CONTINUE;

	/* hardware does dynamic power save */
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
		return TX_CONTINUE;

	/* dynamic power save disabled */
	if (local->hw.conf.dynamic_ps_timeout <= 0)
		return TX_CONTINUE;

	/* we are scanning, don't enable power save */
	if (local->scanning)
		return TX_CONTINUE;

	if (!local->ps_sdata)
		return TX_CONTINUE;

	/* No point if we're going to suspend */
	if (local->quiescing)
		return TX_CONTINUE;

	/* dynamic ps is supported only in managed mode */
	if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
		return TX_CONTINUE;

	ifmgd = &tx->sdata->u.mgd;

	/*
	 * Don't wakeup from power save if u-apsd is enabled, voip ac has
	 * u-apsd enabled and the frame is in voip class. This effectively
	 * means that even if all access categories have u-apsd enabled, in
	 * practice u-apsd is only used with the voip ac. This is a
	 * workaround for the case when received voip class packets do not
	 * have correct qos tag for some reason, due to the network or the
	 * peer application.
	 *
	 * Note: local->uapsd_queues access is racy here. If the value is
	 * changed via debugfs, user needs to reassociate manually to have
	 * everything in sync.
	 */
	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
	    && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
	    && skb_get_queue_mapping(tx->skb) == 0)
		return TX_CONTINUE;

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		ieee80211_stop_queues_by_reason(&local->hw,
						IEEE80211_QUEUE_STOP_REASON_PS);
		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
		ieee80211_queue_work(&local->hw,
				     &local->dynamic_ps_disable_work);
	}

	mod_timer(&local->dynamic_ps_timer, jiffies +
		  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	u32 sta_flags;

	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
		return TX_CONTINUE;

	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
	    !ieee80211_is_probe_req(hdr->frame_control) &&
	    !ieee80211_is_nullfunc(hdr->frame_control))
		/*
		 * When software scanning only nullfunc frames (to notify
		 * the sleep state to the AP) and probe requests (for the
		 * active scan) are allowed, all other frames should not be
		 * sent and we should not get here, but if we do
		 * nonetheless, drop them to avoid sending them
		 * off-channel. See the link below and
		 * ieee80211_start_scan() for more.
		 *
		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
		 */
		return TX_DROP;

	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
		return TX_CONTINUE;

	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
		return TX_CONTINUE;

	sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;

	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
		if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
			     ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			printk(KERN_DEBUG "%s: dropped data frame to not "
			       "associated station %pM\n",
			       tx->sdata->name, hdr->addr1);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
			return TX_DROP;
		}
	} else {
		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			     tx->local->num_sta == 0 &&
			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
			/*
			 * No associated STAs - no need to send multicast
			 * frames.
			 */
			return TX_DROP;
		}
		return TX_CONTINUE;
	}

	return TX_CONTINUE;
}

/* This function is called whenever the AP is about to exceed the maximum limit
 * of buffered frames for power saving STAs. This situation should not really
 * happen often during normal operation, so dropping the oldest buffered packet
 * from each queue should be OK to make some room for new frames.
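 * Only one frame is dropped from each AP interface's broadcast buffer and
 * from each station's PS buffer per invocation, so a single call only frees
 * a handful of slots.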
 */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
	int total = 0, purged = 0;
	struct sk_buff *skb;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;

	/*
	 * virtual interfaces are protected by RCU
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ieee80211_if_ap *ap;
		if (sdata->vif.type != NL80211_IFTYPE_AP)
			continue;
		ap = &sdata->u.ap;
		skb = skb_dequeue(&ap->ps_bc_buf);
		if (skb) {
			purged++;
			dev_kfree_skb(skb);
		}
		total += skb_queue_len(&ap->ps_bc_buf);
	}

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		skb = skb_dequeue(&sta->ps_tx_buf);
		if (skb) {
			purged++;
			dev_kfree_skb(skb);
		}
		total += skb_queue_len(&sta->ps_tx_buf);
	}

	rcu_read_unlock();

	local->total_ps_buffered = total;
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
		    purged);
#endif
}

static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs only in AP/VLAN mode */
	if (!tx->sdata->bss)
		return TX_CONTINUE;

	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	/* no stations in PS mode */
	if (!atomic_read(&tx->sdata->bss->num_sta_ps))
		return TX_CONTINUE;

	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;

	/* device releases frame after DTIM beacon */
	if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
		return TX_CONTINUE;

	/* buffered in mac80211 */
	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
		purge_old_ps_buffers(tx->local);

	if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
			       tx->sdata->name);
#endif
		dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
	} else
		tx->local->total_ps_buffered++;

	skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);

	return TX_QUEUED;
}

static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	if (!ieee80211_is_mgmt(fc))
		return 0;

	if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
		return 0;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
					    skb->data))
		return 0;

	return 1;
}

static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_local *local = tx->local;
	u32 staflags;

	if (unlikely(!sta ||
		     ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_auth(hdr->frame_control) ||
		     ieee80211_is_assoc_resp(hdr->frame_control) ||
		     ieee80211_is_reassoc_resp(hdr->frame_control)))
		return TX_CONTINUE;

	staflags = get_sta_flags(sta);

	if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
		     !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
		       "before %d)\n",
		       sta->sta.addr, sta->sta.aid,
		       skb_queue_len(&sta->ps_tx_buf));
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);
		if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
			if (net_ratelimit()) {
				printk(KERN_DEBUG "%s: STA %pM TX "
				       "buffer full - dropping oldest frame\n",
				       tx->sdata->name, sta->sta.addr);
			}
#endif
			dev_kfree_skb(old);
		} else
			tx->local->total_ps_buffered++;

		/*
		 * Queue frame to be sent after STA wakes up/polls,
		 * but don't set the TIM bit if the driver is blocking
		 * wakeup or poll response transmissions anyway.
		 */
		if (skb_queue_empty(&sta->ps_tx_buf) &&
		    !(staflags & WLAN_STA_PS_DRIVER))
			sta_info_set_tim_bit(sta);

		info->control.jiffies = jiffies;
		info->control.vif = &tx->sdata->vif;
		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
		skb_queue_tail(&sta->ps_tx_buf, tx->skb);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));

		return TX_QUEUED;
	}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	else if (unlikely(staflags & WLAN_STA_PS_STA)) {
		printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
		       "set -> send frame\n", tx->sdata->name,
		       sta->sta.addr);
	}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_UNICAST)
		return ieee80211_tx_h_unicast_ps_buf(tx);
	else
		return ieee80211_tx_h_multicast_ps_buf(tx);
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
		     tx->sdata->control_port_no_encrypt))
		info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_key *key = NULL;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
		tx->key = NULL;
	else if (tx->sta && (key = rcu_dereference(tx->sta->ptk)))
		tx->key = key;
	else if (ieee80211_is_mgmt(hdr->frame_control) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 ieee80211_is_robust_mgmt_frame(hdr) &&
		 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
		tx->key = key;
	else if (is_multicast_ether_addr(hdr->addr1) &&
		 (key = rcu_dereference(tx->sdata->default_multicast_key)))
		tx->key = key;
	else if (!is_multicast_ether_addr(hdr->addr1) &&
		 (key = rcu_dereference(tx->sdata->default_unicast_key)))
		tx->key = key;
	else if (tx->sdata->drop_unencrypted &&
		 (tx->skb->protocol != tx->sdata->control_port_protocol) &&
		 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
		 (!ieee80211_is_robust_mgmt_frame(hdr) ||
		  (ieee80211_is_action(hdr->frame_control) &&
		   tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
		return TX_DROP;
	} else
		tx->key = NULL;

	if (tx->key) {
		bool skip_hw = false;

		tx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */

		switch (tx->key->conf.cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			if (ieee80211_is_auth(hdr->frame_control))
				break;
			/* fall through */
		case WLAN_CIPHER_SUITE_TKIP:
			if (!ieee80211_is_data_present(hdr->frame_control))
				tx->key = NULL;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			if (!ieee80211_is_data_present(hdr->frame_control) &&
			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
					       tx->skb))
				tx->key = NULL;
			else
				skip_hw = (tx->key->conf.flags &
					   IEEE80211_KEY_FLAG_SW_MGMT) &&
					ieee80211_is_mgmt(hdr->frame_control);
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
			if (!ieee80211_is_mgmt(hdr->frame_control))
				tx->key = NULL;
			break;
		}

		if (!skip_hw && tx->key &&
		    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
			info->control.hw_key = &tx->key->conf;
	}

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i;
	u32 len;
	bool inval = false, rts = false, short_preamble = false;
	struct ieee80211_tx_rate_control txrc;
	u32 sta_flags;

	memset(&txrc, 0, sizeof(txrc));

	sband = tx->local->hw.wiphy->bands[tx->channel->band];

	len = min_t(u32, tx->skb->len + FCS_LEN,
		    tx->local->hw.wiphy->frag_threshold);

	/* set up the tx rate control struct we give the RC algo */
	txrc.hw = local_to_hw(tx->local);
	txrc.sband = sband;
	txrc.bss_conf = &tx->sdata->vif.bss_conf;
	txrc.skb = tx->skb;
	txrc.reported_rate.idx = -1;
	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
	if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
		txrc.max_rate_idx = -1;
	else
		txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);

	/* set up RTS protection if desired */
	if (len > tx->local->hw.wiphy->rts_threshold) {
		txrc.rts = rts = true;
	}

	/*
	 * Use short preamble if the BSS can handle it, but not for
	 * management frames unless we know the receiver can handle
	 * that -- the management frame might be to a station that
	 * just wants a probe response.
	 */
	if (tx->sdata->vif.bss_conf.use_short_preamble &&
	    (ieee80211_is_data(hdr->frame_control) ||
	     (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
		txrc.short_preamble = short_preamble = true;

	sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;

	/*
	 * Let's not bother with rate control if we're associated and cannot
	 * talk to the sta. This should not happen.
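	 * (Concretely: a software scan is in progress, we are associated,
	 * and no usable legacy rate exists for this station in the current
	 * band.)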
	 */
	if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
		 (sta_flags & WLAN_STA_ASSOC) &&
		 !rate_usable_index_exists(sband, &tx->sta->sta),
		 "%s: Dropped data frame as no usable bitrate found while "
		 "scanning and associated. Target station: "
		 "%pM on %d GHz band\n",
		 tx->sdata->name, hdr->addr1,
		 tx->channel->band ? 5 : 2))
		return TX_DROP;

	/*
	 * If we're associated with the sta at this point we know we can at
	 * least send the frame at the lowest bit rate.
	 */
	rate_control_get_rate(tx->sdata, tx->sta, &txrc);

	if (unlikely(info->control.rates[0].idx < 0))
		return TX_DROP;

	if (txrc.reported_rate.idx < 0) {
		txrc.reported_rate = info->control.rates[0];
		if (tx->sta && ieee80211_is_data(hdr->frame_control))
			tx->sta->last_tx_rate = txrc.reported_rate;
	} else if (tx->sta)
		tx->sta->last_tx_rate = txrc.reported_rate;

	if (unlikely(!info->control.rates[0].count))
		info->control.rates[0].count = 1;

	if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
			 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
		info->control.rates[0].count = 1;

	if (is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * XXX: verify the rate is in the basic rateset
		 */
		return TX_CONTINUE;
	}

	/*
	 * set up the RTS/CTS rate as the fastest basic rate
	 * that is not faster than the data rate
	 *
	 * XXX: Should this check all retry rates?
	 */
	if (!(info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
		s8 baserate = 0;

		rate = &sband->bitrates[info->control.rates[0].idx];

		for (i = 0; i < sband->n_bitrates; i++) {
			/* must be a basic rate */
			if (!(tx->sdata->vif.bss_conf.basic_rates & BIT(i)))
				continue;
			/* must not be faster than the data rate */
			if (sband->bitrates[i].bitrate > rate->bitrate)
				continue;
			/* maximum */
			if (sband->bitrates[baserate].bitrate <
			    sband->bitrates[i].bitrate)
				baserate = i;
		}

		info->control.rts_cts_rate_idx = baserate;
	}

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		/*
		 * make sure there's no valid rate following
		 * an invalid one, just in case drivers don't
		 * take the API seriously to stop at -1.
		 */
		if (inval) {
			info->control.rates[i].idx = -1;
			continue;
		}
		if (info->control.rates[i].idx < 0) {
			inval = true;
			continue;
		}

		/*
		 * For now assume MCS is already set up correctly, this
		 * needs to be fixed.
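		 * (MCS indices are only sanity-checked against 76, the
		 * highest 802.11n MCS; the legacy bitrate handling below is
		 * skipped for them.)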
		 */
		if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) {
			WARN_ON(info->control.rates[i].idx > 76);
			continue;
		}

		/* set up RTS protection if desired */
		if (rts)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_RTS_CTS;

		/* RC is busted */
		if (WARN_ON_ONCE(info->control.rates[i].idx >=
				 sband->n_bitrates)) {
			info->control.rates[i].idx = -1;
			continue;
		}

		rate = &sband->bitrates[info->control.rates[i].idx];

		/* set up short preamble */
		if (short_preamble &&
		    rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_SHORT_PREAMBLE;

		/* set up G protection */
		if (!rts && tx->sdata->vif.bss_conf.use_cts_prot &&
		    rate->flags & IEEE80211_RATE_ERP_G)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_CTS_PROTECT;
	}

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	u16 *seq;
	u8 *qc;
	int tid;

	/*
	 * Packet injection may want to control the sequence
	 * number, if we have no matching interface then we
	 * neither assign one ourselves nor ask the driver to.
	 */
	if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
		return TX_CONTINUE;

	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
		return TX_CONTINUE;

	if (ieee80211_hdrlen(hdr->frame_control) < 24)
		return TX_CONTINUE;

	/*
	 * Anything but QoS data that has a sequence number field
	 * (is long enough) gets a sequence number from the global
	 * counter.
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		/* driver should assign sequence number */
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		/* for pure STA mode without beacons, we can do it */
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
		tx->sdata->sequence_number += 0x10;
		return TX_CONTINUE;
	}

	/*
	 * This should be true for injected/management frames only, for
	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
	 * above since they are not QoS-data frames.
	 */
	if (!tx->sta)
		return TX_CONTINUE;

	/* include per-STA, per-TID sequence counter */

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	seq = &tx->sta->tid_seq[tid];

	hdr->seq_ctrl = cpu_to_le16(*seq);

	/* Increase the sequence number.
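	 * The fragment number occupies the low four bits of seq_ctrl, so
	 * adding 0x10 steps the 12-bit sequence number by one; masking with
	 * IEEE80211_SCTL_SEQ wraps it and keeps the fragment bits clear.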
	 */
	*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;

	return TX_CONTINUE;
}

static int ieee80211_fragment(struct ieee80211_local *local,
			      struct sk_buff *skb, int hdrlen,
			      int frag_threshold)
{
	struct sk_buff *tail = skb, *tmp;
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int pos = hdrlen + per_fragm;
	int rem = skb->len - hdrlen - per_fragm;

	if (WARN_ON(rem < 0))
		return -EINVAL;

	while (rem) {
		int fraglen = per_fragm;

		if (fraglen > rem)
			fraglen = rem;
		rem -= fraglen;
		tmp = dev_alloc_skb(local->tx_headroom +
				    frag_threshold +
				    IEEE80211_ENCRYPT_HEADROOM +
				    IEEE80211_ENCRYPT_TAILROOM);
		if (!tmp)
			return -ENOMEM;
		tail->next = tmp;
		tail = tmp;
		skb_reserve(tmp, local->tx_headroom +
				 IEEE80211_ENCRYPT_HEADROOM);
		/* copy control information */
		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
		skb_copy_queue_mapping(tmp, skb);
		tmp->priority = skb->priority;
		tmp->dev = skb->dev;

		/* copy header and data */
		memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
		memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);

		pos += fraglen;
	}

	skb->len = hdrlen + per_fragm;
	return 0;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int frag_threshold = tx->local->hw.wiphy->frag_threshold;
	int hdrlen;
	int fragnum;

	if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
		return TX_CONTINUE;

	/*
	 * Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in ieee80211_tx_prepare but extra
	 * caution taken here as fragmented ampdu may cause Tx stop.
	 */
	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
		return TX_DROP;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* internal error, why is TX_FRAGMENTED set? */
	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
		return TX_DROP;

	/*
	 * Now fragment the frame. This will allocate all the fragments and
	 * chain them (using skb as the first fragment) to skb->next.
	 * During transmission, we will remove the successfully transmitted
	 * fragments from this list. When the low-level driver rejects one
	 * of the fragments then we will simply pretend to accept the skb
	 * but store it away as pending.
	 */
	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
		return TX_DROP;

	/* update duration/seq/flags of fragments */
	fragnum = 0;
	do {
		int next_len;
		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);

		hdr = (void *)skb->data;
		info = IEEE80211_SKB_CB(skb);

		if (skb->next) {
			hdr->frame_control |= morefrags;
			next_len = skb->next->len;
			/*
			 * No multi-rate retries for fragmented frames, that
			 * would completely throw off the NAV at other STAs.
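			 * (The duration field written below is computed from
			 * the first rate only, so a retry at a different rate
			 * would advertise a wrong NAV.)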
			 */
			info->control.rates[1].idx = -1;
			info->control.rates[2].idx = -1;
			info->control.rates[3].idx = -1;
			info->control.rates[4].idx = -1;
			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		} else {
			hdr->frame_control &= ~morefrags;
			next_len = 0;
		}
		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
		fragnum++;
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;

	if (!tx->sta)
		return TX_CONTINUE;

	tx->sta->tx_packets++;
	do {
		tx->sta->tx_fragments++;
		tx->sta->tx_bytes += skb->len;
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	if (!tx->key)
		return TX_CONTINUE;

	switch (tx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return ieee80211_crypto_wep_encrypt(tx);
	case WLAN_CIPHER_SUITE_TKIP:
		return ieee80211_crypto_tkip_encrypt(tx);
	case WLAN_CIPHER_SUITE_CCMP:
		return ieee80211_crypto_ccmp_encrypt(tx);
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return ieee80211_crypto_aes_cmac_encrypt(tx);
	default:
		/* handle hw-only algorithm */
		if (info->control.hw_key) {
			ieee80211_tx_set_protected(tx);
			return TX_CONTINUE;
		}
		break;
	}

	return TX_DROP;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_hdr *hdr;
	int next_len;
	bool group_addr;

	do {
		hdr = (void *) skb->data;
		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
			break; /* must not overwrite AID */
		next_len = skb->next ? skb->next->len : 0;
		group_addr = is_multicast_ether_addr(hdr->addr1);

		hdr->duration_id =
			ieee80211_duration(tx, group_addr, next_len);
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

/* actual transmit path */

/*
 * deal with packet injection down monitor interface
 * with Radiotap Header -- only called for monitor mode interface
 */
static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
					  struct sk_buff *skb)
{
	/*
	 * this is the moment to interpret and discard the radiotap header that
	 * must be at the start of the packet injected in Monitor mode
	 *
	 * Need to take some care with endian-ness since radiotap
	 * args are little-endian
	 */

	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *) skb->data;
	struct ieee80211_supported_band *sband;
	bool hw_frag;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
						   NULL);

	sband = tx->local->hw.wiphy->bands[tx->channel->band];

	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	tx->flags &= ~IEEE80211_TX_FRAGMENTED;

	/* packet is fragmented in HW if we have a non-NULL driver callback */
	hw_frag = (tx->local->ops->set_frag_threshold != NULL);

	/*
	 * for every radiotap entry that is present
	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
	 * entries present, or -EINVAL on error)
	 */

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);

		if (ret)
			continue;

		/* see if this argument is something we can use */
		switch (iterator.this_arg_index) {
		/*
		 * You must take care when dereferencing iterator.this_arg
		 * for multibyte types... the pointer is not aligned.  Use
		 * get_unaligned((type *)iterator.this_arg) to dereference
		 * iterator.this_arg for type "type" safely on all arches.
		 */
		case IEEE80211_RADIOTAP_FLAGS:
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
				/*
				 * this indicates that the skb we have been
				 * handed has the 32-bit FCS CRC at the end...
				 * we should react to that by snipping it off
				 * because it will be recomputed and added
				 * on transmission
				 */
				if (skb->len < (iterator._max_length + FCS_LEN))
					return false;

				skb_trim(skb, skb->len - FCS_LEN);
			}
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
				info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
			if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
			    !hw_frag)
				tx->flags |= IEEE80211_TX_FRAGMENTED;
			break;

		/*
		 * Please update the file
		 * Documentation/networking/mac80211-injection.txt
		 * when parsing new fields here.
		 */

		default:
			break;
		}
	}

	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
		return false;

	/*
	 * remove the radiotap header
	 * iterator->_max_length was sanity-checked against
	 * skb->len by iterator init
	 */
	skb_pull(skb, iterator._max_length);

	return true;
}

static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
				  struct sk_buff *skb,
				  struct ieee80211_tx_info *info,
				  struct tid_ampdu_tx *tid_tx,
				  int tid)
{
	bool queued = false;

	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		info->flags |= IEEE80211_TX_CTL_AMPDU;
	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/*
		 * nothing -- this aggregation session is being started
		 * but that might still fail with the driver
		 */
	} else {
		spin_lock(&tx->sta->lock);
		/*
		 * Need to re-check now, because we may get here
		 *
		 *  1) in the window during which the setup is actually
		 *     already done, but not marked yet because not all
		 *     packets are spliced over to the driver pending
		 *     queue yet -- if this happened we acquire the lock
		 *     either before or after the splice happens, but
		 *     need to recheck which of these cases happened.
		 *
		 *  2) during session teardown, if the OPERATIONAL bit
		 *     was cleared due to the teardown but the pointer
		 *     hasn't been assigned NULL yet (or we loaded it
		 *     before it was assigned) -- in this case it may
		 *     now be NULL which means we should just let the
		 *     packet pass through because splicing the frames
		 *     back is already done.
		 */
		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];

		if (!tid_tx) {
			/* do nothing, let packet pass through */
		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			info->flags |= IEEE80211_TX_CTL_AMPDU;
		} else {
			queued = true;
			info->control.vif = &tx->sdata->vif;
			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
			__skb_queue_tail(&tid_tx->pending, skb);
		}
		spin_unlock(&tx->sta->lock);
	}

	return queued;
}

/*
 * initialises @tx
 */
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		     struct ieee80211_tx_data *tx,
		     struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int hdrlen, tid;
	u8 *qc;

	memset(tx, 0, sizeof(*tx));
	tx->skb = skb;
	tx->local = local;
	tx->sdata = sdata;
	tx->channel = local->hw.conf.channel;
	/*
	 * Set this flag (used below to indicate "automatic fragmentation"),
	 * it will be cleared/left by radiotap as desired.
	 * Only valid when fragmentation is done by the stack.
	 */
	if (!local->ops->set_frag_threshold)
		tx->flags |= IEEE80211_TX_FRAGMENTED;

	/* process and remove the injection radiotap header */
	if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
		if (!__ieee80211_parse_tx_radiotap(tx, skb))
			return TX_DROP;

		/*
		 * __ieee80211_parse_tx_radiotap has now removed
		 * the radiotap header that was present and pre-filled
		 * 'tx' with tx control information.
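		 * (By default injected frames are sent unencrypted and are
		 * not fragmented by the stack; the radiotap WEP and FRAG
		 * flags may override that.)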
		 */
		info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
	}

	/*
	 * If this flag is set to true anywhere, and we get here,
	 * we are doing the needed processing, so remove the flag
	 * now.
	 */
	info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;

	hdr = (struct ieee80211_hdr *) skb->data;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		tx->sta = rcu_dereference(sdata->u.vlan.sta);
		if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
			return TX_DROP;
	} else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		tx->sta = sta_info_get_bss(sdata, hdr->addr1);
	}
	if (!tx->sta)
		tx->sta = sta_info_get(sdata, hdr->addr1);

	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
		struct tid_ampdu_tx *tid_tx;

		qc = ieee80211_get_qos_ctl(hdr);
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;

		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
		if (tid_tx) {
			bool queued;

			queued = ieee80211_tx_prep_agg(tx, skb, info,
						       tid_tx, tid);

			if (unlikely(queued))
				return TX_QUEUED;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1)) {
		tx->flags &= ~IEEE80211_TX_UNICAST;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	} else {
		tx->flags |= IEEE80211_TX_UNICAST;
		if (unlikely(local->wifi_wme_noack_test))
			info->flags |= IEEE80211_TX_CTL_NO_ACK;
		else
			info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
	}

	if (tx->flags & IEEE80211_TX_FRAGMENTED) {
		if ((tx->flags & IEEE80211_TX_UNICAST) &&
		    skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
		    !(info->flags & IEEE80211_TX_CTL_AMPDU))
			tx->flags |= IEEE80211_TX_FRAGMENTED;
		else
			tx->flags &= ~IEEE80211_TX_FRAGMENTED;
	}

	if (!tx->sta)
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
		u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
		tx->ethertype = (pos[0] << 8) | pos[1];
	}
	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;

	return TX_CONTINUE;
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
			   struct sta_info *sta, bool txpending)
{
	struct sk_buff *skb = *skbp, *next;
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata;
	unsigned long flags;
	int len;
	bool fragm = false;

	while (skb) {
		int q = skb_get_queue_mapping(skb);
		__le16 fc;

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		if (local->queue_stop_reasons[q] ||
		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
			/*
			 * Since queue is stopped, queue up frames for later
			 * transmission from the tx-pending tasklet when the
			 * queue is woken again.
			 */

			do {
				next = skb->next;
				skb->next = NULL;
				/*
				 * NB: If txpending is true, next must already
				 * be NULL since we must've gone through this
				 * loop before already; therefore we can just
				 * queue the frame to the head without worrying
				 * about reordering of fragments.
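				 * (Frames that were not pending go to the
				 * tail, so fragments of a frame stay in order
				 * behind whatever is already queued.)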
				 */
				if (unlikely(txpending))
					__skb_queue_head(&local->pending[q],
							 skb);
				else
					__skb_queue_tail(&local->pending[q],
							 skb);
			} while ((skb = next));

			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);
			return false;
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		info = IEEE80211_SKB_CB(skb);

		if (fragm)
			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
					 IEEE80211_TX_CTL_FIRST_FRAGMENT);

		next = skb->next;
		len = skb->len;

		if (next)
			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;

		sdata = vif_to_sdata(info->control.vif);

		switch (sdata->vif.type) {
		case NL80211_IFTYPE_MONITOR:
			info->control.vif = NULL;
			break;
		case NL80211_IFTYPE_AP_VLAN:
			info->control.vif = &container_of(sdata->bss,
				struct ieee80211_sub_if_data, u.ap)->vif;
			break;
		default:
			/* keep */
			break;
		}

		if (sta && sta->uploaded)
			info->control.sta = &sta->sta;
		else
			info->control.sta = NULL;

		fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
		drv_tx(local, skb);

		ieee80211_tpt_led_trig_tx(local, fc, len);
		*skbp = skb = next;
		ieee80211_led_tx(local, 1);
		fragm = true;
	}

	return true;
}

/*
 * Invoke TX handlers, return 0 on success and non-zero if the
 * frame was dropped or queued.
 */
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_result res = TX_DROP;

#define CALL_TXH(txh) \
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto txh_done;	\
	} while (0)

	CALL_TXH(ieee80211_tx_h_dynamic_ps);
	CALL_TXH(ieee80211_tx_h_check_assoc);
	CALL_TXH(ieee80211_tx_h_ps_buf);
	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
	CALL_TXH(ieee80211_tx_h_select_key);
	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_rate_ctrl);

	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
		goto txh_done;

	CALL_TXH(ieee80211_tx_h_michael_mic_add);
	CALL_TXH(ieee80211_tx_h_sequence);
	CALL_TXH(ieee80211_tx_h_fragment);
	/* handlers after fragment must be aware of tx info fragmentation! */
	CALL_TXH(ieee80211_tx_h_stats);
	CALL_TXH(ieee80211_tx_h_encrypt);
	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		while (skb) {
			struct sk_buff *next;

			next = skb->next;
			dev_kfree_skb(skb);
			skb = next;
		}
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
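 * (Prepares the tx data, runs the TX handlers and, if none of them dropped
 * or queued the frame, hands it to __ieee80211_tx().)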
 */
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
			 struct sk_buff *skb, bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result res_prepare;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u16 queue;
	bool result = true;

	queue = skb_get_queue_mapping(skb);

	if (unlikely(skb->len < 10)) {
		dev_kfree_skb(skb);
		return true;
	}

	rcu_read_lock();

	/* initialises tx */
	res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);

	if (unlikely(res_prepare == TX_DROP)) {
		dev_kfree_skb(skb);
		goto out;
	} else if (unlikely(res_prepare == TX_QUEUED)) {
		goto out;
	}

	tx.channel = local->hw.conf.channel;
	info->band = tx.channel->band;

	if (!invoke_tx_handlers(&tx))
		result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
 out:
	rcu_read_unlock();
	return result;
}

/* device xmit handlers */

static int ieee80211_skb_resize(struct ieee80211_local *local,
				struct sk_buff *skb,
				int head_need, bool may_encrypt)
{
	int tail_need = 0;

	/*
	 * This could be optimised, devices that do full hardware
	 * crypto (including TKIP MMIC) need no tailroom... But we
	 * have no drivers for such devices currently.
	 */
	if (may_encrypt) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (head_need || tail_need) {
		/* Sorry. Can't account for this any more */
		skb_orphan(skb);
	}

	if (skb_cloned(skb))
		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
	else if (head_need || tail_need)
		I802_DEBUG_INC(local->tx_expand_skb_head);
	else
		return 0;

	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
		wiphy_debug(local->hw.wiphy,
			    "failed to reallocate TX buffer\n");
		return -ENOMEM;
	}

	/* update truesize too */
	skb->truesize += head_need + tail_need;

	return 0;
}

static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sub_if_data *tmp_sdata;
	int headroom;
	bool may_encrypt;

	rcu_read_lock();

	if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
		int hdrlen;
		u16 len_rthdr;

		info->flags |= IEEE80211_TX_CTL_INJECTED |
			       IEEE80211_TX_INTFL_HAS_RADIOTAP;

		len_rthdr = ieee80211_get_radiotap_len(skb->data);
		hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
		hdrlen = ieee80211_hdrlen(hdr->frame_control);

		/* check the header is complete in the frame */
		if (likely(skb->len >= len_rthdr + hdrlen)) {
			/*
			 * We process outgoing injected frames that have a
			 * local address as though they are our own frames.
			 * This code here isn't entirely correct, the local
			 * MAC address is not necessarily enough to find
			 * the interface to use; for that proper VLAN/WDS
			 * support we will need a different mechanism.
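			 * (Only running interfaces are considered, and
			 * monitor, AP_VLAN and WDS interfaces are skipped in
			 * the search below.)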
			 */

			list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
						list) {
				if (!ieee80211_sdata_running(tmp_sdata))
					continue;
				if (tmp_sdata->vif.type ==
				    NL80211_IFTYPE_MONITOR ||
				    tmp_sdata->vif.type ==
				    NL80211_IFTYPE_AP_VLAN ||
				    tmp_sdata->vif.type ==
				    NL80211_IFTYPE_WDS)
					continue;
				if (compare_ether_addr(tmp_sdata->vif.addr,
						       hdr->addr2) == 0) {
					sdata = tmp_sdata;
					break;
				}
			}
		}
	}

	may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);

	headroom = local->tx_headroom;
	if (may_encrypt)
		headroom += IEEE80211_ENCRYPT_HEADROOM;
	headroom -= skb_headroom(skb);
	headroom = max_t(int, 0, headroom);

	if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
		dev_kfree_skb(skb);
		rcu_read_unlock();
		return;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	info->control.vif = &sdata->vif;

	if (ieee80211_vif_is_mesh(&sdata->vif) &&
	    ieee80211_is_data(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1))
		if (mesh_nexthop_lookup(skb, sdata)) {
			/* skb queued: don't free */
			rcu_read_unlock();
			return;
		}

	ieee80211_set_qos_hdr(local, skb);
	ieee80211_tx(sdata, skb, false);
	rcu_read_unlock();
}

netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_channel *chan = local->hw.conf.channel;
	struct ieee80211_radiotap_header *prthdr =
		(struct ieee80211_radiotap_header *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u16 len_rthdr;

	/*
	 * Frame injection is not allowed if beaconing is not allowed
	 * or if we need radar detection. Beaconing is usually not allowed when
	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
	 * Passive scan is also used in world regulatory domains where
	 * your country is not known and as such it should be treated as
	 * NO TX unless the channel is explicitly allowed in which case
	 * your current regulatory domain would not have the passive scan
	 * flag.
	 *
	 * Since AP mode uses monitor interfaces to inject/TX management
	 * frames we can make AP mode the exception to this rule once it
	 * supports radar detection as its implementation can deal with
	 * radar detection by itself. We can do that later by adding a
	 * monitor flag to interfaces used for AP support.
	 */
	if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
	     IEEE80211_CHAN_PASSIVE_SCAN)))
		goto fail;

	/* check for not even having the fixed radiotap header part */
	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
		goto fail; /* too short to be possibly valid */

	/* is it a header version we can trust to find length from? */
	if (unlikely(prthdr->it_version))
		goto fail; /* only version 0 is supported */

	/* then there must be a radiotap header with a length we can use */
	len_rthdr = ieee80211_get_radiotap_len(skb->data);

	/* does the skb contain enough to deliver on the alleged length? */
	if (unlikely(skb->len < len_rthdr))
		goto fail; /* skb too short for claimed rt header extent */

	/*
	 * fix up the pointers accounting for the radiotap
	 * header still being in there.  We are being given
	 * a precooked IEEE80211 header so no need for
	 * normal processing
	 */
	skb_set_mac_header(skb, len_rthdr);
	/*
	 * these are just fixed to the end of the rt area since we
	 * don't have any better information and at this point, nobody cares
	 */
	skb_set_network_header(skb, len_rthdr);
	skb_set_transport_header(skb, len_rthdr);

	memset(info, 0, sizeof(*info));

	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;

	/* pass the radiotap header up to xmit */
	ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
	return NETDEV_TX_OK;

 fail:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}

/**
 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
 * subinterfaces (wlan#, WDS, and VLAN interfaces)
 * @skb: packet to be sent
 * @dev: incoming interface
 *
 * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will
 * not be freed, and caller is responsible for either retrying later or freeing
 * skb).
 *
 * This function takes in an Ethernet header and encapsulates it with suitable
 * IEEE 802.11 header based on which interface the packet is coming in. The
 * encapsulated packet will then be passed to master interface, wlan#.11, for
 * transmission (through low-level driver).
 */
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info;
	int ret = NETDEV_TX_BUSY, head_need;
	u16 ethertype, hdrlen, meshhdrlen = 0;
	__le16 fc;
	struct ieee80211_hdr hdr;
	struct ieee80211s_hdr mesh_hdr __maybe_unused;
	struct mesh_path __maybe_unused *mppath = NULL;
	const u8 *encaps_data;
	int encaps_len, skip_header_bytes;
	int nh_pos, h_pos;
	struct sta_info *sta = NULL;
	u32 sta_flags = 0;
	struct sk_buff *tmp_skb;

	if (unlikely(skb->len < ETH_HLEN)) {
		ret = NETDEV_TX_OK;
		goto fail;
	}

	/* convert Ethernet header to proper 802.11 header (based on
	 * operation mode) */
	ethertype = (skb->data[12] << 8) | skb->data[13];
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		rcu_read_lock();
		sta = rcu_dereference(sdata->u.vlan.sta);
		if (sta) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
			hdrlen = 30;
			sta_flags = get_sta_flags(sta);
		}
		rcu_read_unlock();
		if (sta)
			break;
		/* fall through */
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr.addr1, skb->data, ETH_ALEN);
		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
		hdrlen = 24;
		break;
	case NL80211_IFTYPE_WDS:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
		/* RA TA DA SA */
		memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr.addr3, skb->data, ETH_ALEN);
		memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
		hdrlen = 30;
		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT:
		if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
			/* Do not send frames with mesh_ttl == 0 */
			sdata->u.mesh.mshstats.dropped_frames_ttl++;
			ret = NETDEV_TX_OK;
			goto fail;
		}
		if (!is_multicast_ether_addr(skb->data))
			mppath = mpp_path_lookup(skb->data, sdata);

		/*
		 * Use address extension if it is a packet from
		 * another interface or if we know the destination
		 * is being proxied by a portal (i.e. portal address
		 * differs from proxied address)
		 */
		if (compare_ether_addr(sdata->vif.addr,
				       skb->data + ETH_ALEN) == 0 &&
		    !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
					skb->data, skb->data + ETH_ALEN);
			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
					sdata, NULL, NULL);
		} else {
			int is_mesh_mcast = 1;
			const u8 *mesh_da;

			rcu_read_lock();
			if (is_multicast_ether_addr(skb->data))
				/* DA TA mSA AE:SA */
				mesh_da = skb->data;
			else {
				static const u8 bcast[ETH_ALEN] =
					{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
				if (mppath) {
					/* RA TA mDA mSA AE:DA SA */
					mesh_da = mppath->mpp;
					is_mesh_mcast = 0;
				} else {
					/* DA TA mSA AE:SA */
					mesh_da = bcast;
				}
			}
			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
					mesh_da, sdata->vif.addr);
			rcu_read_unlock();
			if (is_mesh_mcast)
				meshhdrlen =
					ieee80211_new_mesh_header(&mesh_hdr,
							sdata,
							skb->data + ETH_ALEN,
							NULL);
			else
				meshhdrlen =
					ieee80211_new_mesh_header(&mesh_hdr,
							sdata,
							skb->data,
							skb->data + ETH_ALEN);

		}
		break;
#endif
	case NL80211_IFTYPE_STATION:
		memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
		if (sdata->u.mgd.use_4addr &&
		    cpu_to_be16(ethertype) != sdata->control_port_protocol) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
			hdrlen = 30;
		} else {
			fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
			/* BSSID SA DA */
			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
			memcpy(hdr.addr3, skb->data, ETH_ALEN);
			hdrlen = 24;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		/* DA SA BSSID */
		memcpy(hdr.addr1, skb->data, ETH_ALEN);
		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
		memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
		hdrlen = 24;
		break;
	default:
		ret = NETDEV_TX_OK;
		goto fail;
	}

	/*
	 * There's no need to try to look up the destination
	 * if it is a multicast address (which can only happen
	 * in AP mode)
	 */
	if (!is_multicast_ether_addr(hdr.addr1)) {
		rcu_read_lock();
		sta = sta_info_get(sdata, hdr.addr1);
		if (sta)
			sta_flags = get_sta_flags(sta);
		rcu_read_unlock();
	}

	/* receiver and we are QoS enabled, use a QoS type frame */
	if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		hdrlen += 2;
	}

	/*
	 * Drop unicast frames to unauthorised stations unless they are
	 * EAPOL frames from the local station.

	/*
	 * There's no need to try to look up the destination
	 * if it is a multicast address (which can only happen
	 * in AP mode)
	 */
	if (!is_multicast_ether_addr(hdr.addr1)) {
		rcu_read_lock();
		sta = sta_info_get(sdata, hdr.addr1);
		if (sta)
			sta_flags = get_sta_flags(sta);
		rcu_read_unlock();
	}

	/* receiver and we are QoS enabled, use a QoS type frame */
	if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		hdrlen += 2;
	}

	/*
	 * Drop unicast frames to unauthorised stations unless they are
	 * EAPOL frames from the local station.
	 */
	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
	    unlikely(!is_multicast_ether_addr(hdr.addr1) &&
		     !(sta_flags & WLAN_STA_AUTHORIZED) &&
		     !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
		       compare_ether_addr(sdata->vif.addr,
					  skb->data + ETH_ALEN) == 0))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: dropped frame to %pM"
			       " (unauthorized port)\n", dev->name,
			       hdr.addr1);
#endif

		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);

		ret = NETDEV_TX_OK;
		goto fail;
	}

	/*
	 * If the skb is shared we need to obtain our own copy.
	 */
	if (skb_shared(skb)) {
		tmp_skb = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		kfree_skb(tmp_skb);

		if (!skb) {
			ret = NETDEV_TX_OK;
			goto fail;
		}
	}

	hdr.frame_control = fc;
	hdr.duration_id = 0;
	hdr.seq_ctrl = 0;

	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	} else {
		encaps_data = NULL;
		encaps_len = 0;
	}
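
	/*
	 * For example (illustrative note): with RFC 1042 encapsulation the
	 * payload that follows the 802.11 header becomes
	 *
	 *	AA AA 03 00 00 00 <ethertype> <original IP/ARP/... payload>
	 *
	 * and for the 802.1H bridge-tunnel case (AARP/IPX)
	 *
	 *	AA AA 03 00 00 F8 <ethertype> <original payload>
	 *
	 * which is why only ETH_HLEN - 2 bytes are skipped below: the
	 * original ethertype stays in place and the 6-byte LLC/SNAP
	 * prefix is later pushed in front of it.
	 */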

	nh_pos = skb_network_header(skb) - skb->data;
	h_pos = skb_transport_header(skb) - skb->data;

	skb_pull(skb, skip_header_bytes);
	nh_pos -= skip_header_bytes;
	h_pos -= skip_header_bytes;

	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);

	/*
	 * We need to modify the skb header and hence need a writable copy
	 * with enough headroom.  So far, head_need only accounts for the
	 * space needed immediately (802.11 header, LLC/SNAP, mesh header);
	 * it does not yet include the extra headroom (encryption, driver
	 * headroom) that may be needed later.  If the headroom is already
	 * sufficient and the skb is not cloned, avoid reallocating here.
	 *
	 * If we do have to reallocate, make the buffer big enough for
	 * everything we may ever need, so it does not have to be
	 * reallocated again later.
	 */

	if (head_need > 0 || skb_cloned(skb)) {
		head_need += IEEE80211_ENCRYPT_HEADROOM;
		head_need += local->tx_headroom;
		head_need = max_t(int, 0, head_need);
		if (ieee80211_skb_resize(local, skb, head_need, true))
			goto fail;
	}

	if (encaps_data) {
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
		nh_pos += encaps_len;
		h_pos += encaps_len;
	}

#ifdef CONFIG_MAC80211_MESH
	if (meshhdrlen > 0) {
		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
		nh_pos += meshhdrlen;
		h_pos += meshhdrlen;
	}
#endif

	if (ieee80211_is_data_qos(fc)) {
		__le16 *qos_control;

		qos_control = (__le16 *) skb_push(skb, 2);
		memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
		/*
		 * Maybe we could actually set some fields here, for now just
		 * initialise to zero to indicate no special operation.
		 */
		*qos_control = 0;
	} else
		memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);

	nh_pos += hdrlen;
	h_pos += hdrlen;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Update skb pointers to various headers since this modified frame
	 * is going to go through Linux networking code that may potentially
	 * need things like a pointer to the IP header. */
	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, nh_pos);
	skb_set_transport_header(skb, h_pos);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));

	dev->trans_start = jiffies;
	ieee80211_xmit(sdata, skb);

	return NETDEV_TX_OK;

 fail:
	if (ret == NETDEV_TX_OK)
		dev_kfree_skb(skb);

	return ret;
}
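
/*
 * Worked example (illustrative): for a unicast QoS data frame to an
 * authorized peer, hdrlen is 26 (24-byte header plus 2-byte QoS control)
 * and encaps_len is 6 (RFC 1042), so 32 bytes must be pushed in front of
 * the remaining payload.  If the skb arrived with, say, 16 bytes of
 * headroom, head_need starts at 32 - 16 = 16 and, because a reallocation
 * is needed anyway, grows by IEEE80211_ENCRYPT_HEADROOM and
 * local->tx_headroom so that later encryption and driver headers will not
 * force another copy.
 */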

/*
 * ieee80211_clear_tx_pending may not be called in a context where
 * it is possible that packets could come in again.
 */
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
	int i;

	for (i = 0; i < local->hw.queues; i++)
		skb_queue_purge(&local->pending[i]);
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead,
 * which in this case means re-queued -- take as an indication to stop sending
 * more pending frames.
 */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_hdr *hdr;
	bool result;

	sdata = vif_to_sdata(info->control.vif);

	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
		result = ieee80211_tx(sdata, skb, true);
	} else {
		hdr = (struct ieee80211_hdr *)skb->data;
		sta = sta_info_get(sdata, hdr->addr1);

		result = __ieee80211_tx(local, &skb, sta, true);
	}

	return result;
}

/*
 * Transmit all pending packets. Called from tasklet.
 */
void ieee80211_tx_pending(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *)data;
	struct ieee80211_sub_if_data *sdata;
	unsigned long flags;
	int i;
	bool txok;

	rcu_read_lock();

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If the queue is stopped for a reason other than pending
		 * frames, or we have no pending frames, proceed to the
		 * next queue.
		 */
		if (local->queue_stop_reasons[i] ||
		    skb_queue_empty(&local->pending[i]))
			continue;

		while (!skb_queue_empty(&local->pending[i])) {
			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

			if (WARN_ON(!info->control.vif)) {
				kfree_skb(skb);
				continue;
			}

			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);

			txok = ieee80211_tx_pending_skb(local, skb);
			spin_lock_irqsave(&local->queue_stop_reason_lock,
					  flags);
			if (!txok)
				break;
		}

		if (skb_queue_empty(&local->pending[i]))
			list_for_each_entry_rcu(sdata, &local->interfaces, list)
				netif_wake_subqueue(sdata->dev, i);
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	rcu_read_unlock();
}
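
/*
 * Illustrative note: the pending queues above interact with the driver's
 * flow control.  A typical driver stops a queue when its hardware ring
 * fills up and wakes it again once descriptors are freed, roughly:
 *
 *	if (hw_ring_full(ring))			// hypothetical driver check
 *		ieee80211_stop_queue(hw, q);	// mac80211 holds frames back
 *	...
 *	ieee80211_wake_queue(hw, q);		// lets TX resume; pending
 *						// frames are flushed by the
 *						// tasklet above
 *
 * (hw_ring_full(), ring and q are hypothetical driver-side names.)
 * Frames that could not be handed to the driver in the meantime end up on
 * local->pending[] and are transmitted by ieee80211_tx_pending().
 */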

/* functions for drivers to get certain frames */

static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
				     struct sk_buff *skb,
				     struct beacon_data *beacon)
{
	u8 *pos, *tim;
	int aid0 = 0;
	int i, have_bits = 0, n1, n2;

	/* Generate bitmap for TIM only if there are any STAs in power save
	 * mode. */
	if (atomic_read(&bss->num_sta_ps) > 0)
		/* in the hope that this is faster than
		 * checking byte-for-byte */
		have_bits = !bitmap_empty((unsigned long *)bss->tim,
					  IEEE80211_MAX_AID + 1);

	if (bss->dtim_count == 0)
		bss->dtim_count = beacon->dtim_period - 1;
	else
		bss->dtim_count--;

	tim = pos = (u8 *) skb_put(skb, 6);
	*pos++ = WLAN_EID_TIM;
	*pos++ = 4;
	*pos++ = bss->dtim_count;
	*pos++ = beacon->dtim_period;

	if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
		aid0 = 1;

	bss->dtim_bc_mc = aid0 == 1;

	if (have_bits) {
		/* Find largest even number N1 so that bits numbered 1 through
		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
		 * (N2 + 1) x 8 through 2007 are 0. */
		n1 = 0;
		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
			if (bss->tim[i]) {
				n1 = i & 0xfe;
				break;
			}
		}
		n2 = n1;
		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
			if (bss->tim[i]) {
				n2 = i;
				break;
			}
		}

		/* Bitmap control */
		*pos++ = n1 | aid0;
		/* Part Virt Bitmap */
		memcpy(pos, bss->tim + n1, n2 - n1 + 1);

		tim[1] = n2 - n1 + 4;
		skb_put(skb, n2 - n1);
	} else {
		*pos++ = aid0; /* Bitmap control */
		*pos++ = 0; /* Part Virt Bitmap */
	}
}
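
/*
 * Worked example (for illustration only): if the only station in power
 * save with buffered traffic has AID 17, bit 17 of the virtual bitmap is
 * set, i.e. bss->tim[2] == 0x02 and every other octet is zero.  The loops
 * above then give N1 = 2 (first non-zero octet, rounded down to an even
 * number) and N2 = 2 (last non-zero octet).  The Bitmap Control octet is
 * N1 | aid0 = 0x02 (bitmap offset N1/2 = 1; bit 0 would additionally be
 * set for buffered bc/mc traffic on a DTIM beacon), the Partial Virtual
 * Bitmap is the single octet 0x02, and tim[1] ends up as
 * N2 - N1 + 4 = 4.
 */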

struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 u16 *tim_offset, u16 *tim_length)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata = NULL;
	struct ieee80211_if_ap *ap = NULL;
	struct beacon_data *beacon;
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band = local->hw.conf.channel->band;
	struct ieee80211_tx_rate_control txrc;

	sband = local->hw.wiphy->bands[band];

	rcu_read_lock();

	sdata = vif_to_sdata(vif);

	if (!ieee80211_sdata_running(sdata))
		goto out;

	if (tim_offset)
		*tim_offset = 0;
	if (tim_length)
		*tim_length = 0;

	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		ap = &sdata->u.ap;
		beacon = rcu_dereference(ap->beacon);
		if (beacon) {
			/*
			 * headroom, head length,
			 * tail length and maximum TIM length
			 */
			skb = dev_alloc_skb(local->tx_headroom +
					    beacon->head_len +
					    beacon->tail_len + 256);
			if (!skb)
				goto out;

			skb_reserve(skb, local->tx_headroom);
			memcpy(skb_put(skb, beacon->head_len), beacon->head,
			       beacon->head_len);

			/*
			 * Not very nice, but we want to allow the driver to call
			 * ieee80211_beacon_get() as a response to the set_tim()
			 * callback. That, however, is already invoked under the
			 * sta_lock to guarantee consistent and race-free update
			 * of the tim bitmap in mac80211 and the driver.
			 */
			if (local->tim_in_locked_section) {
				ieee80211_beacon_add_tim(ap, skb, beacon);
			} else {
				unsigned long flags;

				spin_lock_irqsave(&local->sta_lock, flags);
				ieee80211_beacon_add_tim(ap, skb, beacon);
				spin_unlock_irqrestore(&local->sta_lock, flags);
			}

			if (tim_offset)
				*tim_offset = beacon->head_len;
			if (tim_length)
				*tim_length = skb->len - beacon->head_len;

			if (beacon->tail)
				memcpy(skb_put(skb, beacon->tail_len),
				       beacon->tail, beacon->tail_len);
		} else
			goto out;
	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
		struct ieee80211_hdr *hdr;
		struct sk_buff *presp = rcu_dereference(ifibss->presp);

		if (!presp)
			goto out;

		skb = skb_copy(presp, GFP_ATOMIC);
		if (!skb)
			goto out;

		hdr = (struct ieee80211_hdr *) skb->data;
		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						 IEEE80211_STYPE_BEACON);
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		struct ieee80211_mgmt *mgmt;
		u8 *pos;

#ifdef CONFIG_MAC80211_MESH
		if (!sdata->u.mesh.mesh_id_len)
			goto out;
#endif

		/* headroom plus a rough upper bound for the frame body and IEs */
		skb = dev_alloc_skb(local->tx_headroom + 400 +
				    sdata->u.mesh.vendor_ie_len);
		if (!skb)
			goto out;

		skb_reserve(skb, local->tx_headroom);
		mgmt = (struct ieee80211_mgmt *)
			skb_put(skb, 24 + sizeof(mgmt->u.beacon));
		memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
		mgmt->frame_control =
			cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
		memset(mgmt->da, 0xff, ETH_ALEN);
		memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
		mgmt->u.beacon.beacon_int =
			cpu_to_le16(sdata->vif.bss_conf.beacon_int);
		mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */

		pos = skb_put(skb, 2);
		*pos++ = WLAN_EID_SSID;
		*pos++ = 0x0;

		mesh_mgmt_ies_add(skb, sdata);
	} else {
		WARN_ON(1);
		goto out;
	}

	info = IEEE80211_SKB_CB(skb);

	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	info->flags |= IEEE80211_TX_CTL_NO_ACK;
	info->band = band;

	memset(&txrc, 0, sizeof(txrc));
	txrc.hw = hw;
	txrc.sband = sband;
	txrc.bss_conf = &sdata->vif.bss_conf;
	txrc.skb = skb;
	txrc.reported_rate.idx = -1;
	txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
	if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
		txrc.max_rate_idx = -1;
	else
		txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
	txrc.bss = true;
	rate_control_get_rate(sdata, NULL, &txrc);

	info->control.vif = vif;

	info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
			IEEE80211_TX_CTL_ASSIGN_SEQ |
			IEEE80211_TX_CTL_FIRST_FRAGMENT;
 out:
	rcu_read_unlock();
	return skb;
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
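
/*
 * Usage sketch (illustrative driver-side code, not part of mac80211): a
 * driver that beacons in software typically fetches a fresh template once
 * per beacon interval and uses the reported TIM location, roughly:
 *
 *	u16 tim_off, tim_len;
 *	struct sk_buff *b = ieee80211_beacon_get_tim(hw, vif,
 *						     &tim_off, &tim_len);
 *	if (b) {
 *		// b->data + tim_off points at the TIM element,
 *		// tim_len bytes long; hand the frame to the hardware
 *		my_hw_queue_beacon(priv, b);	// hypothetical helper
 *	}
 *
 * Drivers that do not care about the TIM position can use the
 * ieee80211_beacon_get() wrapper instead.
 */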

struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_pspoll *pspoll;
	struct ieee80211_local *local;
	struct sk_buff *skb;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
		return NULL;

	sdata = vif_to_sdata(vif);
	ifmgd = &sdata->u.mgd;
	local = sdata->local;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
	if (!skb) {
		printk(KERN_DEBUG "%s: failed to allocate buffer for "
		       "pspoll template\n", sdata->name);
		return NULL;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);

	pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
	memset(pspoll, 0, sizeof(*pspoll));
	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_PSPOLL);
	pspoll->aid = cpu_to_le16(ifmgd->aid);

	/* aid in PS-Poll has its two MSBs each set to 1 */
	pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);

	memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
	memcpy(pspoll->ta, vif->addr, ETH_ALEN);

	return skb;
}
EXPORT_SYMBOL(ieee80211_pspoll_get);

struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct ieee80211_hdr_3addr *nullfunc;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_local *local;
	struct sk_buff *skb;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
		return NULL;

	sdata = vif_to_sdata(vif);
	ifmgd = &sdata->u.mgd;
	local = sdata->local;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
	if (!skb) {
		printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
		       "template\n", sdata->name);
		return NULL;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
							  sizeof(*nullfunc));
	memset(nullfunc, 0, sizeof(*nullfunc));
	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					      IEEE80211_STYPE_NULLFUNC |
					      IEEE80211_FCTL_TODS);
	memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
	memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);

	return skb;
}
EXPORT_SYMBOL(ieee80211_nullfunc_get);

struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       const u8 *ssid, size_t ssid_len,
				       const u8 *ie, size_t ie_len)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local;
	struct ieee80211_hdr_3addr *hdr;
	struct sk_buff *skb;
	size_t ie_ssid_len;
	u8 *pos;

	sdata = vif_to_sdata(vif);
	local = sdata->local;
	ie_ssid_len = 2 + ssid_len;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
			    ie_ssid_len + ie_len);
	if (!skb) {
		printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
		       "request template\n", sdata->name);
		return NULL;
	}

	skb_reserve(skb, local->hw.extra_tx_headroom);

	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_REQ);
	memset(hdr->addr1, 0xff, ETH_ALEN);
	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
	memset(hdr->addr3, 0xff, ETH_ALEN);

	pos = skb_put(skb, ie_ssid_len);
	*pos++ = WLAN_EID_SSID;
	*pos++ = ssid_len;
	if (ssid)
		memcpy(pos, ssid, ssid_len);
	pos += ssid_len;

	if (ie) {
		pos = skb_put(skb, ie_len);
		memcpy(pos, ie, ie_len);
	}

	return skb;
}
EXPORT_SYMBOL(ieee80211_probereq_get);
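
/*
 * Usage sketch (illustrative, driver-side): hardware that transmits
 * PS-Poll or Null Data frames on its own can still let mac80211 build the
 * templates, e.g. when (re)programming the firmware:
 *
 *	struct sk_buff *nd = ieee80211_nullfunc_get(hw, vif);
 *	struct sk_buff *ps = ieee80211_pspoll_get(hw, vif);
 *
 *	if (nd && ps)
 *		my_fw_upload_templates(priv, nd, ps);	// hypothetical helper
 *	dev_kfree_skb(nd);
 *	dev_kfree_skb(ps);
 *
 * Both helpers return frames addressed to the current BSSID, with the AID
 * already encoded in the PS-Poll case, so the driver only adds whatever
 * its hardware needs on top (rates, timestamps, ...).
 */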

void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       const void *frame, size_t frame_len,
		       const struct ieee80211_tx_info *frame_txctl,
		       struct ieee80211_rts *rts)
{
	const struct ieee80211_hdr *hdr = frame;

	rts->frame_control =
	    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
	rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
					       frame_txctl);
	memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
	memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
}
EXPORT_SYMBOL(ieee80211_rts_get);

void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     const void *frame, size_t frame_len,
			     const struct ieee80211_tx_info *frame_txctl,
			     struct ieee80211_cts *cts)
{
	const struct ieee80211_hdr *hdr = frame;

	cts->frame_control =
	    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
	cts->duration = ieee80211_ctstoself_duration(hw, vif,
						     frame_len, frame_txctl);
	memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
}
EXPORT_SYMBOL(ieee80211_ctstoself_get);

struct sk_buff *
ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sk_buff *skb = NULL;
	struct sta_info *sta;
	struct ieee80211_tx_data tx;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_if_ap *bss = NULL;
	struct beacon_data *beacon;
	struct ieee80211_tx_info *info;

	sdata = vif_to_sdata(vif);
	bss = &sdata->u.ap;

	rcu_read_lock();
	beacon = rcu_dereference(bss->beacon);

	if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
		goto out;

	if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
		goto out; /* send buffered bc/mc only after DTIM beacon */

	while (1) {
		skb = skb_dequeue(&bss->ps_bc_buf);
		if (!skb)
			goto out;
		local->total_ps_buffered--;

		if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
			struct ieee80211_hdr *hdr =
				(struct ieee80211_hdr *) skb->data;
			/* more buffered multicast/broadcast frames ==> set
			 * MoreData flag in IEEE 802.11 header to inform PS
			 * STAs */
			hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}

		if (!ieee80211_tx_prepare(sdata, &tx, skb))
			break;
		dev_kfree_skb_any(skb);
	}

	info = IEEE80211_SKB_CB(skb);

	sta = tx.sta;
	tx.flags |= IEEE80211_TX_PS_BUFFERED;
	tx.channel = local->hw.conf.channel;
	info->band = tx.channel->band;

	if (invoke_tx_handlers(&tx))
		skb = NULL;
 out:
	rcu_read_unlock();

	return skb;
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);

void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* send all internal mgmt frames on VO */
	skb_set_queue_mapping(skb, 0);

	/*
	 * The other path calling ieee80211_xmit is from the tasklet,
	 * and while we can handle concurrent transmissions, the locking
	 * requirement is that we do not come into tx with bottom halves
	 * enabled.
	 */
	local_bh_disable();
	ieee80211_xmit(sdata, skb);
	local_bh_enable();
}
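
/*
 * Usage sketch (illustrative, driver-side): after transmitting a DTIM
 * beacon, a software-beaconing driver drains the broadcast/multicast
 * buffer via ieee80211_get_buffered_bc() before resuming normal traffic:
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = ieee80211_get_buffered_bc(hw, vif)))
 *		my_hw_tx(priv, skb);	// hypothetical queueing helper
 *
 * ieee80211_get_buffered_bc() returns NULL both when nothing is buffered
 * and when the current beacon is not a DTIM beacon, so the loop is safe
 * to run unconditionally in the beacon-done path.
 */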