/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb buffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at most 3 bytes of headroom for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}
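
	/*
	 * Example (illustrative, not from the original code): on a
	 * hw-crypto capable device the allocation below therefore asks
	 * for head_size = 4 + 8 = 12 bytes of headroom (alignment plus
	 * IV/EIV) and tail_size = 8 bytes of tailroom (ICV) on top of
	 * frame_size. The actual data_size/desc_size/winfo_size values
	 * come from the per-driver queue setup.
	 */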

	/*
	 * Allocate the skb buffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len,
			   DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	rt2x00lib_dmadone(entry);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
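
/*
 * Worked example (illustrative): L2PAD_SIZE(hdr_len) effectively
 * computes -(hdr_len) & 3, i.e. how many bytes are needed to round the
 * header up to a 4-byte multiple. For a 26 byte QoS data header this is
 * 2, so two padding bytes are inserted between header and payload below
 * and the payload then starts on a 4-byte boundary.
 */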

/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS)
		 * frames. To work around the problem, generate the seqno
		 * in software when QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate the sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
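
	/*
	 * Note on the arithmetic below: in the 802.11 Sequence Control
	 * field the fragment number occupies bits 0-3 and the sequence
	 * number bits 4-15, so adding 0x10 increments the sequence number
	 * by exactly one, while the IEEE80211_SCTL_FRAG mask preserves
	 * the fragment bits of the original header.
	 */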
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;
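
	/*
	 * Example (illustrative): for OFDM rates the 12-bit PLCP LENGTH
	 * field is split into a high part (bits 6-11) and a low part
	 * (bits 0-5), so data_length = 100 is stored as length_high = 1
	 * and length_low = 36. For CCK rates the field instead carries
	 * the frame duration in microseconds, computed below.
	 */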
	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->ht_cap.ampdu_density;
	}
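
	/*
	 * Background note: HT MCS indices 0-7 use a single spatial
	 * stream, 8-15 two streams, and so on. That is why the
	 * "mcs > 7" test below is a cheap way to detect multi-stream
	 * rates for the dynamic SM PS check.
	 */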
	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings at zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP.
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the number of available entries
	 *    in the queue drops below a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
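
/*
 * Note: the threshold used above is initialized in rt2x00queue_init()
 * below as roughly 10% of the queue limit, so a burst is cut short as
 * soon as the queue runs low on free entries.
 */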

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out but just don't track
	 * it in our bar list. As a result we will report it back to mac80211
	 * as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);
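
	/*
	 * Overview of the submission path below (for reference): claim a
	 * free entry under queue->tx_lock, attach the skb, let the driver
	 * fill in the data and the descriptor, bump Q_INDEX and finally
	 * kick the queue. Every failure after the entry has been claimed
	 * must release ENTRY_OWNER_DEVICE_DATA again, as the error paths
	 * below do.
	 */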

	/*
	 * This function must be called with bottom halves disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * so we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear the beacon (single bssid devices don't need to clear the
	 * beacon since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}
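
/*
 * Note: beacons do not travel through the normal TX queue indexing used
 * above; rt2x00queue_update_beacon() below hands the beacon skb obtained
 * from mac80211 directly to the driver's write_beacon callback.
 */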

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send the beacon to the hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
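
	/*
	 * Example (illustrative): with queue->limit = 16, index_start = 14
	 * and index_end = 3, the wrap-around branch below visits entries
	 * 14, 15, 0, 1 and 2, in that order.
	 */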
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
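
/*
 * Note: rt2x00queue_start_queue() below first sets QUEUE_PAUSED and then
 * calls rt2x00queue_unpause_queue(), so waking the mac80211 queue (or
 * kicking RX) always goes through the single unpause path above.
 */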

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if the driver supports flushing, in which case we can
	 * defer the flushing to the driver.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);
	rt2x00dev->last_nostatus_check = jiffies;

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but since we are completely shutting everything
	 * down now, it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
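
	/*
	 * Memory layout (illustrative): the single allocation above holds
	 * all struct queue_entry objects first, followed by all driver
	 * private areas:
	 *
	 *   [entry 0]...[entry N-1][priv 0]...[priv N-1]
	 *
	 * QUEUE_ENTRY_PRIV_OFFSET below computes the address of the i-th
	 * private area inside that block.
	 */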

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
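
/*
 * Example (illustrative): for a device with 4 TX queues and an ATIM
 * queue, rt2x00queue_allocate() below lays out the array as
 *
 *   queue[0] = RX, queue[1..4] = TX (QID_AC_VO..QID_AC_BK),
 *   queue[5] = beacon, queue[6] = ATIM
 *
 * matching the pointer initialization that follows the allocation.
 */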

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}