/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
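
/*
 * AP-mode power-save filtering for an outgoing frame. Returns true when
 * the skb has been consumed here (queued on a sleeping STA's psq or on
 * the multicast PS queue, or dropped because no matching STA exists),
 * in which case the caller must not transmit or free it. Returns false
 * when the frame should go out on the air, with *more_data indicating
 * whether the MoreData bit needs to be set in the WMI data header.
 */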
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of a DTIM expiry.
				 * Determine if the MoreData bit has to be
				 * set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(ar, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if the MoreData bit has to be
				 * set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous; if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
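
/*
 * Main data transmit path. Frames are dropped early if the target is
 * not connected; otherwise they pass through AP power-save filtering,
 * DIX to 802.3 conversion, WMI data header insertion and AC-to-endpoint
 * mapping (or the IBSS node map) before being paired with a cookie and
 * handed to HTC. Completion and error cleanup happen asynchronously in
 * ath6kl_tx_complete().
 */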
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &ar->flag)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode power save processing */
	if (ar->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(ar, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if (skb_headroom(skb) < dev->needed_headroom) {
			WARN_ON(1);
			goto fail_tx;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((ar->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	/*
	 * HTC interface is asynchronous; if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	ar->net_stats.tx_dropped++;
	ar->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
				ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the next highest active
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}
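
/*
 * Called by HTC when an endpoint's send queue fills up. Control (WMI)
 * traffic is never dropped here; data on lower priority streams may be
 * dropped to keep the reserved cookies available to the highest
 * priority active stream, otherwise the network queue is stopped until
 * TX completions drain the endpoint.
 */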
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	enum htc_endpoint_id endpoint = packet->endpoint;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI this should not get full: something is
		 * running rampant if the host is exhausting the WMI queue
		 * with too many commands. The only exception to this is
		 * during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		return HTC_SEND_FULL_KEEP;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return HTC_SEND_FULL_KEEP;

	if (ar->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue; however,
		 * we should stop the network queues.
		 */
		goto stop_net_queues;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		return HTC_SEND_FULL_DROP;

stop_net_queues:
	spin_lock_bh(&ar->lock);
	set_bit(NETQ_STOPPED, &ar->flag);
	spin_unlock_bh(&ar->lock);
	netif_stop_queue(ar->net_dev);

	return HTC_SEND_FULL_KEEP;
}
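
/*
 * Drop the node map reference taken in ath6kl_ibss_map_epid() and trim
 * trailing map entries that no longer have any frames pending.
 */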
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
				     enum htc_endpoint_id eid, u32 map_no)
{
	u32 i;

	if (ar->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
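
/*
 * HTC completion callback for transmitted packets. Walks the completed
 * queue under ar->lock, updating the pending counters, recording error
 * statistics and recycling each cookie, while the skbs are collected
 * and freed outside the lock. Also clears WMI_CTRL_EP_FULL and
 * NETQ_STOPPED and wakes the netif queue once there is room again.
 */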
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing = false;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		packet->buf = skb->data;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing = true;

			ar->net_stats.tx_errors++;

			if (status != -ENOSPC)
				ath6kl_err("tx error, status: 0x%x\n", status);
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing = false;
			ar->net_stats.tx_packets++;
			ar->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(ar, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &ar->flag))
			clear_bit(NETQ_STOPPED, &ar->flag);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	if (test_bit(CONNECTED, &ar->flag)) {
		if (!flushing)
			netif_wake_queue(ar->net_dev);
	}

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}
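
/*
 * A note on the receive buffers built below: the struct htc_packet that
 * describes a buffer is carved out of the skb's own buffer (skb->head),
 * so no separate allocation is needed, and skb->data is moved back onto
 * a 4-byte boundary before the buffer is handed to HTC.
 */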
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
		       ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of the A-MSDU subframe padding bytes -
		 * round up to the next word boundary.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
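
/*
 * Worked example of the window arithmetic used below, assuming
 * AGGR_WIN_IDX(x, y) is simply (x) % (y): with win_sz 8 the hold queue
 * has TID_WINDOW_SZ(8) = 16 slots, so seq_next 4094 maps to slot
 * 4094 % 16 = 14, and as seq_next advances through ATH6KL_NEXT_SEQ_NO()
 * it wraps 4095 -> 0 and the slot index continues 15, 0, 1, ...
 */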
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 0-4095, there would be holes when the sequence number wraps
	 * around. The target should judiciously choose the win_sz with
	 * this condition in mind; power-of-two window sizes (2, 4, 8,
	 * 16, giving TID_WINDOW_SZ = 2 x win_sz) work fine.
	 * We must dequeue from "idx" to "idx_end", both inclusive.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}

static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
				     (rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the current frame a duplicate or something beyond our
	 * window (hold_q, which is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 *    2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *        -> drop the frame; perhaps the sender did not get our
	 *           ACK. This is taken care of above.
	 *    2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *        -> Taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
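
/*
 * Main receive completion handler. Strips the HTC and WMI headers,
 * processes AP-mode power-save state changes reported by the target,
 * converts the payload back to DIX and then either hands unicast frames
 * to the aggregation logic for reordering or delivers them straight to
 * the network stack.
 */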
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	u16 seq_no, offset;
	u8 tid;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		ar->net_stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&ar->lock);

	ar->net_stats.rx_packets++;
	ar->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&ar->lock);

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	skb->dev = ar->net_dev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
		return;
	}

	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in AP mode.
	 */
	if (ar->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		ar->net_stats.rx_errors++;
		ar->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the power save state of the STA */
	if (ar->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(ar, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in the PS state of the STA,
		 * take the appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for the
		 *    STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, ar->net_dev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(ar->net_dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (ar->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			struct ath6kl_sta *conn = NULL;
			conn = ath6kl_find_sta(ar, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, ar->net_dev);

		/* the skb may have been consumed or forwarded above */
		if (skb == NULL)
			return;
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
}
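
/*
 * Reorder-release timer: flush every TID that has made no progress
 * since the timer was armed, then re-arm only while some hold queue
 * still holds a frame waiting for its missing predecessors.
 */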
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
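
/*
 * ADDBA request from the target: (re)initialize the TID's reorder
 * state. The hold queue is sized to TID_WINDOW_SZ(win_sz), twice the
 * negotiated window, so that a frame can be parked either in the
 * current window or in the shifted window handled by
 * aggr_process_recv_frm().
 */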
void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}

void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}