/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
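/*
 * AP mode power save handling. Returns true when the skb has been consumed
 * (queued for a sleeping STA, or dropped because the STA is unknown), in
 * which case the caller must neither transmit nor free it. Multicast frames
 * are parked on the shared mcastpsq while any STA sleeps; unicast frames for
 * a sleeping, un-polled STA go to that STA's psq. *more_data is set when
 * further buffered frames remain, so the caller can set the MoreData bit in
 * the outgoing frame.
 */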
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of a DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous; if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
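/*
 * Main data transmit path (used both for frames from the network stack and
 * for frames looped back in AP mode). Roughly: run AP power save processing,
 * convert the DIX header to 802.3 + LLC/SNAP, prepend the WMI data header,
 * map the frame to an HTC endpoint (per-node in IBSS power save mode,
 * otherwise via the AC-to-endpoint table), then hand it to HTC together with
 * a cookie so that ath6kl_tx_complete() can account for it later.
 */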
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode power save processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL,
					    vif->fw_vif_idx)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous; if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}
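/*
 * Track which traffic-class streams are active so that the TX queue-full
 * handling can favour the highest-priority active stream. Typically invoked
 * when a WMI priority stream is created or deleted; HTC is notified
 * afterwards since the change may redistribute credits between endpoints.
 */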
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the next highest active
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}
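/*
 * HTC callback invoked when an endpoint's TX queue reaches its limit. The
 * returned action tells HTC whether to keep or drop the overflowing packet;
 * in addition, any ad hoc vif's net queue is stopped here to throttle the
 * host stack.
 */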
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		goto stop_adhoc_netq;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		goto stop_adhoc_netq;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM) {
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;
		goto stop_adhoc_netq;
	}

stop_adhoc_netq:
	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK) {
			spin_unlock_bh(&ar->list_lock);

			spin_lock_bh(&vif->if_lock);
			set_bit(NETQ_STOPPED, &vif->flags);
			spin_unlock_bh(&vif->if_lock);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
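/*
 * HTC TX completion callback. Runs once per batch of completed (or flushed)
 * packets: updates pending counters and per-vif stats, recycles cookies,
 * releases IBSS node mappings, and finally wakes the net queue of every
 * connected vif whose packets were not merely flushed.
 */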
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[MAX_NUM_VIF] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
		       ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
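/*
 * Pre-allocate "count" receive buffers large enough for a maximum-sized
 * A-MSDU and park them on amsdu_rx_buffer_queue. As in ath6kl_rx_refill()
 * above, the htc_packet bookkeeping lives in the skb's headroom (skb->head)
 * and skb->data is forced to 4-byte alignment before being handed to HTC.
 */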
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of the A-MSDU subframe padding bytes -
		 * round to the nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
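/*
 * Release frames from the RX reorder hold queue for a TID, starting at
 * seq_next. With seq_no == 0 the whole current window is walked; a non-zero
 * seq_no (e.g. from a BAR or a window shift) stops the walk at that sequence
 * number. With order == 1 the walk instead stops at the first hole,
 * delivering only the in-order head of the window. Dequeued A-MSDUs are
 * sliced into individual MSDUs before delivery to the stack.
 */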
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no when a BAR comes. If seq_no is
	 * non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of 4096
	 * there would be holes when sequence wraparound occurs. The
	 * target should judiciously choose win_sz based on this
	 * condition; power-of-two window sizes (2, 4, 8, 16, with
	 * TID_WINDOW_SZ = 2 x win_sz) work fine.
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}
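/*
 * Insert one received MPDU into the per-TID reorder window and release
 * whatever has become deliverable. hold_q is sized TID_WINDOW_SZ(win_sz),
 * i.e. twice the negotiated BlockAck window, so moderately out-of-window
 * frames merely slide the window forward. For example, with win_sz = 8
 * (hold_q_sz = 16) and seq_next = 100: frames 100..115 are stored in place,
 * a frame at 116..130 shifts the window so that the new frame becomes its
 * upper edge, and anything beyond that flushes the pending frames and
 * restarts the window at the new sequence number.
 */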
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
				     (rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 *  2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *      -> drop the frame. Perhaps the sender did not get our ACK.
	 *         This is taken care of above.
	 *  2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *      -> Taken care of above, by moving the window forward.
	 */
	if (node->skb) {
		/* the slot is already occupied, so this is a duplicate */
		dev_kfree_skb(node->skb);
		stats->num_dups++;
	}

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
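/*
 * Main HTC RX completion handler. Control-endpoint traffic is passed to WMI;
 * data frames have their WMI data header and RX meta info stripped, are
 * converted back from 802.3 to DIX (or have their 802.11 header removed),
 * and in AP mode are additionally subject to power save bookkeeping and
 * intra-BSS forwarding. Unicast frames belonging to an aggregation session
 * are handed to the reorder logic instead of being delivered directly.
 */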
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	if (ept == ar->ctrl_ep) {
		if_idx =
		    wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		    wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames that do
	 * not have an LLC hdr. They are 16 bytes in size. Allow these
	 * frames in AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac as the
			 * MAC address. If found, send the frame to it
			 * over the air; otherwise send it up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
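/*
 * Reorder-release timer. If a hold queue made no progress since the last
 * run, its pending frames are flushed (accepting holes) so that received
 * frames are not stalled indefinitely by a lost MPDU. The timer is re-armed
 * only while some hold queue still contains frames.
 */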
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
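/*
 * ADDBA event from the target: (re)initialize reorder state for the TID.
 * The hold queue is allocated at TID_WINDOW_SZ(win_sz), twice the negotiated
 * BlockAck window, so that slightly out-of-window frames can still be
 * absorbed (see aggr_process_recv_frm() above).
 */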
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
			     u8 win_sz)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}

void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
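/*
 * Final teardown of the aggregation context: stop the release timer, free
 * every skb still parked in the hold queues and reorder queues, and free
 * the context itself. Counterpart of aggr_init().
 */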
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}