/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
        return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
        return tid_mux >> ATH6KL_AID_SHIFT;
}

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
                               u32 *map_no)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ethhdr *eth_hdr;
        u32 i, ep_map = -1;
        u8 *datap;

        *map_no = 0;
        datap = skb->data;
        eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

        if (is_multicast_ether_addr(eth_hdr->h_dest))
                return ENDPOINT_2;

        for (i = 0; i < ar->node_num; i++) {
                if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
                           ETH_ALEN) == 0) {
                        *map_no = i + 1;
                        ar->node_map[i].tx_pend++;
                        return ar->node_map[i].ep_id;
                }

                if ((ep_map == -1) && !ar->node_map[i].tx_pend)
                        ep_map = i;
        }

        if (ep_map == -1) {
                ep_map = ar->node_num;
                ar->node_num++;
                if (ar->node_num > MAX_NODE_NUM)
                        return ENDPOINT_UNUSED;
        }

        memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

        for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
                if (!ar->tx_pending[i]) {
                        ar->node_map[ep_map].ep_id = i;
                        break;
                }

                /*
                 * No free endpoint is available, start redistribution on
                 * the inuse endpoints.
                 */
                if (i == ENDPOINT_5) {
                        ar->node_map[ep_map].ep_id = ar->next_ep_id;
                        ar->next_ep_id++;
                        if (ar->next_ep_id > ENDPOINT_5)
                                ar->next_ep_id = ENDPOINT_2;
                }
        }

        *map_no = ep_map + 1;
        ar->node_map[ep_map].tx_pend++;

        return ar->node_map[ep_map].ep_id;
}

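/*
 * uAPSD handling for AP mode: if the frame is a trigger-delivered frame
 * (STA_PS_APSD_TRIGGER), only update the MORE/EOSP flags and let the
 * caller transmit it; otherwise queue it on the per-STA APSD queue for a
 * delivery-enabled AC. Returns true when the skb has been queued (i.e.
 * consumed) and false when the caller should transmit it now.
 */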
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                                  struct ath6kl_vif *vif,
                                  struct sk_buff *skb,
                                  u32 *flags)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty = false;
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        u8 up = 0, traffic_class, *ip_hdr;
        u16 ether_type;
        struct ath6kl_llc_snap_hdr *llc_hdr;

        if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
                /*
                 * This tx is because of a uAPSD trigger, determine
                 * more and EOSP bit. Set EOSP if queue is empty
                 * or sufficient frames are delivered for this trigger.
                 */
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->apsdq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                else if (conn->sta_flags & STA_PS_APSD_EOSP)
                        *flags |= WMI_DATA_HDR_FLAGS_EOSP;
                *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        } else if (!conn->apsd_info)
                return false;

        if (test_bit(WMM_ENABLED, &vif->flags)) {
                ether_type = be16_to_cpu(datap->h_proto);
                if (is_ethertype(ether_type)) {
                        /* packet is in DIX format */
                        ip_hdr = (u8 *)(datap + 1);
                } else {
                        /* packet is in 802.3 format */
                        llc_hdr = (struct ath6kl_llc_snap_hdr *)
                                        (datap + 1);
                        ether_type = be16_to_cpu(llc_hdr->eth_type);
                        ip_hdr = (u8 *)(llc_hdr + 1);
                }

                if (ether_type == IP_ETHERTYPE)
                        up = ath6kl_wmi_determine_user_priority(
                                                        ip_hdr, 0);
        }

        traffic_class = ath6kl_wmi_get_traffic_class(up);

        if ((conn->apsd_info & (1 << traffic_class)) == 0)
                return false;

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        skb_queue_tail(&conn->apsdq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this STA
         */
        if (is_apsdq_empty) {
                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                              vif->fw_vif_idx,
                                              conn->aid, 1, 0);
        }
        *flags |= WMI_DATA_HDR_FLAGS_UAPSD;

        return true;
}

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
                               struct ath6kl_vif *vif,
                               struct sk_buff *skb,
                               u32 *flags)
{
        bool is_psq_empty = false;
        struct ath6kl *ar = vif->ar;

        if (conn->sta_flags & STA_PS_POLLED) {
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->psq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        }

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_psq_empty = skb_queue_empty(&conn->psq);
        skb_queue_tail(&conn->psq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this
         * STA.
         */
        if (is_psq_empty)
                ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                       vif->fw_vif_idx,
                                       conn->aid, 1);
        return true;
}

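/*
 * AP mode power-save filter for the tx path. Multicast frames are held
 * on the shared mcastpsq while any peer STA sleeps; unicast frames for a
 * sleeping STA go to its uAPSD or legacy PS queue. Returns true if the
 * skb was consumed here (queued or dropped), so the caller must not
 * transmit or free it.
 */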
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
                                u32 *flags)
{
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        struct ath6kl_sta *conn = NULL;
        bool ps_queued = false;
        struct ath6kl *ar = vif->ar;

        if (is_multicast_ether_addr(datap->h_dest)) {
                u8 ctr = 0;
                bool q_mcast = false;

                for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
                        if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
                                q_mcast = true;
                                break;
                        }
                }

                if (q_mcast) {
                        /*
                         * If this transmit is not because of a DTIM expiry,
                         * queue it.
                         */
                        if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
                                bool is_mcastq_empty = false;

                                spin_lock_bh(&ar->mcastpsq_lock);
                                is_mcastq_empty =
                                        skb_queue_empty(&ar->mcastpsq);
                                skb_queue_tail(&ar->mcastpsq, skb);
                                spin_unlock_bh(&ar->mcastpsq_lock);

                                /*
                                 * If this is the first Mcast pkt getting
                                 * queued indicate to the target to set the
                                 * BitmapControl LSB of the TIM IE.
                                 */
                                if (is_mcastq_empty)
                                        ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                                               vif->fw_vif_idx,
                                                               MCAST_AID, 1);

                                ps_queued = true;
                        } else {
                                /*
                                 * This transmit is because of a DTIM expiry.
                                 * Determine if MoreData bit has to be set.
                                 */
                                spin_lock_bh(&ar->mcastpsq_lock);
                                if (!skb_queue_empty(&ar->mcastpsq))
                                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                                spin_unlock_bh(&ar->mcastpsq_lock);
                        }
                }
        } else {
                conn = ath6kl_find_sta(vif, datap->h_dest);
                if (!conn) {
                        dev_kfree_skb(skb);

                        /* Inform the caller that the skb is consumed */
                        return true;
                }

                if (conn->sta_flags & STA_PS_SLEEP) {
                        ps_queued = ath6kl_process_uapsdq(conn,
                                                          vif, skb, flags);
                        if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
                                ps_queued = ath6kl_process_psq(conn,
                                                               vif, skb, flags);
                }
        }
        return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                      enum htc_endpoint_id eid)
{
        struct ath6kl *ar = devt;
        int status = 0;
        struct ath6kl_cookie *cookie = NULL;

        if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
                return -EACCES;

        spin_lock_bh(&ar->lock);

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
                   skb, skb->len, eid);

        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
                /*
                 * Control endpoint is full, don't allocate resources, we
                 * are just going to drop this packet.
                 */
                cookie = NULL;
                ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
                           skb, skb->len);
        } else
                cookie = ath6kl_alloc_cookie(ar);

        if (cookie == NULL) {
                spin_unlock_bh(&ar->lock);
                status = -ENOMEM;
                goto fail_ctrl_tx;
        }

        ar->tx_pending[eid]++;

        if (eid != ar->ctrl_ep)
                ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        cookie->skb = skb;
        cookie->map_no = 0;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, ATH6KL_CONTROL_PKT_TAG);

        /*
         * This interface is asynchronous, if there is an error, cleanup
         * will happen in the TX completion callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_ctrl_tx:
        dev_kfree_skb(skb);
        return status;
}

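/*
 * Main data transmit path (netdev start_xmit). In outline: apply AP
 * power-save filtering, convert the frame from DIX to 802.3, prepend the
 * WMI data header (optionally with checksum-offload meta), map the frame
 * to an AC/endpoint, allocate a cookie and hand the packet to HTC.
 * Errors are counted as tx_dropped/tx_aborted_errors; 0 is returned on
 * every path since the skb is always consumed.
 */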
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_cookie *cookie = NULL;
        enum htc_endpoint_id eid = ENDPOINT_UNUSED;
        struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
        u8 ac = 99; /* initialize to unmapped ac */
        bool chk_adhoc_ps_mapping = false;
        int ret;
        struct wmi_tx_meta_v2 meta_v2;
        void *meta;
        u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
        u8 meta_ver = 0;
        u32 flags = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
                   skb, skb->data, skb->len);

        /* If target is not associated */
        if (!test_bit(CONNECTED, &vif->flags)) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (!test_bit(WMI_READY, &ar->flag))
                goto fail_tx;

        /* AP mode Power saving processing */
        if (vif->nw_type == AP_NETWORK) {
                if (ath6kl_powersave_ap(vif, skb, &flags))
                        return 0;
        }

        if (test_bit(WMI_ENABLED, &ar->flag)) {
                if ((dev->features & NETIF_F_IP_CSUM) &&
                    (csum == CHECKSUM_PARTIAL)) {
                        csum_start = skb->csum_start -
                                        (skb_network_header(skb) - skb->head) +
                                        sizeof(struct ath6kl_llc_snap_hdr);
                        csum_dest = skb->csum_offset + csum_start;
                }

                if (skb_headroom(skb) < dev->needed_headroom) {
                        struct sk_buff *tmp_skb = skb;

                        skb = skb_realloc_headroom(skb, dev->needed_headroom);
                        kfree_skb(tmp_skb);
                        if (skb == NULL) {
                                vif->net_stats.tx_dropped++;
                                return 0;
                        }
                }

                if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
                        ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
                        goto fail_tx;
                }

                if ((dev->features & NETIF_F_IP_CSUM) &&
                    (csum == CHECKSUM_PARTIAL)) {
                        meta_v2.csum_start = csum_start;
                        meta_v2.csum_dest = csum_dest;

                        /* instruct target to calculate checksum */
                        meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
                        meta_ver = WMI_META_VERSION_2;
                        meta = &meta_v2;
                } else {
                        meta_ver = 0;
                        meta = NULL;
                }

                ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
                                              DATA_MSGTYPE, flags, 0,
                                              meta_ver,
                                              meta, vif->fw_vif_idx);

                if (ret) {
                        ath6kl_warn("failed to add wmi data header:%d\n", ret);
                        goto fail_tx;
                }

                if ((vif->nw_type == ADHOC_NETWORK) &&
                    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
                        chk_adhoc_ps_mapping = true;
                else {
                        /* get the stream mapping */
                        ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
                                    vif->fw_vif_idx, skb,
                                    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
                        if (ret)
                                goto fail_tx;
                }
        } else
                goto fail_tx;

        spin_lock_bh(&ar->lock);

        if (chk_adhoc_ps_mapping)
                eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
        else
                eid = ar->ac2ep_map[ac];

        if (eid == 0 || eid == ENDPOINT_UNUSED) {
                ath6kl_err("eid %d is not mapped!\n", eid);
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* allocate resource for this packet */
        cookie = ath6kl_alloc_cookie(ar);

        if (!cookie) {
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* update counts while the lock is held */
        ar->tx_pending[eid]++;
        ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
            skb_cloned(skb)) {
                /*
                 * We will touch (move) the buffer data to align it. Since the
                 * skb buffer is cloned and not only the header is changed, we
                 * have to copy it to allow the changes. Since we are copying
                 * the data here, we may as well align it by reserving suitable
                 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
                 */
                struct sk_buff *nskb;

                nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
                if (nskb == NULL)
                        goto fail_tx;
                kfree_skb(skb);
                skb = nskb;
        }

        cookie->skb = skb;
        cookie->map_no = map_no;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, htc_tag);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
                        skb->data, skb->len);

        /*
         * HTC interface is asynchronous, if this fails, cleanup will
         * happen in the ath6kl_tx_complete callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_tx:
        dev_kfree_skb(skb);

        vif->net_stats.tx_dropped++;
        vif->net_stats.tx_aborted_errors++;

        return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
        struct ath6kl *ar = devt;
        enum htc_endpoint_id eid;
        int i;

        eid = ar->ac2ep_map[traffic_class];

        if (!test_bit(WMI_ENABLED, &ar->flag))
                goto notify_htc;

        spin_lock_bh(&ar->lock);

        ar->ac_stream_active[traffic_class] = active;

        if (active) {
                /*
                 * Keep track of the active stream with the highest
                 * priority.
                 */
                if (ar->ac_stream_pri_map[traffic_class] >
                    ar->hiac_stream_active_pri)
                        /* set the new highest active priority */
                        ar->hiac_stream_active_pri =
                                        ar->ac_stream_pri_map[traffic_class];

        } else {
                /*
                 * We may have to search for the next active stream
                 * that is the highest priority.
                 */
                if (ar->hiac_stream_active_pri ==
                    ar->ac_stream_pri_map[traffic_class]) {
                        /*
                         * The highest priority stream just went inactive
                         * reset and search for the "next" highest "active"
                         * priority stream.
                         */
                        ar->hiac_stream_active_pri = 0;

                        for (i = 0; i < WMM_NUM_AC; i++) {
                                if (ar->ac_stream_active[i] &&
                                    (ar->ac_stream_pri_map[i] >
                                     ar->hiac_stream_active_pri))
                                        /*
                                         * Set the new highest active
                                         * priority.
                                         */
                                        ar->hiac_stream_active_pri =
                                                ar->ac_stream_pri_map[i];
                        }
                }
        }

        spin_unlock_bh(&ar->lock);

notify_htc:
        /* notify HTC, this may cause credit distribution changes */
        ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}

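/*
 * Called by HTC when an endpoint's send queue overflows. Overflow on the
 * control endpoint is flagged as an error condition; for data endpoints,
 * packets from streams of lower priority than the highest active one may
 * be dropped once the cookie pool runs low. Otherwise the network queues
 * are stopped until completions drain the endpoint.
 */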
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                                               struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct ath6kl_vif *vif;
        enum htc_endpoint_id endpoint = packet->endpoint;
        enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

        if (endpoint == ar->ctrl_ep) {
                /*
                 * Under normal WMI, if this is getting full then something
                 * is running rampant; the host should not be exhausting the
                 * WMI queue with too many commands. The only exception to
                 * this is during testing using endpointping.
                 */
                set_bit(WMI_CTRL_EP_FULL, &ar->flag);
                ath6kl_err("wmi ctrl ep is full\n");
                return action;
        }

        if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
                return action;

        /*
         * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
         * the highest active stream.
         */
        if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
            ar->hiac_stream_active_pri &&
            ar->cookie_count <=
                        target->endpoint[endpoint].tx_drop_packet_threshold)
                /*
                 * Give preference to the highest priority stream by
                 * dropping the packets which overflowed.
                 */
                action = HTC_SEND_FULL_DROP;

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (vif->nw_type == ADHOC_NETWORK ||
                    action != HTC_SEND_FULL_DROP) {
                        spin_unlock_bh(&ar->list_lock);

                        set_bit(NETQ_STOPPED, &vif->flags);
                        netif_stop_queue(vif->ndev);

                        return action;
                }
        }
        spin_unlock_bh(&ar->list_lock);

        return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
                                     enum htc_endpoint_id eid, u32 map_no)
{
        struct ath6kl *ar = vif->ar;
        u32 i;

        if (vif->nw_type != ADHOC_NETWORK)
                return;

        if (!ar->ibss_ps_enable)
                return;

        if (eid == ar->ctrl_ep)
                return;

        if (map_no == 0)
                return;

        map_no--;
        ar->node_map[map_no].tx_pend--;

        if (ar->node_map[map_no].tx_pend)
                return;

        if (map_no != (ar->node_num - 1))
                return;

        for (i = ar->node_num; i > 0; i--) {
                if (ar->node_map[i - 1].tx_pend)
                        break;

                memset(&ar->node_map[i - 1], 0,
                       sizeof(struct ath6kl_node_mapping));
                ar->node_num--;
        }
}

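/*
 * HTC tx-completion callback: reap completed packets, update pending
 * counters and per-vif stats, recycle cookies, and wake the netif queues
 * of vifs that were not merely flushing out packets (-ECANCELED).
 */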
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
        struct ath6kl *ar = context;
        struct sk_buff_head skb_queue;
        struct htc_packet *packet;
        struct sk_buff *skb;
        struct ath6kl_cookie *ath6kl_cookie;
        u32 map_no = 0;
        int status;
        enum htc_endpoint_id eid;
        bool wake_event = false;
        bool flushing[ATH6KL_VIF_MAX] = {false};
        u8 if_idx;
        struct ath6kl_vif *vif;

        skb_queue_head_init(&skb_queue);

        /* lock the driver as we update internal state */
        spin_lock_bh(&ar->lock);

        /* reap completed packets */
        while (!list_empty(packet_queue)) {
                packet = list_first_entry(packet_queue, struct htc_packet,
                                          list);
                list_del(&packet->list);

                ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
                if (!ath6kl_cookie)
                        goto fatal;

                status = packet->status;
                skb = ath6kl_cookie->skb;
                eid = packet->endpoint;
                map_no = ath6kl_cookie->map_no;

                if (!skb || !skb->data)
                        goto fatal;

                __skb_queue_tail(&skb_queue, skb);

                if (!status && (packet->act_len != skb->len))
                        goto fatal;

                ar->tx_pending[eid]--;

                if (eid != ar->ctrl_ep)
                        ar->total_tx_data_pend--;

                if (eid == ar->ctrl_ep) {
                        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
                                clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

                        if (ar->tx_pending[eid] == 0)
                                wake_event = true;
                }

                if (eid == ar->ctrl_ep) {
                        if_idx = wmi_cmd_hdr_get_if_idx(
                                (struct wmi_cmd_hdr *) packet->buf);
                } else {
                        if_idx = wmi_data_hdr_get_if_idx(
                                (struct wmi_data_hdr *) packet->buf);
                }

                vif = ath6kl_get_vif_by_index(ar, if_idx);
                if (!vif) {
                        ath6kl_free_cookie(ar, ath6kl_cookie);
                        continue;
                }

                if (status) {
                        if (status == -ECANCELED)
                                /* a packet was flushed */
                                flushing[if_idx] = true;

                        vif->net_stats.tx_errors++;

                        if (status != -ENOSPC && status != -ECANCELED)
                                ath6kl_warn("tx complete error: %d\n", status);

                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "error!");
                } else {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "OK");

                        flushing[if_idx] = false;
                        vif->net_stats.tx_packets++;
                        vif->net_stats.tx_bytes += skb->len;
                }

                ath6kl_tx_clear_node_map(vif, eid, map_no);

                ath6kl_free_cookie(ar, ath6kl_cookie);

                if (test_bit(NETQ_STOPPED, &vif->flags))
                        clear_bit(NETQ_STOPPED, &vif->flags);
        }

        spin_unlock_bh(&ar->lock);

        __skb_queue_purge(&skb_queue);

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (test_bit(CONNECTED, &vif->flags) &&
                    !flushing[vif->fw_vif_idx]) {
                        spin_unlock_bh(&ar->list_lock);
                        netif_wake_queue(vif->ndev);
                        spin_lock_bh(&ar->list_lock);
                }
        }
        spin_unlock_bh(&ar->list_lock);

        if (wake_event)
                wake_up(&ar->event_wq);

        return;

fatal:
        WARN_ON(1);
        spin_unlock_bh(&ar->lock);
        return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
        int i;

        /* flush all the data (non-control) streams */
        for (i = 0; i < WMM_NUM_AC; i++)
                ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
                                      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
                                              struct sk_buff *skb)
{
        if (!skb)
                return;

        skb->dev = dev;

        if (!(skb->dev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);

        netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
        struct sk_buff *skb;

        while (num) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb) {
                        ath6kl_err("netbuf allocation failed\n");
                        return;
                }
                skb_queue_tail(q, skb);
                num--;
        }
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
        struct sk_buff *skb = NULL;

        if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
            (AGGR_NUM_OF_FREE_NETBUFS >> 2))
                ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
                                     AGGR_NUM_OF_FREE_NETBUFS);

        skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

        return skb;
}

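/*
 * Top up the rx buffers of an endpoint to ATH6KL_MAX_RX_BUFFERS. Note
 * that the htc_packet bookkeeping lives in the skb's headroom
 * (skb->head) and skb->data is aligned down to a 4-byte boundary before
 * the buffer is handed to HTC.
 */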
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb;
        int rx_buf;
        int n_buf_refill;
        struct htc_packet *packet;
        struct list_head queue;

        n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
                  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

        if (n_buf_refill <= 0)
                return;

        INIT_LIST_HEAD(&queue);

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: providing htc with %d buffers at eid=%d\n",
                   __func__, n_buf_refill, endpoint);

        for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb)
                        break;

                packet = (struct htc_packet *) skb->head;
                if (!IS_ALIGNED((unsigned long) skb->data, 4))
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                set_htc_rxpkt_info(packet, skb, skb->data,
                                   ATH6KL_BUFFER_SIZE, endpoint);
                list_add_tail(&packet->list, &queue);
        }

        if (!list_empty(&queue))
                ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

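/*
 * Pre-allocate 'count' maximum-sized (A-MSDU) rx buffers onto
 * ar->amsdu_rx_buffer_queue; ath6kl_alloc_amsdu_rxbuf() dequeues from
 * this list when a frame larger than ATH6KL_BUFFER_SIZE arrives.
 */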
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
        struct htc_packet *packet;
        struct sk_buff *skb;

        while (count) {
                skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
                if (!skb)
                        return;

                packet = (struct htc_packet *) skb->head;
                if (!IS_ALIGNED((unsigned long) skb->data, 4))
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                set_htc_rxpkt_info(packet, skb, skb->data,
                                   ATH6KL_AMSDU_BUFFER_SIZE, 0);
                spin_lock_bh(&ar->lock);
                list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
                spin_unlock_bh(&ar->lock);
                count--;
        }
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
                                            enum htc_endpoint_id endpoint,
                                            int len)
{
        struct ath6kl *ar = target->dev->ar;
        struct htc_packet *packet = NULL;
        struct list_head *pkt_pos;
        int refill_cnt = 0, depth = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
                   __func__, endpoint, len);

        if ((len <= ATH6KL_BUFFER_SIZE) ||
            (len > ATH6KL_AMSDU_BUFFER_SIZE))
                return NULL;

        spin_lock_bh(&ar->lock);

        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
                goto refill_buf;
        }

        packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
                                  struct htc_packet, list);
        list_del(&packet->list);
        list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
                depth++;

        refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
        spin_unlock_bh(&ar->lock);

        /* set actual endpoint ID */
        packet->endpoint = endpoint;

refill_buf:
        if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
                ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

        return packet;
}

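/*
 * De-aggregate an A-MSDU: walk the 802.3 subframes (each padded to a
 * 4-byte boundary), copy each into a fresh buffer from the free list,
 * convert it to DIX and append it to the TID's delivery queue. The
 * original aggregate skb is always freed here.
 */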
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
                             struct rxtid *rxtid, struct sk_buff *skb)
{
        struct sk_buff *new_skb;
        struct ethhdr *hdr;
        u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
        u8 *framep;

        mac_hdr_len = sizeof(struct ethhdr);
        framep = skb->data + mac_hdr_len;
        amsdu_len = skb->len - mac_hdr_len;

        while (amsdu_len > mac_hdr_len) {
                hdr = (struct ethhdr *) framep;
                payload_8023_len = ntohs(hdr->h_proto);

                if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
                    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
                        ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
                                   payload_8023_len);
                        break;
                }

                frame_8023_len = payload_8023_len + mac_hdr_len;
                new_skb = aggr_get_free_skb(p_aggr);
                if (!new_skb) {
                        ath6kl_err("no buffer available\n");
                        break;
                }

                memcpy(new_skb->data, framep, frame_8023_len);
                skb_put(new_skb, frame_8023_len);
                if (ath6kl_wmi_dot3_2_dix(new_skb)) {
                        ath6kl_err("dot3_2_dix error\n");
                        dev_kfree_skb(new_skb);
                        break;
                }

                skb_queue_tail(&rxtid->q, new_skb);

                /* Is this the last subframe within this aggregate? */
                if ((amsdu_len - frame_8023_len) == 0)
                        break;

                /* Add the length of A-MSDU subframe padding bytes -
                 * Round to nearest word.
                 */
                frame_8023_len = ALIGN(frame_8023_len, 4);

                framep += frame_8023_len;
                amsdu_len -= frame_8023_len;
        }

        dev_kfree_skb(skb);
}

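/*
 * Release frames from the hold_q in sequence order. With seq_no == 0 the
 * whole current window is walked; otherwise dequeuing stops at seq_no
 * (e.g. on a BAR). With order == 1 the walk stops at the first hole so
 * that only an in-order prefix is delivered; otherwise holes are skipped
 * and counted.
 */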
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
                            u16 seq_no, u8 order)
{
        struct sk_buff *skb;
        struct rxtid *rxtid;
        struct skb_hold_q *node;
        u16 idx, idx_end, seq_end;
        struct rxtid_stats *stats;

        rxtid = &agg_conn->rx_tid[tid];
        stats = &agg_conn->stat[tid];

        idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

        /*
         * idx_end is typically the last possible frame in the window,
         * but changes to 'the' seq_no, when BAR comes. If seq_no
         * is non-zero, we will go up to that and stop.
         * Note: last seq no in current window will occupy the same
         * index position as index that is just previous to start.
         * An important point: if win_sz is 7, for a seq_no space of 4095,
         * there would be holes when sequence wrap-around occurs.
         * The target should judiciously choose win_sz based on
         * this condition (TID_WINDOW_SZ = 2 x win_sz; for 4095, a
         * win_sz of 2, 4, 8 or 16 works fine).
         * We must dequeue from "idx" to "idx_end", including both.
         */
        seq_end = seq_no ? seq_no : rxtid->seq_next;
        idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

        spin_lock_bh(&rxtid->lock);

        do {
                node = &rxtid->hold_q[idx];
                if ((order == 1) && (!node->skb))
                        break;

                if (node->skb) {
                        if (node->is_amsdu)
                                aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
                                                 node->skb);
                        else
                                skb_queue_tail(&rxtid->q, node->skb);
                        node->skb = NULL;
                } else
                        stats->num_hole++;

                rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
                idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
        } while (idx != idx_end);

        spin_unlock_bh(&rxtid->lock);

        stats->num_delivered += skb_queue_len(&rxtid->q);

        while ((skb = skb_dequeue(&rxtid->q)))
                ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

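/*
 * Feed one received frame into the rx reorder buffer for its TID. Frames
 * ahead of the current window shift the window forward (flushing what
 * falls out); in-window frames are parked in hold_q until the in-order
 * prefix can be delivered. Returns true if the skb was consumed by the
 * aggregation code, false if the caller should deliver it itself.
 */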
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
                                  u16 seq_no,
                                  bool is_amsdu, struct sk_buff *frame)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        struct sk_buff *skb;
        struct skb_hold_q *node;
        u16 idx, st, cur, end;
        bool is_queued = false;
        u16 extended_end;

        rxtid = &agg_conn->rx_tid[tid];
        stats = &agg_conn->stat[tid];

        stats->num_into_aggr++;

        if (!rxtid->aggr) {
                if (is_amsdu) {
                        aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
                        is_queued = true;
                        stats->num_amsdu++;
                        while ((skb = skb_dequeue(&rxtid->q)))
                                ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
                                                                  skb);
                }
                return is_queued;
        }

        /* Check the incoming sequence no, if it's in the window */
        st = rxtid->seq_next;
        cur = seq_no;
        end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

        if (((st < end) && (cur < st || cur > end)) ||
            ((st > end) && (cur > end) && (cur < st))) {
                extended_end = (end + rxtid->hold_q_sz - 1) &
                        ATH6KL_MAX_SEQ_NO;

                if (((end < extended_end) &&
                     (cur < end || cur > extended_end)) ||
                    ((end > extended_end) && (cur > extended_end) &&
                     (cur < end))) {
                        aggr_deque_frms(agg_conn, tid, 0, 0);
                        if (cur >= rxtid->hold_q_sz - 1)
                                rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
                        else
                                rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
                                                  (rxtid->hold_q_sz - 2 - cur);
                } else {
                        /*
                         * Dequeue only those frames that are outside the
                         * new shifted window.
                         */
                        if (cur >= rxtid->hold_q_sz - 1)
                                st = cur - (rxtid->hold_q_sz - 1);
                        else
                                st = ATH6KL_MAX_SEQ_NO -
                                        (rxtid->hold_q_sz - 2 - cur);

                        aggr_deque_frms(agg_conn, tid, st, 0);
                }

                stats->num_oow++;
        }

        idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

        node = &rxtid->hold_q[idx];

        spin_lock_bh(&rxtid->lock);

        /*
         * Is the current frame a duplicate, or something beyond our
         * window (hold_q, which is already 2x the window)?
         *
         * 1. Duplicate is easy - drop incoming frame.
         * 2. Not falling in current sliding window.
         *  2a. is the frame_seq_no preceding current tid_seq_no?
         *      -> drop the frame. perhaps sender did not get our ACK.
         *         this is taken care of above.
         *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
         *      -> Taken care of it above, by moving window forward.
         */
        dev_kfree_skb(node->skb);
        stats->num_dups++;

        node->skb = frame;
        is_queued = true;
        node->is_amsdu = is_amsdu;
        node->seq_no = seq_no;

        if (node->is_amsdu)
                stats->num_amsdu++;
        else
                stats->num_mpdu++;

        spin_unlock_bh(&rxtid->lock);

        aggr_deque_frms(agg_conn, tid, 0, 1);

        if (agg_conn->timer_scheduled)
                rxtid->progress = true;
        else
                for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
                        if (rxtid->hold_q[idx].skb) {
                                /*
                                 * There is a frame in the queue and no
                                 * timer so start a timer to ensure that
                                 * the frame doesn't remain stuck
                                 * forever.
                                 */
                                agg_conn->timer_scheduled = true;
                                mod_timer(&agg_conn->timer,
                                          (jiffies +
                                           HZ * (AGGR_RX_TIMEOUT) / 1000));
                                rxtid->progress = false;
                                rxtid->timer_mon = true;
                                break;
                        }
                }

        return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
                                          struct ath6kl_sta *conn)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty, is_apsdq_empty_at_start;
        u32 num_frames_to_deliver, flags;
        struct sk_buff *skb = NULL;

        /*
         * If the APSD q for this STA is not empty, dequeue and
         * send a pkt from the head of the q. Also update the
         * More data bit in the WMI_DATA_HDR if there are
         * more pkts for this STA in the APSD q.
         * If there are no more pkts for this STA,
         * update the APSD bitmap for this STA.
         */

        num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
                                                    ATH6KL_APSD_FRAME_MASK;
        /*
         * The number of frames to send in a service period is
         * indicated by the station in the QOS_INFO of the
         * association request. If it is zero, send all frames.
         */
        if (!num_frames_to_deliver)
                num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        spin_unlock_bh(&conn->psq_lock);
        is_apsdq_empty_at_start = is_apsdq_empty;

        while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
                spin_lock_bh(&conn->psq_lock);
                skb = skb_dequeue(&conn->apsdq);
                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                spin_unlock_bh(&conn->psq_lock);

                /*
                 * Set the STA flag to Trigger delivery,
                 * so that the frame will go out
                 */
                conn->sta_flags |= STA_PS_APSD_TRIGGER;
                num_frames_to_deliver--;

                /* Last frame in the service period, set EOSP or queue empty */
                if ((is_apsdq_empty) || (!num_frames_to_deliver))
                        conn->sta_flags |= STA_PS_APSD_EOSP;

                ath6kl_data_tx(skb, vif->ndev);
                conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
                conn->sta_flags &= ~(STA_PS_APSD_EOSP);
        }

        if (is_apsdq_empty) {
                if (is_apsdq_empty_at_start)
                        flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
                else
                        flags = 0;

                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                              vif->fw_vif_idx,
                                              conn->aid, 0, flags);
        }

        return;
}

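/*
 * HTC rx-completion callback and main receive path: control-endpoint
 * traffic is passed to WMI, data frames have their WMI/meta headers
 * stripped and are converted back to DIX. In AP mode this also tracks
 * the sender's power-save state, handles uAPSD trigger frames and
 * forwards intra-BSS traffic. Unicast frames then pass through rx
 * aggregation before delivery to the network stack.
 */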
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb = packet->pkt_cntxt;
        struct wmi_rx_meta_v2 *meta;
        struct wmi_data_hdr *dhdr;
        int min_hdr_len;
        u8 meta_type, dot11_hdr = 0;
        int status = packet->status;
        enum htc_endpoint_id ept = packet->endpoint;
        bool is_amsdu, prev_ps, ps_state = false;
        bool trig_state = false;
        struct ath6kl_sta *conn = NULL;
        struct sk_buff *skb1 = NULL;
        struct ethhdr *datap = NULL;
        struct ath6kl_vif *vif;
        struct aggr_info_conn *aggr_conn;
        u16 seq_no, offset;
        u8 tid, if_idx;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
                   __func__, ar, ept, skb, packet->buf,
                   packet->act_len, status);

        if (status || !(skb->data + HTC_HDR_LENGTH)) {
                dev_kfree_skb(skb);
                return;
        }

        skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
        skb_pull(skb, HTC_HDR_LENGTH);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
                        skb->data, skb->len);

        if (ept == ar->ctrl_ep) {
                if (test_bit(WMI_ENABLED, &ar->flag)) {
                        ath6kl_check_wow_status(ar);
                        ath6kl_wmi_control_rx(ar->wmi, skb);
                        return;
                }
                if_idx =
                        wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
        } else {
                if_idx =
                        wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
        }

        vif = ath6kl_get_vif_by_index(ar, if_idx);
        if (!vif) {
                dev_kfree_skb(skb);
                return;
        }

        /*
         * Take lock to protect buffer counts and adaptive power throughput
         * state.
         */
        spin_lock_bh(&vif->if_lock);

        vif->net_stats.rx_packets++;
        vif->net_stats.rx_bytes += packet->act_len;

        spin_unlock_bh(&vif->if_lock);

        skb->dev = vif->ndev;

        if (!test_bit(WMI_ENABLED, &ar->flag)) {
                if (EPPING_ALIGNMENT_PAD > 0)
                        skb_pull(skb, EPPING_ALIGNMENT_PAD);
                ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
                return;
        }

        ath6kl_check_wow_status(ar);

        min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
                      sizeof(struct ath6kl_llc_snap_hdr);

        dhdr = (struct wmi_data_hdr *) skb->data;

        /*
         * In the case of AP mode we may receive NULL data frames
         * that do not have LLC hdr. They are 16 bytes in size.
         * Allow these frames in the AP mode.
         */
        if (vif->nw_type != AP_NETWORK &&
            ((packet->act_len < min_hdr_len) ||
             (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
                ath6kl_info("frame len is too short or too long\n");
                vif->net_stats.rx_errors++;
                vif->net_stats.rx_length_errors++;
                dev_kfree_skb(skb);
                return;
        }

        /* Get the Power save state of the STA */
        if (vif->nw_type == AP_NETWORK) {
                meta_type = wmi_data_hdr_get_meta(dhdr);

                ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
                              WMI_DATA_HDR_PS_MASK);

                offset = sizeof(struct wmi_data_hdr);
                trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

                switch (meta_type) {
                case 0:
                        break;
                case WMI_META_VERSION_1:
                        offset += sizeof(struct wmi_rx_meta_v1);
                        break;
                case WMI_META_VERSION_2:
                        offset += sizeof(struct wmi_rx_meta_v2);
                        break;
                default:
                        break;
                }

                datap = (struct ethhdr *) (skb->data + offset);
                conn = ath6kl_find_sta(vif, datap->h_source);

                if (!conn) {
                        dev_kfree_skb(skb);
                        return;
                }

                /*
                 * If there is a change in PS state of the STA,
                 * take appropriate steps:
                 *
                 * 1. If Sleep-->Awake, flush the psq for the STA and
                 *    clear the PVB for the STA.
                 * 2. If Awake-->Sleep, start queueing frames
                 *    for the STA.
                 */
                prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

                if (ps_state)
                        conn->sta_flags |= STA_PS_SLEEP;
                else
                        conn->sta_flags &= ~STA_PS_SLEEP;

                /* Accept trigger only when the station is in sleep */
                if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
                        ath6kl_uapsd_trigger_frame_rx(vif, conn);

                if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
                        if (!(conn->sta_flags & STA_PS_SLEEP)) {
                                struct sk_buff *skbuff = NULL;
                                bool is_apsdq_empty;
                                struct ath6kl_mgmt_buff *mgmt;
                                u8 idx;

                                spin_lock_bh(&conn->psq_lock);
                                while (conn->mgmt_psq_len > 0) {
                                        mgmt = list_first_entry(
                                                        &conn->mgmt_psq,
                                                        struct ath6kl_mgmt_buff,
                                                        list);
                                        list_del(&mgmt->list);
                                        conn->mgmt_psq_len--;
                                        spin_unlock_bh(&conn->psq_lock);
                                        idx = vif->fw_vif_idx;

                                        ath6kl_wmi_send_mgmt_cmd(ar->wmi,
                                                                 idx,
                                                                 mgmt->id,
                                                                 mgmt->freq,
                                                                 mgmt->wait,
                                                                 mgmt->buf,
                                                                 mgmt->len,
                                                                 mgmt->no_cck);

                                        kfree(mgmt);
                                        spin_lock_bh(&conn->psq_lock);
                                }
                                conn->mgmt_psq_len = 0;
                                while ((skbuff = skb_dequeue(&conn->psq))) {
                                        spin_unlock_bh(&conn->psq_lock);
                                        ath6kl_data_tx(skbuff, vif->ndev);
                                        spin_lock_bh(&conn->psq_lock);
                                }

                                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                                while ((skbuff = skb_dequeue(&conn->apsdq))) {
                                        spin_unlock_bh(&conn->psq_lock);
                                        ath6kl_data_tx(skbuff, vif->ndev);
                                        spin_lock_bh(&conn->psq_lock);
                                }
                                spin_unlock_bh(&conn->psq_lock);

                                if (!is_apsdq_empty)
                                        ath6kl_wmi_set_apsd_bfrd_traf(
                                                        ar->wmi,
                                                        vif->fw_vif_idx,
                                                        conn->aid, 0, 0);

                                /* Clear the PVB for this STA */
                                ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
                                                       conn->aid, 0);
                        }
                }

                /* drop NULL data frames here */
                if ((packet->act_len < min_hdr_len) ||
                    (packet->act_len >
                     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
                        dev_kfree_skb(skb);
                        return;
                }
        }

        is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
        tid = wmi_data_hdr_get_up(dhdr);
        seq_no = wmi_data_hdr_get_seqno(dhdr);
        meta_type = wmi_data_hdr_get_meta(dhdr);
        dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
        skb_pull(skb, sizeof(struct wmi_data_hdr));

        switch (meta_type) {
        case WMI_META_VERSION_1:
                skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
                break;
        case WMI_META_VERSION_2:
                meta = (struct wmi_rx_meta_v2 *) skb->data;
                if (meta->csum_flags & 0x1) {
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        skb->csum = (__force __wsum) meta->csum;
                }
                skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
                break;
        default:
                break;
        }

        if (dot11_hdr)
                status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
        else if (!is_amsdu)
                status = ath6kl_wmi_dot3_2_dix(skb);

        if (status) {
                /*
                 * Drop frames that could not be processed (lack of
                 * memory, etc.)
                 */
                dev_kfree_skb(skb);
                return;
        }

        if (!(vif->ndev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }

        if (vif->nw_type == AP_NETWORK) {
                datap = (struct ethhdr *) skb->data;
                if (is_multicast_ether_addr(datap->h_dest))
                        /*
                         * Bcast/Mcast frames should be sent to the
                         * OS stack as well as on the air.
                         */
                        skb1 = skb_copy(skb, GFP_ATOMIC);
                else {
                        /*
                         * Search for a connected STA with dstMac
                         * as the Mac address. If found, send the
                         * frame to it on the air; else send the
                         * frame up the stack.
                         */
                        conn = ath6kl_find_sta(vif, datap->h_dest);

                        if (conn && ar->intra_bss) {
                                skb1 = skb;
                                skb = NULL;
                        } else if (conn && !ar->intra_bss) {
                                dev_kfree_skb(skb);
                                skb = NULL;
                        }
                }
                if (skb1)
                        ath6kl_data_tx(skb1, vif->ndev);

                if (skb == NULL) {
                        /* nothing to deliver up the stack */
                        return;
                }
        }

        datap = (struct ethhdr *) skb->data;

        if (is_unicast_ether_addr(datap->h_dest)) {
                if (vif->nw_type == AP_NETWORK) {
                        conn = ath6kl_find_sta(vif, datap->h_source);
                        if (!conn)
                                return;
                        aggr_conn = conn->aggr_conn;
                } else
                        aggr_conn = vif->aggr_cntxt->aggr_conn;

                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                                          is_amsdu, skb)) {
                        /* aggregation code will handle the skb */
                        return;
                }
        }

        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

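/*
 * Reorder-release timer: flush every TID whose reorder queue made no
 * progress since the last run, then re-arm the timer only if frames are
 * still parked in some hold_q.
 */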
static void aggr_timeout(unsigned long arg)
{
        u8 i, j;
        struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                stats = &aggr_conn->stat[i];

                if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
                        continue;

                stats->num_timeouts++;
                ath6kl_dbg(ATH6KL_DBG_AGGR,
                           "aggr timeout (st %d end %d)\n",
                           rxtid->seq_next,
                           ((rxtid->seq_next + rxtid->hold_q_sz-1) &
                            ATH6KL_MAX_SEQ_NO));
                aggr_deque_frms(aggr_conn, i, 0, 0);
        }

        aggr_conn->timer_scheduled = false;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];

                if (rxtid->aggr && rxtid->hold_q) {
                        for (j = 0; j < rxtid->hold_q_sz; j++) {
                                if (rxtid->hold_q[j].skb) {
                                        aggr_conn->timer_scheduled = true;
                                        rxtid->timer_mon = true;
                                        rxtid->progress = false;
                                        break;
                                }
                        }

                        if (j >= rxtid->hold_q_sz)
                                rxtid->timer_mon = false;
                }
        }

        if (aggr_conn->timer_scheduled)
                mod_timer(&aggr_conn->timer,
                          jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        if (!aggr_conn || tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];
        stats = &aggr_conn->stat[tid];

        if (rxtid->aggr)
                aggr_deque_frms(aggr_conn, tid, 0, 0);

        rxtid->aggr = false;
        rxtid->progress = false;
        rxtid->timer_mon = false;
        rxtid->win_sz = 0;
        rxtid->seq_next = 0;
        rxtid->hold_q_sz = 0;

        kfree(rxtid->hold_q);
        rxtid->hold_q = NULL;

        memset(stats, 0, sizeof(struct rxtid_stats));
}

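/*
 * ADDBA request from the target: (re)initialize rx aggregation state for
 * the given TID, sizing hold_q to TID_WINDOW_SZ(win_sz) entries and
 * starting the reorder window at seq_no. In AP mode the tid_mux field
 * also multiplexes the AID of the peer STA.
 */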
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                             u8 win_sz)
{
        struct ath6kl_sta *sta;
        struct aggr_info_conn *aggr_conn = NULL;
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        u16 hold_q_size;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else
                aggr_conn = vif->aggr_cntxt->aggr_conn;

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];
        stats = &aggr_conn->stat[tid];

        if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
                ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
                           __func__, win_sz, tid);

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);

        rxtid->seq_next = seq_no;
        hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
        rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
        if (!rxtid->hold_q)
                return;

        rxtid->win_sz = win_sz;
        rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
        if (!skb_queue_empty(&rxtid->q))
                return;

        rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
                    struct aggr_info_conn *aggr_conn)
{
        struct rxtid *rxtid;
        u8 i;

        aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
        aggr_conn->dev = vif->ndev;
        init_timer(&aggr_conn->timer);
        aggr_conn->timer.function = aggr_timeout;
        aggr_conn->timer.data = (unsigned long) aggr_conn;
        aggr_conn->aggr_info = aggr_info;

        aggr_conn->timer_scheduled = false;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                rxtid->aggr = false;
                rxtid->progress = false;
                rxtid->timer_mon = false;
                skb_queue_head_init(&rxtid->q);
                spin_lock_init(&rxtid->lock);
        }
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
        struct aggr_info *p_aggr = NULL;

        p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
        if (!p_aggr) {
                ath6kl_err("failed to alloc memory for aggr_node\n");
                return NULL;
        }

        p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
        if (!p_aggr->aggr_conn) {
                ath6kl_err("failed to alloc memory for connection specific aggr info\n");
                kfree(p_aggr);
                return NULL;
        }

        aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

        skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
        ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

        return p_aggr;
}

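/*
 * DELBA request from the target: tear down rx aggregation state for the
 * TID (flushing anything still held, in order) so subsequent frames are
 * delivered without reordering.
 */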
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
        struct ath6kl_sta *sta;
        struct rxtid *rxtid;
        struct aggr_info_conn *aggr_conn = NULL;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else
                aggr_conn = vif->aggr_cntxt->aggr_conn;

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
        u8 tid;

        if (!aggr_conn)
                return;

        if (aggr_conn->timer_scheduled) {
                del_timer(&aggr_conn->timer);
                aggr_conn->timer_scheduled = false;
        }

        for (tid = 0; tid < NUM_OF_TIDS; tid++)
                aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
        struct htc_packet *packet, *tmp_pkt;

        spin_lock_bh(&ar->lock);
        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                return;
        }

        list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
                                 list) {
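                /*
                 * Drop ar->lock across the free, presumably to keep the
                 * critical section short; tmp_pkt keeps the iteration
                 * safe after the current entry is unlinked.
                 */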
                list_del(&packet->list);
                spin_unlock_bh(&ar->lock);
                dev_kfree_skb(packet->pkt_cntxt);
                spin_lock_bh(&ar->lock);
        }

        spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
        if (!aggr_info)
                return;

        aggr_reset_state(aggr_info->aggr_conn);
        skb_queue_purge(&aggr_info->rx_amsdu_freeq);
        kfree(aggr_info->aggr_conn);
        kfree(aggr_info);
}