/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

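/*
 * Decide whether a frame destined for a sleeping U-APSD station must be
 * buffered on the per-STA APSD queue. Returns true when the skb has been
 * queued (ownership transferred); returns false when the caller should
 * transmit the frame itself, in which case *flags may have been updated
 * with the MORE/EOSP/UAPSD WMI data header flags.
 */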
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

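/*
 * AP-mode power-save gatekeeper for the transmit path. Multicast frames
 * are buffered on the shared mcastpsq whenever at least one associated
 * station is asleep; unicast frames for a sleeping station are handed to
 * the U-APSD or legacy PS queueing helpers above. Returns true when the
 * skb was consumed (queued or dropped) and the caller must not send it.
 */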
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}
	return ps_queued;
}

/* Tx functions */

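/*
 * Send a WMI control packet to the target over the given HTC endpoint.
 * The HTC send is asynchronous; on success the cookie and skb are released
 * from the tx completion callback. Returns a negative errno when the
 * packet is dropped locally (suspended for WOW, or no cookie available).
 */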
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

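/*
 * Transmit path for data frames coming from the network stack. The frame
 * is dropped unless the vif is connected and WMI is ready; in AP mode it
 * may instead be absorbed by the power-save queues. Otherwise the 802.3
 * header is rewritten, a WMI data header (plus optional checksum-offload
 * metadata) is prepended, an AC/endpoint is chosen and the buffer is
 * handed to HTC. Always returns 0: the skb is either owned by HTC or
 * freed here.
 */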
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

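/*
 * HTC "send queue full" hook. A full WMI control endpoint sets
 * WMI_CTRL_EP_FULL and is only logged; control packets are never dropped
 * here. For data endpoints, a packet on a stream of lower priority than
 * the highest currently active one is dropped once the free cookie count
 * falls to the endpoint's drop threshold. The netif queue is stopped when
 * the packet is kept (and always for ad hoc vifs) so that the stack backs
 * off until tx completions drain the endpoint.
 */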
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full, then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

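/*
 * HTC tx completion callback: reaps completed packets, updates the
 * per-endpoint pending counts and per-vif statistics, releases the cookies
 * and frees the skbs. Packets flushed with -ECANCELED mark their vif as
 * flushing so that its netif queue is not woken afterwards; draining the
 * control endpoint completely wakes any waiter on ar->event_wq.
 */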
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

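/*
 * Top up the HTC receive buffer pool for an endpoint back to
 * ATH6KL_MAX_RX_BUFFERS. Each buffer's htc_packet bookkeeping lives in
 * the skb's headroom (skb->head), and skb->data is 4-byte aligned before
 * the packet is handed to HTC.
 */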
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

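/*
 * Split a received A-MSDU into its individual 802.3 subframes. Each
 * subframe is copied into a fresh buffer from the free pool, converted
 * back to DIX format and appended to the TID reorder queue (rxtid->q);
 * the original aggregate skb is always freed before returning.
 */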
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of A-MSDU subframe padding bytes -
		 * round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

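/*
 * Release frames from the reorder hold queue to the network stack,
 * starting at the current window head (seq_next). With order == 0 frames
 * are flushed up to seq_no (or a full window when seq_no is 0), counting
 * missing slots as holes; with order == 1 delivery stops at the first
 * empty slot, i.e. only an in-order prefix is released.
 */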
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition (TID_WINDOW_SZ = 2 * win_sz; for a 4095 space,
	 * win_sz of 2, 4, 8 or 16 works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

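/*
 * Feed one received MPDU (or A-MSDU) into the rx reorder machinery for a
 * TID. Out-of-window sequence numbers first shift the window forward,
 * flushing frames that fall outside the new window; the frame is then
 * parked in its hold_q slot (freeing any stale occupant as a duplicate)
 * and an in-order prefix is delivered. Returns true when the skb was
 * consumed by aggregation. A reorder timer is armed whenever frames are
 * left waiting in the hold queue.
 */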
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *    2a. Is the frame_seq_no preceding current tid_seq_no?
	 *        -> drop the frame. Perhaps the sender did not get our ACK.
	 *           This is taken care of above.
	 *    2b. Is the frame_seq_no beyond window (st, TID_WINDOW_SZ)?
	 *        -> Taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

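/*
 * Service a U-APSD trigger frame received from a sleeping station:
 * deliver up to the number of frames the STA advertised in the QoS info
 * of its association request (or the whole apsdq when that count is
 * zero), tagging the last one with EOSP, and finally tell the target
 * when no delivery-enabled frames remain buffered.
 */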
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}

	return;
}

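/*
 * HTC rx completion handler: the single entry point for everything coming
 * up from the target. Control-endpoint traffic is routed to WMI event
 * processing; data frames have their WMI data header, rx metadata and
 * padding stripped, drive the AP power-save state machine (PS/trigger
 * bits, PVB updates, intra-BSS forwarding) and finally pass through rx
 * aggregation before delivery to the network stack.
 */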
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames
		 *    for the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else
			aggr_conn = vif->aggr_cntxt->aggr_conn;

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest))
		vif->net_stats.multicast++;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

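/*
 * Reorder timer: flush every aggregating TID whose hold queue has been
 * monitored since the last run, then re-arm only if frames are still
 * parked in some hold queue. This bounds how long a missing sequence
 * number can stall delivery.
 */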
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

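/*
 * Handle an ADDBA request event from the target: (re)initialize the
 * reorder state for the TID encoded in tid_mux (in AP mode the AID is
 * also carried there to locate the station), record the starting sequence
 * number and allocate a hold queue of TID_WINDOW_SZ(win_sz) slots.
 */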
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}