/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

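/*
 * U-APSD handling for a sleeping STA in AP mode: decide whether this
 * frame belongs to a delivery-enabled AC and, if so, park it on the
 * per-STA apsdq until a trigger frame arrives. The MORE/EOSP flags in
 * the WMI data header are updated accordingly. Returns true if the
 * frame was consumed (queued) here.
 */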
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info) {
		return false;
	}

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

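/*
 * AP-mode power-save gate for the TX path. Multicast frames are held on
 * the shared mcastpsq while any associated STA sleeps (the target is
 * then told to set the TIM BitmapControl LSB); unicast frames for a
 * sleeping STA go through the U-APSD or legacy PS queues above. Returns
 * true when the skb has been consumed (queued or dropped) here.
 */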
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}
	return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

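/*
 * Main data transmit entry point (the driver's data TX path, wired up
 * as the net_device transmit handler). The frame is run through AP
 * power-save filtering, converted from DIX to 802.3, given a WMI data
 * header (plus optional checksum-offload metadata), and finally mapped
 * to an HTC endpoint and handed to HTC. Always returns 0; on failure
 * the skb is freed and counted as dropped.
 */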
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode power-save processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) {
			chk_adhoc_ps_mapping = true;
		} else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

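	/*
	 * Map the frame to an HTC endpoint: in IBSS power-save mode the
	 * per-node mapping below decides; otherwise the driver's static
	 * AC-to-endpoint table (ar->ac2ep_map) is used.
	 */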
	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the next highest active
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

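/*
 * HTC callback invoked when an endpoint's TX queue overflows. Decides
 * whether HTC should keep or drop the overflowing packet and, for data
 * endpoints, stops the network queues to apply backpressure. The cached
 * hiac_stream_active_pri maintained above is consulted so that lower
 * priority streams are dropped in favour of the highest active one.
 */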
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

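/*
 * HTC TX completion callback. Reaps completed packets, updates pending
 * counters and per-vif stats, recycles cookies, and wakes the network
 * queues of vifs that were not being flushed.
 */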
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

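/*
 * Refill the HTC RX buffer pool for an endpoint up to
 * ATH6KL_MAX_RX_BUFFERS. Each skb doubles as htc_packet storage (the
 * packet struct lives at skb->head) and its data pointer is forced to a
 * 4-byte boundary before being handed to HTC.
 */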
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

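/*
 * Split a received A-MSDU into its individual 802.3 subframes. Each
 * subframe (DA/SA/length header, then payload, padded to a 4-byte
 * boundary) is copied into a fresh skb, converted back to DIX format
 * and appended to the TID reorder queue; the original skb is freed.
 */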
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of A-MSDU subframe padding bytes -
		 * round to the nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

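/*
 * Release in-order frames from the hold queue of a TID, starting at the
 * current window head (seq_next). With seq_no == 0 the whole window is
 * walked; a non-zero seq_no (e.g. from a BAR) stops the walk there. With
 * order == 1 the walk stops at the first hole instead of skipping it.
 * Dequeued frames are handed to the network stack.
 */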
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4095, there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition. For 4095 (TID_WINDOW_SZ = 2 x win_sz), a win_sz of
	 * 2, 4, 8 or 16 works fine.
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

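/*
 * Insert one received MPDU/A-MSDU into the per-TID reorder window and
 * flush whatever has become contiguous. The hold queue is sized
 * TID_WINDOW_SZ(win_sz) = 2 * win_sz and indexed by seq_no modulo that
 * size. Illustration (win_sz = 8, hold_q_sz = 16, seq_next = 100):
 * sequence numbers 100..115 fall inside the hold queue; anything
 * outside that range (modulo 4096) first forces the window to slide,
 * then the frame is stored and in-order frames are delivered.
 * Returns true if the skb was consumed by the aggregation code.
 */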
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q, which is already 2x the BA window)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 *  2a. is the frame_seq_no preceding the current tid_seq_no?
	 *      -> drop the frame, perhaps the sender did not get our ACK.
	 *         This is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *      -> taken care of above, by moving the window forward.
	 */
	if (node->skb) {
		dev_kfree_skb(node->skb);
		stats->num_dups++;
	}

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out.
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

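/*
 * Main HTC receive callback. Control-endpoint traffic is passed to WMI;
 * data frames are unwrapped (WMI data header, RX metadata, optional
 * 802.11 header), fed through AP-mode power-save and U-APSD trigger
 * handling, and then either reordered by the aggregation code or
 * delivered straight to the network stack.
 */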
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the power-save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in the PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames
		 *    for the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest)) {
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		} else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->net_stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

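/*
 * Reorder-release timer. When it fires, flush every aggregating TID
 * that was being monitored (releasing frames stuck behind holes), then
 * re-arm the timer if any hold queue still contains a frame.
 */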
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

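/*
 * ADDBA request event from the target: (re)initialize reorder state for
 * the TID. In AP mode the TID and AID are demultiplexed from tid_mux
 * (see ath6kl_get_tid()/ath6kl_get_aid() above); the hold queue is
 * sized to twice the negotiated window.
 */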
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}