/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"
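
/*
 * Map a transmit frame to an HTC endpoint when IBSS power save is in
 * use. Multicast frames always go out on ENDPOINT_2. For unicast
 * frames the destination MAC is looked up in (or added to) the node
 * map and that node's pending-tx count is bumped; *map_no returns the
 * 1-based node index so the completion path can drop the count again.
 */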
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
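
/*
 * AP mode power-save filter for a transmit frame. Returns true when the
 * frame has been consumed here: queued on the multicast PS queue or on a
 * sleeping station's psq (updating the PVB in the target when the queue
 * was previously empty), or dropped because no station matches. Returns
 * false when the caller should transmit now, with *more_data set if
 * queued frames remain behind this one.
 */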
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(ar, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}

/* Tx functions */
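
/*
 * Send a WMI control packet on the given HTC endpoint. When the control
 * endpoint is already flagged full the packet is dropped and -ENOMEM is
 * returned; otherwise transmission is asynchronous and the cookie is
 * reclaimed in ath6kl_tx_complete().
 */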
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	struct wmi_tx_meta_v2 meta_v2;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &ar->flag)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (ar->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(ar, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		memset(&meta_v2, 0, sizeof(meta_v2));

		if (skb_headroom(skb) < dev->needed_headroom) {
			WARN_ON(1);
			goto fail_tx;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((ar->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	ar->net_stats.tx_dropped++;
	ar->net_stats.tx_aborted_errors++;

	return 0;
}
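
/*
 * Track the highest-priority active AC so that ath6kl_tx_queue_full()
 * can give it preference, then let HTC redistribute the tx credits for
 * the corresponding endpoint.
 */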
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive,
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	htc_indicate_activity_change(ar->htc_target, eid, active);
}
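
/*
 * HTC send-full hook. Control traffic is always kept (a full WMI control
 * endpoint is only flagged and reported), low-priority data is dropped
 * once the cookie pool runs low so the highest-priority active stream
 * keeps making progress, and anything else stops the network queue until
 * tx completions drain it.
 */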
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	enum htc_endpoint_id endpoint = packet->endpoint;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant: the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		return HTC_SEND_FULL_KEEP;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return HTC_SEND_FULL_KEEP;

	if (ar->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue, however we
		 * should stop the network.
		 */
		goto stop_net_queues;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		return HTC_SEND_FULL_DROP;

stop_net_queues:
	spin_lock_bh(&ar->lock);
	set_bit(NETQ_STOPPED, &ar->flag);
	spin_unlock_bh(&ar->lock);
	netif_stop_queue(ar->net_dev);

	return HTC_SEND_FULL_KEEP;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
				     enum htc_endpoint_id eid, u32 map_no)
{
	u32 i;

	if (ar->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
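
/*
 * HTC tx-completion handler: reaps every completed packet, drops the
 * per-endpoint pending counters, recycles cookies and IBSS node-map
 * slots, and frees the skbs outside ar->lock. The network queue is
 * woken again unless the completions came from a flush (-ECANCELED).
 */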
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing = false;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		packet->buf = skb->data;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing = true;

			ar->net_stats.tx_errors++;

			if (status != -ENOSPC)
				ath6kl_err("tx error, status: 0x%x\n", status);
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing = false;
			ar->net_stats.tx_packets++;
			ar->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(ar, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &ar->flag))
			clear_bit(NETQ_STOPPED, &ar->flag);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	if (test_bit(CONNECTED, &ar->flag)) {
		if (!flushing)
			netif_wake_queue(ar->net_dev);
	}

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
			       ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
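
/*
 * Top up the shared pool of maximum-size (A-MSDU) rx buffers. Each skb
 * carries its htc_packet descriptor in skb->head and is queued on
 * ar->amsdu_rx_buffer_queue under ar->lock.
 */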
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
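
/*
 * Split a received A-MSDU into its 802.3 subframes: each subframe is
 * copied into a buffer from the free pool, converted to DIX (Ethernet
 * II) format and queued on rxtid->q. Subframes are padded to a 4-byte
 * boundary within the aggregate. The original skb is consumed.
 */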
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of A-MSDU subframe padding bytes -
		 * round up to a 4-byte (word) boundary.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
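
/*
 * Release frames from the reorder hold queue in sequence order, starting
 * at rxtid->seq_next. With seq_no == 0 the whole current window is
 * walked; a non-zero seq_no (e.g. from a BAR) stops the walk there. With
 * order == 1 the walk stops at the first hole instead of counting it.
 * Dequeued frames are delivered to the network stack via rxtid->q.
 */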
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4095, there would be holes when sequence wrap around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition; with TID_WINDOW_SZ = 2 x win_sz, win_sz values of
	 * 2, 4, 8 and 16 work fine.
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}

static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;
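	/*
	 * The window [st, end] is computed modulo ATH6KL_MAX_SEQ_NO + 1 and
	 * may therefore wrap: e.g. with hold_q_sz = 8 and st = 4090 the
	 * window is 4090..4095,0..1 and any cur in 2..4089 is out of window.
	 * A frame beyond the window but within twice its span shifts the
	 * window forward; anything further flushes and resets it.
	 */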
	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	if (node->skb) {
		dev_kfree_skb(node->skb);
		stats->num_dups++;
	}

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  jiffies +
					  msecs_to_jiffies(AGGR_RX_TIMEOUT));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
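
/*
 * HTC rx-completion handler for data endpoints: strips the HTC and WMI
 * headers, tracks AP mode station power-save transitions, bridges
 * intra-BSS traffic in AP mode, and runs each frame through the rx
 * aggregation logic before delivery to the network stack.
 */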
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	u16 seq_no, offset;
	u8 tid;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		ar->net_stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&ar->lock);

	ar->net_stats.rx_packets++;
	ar->net_stats.rx_bytes += packet->act_len;

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	spin_unlock_bh(&ar->lock);

	skb->dev = ar->net_dev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
		return;
	}

	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr);
	min_hdr_len += sizeof(struct wmi_data_hdr) +
		       sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (ar->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		ar->net_stats.rx_errors++;
		ar->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (ar->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(ar, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, ar->net_dev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

	ath6kl_wmi_data_hdr_remove(ar->wmi, skb);

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(ar->net_dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (ar->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			struct ath6kl_sta *conn = NULL;

			conn = ath6kl_find_sta(ar, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, ar->net_dev);
	}

	if (!aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
				   is_amsdu, skb))
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
}
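
/*
 * Reorder-release timer: any TID whose timer was armed and that saw no
 * progress since gets its pending window flushed, holes included. The
 * timer re-arms itself for as long as any hold queue still has frames
 * parked in it.
 */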
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_err("aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
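
/*
 * Allocate and initialise the per-device aggregation context: one
 * reorder slot per TID, all disabled until an ADDBA event enables them.
 * A rough sketch of the expected lifecycle (the call sites live in
 * other files; the ordering shown here is illustrative, not verified):
 *
 *	ar->aggr_cntxt = aggr_init(ar->net_dev);
 *	...
 *	aggr_recv_addba_req_evt(ar, tid, seq_no, win_sz);
 *	...	rx path feeds frames through aggr_process_recv_frm() ...
 *	aggr_recv_delba_req_evt(ar, tid);
 *	...
 *	aggr_module_destroy(ar->aggr_cntxt);
 */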
struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}

void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
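
/*
 * Tear down the aggregation context: stop the flush timer, free every
 * skb still sitting in a hold queue or reorder queue, drain the free
 * pool and finally free the context itself.
 */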
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}