txrx.c (458ce2910aa83d8a2cafb489d727f7da839e73c6) txrx.c (81db48dc295e16aced8ece912098fda078b1ba8c)
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "core.h"
19#include "debug.h"
20
/*
 * A single muxed byte carries both the TID and the AID:
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

/* Extract the TID (traffic identifier) from the low nibble of tid_mux. */
static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

/* Extract the AID (association id) from the high nibble of tid_mux. */
static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
37
20static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
21 u32 *map_no)
22{
23 struct ath6kl *ar = ath6kl_priv(dev);
24 struct ethhdr *eth_hdr;
25 u32 i, ep_map = -1;
26 u8 *datap;
27

--- 44 unchanged lines hidden (view full) ---

72 }
73
74 *map_no = ep_map + 1;
75 ar->node_map[ep_map].tx_pend++;
76
77 return ar->node_map[ep_map].ep_id;
78}
79
38static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
39 u32 *map_no)
40{
41 struct ath6kl *ar = ath6kl_priv(dev);
42 struct ethhdr *eth_hdr;
43 u32 i, ep_map = -1;
44 u8 *datap;
45

--- 44 unchanged lines hidden (view full) ---

90 }
91
92 *map_no = ep_map + 1;
93 ar->node_map[ep_map].tx_pend++;
94
95 return ar->node_map[ep_map].ep_id;
96}
97
/*
 * uAPSD (WMM power-save) tx processing for an AP-mode peer.
 *
 * Decides whether @skb must be buffered on the per-station APSD queue
 * instead of being transmitted immediately, and updates @flags with the
 * WMI data-header bits (MORE/EOSP/UAPSD) the caller must place in the
 * outgoing frame.
 *
 * Returns true when the skb has been queued (caller must not send it),
 * false when the caller should transmit it now.
 */
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				struct ath6kl_vif *vif,
				struct sk_buff *skb,
				u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		/* Station did not negotiate APSD for any AC */
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		/* Locate the IP header to derive the 802.1d user priority */
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
					(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	/* Not a delivery-enabled AC for this station: no APSD buffering */
	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
169
170static bool ath6kl_process_psq(struct ath6kl_sta *conn,
171 struct ath6kl_vif *vif,
172 struct sk_buff *skb,
173 u32 *flags)
174{
175 bool is_psq_empty = false;
176 struct ath6kl *ar = vif->ar;
177
178 if (conn->sta_flags & STA_PS_POLLED) {
179 spin_lock_bh(&conn->psq_lock);
180 if (!skb_queue_empty(&conn->psq))
181 *flags |= WMI_DATA_HDR_FLAGS_MORE;
182 spin_unlock_bh(&conn->psq_lock);
183 return false;
184 }
185
186 /* Queue the frames if the STA is sleeping */
187 spin_lock_bh(&conn->psq_lock);
188 is_psq_empty = skb_queue_empty(&conn->psq);
189 skb_queue_tail(&conn->psq, skb);
190 spin_unlock_bh(&conn->psq_lock);
191
192 /*
193 * If this is the first pkt getting queued
194 * for this STA, update the PVB for this
195 * STA.
196 */
197 if (is_psq_empty)
198 ath6kl_wmi_set_pvb_cmd(ar->wmi,
199 vif->fw_vif_idx,
200 conn->aid, 1);
201 return true;
202}
203
80static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
204static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
81 bool *more_data)
205 u32 *flags)
82{
83 struct ethhdr *datap = (struct ethhdr *) skb->data;
84 struct ath6kl_sta *conn = NULL;
206{
207 struct ethhdr *datap = (struct ethhdr *) skb->data;
208 struct ath6kl_sta *conn = NULL;
85 bool ps_queued = false, is_psq_empty = false;
209 bool ps_queued = false;
86 struct ath6kl *ar = vif->ar;
87
88 if (is_multicast_ether_addr(datap->h_dest)) {
89 u8 ctr = 0;
90 bool q_mcast = false;
91
92 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
93 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {

--- 29 unchanged lines hidden (view full) ---

123 ps_queued = true;
124 } else {
125 /*
126 * This transmit is because of Dtim expiry.
127 * Determine if MoreData bit has to be set.
128 */
129 spin_lock_bh(&ar->mcastpsq_lock);
130 if (!skb_queue_empty(&ar->mcastpsq))
210 struct ath6kl *ar = vif->ar;
211
212 if (is_multicast_ether_addr(datap->h_dest)) {
213 u8 ctr = 0;
214 bool q_mcast = false;
215
216 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
217 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {

--- 29 unchanged lines hidden (view full) ---

247 ps_queued = true;
248 } else {
249 /*
250 * This transmit is because of Dtim expiry.
251 * Determine if MoreData bit has to be set.
252 */
253 spin_lock_bh(&ar->mcastpsq_lock);
254 if (!skb_queue_empty(&ar->mcastpsq))
131 *more_data = true;
255 *flags |= WMI_DATA_HDR_FLAGS_MORE;
132 spin_unlock_bh(&ar->mcastpsq_lock);
133 }
134 }
135 } else {
136 conn = ath6kl_find_sta(vif, datap->h_dest);
137 if (!conn) {
138 dev_kfree_skb(skb);
139
140 /* Inform the caller that the skb is consumed */
141 return true;
142 }
143
144 if (conn->sta_flags & STA_PS_SLEEP) {
256 spin_unlock_bh(&ar->mcastpsq_lock);
257 }
258 }
259 } else {
260 conn = ath6kl_find_sta(vif, datap->h_dest);
261 if (!conn) {
262 dev_kfree_skb(skb);
263
264 /* Inform the caller that the skb is consumed */
265 return true;
266 }
267
268 if (conn->sta_flags & STA_PS_SLEEP) {
145 if (!(conn->sta_flags & STA_PS_POLLED)) {
146 /* Queue the frames if the STA is sleeping */
147 spin_lock_bh(&conn->psq_lock);
148 is_psq_empty = skb_queue_empty(&conn->psq);
149 skb_queue_tail(&conn->psq, skb);
150 spin_unlock_bh(&conn->psq_lock);
151
152 /*
153 * If this is the first pkt getting queued
154 * for this STA, update the PVB for this
155 * STA.
156 */
157 if (is_psq_empty)
158 ath6kl_wmi_set_pvb_cmd(ar->wmi,
159 vif->fw_vif_idx,
160 conn->aid, 1);
161
162 ps_queued = true;
163 } else {
164 /*
165 * This tx is because of a PsPoll.
166 * Determine if MoreData bit has to be set.
167 */
168 spin_lock_bh(&conn->psq_lock);
169 if (!skb_queue_empty(&conn->psq))
170 *more_data = true;
171 spin_unlock_bh(&conn->psq_lock);
172 }
269 ps_queued = ath6kl_process_uapsdq(conn,
270 vif, skb, flags);
271 if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
272 ps_queued = ath6kl_process_psq(conn,
273 vif, skb, flags);
173 }
174 }
274 }
275 }
175
176 return ps_queued;
177}
178
179/* Tx functions */
180
181int ath6kl_control_tx(void *devt, struct sk_buff *skb,
182 enum htc_endpoint_id eid)
183{

--- 53 unchanged lines hidden (view full) ---

237{
238 struct ath6kl *ar = ath6kl_priv(dev);
239 struct ath6kl_cookie *cookie = NULL;
240 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
241 struct ath6kl_vif *vif = netdev_priv(dev);
242 u32 map_no = 0;
243 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
244 u8 ac = 99 ; /* initialize to unmapped ac */
276 return ps_queued;
277}
278
279/* Tx functions */
280
281int ath6kl_control_tx(void *devt, struct sk_buff *skb,
282 enum htc_endpoint_id eid)
283{

--- 53 unchanged lines hidden (view full) ---

337{
338 struct ath6kl *ar = ath6kl_priv(dev);
339 struct ath6kl_cookie *cookie = NULL;
340 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
341 struct ath6kl_vif *vif = netdev_priv(dev);
342 u32 map_no = 0;
343 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
344 u8 ac = 99 ; /* initialize to unmapped ac */
245 bool chk_adhoc_ps_mapping = false, more_data = false;
345 bool chk_adhoc_ps_mapping = false;
246 int ret;
346 int ret;
347 struct wmi_tx_meta_v2 meta_v2;
348 void *meta;
349 u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
350 u8 meta_ver = 0;
351 u32 flags = 0;
247
248 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
249 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
250 skb, skb->data, skb->len);
251
252 /* If target is not associated */
253 if (!test_bit(CONNECTED, &vif->flags)) {
254 dev_kfree_skb(skb);
255 return 0;
256 }
257
258 if (!test_bit(WMI_READY, &ar->flag))
259 goto fail_tx;
260
261 /* AP mode Power saving processing */
262 if (vif->nw_type == AP_NETWORK) {
352
353 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
354 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
355 skb, skb->data, skb->len);
356
357 /* If target is not associated */
358 if (!test_bit(CONNECTED, &vif->flags)) {
359 dev_kfree_skb(skb);
360 return 0;
361 }
362
363 if (!test_bit(WMI_READY, &ar->flag))
364 goto fail_tx;
365
366 /* AP mode Power saving processing */
367 if (vif->nw_type == AP_NETWORK) {
263 if (ath6kl_powersave_ap(vif, skb, &more_data))
368 if (ath6kl_powersave_ap(vif, skb, &flags))
264 return 0;
265 }
266
267 if (test_bit(WMI_ENABLED, &ar->flag)) {
369 return 0;
370 }
371
372 if (test_bit(WMI_ENABLED, &ar->flag)) {
373 if ((dev->features & NETIF_F_IP_CSUM) &&
374 (csum == CHECKSUM_PARTIAL)) {
375 csum_start = skb->csum_start -
376 (skb_network_header(skb) - skb->head) +
377 sizeof(struct ath6kl_llc_snap_hdr);
378 csum_dest = skb->csum_offset + csum_start;
379 }
380
268 if (skb_headroom(skb) < dev->needed_headroom) {
269 struct sk_buff *tmp_skb = skb;
270
271 skb = skb_realloc_headroom(skb, dev->needed_headroom);
272 kfree_skb(tmp_skb);
273 if (skb == NULL) {
274 vif->net_stats.tx_dropped++;
275 return 0;
276 }
277 }
278
279 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
280 ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
281 goto fail_tx;
282 }
283
381 if (skb_headroom(skb) < dev->needed_headroom) {
382 struct sk_buff *tmp_skb = skb;
383
384 skb = skb_realloc_headroom(skb, dev->needed_headroom);
385 kfree_skb(tmp_skb);
386 if (skb == NULL) {
387 vif->net_stats.tx_dropped++;
388 return 0;
389 }
390 }
391
392 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
393 ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
394 goto fail_tx;
395 }
396
284 if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
285 more_data, 0, 0, NULL,
286 vif->fw_vif_idx)) {
287 ath6kl_err("wmi_data_hdr_add failed\n");
397 if ((dev->features & NETIF_F_IP_CSUM) &&
398 (csum == CHECKSUM_PARTIAL)) {
399 meta_v2.csum_start = csum_start;
400 meta_v2.csum_dest = csum_dest;
401
402 /* instruct target to calculate checksum */
403 meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
404 meta_ver = WMI_META_VERSION_2;
405 meta = &meta_v2;
406 } else {
407 meta_ver = 0;
408 meta = NULL;
409 }
410
411 ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
412 DATA_MSGTYPE, flags, 0,
413 meta_ver,
414 meta, vif->fw_vif_idx);
415
416 if (ret) {
417 ath6kl_warn("failed to add wmi data header:%d\n"
418 , ret);
288 goto fail_tx;
289 }
290
291 if ((vif->nw_type == ADHOC_NETWORK) &&
292 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
293 chk_adhoc_ps_mapping = true;
294 else {
295 /* get the stream mapping */

--- 148 unchanged lines hidden (view full) ---

444
445 if (endpoint == ar->ctrl_ep) {
446 /*
447 * Under normal WMI if this is getting full, then something
448 * is running rampant the host should not be exhausting the
449 * WMI queue with too many commands the only exception to
450 * this is during testing using endpointping.
451 */
419 goto fail_tx;
420 }
421
422 if ((vif->nw_type == ADHOC_NETWORK) &&
423 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
424 chk_adhoc_ps_mapping = true;
425 else {
426 /* get the stream mapping */

--- 148 unchanged lines hidden (view full) ---

575
576 if (endpoint == ar->ctrl_ep) {
577 /*
578 * Under normal WMI if this is getting full, then something
579 * is running rampant the host should not be exhausting the
580 * WMI queue with too many commands the only exception to
581 * this is during testing using endpointping.
582 */
452 spin_lock_bh(&ar->lock);
453 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
583 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
454 spin_unlock_bh(&ar->lock);
455 ath6kl_err("wmi ctrl ep is full\n");
456 return action;
457 }
458
459 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
460 return action;
461
462 /*
463 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
464 * the highest active stream.
465 */
466 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
467 ar->hiac_stream_active_pri &&
584 ath6kl_err("wmi ctrl ep is full\n");
585 return action;
586 }
587
588 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
589 return action;
590
591 /*
592 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
593 * the highest active stream.
594 */
595 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
596 ar->hiac_stream_active_pri &&
468 ar->cookie_count <= MAX_HI_COOKIE_NUM)
597 ar->cookie_count <=
598 target->endpoint[endpoint].tx_drop_packet_threshold)
469 /*
470 * Give preference to the highest priority stream by
471 * dropping the packets which overflowed.
472 */
473 action = HTC_SEND_FULL_DROP;
474
475 /* FIXME: Locking */
476 spin_lock_bh(&ar->list_lock);
477 list_for_each_entry(vif, &ar->vif_list, list) {
478 if (vif->nw_type == ADHOC_NETWORK ||
479 action != HTC_SEND_FULL_DROP) {
480 spin_unlock_bh(&ar->list_lock);
481
599 /*
600 * Give preference to the highest priority stream by
601 * dropping the packets which overflowed.
602 */
603 action = HTC_SEND_FULL_DROP;
604
605 /* FIXME: Locking */
606 spin_lock_bh(&ar->list_lock);
607 list_for_each_entry(vif, &ar->vif_list, list) {
608 if (vif->nw_type == ADHOC_NETWORK ||
609 action != HTC_SEND_FULL_DROP) {
610 spin_unlock_bh(&ar->list_lock);
611
482 spin_lock_bh(&vif->if_lock);
483 set_bit(NETQ_STOPPED, &vif->flags);
612 set_bit(NETQ_STOPPED, &vif->flags);
484 spin_unlock_bh(&vif->if_lock);
485 netif_stop_queue(vif->ndev);
486
487 return action;
488 }
489 }
490 spin_unlock_bh(&ar->list_lock);
491
492 return action;

--- 212 unchanged lines hidden (view full) ---

705 num--;
706 }
707}
708
709static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
710{
711 struct sk_buff *skb = NULL;
712
613 netif_stop_queue(vif->ndev);
614
615 return action;
616 }
617 }
618 spin_unlock_bh(&ar->list_lock);
619
620 return action;

--- 212 unchanged lines hidden (view full) ---

833 num--;
834 }
835}
836
837static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
838{
839 struct sk_buff *skb = NULL;
840
713 if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
714 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
841 if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
842 (AGGR_NUM_OF_FREE_NETBUFS >> 2))
843 ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
844 AGGR_NUM_OF_FREE_NETBUFS);
715
845
716 skb = skb_dequeue(&p_aggr->free_q);
846 skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
717
718 return skb;
719}
720
721void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
722{
723 struct ath6kl *ar = target->dev->ar;
724 struct sk_buff *skb;

--- 151 unchanged lines hidden (view full) ---

876
877 framep += frame_8023_len;
878 amsdu_len -= frame_8023_len;
879 }
880
881 dev_kfree_skb(skb);
882}
883
847
848 return skb;
849}
850
851void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
852{
853 struct ath6kl *ar = target->dev->ar;
854 struct sk_buff *skb;

--- 151 unchanged lines hidden (view full) ---

1006
1007 framep += frame_8023_len;
1008 amsdu_len -= frame_8023_len;
1009 }
1010
1011 dev_kfree_skb(skb);
1012}
1013
884static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
1014static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
885 u16 seq_no, u8 order)
886{
887 struct sk_buff *skb;
888 struct rxtid *rxtid;
889 struct skb_hold_q *node;
890 u16 idx, idx_end, seq_end;
891 struct rxtid_stats *stats;
892
1015 u16 seq_no, u8 order)
1016{
1017 struct sk_buff *skb;
1018 struct rxtid *rxtid;
1019 struct skb_hold_q *node;
1020 u16 idx, idx_end, seq_end;
1021 struct rxtid_stats *stats;
1022
893 if (!p_aggr)
894 return;
1023 rxtid = &agg_conn->rx_tid[tid];
1024 stats = &agg_conn->stat[tid];
895
1025
896 rxtid = &p_aggr->rx_tid[tid];
897 stats = &p_aggr->stat[tid];
898
899 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
900
901 /*
902 * idx_end is typically the last possible frame in the window,
903 * but changes to 'the' seq_no, when BAR comes. If seq_no
904 * is non-zero, we will go up to that and stop.
905 * Note: last seq no in current window will occupy the same
906 * index position as index that is just previous to start.

--- 11 unchanged lines hidden (view full) ---

918
919 do {
920 node = &rxtid->hold_q[idx];
921 if ((order == 1) && (!node->skb))
922 break;
923
924 if (node->skb) {
925 if (node->is_amsdu)
1026 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1027
1028 /*
1029 * idx_end is typically the last possible frame in the window,
1030 * but changes to 'the' seq_no, when BAR comes. If seq_no
1031 * is non-zero, we will go up to that and stop.
1032 * Note: last seq no in current window will occupy the same
1033 * index position as index that is just previous to start.

--- 11 unchanged lines hidden (view full) ---

1045
1046 do {
1047 node = &rxtid->hold_q[idx];
1048 if ((order == 1) && (!node->skb))
1049 break;
1050
1051 if (node->skb) {
1052 if (node->is_amsdu)
926 aggr_slice_amsdu(p_aggr, rxtid, node->skb);
1053 aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
1054 node->skb);
927 else
928 skb_queue_tail(&rxtid->q, node->skb);
929 node->skb = NULL;
930 } else
931 stats->num_hole++;
932
933 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
934 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
935 } while (idx != idx_end);
936
937 spin_unlock_bh(&rxtid->lock);
938
939 stats->num_delivered += skb_queue_len(&rxtid->q);
940
941 while ((skb = skb_dequeue(&rxtid->q)))
1055 else
1056 skb_queue_tail(&rxtid->q, node->skb);
1057 node->skb = NULL;
1058 } else
1059 stats->num_hole++;
1060
1061 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
1062 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1063 } while (idx != idx_end);
1064
1065 spin_unlock_bh(&rxtid->lock);
1066
1067 stats->num_delivered += skb_queue_len(&rxtid->q);
1068
1069 while ((skb = skb_dequeue(&rxtid->q)))
942 ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
1070 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
943}
944
1071}
1072
945static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
1073static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
946 u16 seq_no,
947 bool is_amsdu, struct sk_buff *frame)
948{
949 struct rxtid *rxtid;
950 struct rxtid_stats *stats;
951 struct sk_buff *skb;
952 struct skb_hold_q *node;
953 u16 idx, st, cur, end;
954 bool is_queued = false;
955 u16 extended_end;
956
1074 u16 seq_no,
1075 bool is_amsdu, struct sk_buff *frame)
1076{
1077 struct rxtid *rxtid;
1078 struct rxtid_stats *stats;
1079 struct sk_buff *skb;
1080 struct skb_hold_q *node;
1081 u16 idx, st, cur, end;
1082 bool is_queued = false;
1083 u16 extended_end;
1084
957 rxtid = &agg_info->rx_tid[tid];
958 stats = &agg_info->stat[tid];
1085 rxtid = &agg_conn->rx_tid[tid];
1086 stats = &agg_conn->stat[tid];
959
960 stats->num_into_aggr++;
961
962 if (!rxtid->aggr) {
963 if (is_amsdu) {
1087
1088 stats->num_into_aggr++;
1089
1090 if (!rxtid->aggr) {
1091 if (is_amsdu) {
964 aggr_slice_amsdu(agg_info, rxtid, frame);
1092 aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
965 is_queued = true;
966 stats->num_amsdu++;
967 while ((skb = skb_dequeue(&rxtid->q)))
1093 is_queued = true;
1094 stats->num_amsdu++;
1095 while ((skb = skb_dequeue(&rxtid->q)))
968 ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
1096 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
969 skb);
970 }
971 return is_queued;
972 }
973
974 /* Check the incoming sequence no, if it's in the window */
975 st = rxtid->seq_next;
976 cur = seq_no;
977 end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
978
979 if (((st < end) && (cur < st || cur > end)) ||
980 ((st > end) && (cur > end) && (cur < st))) {
981 extended_end = (end + rxtid->hold_q_sz - 1) &
982 ATH6KL_MAX_SEQ_NO;
983
984 if (((end < extended_end) &&
985 (cur < end || cur > extended_end)) ||
986 ((end > extended_end) && (cur > extended_end) &&
987 (cur < end))) {
1097 skb);
1098 }
1099 return is_queued;
1100 }
1101
1102 /* Check the incoming sequence no, if it's in the window */
1103 st = rxtid->seq_next;
1104 cur = seq_no;
1105 end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
1106
1107 if (((st < end) && (cur < st || cur > end)) ||
1108 ((st > end) && (cur > end) && (cur < st))) {
1109 extended_end = (end + rxtid->hold_q_sz - 1) &
1110 ATH6KL_MAX_SEQ_NO;
1111
1112 if (((end < extended_end) &&
1113 (cur < end || cur > extended_end)) ||
1114 ((end > extended_end) && (cur > extended_end) &&
1115 (cur < end))) {
988 aggr_deque_frms(agg_info, tid, 0, 0);
1116 aggr_deque_frms(agg_conn, tid, 0, 0);
989 if (cur >= rxtid->hold_q_sz - 1)
990 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
991 else
992 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
993 (rxtid->hold_q_sz - 2 - cur);
994 } else {
995 /*
996 * Dequeue only those frames that are outside the
997 * new shifted window.
998 */
999 if (cur >= rxtid->hold_q_sz - 1)
1000 st = cur - (rxtid->hold_q_sz - 1);
1001 else
1002 st = ATH6KL_MAX_SEQ_NO -
1003 (rxtid->hold_q_sz - 2 - cur);
1004
1117 if (cur >= rxtid->hold_q_sz - 1)
1118 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
1119 else
1120 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
1121 (rxtid->hold_q_sz - 2 - cur);
1122 } else {
1123 /*
1124 * Dequeue only those frames that are outside the
1125 * new shifted window.
1126 */
1127 if (cur >= rxtid->hold_q_sz - 1)
1128 st = cur - (rxtid->hold_q_sz - 1);
1129 else
1130 st = ATH6KL_MAX_SEQ_NO -
1131 (rxtid->hold_q_sz - 2 - cur);
1132
1005 aggr_deque_frms(agg_info, tid, st, 0);
1133 aggr_deque_frms(agg_conn, tid, st, 0);
1006 }
1007
1008 stats->num_oow++;
1009 }
1010
1011 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
1012
1013 node = &rxtid->hold_q[idx];

--- 22 unchanged lines hidden (view full) ---

1036
1037 if (node->is_amsdu)
1038 stats->num_amsdu++;
1039 else
1040 stats->num_mpdu++;
1041
1042 spin_unlock_bh(&rxtid->lock);
1043
1134 }
1135
1136 stats->num_oow++;
1137 }
1138
1139 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
1140
1141 node = &rxtid->hold_q[idx];

--- 22 unchanged lines hidden (view full) ---

1164
1165 if (node->is_amsdu)
1166 stats->num_amsdu++;
1167 else
1168 stats->num_mpdu++;
1169
1170 spin_unlock_bh(&rxtid->lock);
1171
1044 aggr_deque_frms(agg_info, tid, 0, 1);
1172 aggr_deque_frms(agg_conn, tid, 0, 1);
1045
1173
1046 if (agg_info->timer_scheduled)
1174 if (agg_conn->timer_scheduled)
1047 rxtid->progress = true;
1048 else
1049 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1050 if (rxtid->hold_q[idx].skb) {
1051 /*
1052 * There is a frame in the queue and no
1053 * timer so start a timer to ensure that
1054 * the frame doesn't remain stuck
1055 * forever.
1056 */
1175 rxtid->progress = true;
1176 else
1177 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1178 if (rxtid->hold_q[idx].skb) {
1179 /*
1180 * There is a frame in the queue and no
1181 * timer so start a timer to ensure that
1182 * the frame doesn't remain stuck
1183 * forever.
1184 */
1057 agg_info->timer_scheduled = true;
1058 mod_timer(&agg_info->timer,
1185 agg_conn->timer_scheduled = true;
1186 mod_timer(&agg_conn->timer,
1059 (jiffies +
1060 HZ * (AGGR_RX_TIMEOUT) / 1000));
1061 rxtid->progress = false;
1062 rxtid->timer_mon = true;
1063 break;
1064 }
1065 }
1066
1067 return is_queued;
1068}
1069
1187 (jiffies +
1188 HZ * (AGGR_RX_TIMEOUT) / 1000));
1189 rxtid->progress = false;
1190 rxtid->timer_mon = true;
1191 break;
1192 }
1193 }
1194
1195 return is_queued;
1196}
1197
1198static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1199 struct ath6kl_sta *conn)
1200{
1201 struct ath6kl *ar = vif->ar;
1202 bool is_apsdq_empty, is_apsdq_empty_at_start;
1203 u32 num_frames_to_deliver, flags;
1204 struct sk_buff *skb = NULL;
1205
1206 /*
1207 * If the APSD q for this STA is not empty, dequeue and
1208 * send a pkt from the head of the q. Also update the
1209 * More data bit in the WMI_DATA_HDR if there are
1210 * more pkts for this STA in the APSD q.
1211 * If there are no more pkts for this STA,
1212 * update the APSD bitmap for this STA.
1213 */
1214
1215 num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
1216 ATH6KL_APSD_FRAME_MASK;
1217 /*
1218 * Number of frames to send in a service period is
1219 * indicated by the station
1220 * in the QOS_INFO of the association request
1221 * If it is zero, send all frames
1222 */
1223 if (!num_frames_to_deliver)
1224 num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
1225
1226 spin_lock_bh(&conn->psq_lock);
1227 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1228 spin_unlock_bh(&conn->psq_lock);
1229 is_apsdq_empty_at_start = is_apsdq_empty;
1230
1231 while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1232
1233 spin_lock_bh(&conn->psq_lock);
1234 skb = skb_dequeue(&conn->apsdq);
1235 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1236 spin_unlock_bh(&conn->psq_lock);
1237
1238 /*
1239 * Set the STA flag to Trigger delivery,
1240 * so that the frame will go out
1241 */
1242 conn->sta_flags |= STA_PS_APSD_TRIGGER;
1243 num_frames_to_deliver--;
1244
1245 /* Last frame in the service period, set EOSP or queue empty */
1246 if ((is_apsdq_empty) || (!num_frames_to_deliver))
1247 conn->sta_flags |= STA_PS_APSD_EOSP;
1248
1249 ath6kl_data_tx(skb, vif->ndev);
1250 conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
1251 conn->sta_flags &= ~(STA_PS_APSD_EOSP);
1252 }
1253
1254 if (is_apsdq_empty) {
1255 if (is_apsdq_empty_at_start)
1256 flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
1257 else
1258 flags = 0;
1259
1260 ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
1261 vif->fw_vif_idx,
1262 conn->aid, 0, flags);
1263 }
1264
1265 return;
1266}
1267
1070void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1071{
1072 struct ath6kl *ar = target->dev->ar;
1073 struct sk_buff *skb = packet->pkt_cntxt;
1074 struct wmi_rx_meta_v2 *meta;
1075 struct wmi_data_hdr *dhdr;
1076 int min_hdr_len;
1077 u8 meta_type, dot11_hdr = 0;
1078 int status = packet->status;
1079 enum htc_endpoint_id ept = packet->endpoint;
1080 bool is_amsdu, prev_ps, ps_state = false;
1268void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1269{
1270 struct ath6kl *ar = target->dev->ar;
1271 struct sk_buff *skb = packet->pkt_cntxt;
1272 struct wmi_rx_meta_v2 *meta;
1273 struct wmi_data_hdr *dhdr;
1274 int min_hdr_len;
1275 u8 meta_type, dot11_hdr = 0;
1276 int status = packet->status;
1277 enum htc_endpoint_id ept = packet->endpoint;
1278 bool is_amsdu, prev_ps, ps_state = false;
1279 bool trig_state = false;
1081 struct ath6kl_sta *conn = NULL;
1082 struct sk_buff *skb1 = NULL;
1083 struct ethhdr *datap = NULL;
1084 struct ath6kl_vif *vif;
1280 struct ath6kl_sta *conn = NULL;
1281 struct sk_buff *skb1 = NULL;
1282 struct ethhdr *datap = NULL;
1283 struct ath6kl_vif *vif;
1284 struct aggr_info_conn *aggr_conn;
1085 u16 seq_no, offset;
1086 u8 tid, if_idx;
1087
1088 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1089 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1090 __func__, ar, ept, skb, packet->buf,
1091 packet->act_len, status);
1092
1093 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1094 dev_kfree_skb(skb);
1095 return;
1096 }
1097
1098 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1099 skb_pull(skb, HTC_HDR_LENGTH);
1100
1285 u16 seq_no, offset;
1286 u8 tid, if_idx;
1287
1288 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1289 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1290 __func__, ar, ept, skb, packet->buf,
1291 packet->act_len, status);
1292
1293 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1294 dev_kfree_skb(skb);
1295 return;
1296 }
1297
1298 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1299 skb_pull(skb, HTC_HDR_LENGTH);
1300
1301 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1302 skb->data, skb->len);
1303
1101 if (ept == ar->ctrl_ep) {
1304 if (ept == ar->ctrl_ep) {
1305 if (test_bit(WMI_ENABLED, &ar->flag)) {
1306 ath6kl_check_wow_status(ar);
1307 ath6kl_wmi_control_rx(ar->wmi, skb);
1308 return;
1309 }
1102 if_idx =
1103 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1104 } else {
1105 if_idx =
1106 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1107 }
1108
1109 vif = ath6kl_get_vif_by_index(ar, if_idx);

--- 8 unchanged lines hidden (view full) ---

1118 */
1119 spin_lock_bh(&vif->if_lock);
1120
1121 vif->net_stats.rx_packets++;
1122 vif->net_stats.rx_bytes += packet->act_len;
1123
1124 spin_unlock_bh(&vif->if_lock);
1125
1310 if_idx =
1311 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1312 } else {
1313 if_idx =
1314 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1315 }
1316
1317 vif = ath6kl_get_vif_by_index(ar, if_idx);

--- 8 unchanged lines hidden (view full) ---

1326 */
1327 spin_lock_bh(&vif->if_lock);
1328
1329 vif->net_stats.rx_packets++;
1330 vif->net_stats.rx_bytes += packet->act_len;
1331
1332 spin_unlock_bh(&vif->if_lock);
1333
1126
1127 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1128 skb->data, skb->len);
1129
1130 skb->dev = vif->ndev;
1131
1132 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1133 if (EPPING_ALIGNMENT_PAD > 0)
1134 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1135 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1136 return;
1137 }
1138
1139 ath6kl_check_wow_status(ar);
1140
1334 skb->dev = vif->ndev;
1335
1336 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1337 if (EPPING_ALIGNMENT_PAD > 0)
1338 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1339 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1340 return;
1341 }
1342
1343 ath6kl_check_wow_status(ar);
1344
1141 if (ept == ar->ctrl_ep) {
1142 ath6kl_wmi_control_rx(ar->wmi, skb);
1143 return;
1144 }
1145
1146 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1147 sizeof(struct ath6kl_llc_snap_hdr);
1148
1149 dhdr = (struct wmi_data_hdr *) skb->data;
1150
1151 /*
1152 * In the case of AP mode we may receive NULL data frames
1153 * that do not have LLC hdr. They are 16 bytes in size.

--- 12 unchanged lines hidden (view full) ---

1166 /* Get the Power save state of the STA */
1167 if (vif->nw_type == AP_NETWORK) {
1168 meta_type = wmi_data_hdr_get_meta(dhdr);
1169
1170 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1171 WMI_DATA_HDR_PS_MASK);
1172
1173 offset = sizeof(struct wmi_data_hdr);
1345 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1346 sizeof(struct ath6kl_llc_snap_hdr);
1347
1348 dhdr = (struct wmi_data_hdr *) skb->data;
1349
1350 /*
1351 * In the case of AP mode we may receive NULL data frames
1352 * that do not have LLC hdr. They are 16 bytes in size.

--- 12 unchanged lines hidden (view full) ---

1365 /* Get the Power save state of the STA */
1366 if (vif->nw_type == AP_NETWORK) {
1367 meta_type = wmi_data_hdr_get_meta(dhdr);
1368
1369 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1370 WMI_DATA_HDR_PS_MASK);
1371
1372 offset = sizeof(struct wmi_data_hdr);
1373 trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
1174
1175 switch (meta_type) {
1176 case 0:
1177 break;
1178 case WMI_META_VERSION_1:
1179 offset += sizeof(struct wmi_rx_meta_v1);
1180 break;
1181 case WMI_META_VERSION_2:

--- 22 unchanged lines hidden (view full) ---

1204 */
1205 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1206
1207 if (ps_state)
1208 conn->sta_flags |= STA_PS_SLEEP;
1209 else
1210 conn->sta_flags &= ~STA_PS_SLEEP;
1211
1374
1375 switch (meta_type) {
1376 case 0:
1377 break;
1378 case WMI_META_VERSION_1:
1379 offset += sizeof(struct wmi_rx_meta_v1);
1380 break;
1381 case WMI_META_VERSION_2:

--- 22 unchanged lines hidden (view full) ---

1404 */
1405 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1406
1407 if (ps_state)
1408 conn->sta_flags |= STA_PS_SLEEP;
1409 else
1410 conn->sta_flags &= ~STA_PS_SLEEP;
1411
1412 /* Accept trigger only when the station is in sleep */
1413 if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1414 ath6kl_uapsd_trigger_frame_rx(vif, conn);
1415
1212 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1213 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1214 struct sk_buff *skbuff = NULL;
1416 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1417 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1418 struct sk_buff *skbuff = NULL;
1419 bool is_apsdq_empty;
1420 struct ath6kl_mgmt_buff *mgmt;
1421 u8 idx;
1215
1216 spin_lock_bh(&conn->psq_lock);
1422
1423 spin_lock_bh(&conn->psq_lock);
1217 while ((skbuff = skb_dequeue(&conn->psq))
1218 != NULL) {
1424 while (conn->mgmt_psq_len > 0) {
1425 mgmt = list_first_entry(
1426 &conn->mgmt_psq,
1427 struct ath6kl_mgmt_buff,
1428 list);
1429 list_del(&mgmt->list);
1430 conn->mgmt_psq_len--;
1219 spin_unlock_bh(&conn->psq_lock);
1431 spin_unlock_bh(&conn->psq_lock);
1432 idx = vif->fw_vif_idx;
1433
1434 ath6kl_wmi_send_mgmt_cmd(ar->wmi,
1435 idx,
1436 mgmt->id,
1437 mgmt->freq,
1438 mgmt->wait,
1439 mgmt->buf,
1440 mgmt->len,
1441 mgmt->no_cck);
1442
1443 kfree(mgmt);
1444 spin_lock_bh(&conn->psq_lock);
1445 }
1446 conn->mgmt_psq_len = 0;
1447 while ((skbuff = skb_dequeue(&conn->psq))) {
1448 spin_unlock_bh(&conn->psq_lock);
1220 ath6kl_data_tx(skbuff, vif->ndev);
1221 spin_lock_bh(&conn->psq_lock);
1222 }
1449 ath6kl_data_tx(skbuff, vif->ndev);
1450 spin_lock_bh(&conn->psq_lock);
1451 }
1452
1453 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1454 while ((skbuff = skb_dequeue(&conn->apsdq))) {
1455 spin_unlock_bh(&conn->psq_lock);
1456 ath6kl_data_tx(skbuff, vif->ndev);
1457 spin_lock_bh(&conn->psq_lock);
1458 }
1223 spin_unlock_bh(&conn->psq_lock);
1459 spin_unlock_bh(&conn->psq_lock);
1460
1461 if (!is_apsdq_empty)
1462 ath6kl_wmi_set_apsd_bfrd_traf(
1463 ar->wmi,
1464 vif->fw_vif_idx,
1465 conn->aid, 0, 0);
1466
1224 /* Clear the PVB for this STA */
1225 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1226 conn->aid, 0);
1227 }
1228 }
1229
1230 /* drop NULL data frames here */
1231 if ((packet->act_len < min_hdr_len) ||

--- 77 unchanged lines hidden (view full) ---

1309 if (skb == NULL) {
1310 /* nothing to deliver up the stack */
1311 return;
1312 }
1313 }
1314
1315 datap = (struct ethhdr *) skb->data;
1316
1467 /* Clear the PVB for this STA */
1468 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1469 conn->aid, 0);
1470 }
1471 }
1472
1473 /* drop NULL data frames here */
1474 if ((packet->act_len < min_hdr_len) ||

--- 77 unchanged lines hidden (view full) ---

1552 if (skb == NULL) {
1553 /* nothing to deliver up the stack */
1554 return;
1555 }
1556 }
1557
1558 datap = (struct ethhdr *) skb->data;
1559
1317 if (is_unicast_ether_addr(datap->h_dest) &&
1318 aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
1319 is_amsdu, skb))
1320 /* aggregation code will handle the skb */
1321 return;
1560 if (is_unicast_ether_addr(datap->h_dest)) {
1561 if (vif->nw_type == AP_NETWORK) {
1562 conn = ath6kl_find_sta(vif, datap->h_source);
1563 if (!conn)
1564 return;
1565 aggr_conn = conn->aggr_conn;
1566 } else
1567 aggr_conn = vif->aggr_cntxt->aggr_conn;
1322
1568
1569 if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1570 is_amsdu, skb)) {
1571 /* aggregation code will handle the skb */
1572 return;
1573 }
1574 }
1575
1323 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1324}
1325
1326static void aggr_timeout(unsigned long arg)
1327{
1328 u8 i, j;
1576 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1577}
1578
1579static void aggr_timeout(unsigned long arg)
1580{
1581 u8 i, j;
1329 struct aggr_info *p_aggr = (struct aggr_info *) arg;
1582 struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
1330 struct rxtid *rxtid;
1331 struct rxtid_stats *stats;
1332
1333 for (i = 0; i < NUM_OF_TIDS; i++) {
1583 struct rxtid *rxtid;
1584 struct rxtid_stats *stats;
1585
1586 for (i = 0; i < NUM_OF_TIDS; i++) {
1334 rxtid = &p_aggr->rx_tid[i];
1335 stats = &p_aggr->stat[i];
1587 rxtid = &aggr_conn->rx_tid[i];
1588 stats = &aggr_conn->stat[i];
1336
1337 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1338 continue;
1339
1340 stats->num_timeouts++;
1341 ath6kl_dbg(ATH6KL_DBG_AGGR,
1342 "aggr timeout (st %d end %d)\n",
1343 rxtid->seq_next,
1344 ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1345 ATH6KL_MAX_SEQ_NO));
1589
1590 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1591 continue;
1592
1593 stats->num_timeouts++;
1594 ath6kl_dbg(ATH6KL_DBG_AGGR,
1595 "aggr timeout (st %d end %d)\n",
1596 rxtid->seq_next,
1597 ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1598 ATH6KL_MAX_SEQ_NO));
1346 aggr_deque_frms(p_aggr, i, 0, 0);
1599 aggr_deque_frms(aggr_conn, i, 0, 0);
1347 }
1348
1600 }
1601
1349 p_aggr->timer_scheduled = false;
1602 aggr_conn->timer_scheduled = false;
1350
1351 for (i = 0; i < NUM_OF_TIDS; i++) {
1603
1604 for (i = 0; i < NUM_OF_TIDS; i++) {
1352 rxtid = &p_aggr->rx_tid[i];
1605 rxtid = &aggr_conn->rx_tid[i];
1353
1354 if (rxtid->aggr && rxtid->hold_q) {
1355 for (j = 0; j < rxtid->hold_q_sz; j++) {
1356 if (rxtid->hold_q[j].skb) {
1606
1607 if (rxtid->aggr && rxtid->hold_q) {
1608 for (j = 0; j < rxtid->hold_q_sz; j++) {
1609 if (rxtid->hold_q[j].skb) {
1357 p_aggr->timer_scheduled = true;
1610 aggr_conn->timer_scheduled = true;
1358 rxtid->timer_mon = true;
1359 rxtid->progress = false;
1360 break;
1361 }
1362 }
1363
1364 if (j >= rxtid->hold_q_sz)
1365 rxtid->timer_mon = false;
1366 }
1367 }
1368
1611 rxtid->timer_mon = true;
1612 rxtid->progress = false;
1613 break;
1614 }
1615 }
1616
1617 if (j >= rxtid->hold_q_sz)
1618 rxtid->timer_mon = false;
1619 }
1620 }
1621
1369 if (p_aggr->timer_scheduled)
1370 mod_timer(&p_aggr->timer,
1622 if (aggr_conn->timer_scheduled)
1623 mod_timer(&aggr_conn->timer,
1371 jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1372}
1373
1624 jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1625}
1626
1374static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1627static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1375{
1376 struct rxtid *rxtid;
1377 struct rxtid_stats *stats;
1378
1628{
1629 struct rxtid *rxtid;
1630 struct rxtid_stats *stats;
1631
1379 if (!p_aggr || tid >= NUM_OF_TIDS)
1632 if (!aggr_conn || tid >= NUM_OF_TIDS)
1380 return;
1381
1633 return;
1634
1382 rxtid = &p_aggr->rx_tid[tid];
1383 stats = &p_aggr->stat[tid];
1635 rxtid = &aggr_conn->rx_tid[tid];
1636 stats = &aggr_conn->stat[tid];
1384
1385 if (rxtid->aggr)
1637
1638 if (rxtid->aggr)
1386 aggr_deque_frms(p_aggr, tid, 0, 0);
1639 aggr_deque_frms(aggr_conn, tid, 0, 0);
1387
1388 rxtid->aggr = false;
1389 rxtid->progress = false;
1390 rxtid->timer_mon = false;
1391 rxtid->win_sz = 0;
1392 rxtid->seq_next = 0;
1393 rxtid->hold_q_sz = 0;
1394
1395 kfree(rxtid->hold_q);
1396 rxtid->hold_q = NULL;
1397
1398 memset(stats, 0, sizeof(struct rxtid_stats));
1399}
1400
1640
1641 rxtid->aggr = false;
1642 rxtid->progress = false;
1643 rxtid->timer_mon = false;
1644 rxtid->win_sz = 0;
1645 rxtid->seq_next = 0;
1646 rxtid->hold_q_sz = 0;
1647
1648 kfree(rxtid->hold_q);
1649 rxtid->hold_q = NULL;
1650
1651 memset(stats, 0, sizeof(struct rxtid_stats));
1652}
1653
1401void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
1654void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1402 u8 win_sz)
1403{
1655 u8 win_sz)
1656{
1404 struct aggr_info *p_aggr = vif->aggr_cntxt;
1657 struct ath6kl_sta *sta;
1658 struct aggr_info_conn *aggr_conn = NULL;
1405 struct rxtid *rxtid;
1406 struct rxtid_stats *stats;
1407 u16 hold_q_size;
1659 struct rxtid *rxtid;
1660 struct rxtid_stats *stats;
1661 u16 hold_q_size;
1662 u8 tid, aid;
1408
1663
1409 if (!p_aggr)
1664 if (vif->nw_type == AP_NETWORK) {
1665 aid = ath6kl_get_aid(tid_mux);
1666 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1667 if (sta)
1668 aggr_conn = sta->aggr_conn;
1669 } else
1670 aggr_conn = vif->aggr_cntxt->aggr_conn;
1671
1672 if (!aggr_conn)
1410 return;
1411
1673 return;
1674
1412 rxtid = &p_aggr->rx_tid[tid];
1413 stats = &p_aggr->stat[tid];
1675 tid = ath6kl_get_tid(tid_mux);
1676 if (tid >= NUM_OF_TIDS)
1677 return;
1414
1678
1679 rxtid = &aggr_conn->rx_tid[tid];
1680 stats = &aggr_conn->stat[tid];
1681
1415 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1416 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1417 __func__, win_sz, tid);
1418
1419 if (rxtid->aggr)
1682 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1683 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1684 __func__, win_sz, tid);
1685
1686 if (rxtid->aggr)
1420 aggr_delete_tid_state(p_aggr, tid);
1687 aggr_delete_tid_state(aggr_conn, tid);
1421
1422 rxtid->seq_next = seq_no;
1423 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1424 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1425 if (!rxtid->hold_q)
1426 return;
1427
1428 rxtid->win_sz = win_sz;
1429 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1430 if (!skb_queue_empty(&rxtid->q))
1431 return;
1432
1433 rxtid->aggr = true;
1434}
1435
1688
1689 rxtid->seq_next = seq_no;
1690 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1691 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1692 if (!rxtid->hold_q)
1693 return;
1694
1695 rxtid->win_sz = win_sz;
1696 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1697 if (!skb_queue_empty(&rxtid->q))
1698 return;
1699
1700 rxtid->aggr = true;
1701}
1702
1436struct aggr_info *aggr_init(struct net_device *dev)
1703void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1704 struct aggr_info_conn *aggr_conn)
1437{
1705{
1438 struct aggr_info *p_aggr = NULL;
1439 struct rxtid *rxtid;
1440 u8 i;
1441
1706 struct rxtid *rxtid;
1707 u8 i;
1708
1442 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1443 if (!p_aggr) {
1444 ath6kl_err("failed to alloc memory for aggr_node\n");
1445 return NULL;
1446 }
1709 aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
1710 aggr_conn->dev = vif->ndev;
1711 init_timer(&aggr_conn->timer);
1712 aggr_conn->timer.function = aggr_timeout;
1713 aggr_conn->timer.data = (unsigned long) aggr_conn;
1714 aggr_conn->aggr_info = aggr_info;
1447
1715
1448 p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
1449 p_aggr->dev = dev;
1450 init_timer(&p_aggr->timer);
1451 p_aggr->timer.function = aggr_timeout;
1452 p_aggr->timer.data = (unsigned long) p_aggr;
1716 aggr_conn->timer_scheduled = false;
1453
1717
1454 p_aggr->timer_scheduled = false;
1455 skb_queue_head_init(&p_aggr->free_q);
1456
1457 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
1458
1459 for (i = 0; i < NUM_OF_TIDS; i++) {
1718 for (i = 0; i < NUM_OF_TIDS; i++) {
1460 rxtid = &p_aggr->rx_tid[i];
1719 rxtid = &aggr_conn->rx_tid[i];
1461 rxtid->aggr = false;
1462 rxtid->progress = false;
1463 rxtid->timer_mon = false;
1464 skb_queue_head_init(&rxtid->q);
1465 spin_lock_init(&rxtid->lock);
1466 }
1467
1720 rxtid->aggr = false;
1721 rxtid->progress = false;
1722 rxtid->timer_mon = false;
1723 skb_queue_head_init(&rxtid->q);
1724 spin_lock_init(&rxtid->lock);
1725 }
1726
1727}
1728
1729struct aggr_info *aggr_init(struct ath6kl_vif *vif)
1730{
1731 struct aggr_info *p_aggr = NULL;
1732
1733 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1734 if (!p_aggr) {
1735 ath6kl_err("failed to alloc memory for aggr_node\n");
1736 return NULL;
1737 }
1738
1739 p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
1740 if (!p_aggr->aggr_conn) {
1741 ath6kl_err("failed to alloc memory for connection specific aggr info\n");
1742 kfree(p_aggr);
1743 return NULL;
1744 }
1745
1746 aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
1747
1748 skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
1749 ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
1750
1468 return p_aggr;
1469}
1470
1751 return p_aggr;
1752}
1753
1471void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
1754void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1472{
1755{
1473 struct aggr_info *p_aggr = vif->aggr_cntxt;
1756 struct ath6kl_sta *sta;
1474 struct rxtid *rxtid;
1757 struct rxtid *rxtid;
1758 struct aggr_info_conn *aggr_conn = NULL;
1759 u8 tid, aid;
1475
1760
1476 if (!p_aggr)
1761 if (vif->nw_type == AP_NETWORK) {
1762 aid = ath6kl_get_aid(tid_mux);
1763 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1764 if (sta)
1765 aggr_conn = sta->aggr_conn;
1766 } else
1767 aggr_conn = vif->aggr_cntxt->aggr_conn;
1768
1769 if (!aggr_conn)
1477 return;
1478
1770 return;
1771
1479 rxtid = &p_aggr->rx_tid[tid];
1772 tid = ath6kl_get_tid(tid_mux);
1773 if (tid >= NUM_OF_TIDS)
1774 return;
1480
1775
1776 rxtid = &aggr_conn->rx_tid[tid];
1777
1481 if (rxtid->aggr)
1778 if (rxtid->aggr)
1482 aggr_delete_tid_state(p_aggr, tid);
1779 aggr_delete_tid_state(aggr_conn, tid);
1483}
1484
1780}
1781
1485void aggr_reset_state(struct aggr_info *aggr_info)
1782void aggr_reset_state(struct aggr_info_conn *aggr_conn)
1486{
1487 u8 tid;
1488
1783{
1784 u8 tid;
1785
1786 if (!aggr_conn)
1787 return;
1788
1789 if (aggr_conn->timer_scheduled) {
1790 del_timer(&aggr_conn->timer);
1791 aggr_conn->timer_scheduled = false;
1792 }
1793
1489 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1794 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1490 aggr_delete_tid_state(aggr_info, tid);
1795 aggr_delete_tid_state(aggr_conn, tid);
1491}
1492
1493/* clean up our amsdu buffer list */
1494void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1495{
1496 struct htc_packet *packet, *tmp_pkt;
1497
1498 spin_lock_bh(&ar->lock);

--- 10 unchanged lines hidden (view full) ---

1509 spin_lock_bh(&ar->lock);
1510 }
1511
1512 spin_unlock_bh(&ar->lock);
1513}
1514
1515void aggr_module_destroy(struct aggr_info *aggr_info)
1516{
1796}
1797
1798/* clean up our amsdu buffer list */
1799void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1800{
1801 struct htc_packet *packet, *tmp_pkt;
1802
1803 spin_lock_bh(&ar->lock);

--- 10 unchanged lines hidden (view full) ---

1814 spin_lock_bh(&ar->lock);
1815 }
1816
1817 spin_unlock_bh(&ar->lock);
1818}
1819
1820void aggr_module_destroy(struct aggr_info *aggr_info)
1821{
1517 struct rxtid *rxtid;
1518 u8 i, k;
1519
1520 if (!aggr_info)
1521 return;
1522
1822 if (!aggr_info)
1823 return;
1824
1523 if (aggr_info->timer_scheduled) {
1524 del_timer(&aggr_info->timer);
1525 aggr_info->timer_scheduled = false;
1526 }
1527
1528 for (i = 0; i < NUM_OF_TIDS; i++) {
1529 rxtid = &aggr_info->rx_tid[i];
1530 if (rxtid->hold_q) {
1531 for (k = 0; k < rxtid->hold_q_sz; k++)
1532 dev_kfree_skb(rxtid->hold_q[k].skb);
1533 kfree(rxtid->hold_q);
1534 }
1535
1536 skb_queue_purge(&rxtid->q);
1537 }
1538
1539 skb_queue_purge(&aggr_info->free_q);
1825 aggr_reset_state(aggr_info->aggr_conn);
1826 skb_queue_purge(&aggr_info->rx_amsdu_freeq);
1827 kfree(aggr_info->aggr_conn);
1540 kfree(aggr_info);
1541}
1828 kfree(aggr_info);
1829}