1 /*
2  * Copyright (c) 2004-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "core.h"
18 #include "debug.h"
19 
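/*
 * In IBSS power-save mode each peer MAC gets its own entry in ar->node_map
 * so that per-node pending-tx counts can be tracked.  This helper looks up
 * (or creates) the mapping for the destination address of an outgoing frame
 * and returns the HTC endpoint to use; multicast traffic always goes out on
 * ENDPOINT_2.  *map_no returns the 1-based node map index (0 for multicast).
 */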
20 static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
21 			       u32 *map_no)
22 {
23 	struct ath6kl *ar = ath6kl_priv(dev);
24 	struct ethhdr *eth_hdr;
25 	u32 i, ep_map = -1;
26 	u8 *datap;
27 
28 	*map_no = 0;
29 	datap = skb->data;
30 	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
31 
32 	if (is_multicast_ether_addr(eth_hdr->h_dest))
33 		return ENDPOINT_2;
34 
35 	for (i = 0; i < ar->node_num; i++) {
36 		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
37 			   ETH_ALEN) == 0) {
38 			*map_no = i + 1;
39 			ar->node_map[i].tx_pend++;
40 			return ar->node_map[i].ep_id;
41 		}
42 
43 		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
44 			ep_map = i;
45 	}
46 
47 	if (ep_map == -1) {
48 		ep_map = ar->node_num;
49 		ar->node_num++;
50 		if (ar->node_num > MAX_NODE_NUM)
51 			return ENDPOINT_UNUSED;
52 	}
53 
54 	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
55 
56 	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
57 		if (!ar->tx_pending[i]) {
58 			ar->node_map[ep_map].ep_id = i;
59 			break;
60 		}
61 
		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
66 		if (i == ENDPOINT_5) {
67 			ar->node_map[ep_map].ep_id = ar->next_ep_id;
68 			ar->next_ep_id++;
69 			if (ar->next_ep_id > ENDPOINT_5)
70 				ar->next_ep_id = ENDPOINT_2;
71 		}
72 	}
73 
74 	*map_no = ep_map + 1;
75 	ar->node_map[ep_map].tx_pend++;
76 
77 	return ar->node_map[ep_map].ep_id;
78 }
79 
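/*
 * AP mode power-save handling for an outgoing frame.  Returns true when the
 * skb has been consumed here (queued on a sleeping STA's psq or on the DTIM
 * multicast queue, or dropped because the destination STA is unknown), in
 * which case the caller must not transmit it.  *more_data is set when more
 * buffered frames remain, so the MoreData bit can be set on the one being
 * sent now.
 */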
80 static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
81 				bool *more_data)
82 {
83 	struct ethhdr *datap = (struct ethhdr *) skb->data;
84 	struct ath6kl_sta *conn = NULL;
85 	bool ps_queued = false, is_psq_empty = false;
	/* TODO: Find out vif */
87 	struct ath6kl_vif *vif = ar->vif;
88 
89 	if (is_multicast_ether_addr(datap->h_dest)) {
90 		u8 ctr = 0;
91 		bool q_mcast = false;
92 
93 		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
94 			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
95 				q_mcast = true;
96 				break;
97 			}
98 		}
99 
100 		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
105 			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
106 				bool is_mcastq_empty = false;
107 
108 				spin_lock_bh(&ar->mcastpsq_lock);
109 				is_mcastq_empty =
110 					skb_queue_empty(&ar->mcastpsq);
111 				skb_queue_tail(&ar->mcastpsq, skb);
112 				spin_unlock_bh(&ar->mcastpsq_lock);
113 
				/*
				 * If this is the first Mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
119 				if (is_mcastq_empty)
120 					ath6kl_wmi_set_pvb_cmd(ar->wmi,
121 							       MCAST_AID, 1);
122 
123 				ps_queued = true;
124 			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
129 				spin_lock_bh(&ar->mcastpsq_lock);
130 				if (!skb_queue_empty(&ar->mcastpsq))
131 					*more_data = true;
132 				spin_unlock_bh(&ar->mcastpsq_lock);
133 			}
134 		}
135 	} else {
136 		conn = ath6kl_find_sta(ar, datap->h_dest);
137 		if (!conn) {
138 			dev_kfree_skb(skb);
139 
140 			/* Inform the caller that the skb is consumed */
141 			return true;
142 		}
143 
144 		if (conn->sta_flags & STA_PS_SLEEP) {
145 			if (!(conn->sta_flags & STA_PS_POLLED)) {
146 				/* Queue the frames if the STA is sleeping */
147 				spin_lock_bh(&conn->psq_lock);
148 				is_psq_empty = skb_queue_empty(&conn->psq);
149 				skb_queue_tail(&conn->psq, skb);
150 				spin_unlock_bh(&conn->psq_lock);
151 
152 				/*
153 				 * If this is the first pkt getting queued
154 				 * for this STA, update the PVB for this
155 				 * STA.
156 				 */
157 				if (is_psq_empty)
158 					ath6kl_wmi_set_pvb_cmd(ar->wmi,
159 							       conn->aid, 1);
160 
161 				ps_queued = true;
162 			} else {
163 				/*
164 				 * This tx is because of a PsPoll.
165 				 * Determine if MoreData bit has to be set.
166 				 */
167 				spin_lock_bh(&conn->psq_lock);
168 				if (!skb_queue_empty(&conn->psq))
169 					*more_data = true;
170 				spin_unlock_bh(&conn->psq_lock);
171 			}
172 		}
173 	}
174 
175 	return ps_queued;
176 }
177 
178 /* Tx functions */
179 
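/*
 * Send a control buffer (typically a WMI command) on the given HTC endpoint.
 * Packets are tagged ATH6KL_CONTROL_PKT_TAG so the tx-queue-full handling
 * below never drops them, but if the WMI control endpoint itself has been
 * flagged full the buffer is dropped here instead of consuming a cookie.
 * Illustrative call only (the real call sites live in the WMI layer):
 *	ath6kl_control_tx(ar, skb, ar->ctrl_ep);
 */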
180 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
181 		      enum htc_endpoint_id eid)
182 {
183 	struct ath6kl *ar = devt;
184 	int status = 0;
185 	struct ath6kl_cookie *cookie = NULL;
186 
187 	spin_lock_bh(&ar->lock);
188 
189 	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
191 		   skb, skb->len, eid);
192 
193 	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
194 		/*
195 		 * Control endpoint is full, don't allocate resources, we
196 		 * are just going to drop this packet.
197 		 */
198 		cookie = NULL;
199 		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
200 			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}
203 
204 	if (cookie == NULL) {
205 		spin_unlock_bh(&ar->lock);
206 		status = -ENOMEM;
207 		goto fail_ctrl_tx;
208 	}
209 
210 	ar->tx_pending[eid]++;
211 
212 	if (eid != ar->ctrl_ep)
213 		ar->total_tx_data_pend++;
214 
215 	spin_unlock_bh(&ar->lock);
216 
217 	cookie->skb = skb;
218 	cookie->map_no = 0;
219 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
220 			 eid, ATH6KL_CONTROL_PKT_TAG);
221 
222 	/*
223 	 * This interface is asynchronous, if there is an error, cleanup
224 	 * will happen in the TX completion callback.
225 	 */
226 	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
227 
228 	return 0;
229 
230 fail_ctrl_tx:
231 	dev_kfree_skb(skb);
232 	return status;
233 }
234 
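/*
 * Network-stack transmit path.  The frame is converted from DIX to 802.3, a
 * WMI data header is prepended, an access class/endpoint is chosen (with the
 * special IBSS power-save mapping when enabled) and the packet is handed to
 * HTC.  Always returns 0: on failure the skb is freed and the drop/abort
 * counters are updated instead of reporting an error to the stack.
 */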
235 int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
236 {
237 	struct ath6kl *ar = ath6kl_priv(dev);
238 	struct ath6kl_cookie *cookie = NULL;
239 	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
240 	struct ath6kl_vif *vif = netdev_priv(dev);
241 	u32 map_no = 0;
242 	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
244 	bool chk_adhoc_ps_mapping = false, more_data = false;
245 	int ret;
246 
247 	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
248 		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
249 		   skb, skb->data, skb->len);
250 
251 	/* If target is not associated */
252 	if (!test_bit(CONNECTED, &vif->flags)) {
253 		dev_kfree_skb(skb);
254 		return 0;
255 	}
256 
257 	if (!test_bit(WMI_READY, &ar->flag))
258 		goto fail_tx;
259 
260 	/* AP mode Power saving processing */
261 	if (vif->nw_type == AP_NETWORK) {
262 		if (ath6kl_powersave_ap(ar, skb, &more_data))
263 			return 0;
264 	}
265 
266 	if (test_bit(WMI_ENABLED, &ar->flag)) {
267 		if (skb_headroom(skb) < dev->needed_headroom) {
268 			WARN_ON(1);
269 			goto fail_tx;
270 		}
271 
272 		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
273 			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
274 			goto fail_tx;
275 		}
276 
277 		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
278 					    more_data, 0, 0, NULL)) {
279 			ath6kl_err("wmi_data_hdr_add failed\n");
280 			goto fail_tx;
281 		}
282 
		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) {
			chk_adhoc_ps_mapping = true;
		} else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}
295 
296 	spin_lock_bh(&ar->lock);
297 
298 	if (chk_adhoc_ps_mapping)
299 		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
300 	else
301 		eid = ar->ac2ep_map[ac];
302 
303 	if (eid == 0 || eid == ENDPOINT_UNUSED) {
304 		ath6kl_err("eid %d is not mapped!\n", eid);
305 		spin_unlock_bh(&ar->lock);
306 		goto fail_tx;
307 	}
308 
309 	/* allocate resource for this packet */
310 	cookie = ath6kl_alloc_cookie(ar);
311 
312 	if (!cookie) {
313 		spin_unlock_bh(&ar->lock);
314 		goto fail_tx;
315 	}
316 
317 	/* update counts while the lock is held */
318 	ar->tx_pending[eid]++;
319 	ar->total_tx_data_pend++;
320 
321 	spin_unlock_bh(&ar->lock);
322 
323 	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
324 	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
332 		struct sk_buff *nskb;
333 
334 		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
335 		if (nskb == NULL)
336 			goto fail_tx;
337 		kfree_skb(skb);
338 		skb = nskb;
339 	}
340 
341 	cookie->skb = skb;
342 	cookie->map_no = map_no;
343 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
344 			 eid, htc_tag);
345 
346 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
347 			skb->data, skb->len);
348 
349 	/*
350 	 * HTC interface is asynchronous, if this fails, cleanup will
351 	 * happen in the ath6kl_tx_complete callback.
352 	 */
353 	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
354 
355 	return 0;
356 
357 fail_tx:
358 	dev_kfree_skb(skb);
359 
360 	ar->net_stats.tx_dropped++;
361 	ar->net_stats.tx_aborted_errors++;
362 
363 	return 0;
364 }
365 
366 /* indicate tx activity or inactivity on a WMI stream */
367 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
368 {
369 	struct ath6kl *ar = devt;
370 	enum htc_endpoint_id eid;
371 	int i;
372 
373 	eid = ar->ac2ep_map[traffic_class];
374 
375 	if (!test_bit(WMI_ENABLED, &ar->flag))
376 		goto notify_htc;
377 
378 	spin_lock_bh(&ar->lock);
379 
380 	ar->ac_stream_active[traffic_class] = active;
381 
382 	if (active) {
383 		/*
384 		 * Keep track of the active stream with the highest
385 		 * priority.
386 		 */
387 		if (ar->ac_stream_pri_map[traffic_class] >
388 		    ar->hiac_stream_active_pri)
389 			/* set the new highest active priority */
390 			ar->hiac_stream_active_pri =
391 					ar->ac_stream_pri_map[traffic_class];
392 
393 	} else {
		/*
		 * We may have to search for the next highest priority
		 * stream that is still active.
		 */
398 		if (ar->hiac_stream_active_pri ==
399 			ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the next highest active
			 * priority stream.
			 */
405 			ar->hiac_stream_active_pri = 0;
406 
407 			for (i = 0; i < WMM_NUM_AC; i++) {
408 				if (ar->ac_stream_active[i] &&
409 				    (ar->ac_stream_pri_map[i] >
410 				     ar->hiac_stream_active_pri))
411 					/*
412 					 * Set the new highest active
413 					 * priority.
414 					 */
415 					ar->hiac_stream_active_pri =
416 						ar->ac_stream_pri_map[i];
417 			}
418 		}
419 	}
420 
421 	spin_unlock_bh(&ar->lock);
422 
423 notify_htc:
424 	/* notify HTC, this may cause credit distribution changes */
425 	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
426 }
427 
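/*
 * HTC "send queue full" callback.  Control packets and the WMI control
 * endpoint are always kept; for data endpoints, packets belonging to lower
 * priority streams may be dropped once the cookie pool runs low so the
 * highest priority active stream keeps making progress, and the net queue
 * is stopped to throttle the stack.
 */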
428 enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
429 					       struct htc_packet *packet)
430 {
431 	struct ath6kl *ar = target->dev->ar;
	/* TODO: Find out vif properly */
433 	struct ath6kl_vif *vif = ar->vif;
434 	enum htc_endpoint_id endpoint = packet->endpoint;
435 
436 	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant: the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
443 		spin_lock_bh(&ar->lock);
444 		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
445 		spin_unlock_bh(&ar->lock);
446 		ath6kl_err("wmi ctrl ep is full\n");
447 		return HTC_SEND_FULL_KEEP;
448 	}
449 
450 	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
451 		return HTC_SEND_FULL_KEEP;
452 
453 	if (vif->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue; however,
		 * we should stop the network queues.
		 */
459 		goto stop_net_queues;
460 
461 	/*
462 	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
463 	 * the highest active stream.
464 	 */
465 	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
466 	    ar->hiac_stream_active_pri &&
467 	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
468 		/*
469 		 * Give preference to the highest priority stream by
470 		 * dropping the packets which overflowed.
471 		 */
472 		return HTC_SEND_FULL_DROP;
473 
474 stop_net_queues:
475 	spin_lock_bh(&ar->lock);
476 	set_bit(NETQ_STOPPED, &vif->flags);
477 	spin_unlock_bh(&ar->lock);
478 	netif_stop_queue(ar->net_dev);
479 
480 	return HTC_SEND_FULL_KEEP;
481 }
482 
483 /* TODO this needs to be looked at */
484 static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
485 				     enum htc_endpoint_id eid, u32 map_no)
486 {
	/* TODO: Find out vif */
488 	struct ath6kl_vif *vif = ar->vif;
489 	u32 i;
490 
491 	if (vif->nw_type != ADHOC_NETWORK)
492 		return;
493 
494 	if (!ar->ibss_ps_enable)
495 		return;
496 
497 	if (eid == ar->ctrl_ep)
498 		return;
499 
500 	if (map_no == 0)
501 		return;
502 
503 	map_no--;
504 	ar->node_map[map_no].tx_pend--;
505 
506 	if (ar->node_map[map_no].tx_pend)
507 		return;
508 
509 	if (map_no != (ar->node_num - 1))
510 		return;
511 
512 	for (i = ar->node_num; i > 0; i--) {
513 		if (ar->node_map[i - 1].tx_pend)
514 			break;
515 
516 		memset(&ar->node_map[i - 1], 0,
517 		       sizeof(struct ath6kl_node_mapping));
518 		ar->node_num--;
519 	}
520 }
521 
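/*
 * HTC tx completion callback.  Walks the list of completed HTC packets,
 * releases their cookies, updates the pending counters and net stats, and
 * frees the skbs outside the driver lock.  The netif queue is woken again
 * unless the completions were due to a flush.
 */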
522 void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
523 {
524 	struct ath6kl *ar = context;
525 	struct sk_buff_head skb_queue;
526 	struct htc_packet *packet;
527 	struct sk_buff *skb;
528 	struct ath6kl_cookie *ath6kl_cookie;
529 	u32 map_no = 0;
530 	int status;
531 	enum htc_endpoint_id eid;
532 	bool wake_event = false;
533 	bool flushing = false;
	/* TODO: Find out vif */
535 	struct ath6kl_vif *vif = ar->vif;
536 
537 	skb_queue_head_init(&skb_queue);
538 
539 	/* lock the driver as we update internal state */
540 	spin_lock_bh(&ar->lock);
541 
542 	/* reap completed packets */
543 	while (!list_empty(packet_queue)) {
544 
545 		packet = list_first_entry(packet_queue, struct htc_packet,
546 					  list);
547 		list_del(&packet->list);
548 
549 		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
550 		if (!ath6kl_cookie)
551 			goto fatal;
552 
553 		status = packet->status;
554 		skb = ath6kl_cookie->skb;
555 		eid = packet->endpoint;
556 		map_no = ath6kl_cookie->map_no;
557 
558 		if (!skb || !skb->data)
559 			goto fatal;
560 
561 		packet->buf = skb->data;
562 
563 		__skb_queue_tail(&skb_queue, skb);
564 
565 		if (!status && (packet->act_len != skb->len))
566 			goto fatal;
567 
568 		ar->tx_pending[eid]--;
569 
570 		if (eid != ar->ctrl_ep)
571 			ar->total_tx_data_pend--;
572 
573 		if (eid == ar->ctrl_ep) {
574 			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
575 				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
576 
577 			if (ar->tx_pending[eid] == 0)
578 				wake_event = true;
579 		}
580 
581 		if (status) {
582 			if (status == -ECANCELED)
583 				/* a packet was flushed  */
584 				flushing = true;
585 
586 			ar->net_stats.tx_errors++;
587 
588 			if (status != -ENOSPC)
589 				ath6kl_err("tx error, status: 0x%x\n", status);
590 			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
591 				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
592 				   __func__, skb, packet->buf, packet->act_len,
593 				   eid, "error!");
594 		} else {
595 			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
596 				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
597 				   __func__, skb, packet->buf, packet->act_len,
598 				   eid, "OK");
599 
600 			flushing = false;
601 			ar->net_stats.tx_packets++;
602 			ar->net_stats.tx_bytes += skb->len;
603 		}
604 
605 		ath6kl_tx_clear_node_map(ar, eid, map_no);
606 
607 		ath6kl_free_cookie(ar, ath6kl_cookie);
608 
609 		if (test_bit(NETQ_STOPPED, &vif->flags))
610 			clear_bit(NETQ_STOPPED, &vif->flags);
611 	}
612 
613 	spin_unlock_bh(&ar->lock);
614 
615 	__skb_queue_purge(&skb_queue);
616 
617 	if (test_bit(CONNECTED, &vif->flags)) {
618 		if (!flushing)
619 			netif_wake_queue(ar->net_dev);
620 	}
621 
622 	if (wake_event)
623 		wake_up(&ar->event_wq);
624 
625 	return;
626 
627 fatal:
628 	WARN_ON(1);
629 	spin_unlock_bh(&ar->lock);
630 	return;
631 }
632 
633 void ath6kl_tx_data_cleanup(struct ath6kl *ar)
634 {
635 	int i;
636 
637 	/* flush all the data (non-control) streams */
638 	for (i = 0; i < WMM_NUM_AC; i++)
639 		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
640 				      ATH6KL_DATA_PKT_TAG);
641 }
642 
643 /* Rx functions */
644 
645 static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
646 					      struct sk_buff *skb)
647 {
648 	if (!skb)
649 		return;
650 
651 	skb->dev = dev;
652 
653 	if (!(skb->dev->flags & IFF_UP)) {
654 		dev_kfree_skb(skb);
655 		return;
656 	}
657 
658 	skb->protocol = eth_type_trans(skb, skb->dev);
659 
660 	netif_rx_ni(skb);
661 }
662 
663 static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
664 {
665 	struct sk_buff *skb;
666 
667 	while (num) {
668 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
669 		if (!skb) {
670 			ath6kl_err("netbuf allocation failed\n");
671 			return;
672 		}
673 		skb_queue_tail(q, skb);
674 		num--;
675 	}
676 }
677 
678 static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
679 {
680 	struct sk_buff *skb = NULL;
681 
682 	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
683 		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
684 
685 	skb = skb_dequeue(&p_aggr->free_q);
686 
687 	return skb;
688 }
689 
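/*
 * Top up HTC's receive buffer pool for an endpoint.  Each network buffer
 * doubles as an htc_packet: the bookkeeping struct is overlaid at skb->head
 * and skb->data is nudged to a 4-byte boundary before the buffer is handed
 * back to HTC.
 */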
690 void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
691 {
692 	struct ath6kl *ar = target->dev->ar;
693 	struct sk_buff *skb;
694 	int rx_buf;
695 	int n_buf_refill;
696 	struct htc_packet *packet;
697 	struct list_head queue;
698 
699 	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
700 			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
701 
702 	if (n_buf_refill <= 0)
703 		return;
704 
705 	INIT_LIST_HEAD(&queue);
706 
707 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
708 		   "%s: providing htc with %d buffers at eid=%d\n",
709 		   __func__, n_buf_refill, endpoint);
710 
711 	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
712 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
713 		if (!skb)
714 			break;
715 
716 		packet = (struct htc_packet *) skb->head;
717 		if (!IS_ALIGNED((unsigned long) skb->data, 4))
718 			skb->data = PTR_ALIGN(skb->data - 4, 4);
719 		set_htc_rxpkt_info(packet, skb, skb->data,
720 				ATH6KL_BUFFER_SIZE, endpoint);
721 		list_add_tail(&packet->list, &queue);
722 	}
723 
724 	if (!list_empty(&queue))
725 		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
726 }
727 
728 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
729 {
730 	struct htc_packet *packet;
731 	struct sk_buff *skb;
732 
733 	while (count) {
734 		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
735 		if (!skb)
736 			return;
737 
738 		packet = (struct htc_packet *) skb->head;
739 		if (!IS_ALIGNED((unsigned long) skb->data, 4))
740 			skb->data = PTR_ALIGN(skb->data - 4, 4);
741 		set_htc_rxpkt_info(packet, skb, skb->data,
742 				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
743 		spin_lock_bh(&ar->lock);
744 		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
745 		spin_unlock_bh(&ar->lock);
746 		count--;
747 	}
748 }
749 
750 /*
751  * Callback to allocate a receive buffer for a pending packet. We use a
752  * pre-allocated list of buffers of maximum AMSDU size (4K).
753  */
754 struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
755 					    enum htc_endpoint_id endpoint,
756 					    int len)
757 {
758 	struct ath6kl *ar = target->dev->ar;
759 	struct htc_packet *packet = NULL;
760 	struct list_head *pkt_pos;
761 	int refill_cnt = 0, depth = 0;
762 
763 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
764 		   __func__, endpoint, len);
765 
766 	if ((len <= ATH6KL_BUFFER_SIZE) ||
767 	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
768 		return NULL;
769 
770 	spin_lock_bh(&ar->lock);
771 
772 	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
773 		spin_unlock_bh(&ar->lock);
774 		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
775 		goto refill_buf;
776 	}
777 
778 	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
779 				  struct htc_packet, list);
780 	list_del(&packet->list);
781 	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
782 		depth++;
783 
784 	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
785 	spin_unlock_bh(&ar->lock);
786 
787 	/* set actual endpoint ID */
788 	packet->endpoint = endpoint;
789 
790 refill_buf:
791 	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
792 		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
793 
794 	return packet;
795 }
796 
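/*
 * Split a received A-MSDU into its individual 802.3 subframes.  Each
 * subframe is copied into a fresh buffer from the aggregation free queue,
 * converted back to DIX format and appended to the TID's reorder queue;
 * the original aggregate skb is freed at the end.
 */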
797 static void aggr_slice_amsdu(struct aggr_info *p_aggr,
798 			     struct rxtid *rxtid, struct sk_buff *skb)
799 {
800 	struct sk_buff *new_skb;
801 	struct ethhdr *hdr;
802 	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
803 	u8 *framep;
804 
805 	mac_hdr_len = sizeof(struct ethhdr);
806 	framep = skb->data + mac_hdr_len;
807 	amsdu_len = skb->len - mac_hdr_len;
808 
809 	while (amsdu_len > mac_hdr_len) {
810 		hdr = (struct ethhdr *) framep;
811 		payload_8023_len = ntohs(hdr->h_proto);
812 
813 		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
814 		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
815 			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
816 				   payload_8023_len);
817 			break;
818 		}
819 
820 		frame_8023_len = payload_8023_len + mac_hdr_len;
821 		new_skb = aggr_get_free_skb(p_aggr);
822 		if (!new_skb) {
823 			ath6kl_err("no buffer available\n");
824 			break;
825 		}
826 
827 		memcpy(new_skb->data, framep, frame_8023_len);
828 		skb_put(new_skb, frame_8023_len);
829 		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
830 			ath6kl_err("dot3_2_dix error\n");
831 			dev_kfree_skb(new_skb);
832 			break;
833 		}
834 
835 		skb_queue_tail(&rxtid->q, new_skb);
836 
		/* Is this the last subframe within this aggregate? */
838 		if ((amsdu_len - frame_8023_len) == 0)
839 			break;
840 
841 		/* Add the length of A-MSDU subframe padding bytes -
842 		 * Round to nearest word.
843 		 */
844 		frame_8023_len = ALIGN(frame_8023_len, 4);
845 
846 		framep += frame_8023_len;
847 		amsdu_len -= frame_8023_len;
848 	}
849 
850 	dev_kfree_skb(skb);
851 }
852 
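/*
 * Deliver frames from the reorder hold queue to the stack, starting at the
 * current seq_next and stopping either at seq_no (when non-zero, e.g. on a
 * BAR) or at the end of the window.  With order == 1, delivery stops at the
 * first hole instead of counting it, which is how a freshly completed
 * in-order run is flushed from aggr_process_recv_frm().
 */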
853 static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
854 			    u16 seq_no, u8 order)
855 {
856 	struct sk_buff *skb;
857 	struct rxtid *rxtid;
858 	struct skb_hold_q *node;
859 	u16 idx, idx_end, seq_end;
860 	struct rxtid_stats *stats;
861 
862 	if (!p_aggr)
863 		return;
864 
865 	rxtid = &p_aggr->rx_tid[tid];
866 	stats = &p_aggr->stat[tid];
867 
868 	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
869 
	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the same
	 * index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition (for 4095, TID_WINDOW_SZ = 2 x win_sz; a win_sz of
	 * 2, 4, 8 or 16 works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
883 	seq_end = seq_no ? seq_no : rxtid->seq_next;
884 	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
885 
886 	spin_lock_bh(&rxtid->lock);
887 
888 	do {
889 		node = &rxtid->hold_q[idx];
890 		if ((order == 1) && (!node->skb))
891 			break;
892 
893 		if (node->skb) {
894 			if (node->is_amsdu)
895 				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
896 			else
897 				skb_queue_tail(&rxtid->q, node->skb);
898 			node->skb = NULL;
899 		} else
900 			stats->num_hole++;
901 
902 		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
903 		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
904 	} while (idx != idx_end);
905 
906 	spin_unlock_bh(&rxtid->lock);
907 
908 	stats->num_delivered += skb_queue_len(&rxtid->q);
909 
910 	while ((skb = skb_dequeue(&rxtid->q)))
911 		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
912 }
913 
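/*
 * Receive-side block-ack reordering.  A frame is either delivered straight
 * away (no aggregation active on this TID), used to slide the window forward
 * when it falls outside the current one, or parked in the hold queue slot
 * derived from its sequence number until the in-order run containing it can
 * be released.  Returns true when this function has taken ownership of the
 * skb; otherwise the caller delivers it to the stack.
 */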
914 static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
915 				  u16 seq_no,
916 				  bool is_amsdu, struct sk_buff *frame)
917 {
918 	struct rxtid *rxtid;
919 	struct rxtid_stats *stats;
920 	struct sk_buff *skb;
921 	struct skb_hold_q *node;
922 	u16 idx, st, cur, end;
923 	bool is_queued = false;
924 	u16 extended_end;
925 
926 	rxtid = &agg_info->rx_tid[tid];
927 	stats = &agg_info->stat[tid];
928 
929 	stats->num_into_aggr++;
930 
931 	if (!rxtid->aggr) {
932 		if (is_amsdu) {
933 			aggr_slice_amsdu(agg_info, rxtid, frame);
934 			is_queued = true;
935 			stats->num_amsdu++;
936 			while ((skb = skb_dequeue(&rxtid->q)))
937 				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
938 								  skb);
939 		}
940 		return is_queued;
941 	}
942 
943 	/* Check the incoming sequence no, if it's in the window */
944 	st = rxtid->seq_next;
945 	cur = seq_no;
946 	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
947 
948 	if (((st < end) && (cur < st || cur > end)) ||
949 	    ((st > end) && (cur > end) && (cur < st))) {
950 		extended_end = (end + rxtid->hold_q_sz - 1) &
951 			ATH6KL_MAX_SEQ_NO;
952 
953 		if (((end < extended_end) &&
954 		     (cur < end || cur > extended_end)) ||
955 		    ((end > extended_end) && (cur > extended_end) &&
956 		     (cur < end))) {
957 			aggr_deque_frms(agg_info, tid, 0, 0);
958 			if (cur >= rxtid->hold_q_sz - 1)
959 				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
960 			else
961 				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
962 						  (rxtid->hold_q_sz - 2 - cur);
963 		} else {
964 			/*
965 			 * Dequeue only those frames that are outside the
966 			 * new shifted window.
967 			 */
968 			if (cur >= rxtid->hold_q_sz - 1)
969 				st = cur - (rxtid->hold_q_sz - 1);
970 			else
971 				st = ATH6KL_MAX_SEQ_NO -
972 					(rxtid->hold_q_sz - 2 - cur);
973 
974 			aggr_deque_frms(agg_info, tid, st, 0);
975 		}
976 
977 		stats->num_oow++;
978 	}
979 
980 	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
981 
982 	node = &rxtid->hold_q[idx];
983 
984 	spin_lock_bh(&rxtid->lock);
985 
	/*
	 * Is the current frame a duplicate or something beyond our window
	 * (hold_q -> which is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window.
	 *  2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *      -> Drop the frame. Perhaps the sender did not get our ACK.
	 *         This is taken care of above.
	 *  2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *      -> Taken care of above, by moving the window forward.
	 */
998 	dev_kfree_skb(node->skb);
999 	stats->num_dups++;
1000 
1001 	node->skb = frame;
1002 	is_queued = true;
1003 	node->is_amsdu = is_amsdu;
1004 	node->seq_no = seq_no;
1005 
1006 	if (node->is_amsdu)
1007 		stats->num_amsdu++;
1008 	else
1009 		stats->num_mpdu++;
1010 
1011 	spin_unlock_bh(&rxtid->lock);
1012 
1013 	aggr_deque_frms(agg_info, tid, 0, 1);
1014 
1015 	if (agg_info->timer_scheduled)
1016 		rxtid->progress = true;
1017 	else
1018 		for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1019 			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer, so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
1026 				agg_info->timer_scheduled = true;
1027 				mod_timer(&agg_info->timer,
1028 					  (jiffies +
1029 					   HZ * (AGGR_RX_TIMEOUT) / 1000));
1030 				rxtid->progress = false;
1031 				rxtid->timer_mon = true;
1032 				break;
1033 			}
1034 		}
1035 
1036 	return is_queued;
1037 }
1038 
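/*
 * HTC receive completion for control and data endpoints.  Control frames go
 * to WMI; data frames have their WMI/meta headers stripped, AP-mode
 * power-save state and intra-BSS forwarding are handled, and unicast frames
 * are pushed through the aggregation reorder logic before being delivered
 * to the network stack.
 */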
1039 void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1040 {
1041 	struct ath6kl *ar = target->dev->ar;
1042 	struct sk_buff *skb = packet->pkt_cntxt;
1043 	struct wmi_rx_meta_v2 *meta;
1044 	struct wmi_data_hdr *dhdr;
1045 	int min_hdr_len;
1046 	u8 meta_type, dot11_hdr = 0;
1047 	int status = packet->status;
1048 	enum htc_endpoint_id ept = packet->endpoint;
1049 	bool is_amsdu, prev_ps, ps_state = false;
1050 	struct ath6kl_sta *conn = NULL;
1051 	struct sk_buff *skb1 = NULL;
1052 	struct ethhdr *datap = NULL;
	/* TODO: Find out vif */
1054 	struct ath6kl_vif *vif = ar->vif;
1055 	u16 seq_no, offset;
1056 	u8 tid;
1057 
1058 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1059 		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1060 		   __func__, ar, ept, skb, packet->buf,
1061 		   packet->act_len, status);
1062 
1063 	if (status || !(skb->data + HTC_HDR_LENGTH)) {
1064 		ar->net_stats.rx_errors++;
1065 		dev_kfree_skb(skb);
1066 		return;
1067 	}
1068 
1069 	/*
1070 	 * Take lock to protect buffer counts and adaptive power throughput
1071 	 * state.
1072 	 */
1073 	spin_lock_bh(&ar->lock);
1074 
1075 	ar->net_stats.rx_packets++;
1076 	ar->net_stats.rx_bytes += packet->act_len;
1077 
1078 	spin_unlock_bh(&ar->lock);
1079 
1080 	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1081 	skb_pull(skb, HTC_HDR_LENGTH);
1082 
1083 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1084 			skb->data, skb->len);
1085 
1086 	skb->dev = ar->net_dev;
1087 
1088 	if (!test_bit(WMI_ENABLED, &ar->flag)) {
1089 		if (EPPING_ALIGNMENT_PAD > 0)
1090 			skb_pull(skb, EPPING_ALIGNMENT_PAD);
1091 		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1092 		return;
1093 	}
1094 
1095 	if (ept == ar->ctrl_ep) {
1096 		ath6kl_wmi_control_rx(ar->wmi, skb);
1097 		return;
1098 	}
1099 
1100 	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1101 		      sizeof(struct ath6kl_llc_snap_hdr);
1102 
1103 	dhdr = (struct wmi_data_hdr *) skb->data;
1104 
1105 	/*
1106 	 * In the case of AP mode we may receive NULL data frames
1107 	 * that do not have LLC hdr. They are 16 bytes in size.
1108 	 * Allow these frames in the AP mode.
1109 	 */
1110 	if (vif->nw_type != AP_NETWORK &&
1111 	    ((packet->act_len < min_hdr_len) ||
1112 	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1113 		ath6kl_info("frame len is too short or too long\n");
1114 		ar->net_stats.rx_errors++;
1115 		ar->net_stats.rx_length_errors++;
1116 		dev_kfree_skb(skb);
1117 		return;
1118 	}
1119 
1120 	/* Get the Power save state of the STA */
1121 	if (vif->nw_type == AP_NETWORK) {
1122 		meta_type = wmi_data_hdr_get_meta(dhdr);
1123 
1124 		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1125 			      WMI_DATA_HDR_PS_MASK);
1126 
1127 		offset = sizeof(struct wmi_data_hdr);
1128 
1129 		switch (meta_type) {
1130 		case 0:
1131 			break;
1132 		case WMI_META_VERSION_1:
1133 			offset += sizeof(struct wmi_rx_meta_v1);
1134 			break;
1135 		case WMI_META_VERSION_2:
1136 			offset += sizeof(struct wmi_rx_meta_v2);
1137 			break;
1138 		default:
1139 			break;
1140 		}
1141 
1142 		datap = (struct ethhdr *) (skb->data + offset);
1143 		conn = ath6kl_find_sta(ar, datap->h_source);
1144 
1145 		if (!conn) {
1146 			dev_kfree_skb(skb);
1147 			return;
1148 		}
1149 
		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
1159 		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1160 
1161 		if (ps_state)
1162 			conn->sta_flags |= STA_PS_SLEEP;
1163 		else
1164 			conn->sta_flags &= ~STA_PS_SLEEP;
1165 
1166 		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1167 			if (!(conn->sta_flags & STA_PS_SLEEP)) {
1168 				struct sk_buff *skbuff = NULL;
1169 
1170 				spin_lock_bh(&conn->psq_lock);
1171 				while ((skbuff = skb_dequeue(&conn->psq))
1172 				       != NULL) {
1173 					spin_unlock_bh(&conn->psq_lock);
1174 					ath6kl_data_tx(skbuff, ar->net_dev);
1175 					spin_lock_bh(&conn->psq_lock);
1176 				}
1177 				spin_unlock_bh(&conn->psq_lock);
1178 				/* Clear the PVB for this STA */
1179 				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
1180 			}
1181 		}
1182 
1183 		/* drop NULL data frames here */
1184 		if ((packet->act_len < min_hdr_len) ||
1185 		    (packet->act_len >
1186 		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1187 			dev_kfree_skb(skb);
1188 			return;
1189 		}
1190 	}
1191 
1192 	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1193 	tid = wmi_data_hdr_get_up(dhdr);
1194 	seq_no = wmi_data_hdr_get_seqno(dhdr);
1195 	meta_type = wmi_data_hdr_get_meta(dhdr);
1196 	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1197 	skb_pull(skb, sizeof(struct wmi_data_hdr));
1198 
1199 	switch (meta_type) {
1200 	case WMI_META_VERSION_1:
1201 		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1202 		break;
1203 	case WMI_META_VERSION_2:
1204 		meta = (struct wmi_rx_meta_v2 *) skb->data;
1205 		if (meta->csum_flags & 0x1) {
1206 			skb->ip_summed = CHECKSUM_COMPLETE;
1207 			skb->csum = (__force __wsum) meta->csum;
1208 		}
1209 		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1210 		break;
1211 	default:
1212 		break;
1213 	}
1214 
1215 	if (dot11_hdr)
1216 		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1217 	else if (!is_amsdu)
1218 		status = ath6kl_wmi_dot3_2_dix(skb);
1219 
1220 	if (status) {
1221 		/*
1222 		 * Drop frames that could not be processed (lack of
1223 		 * memory, etc.)
1224 		 */
1225 		dev_kfree_skb(skb);
1226 		return;
1227 	}
1228 
1229 	if (!(ar->net_dev->flags & IFF_UP)) {
1230 		dev_kfree_skb(skb);
1231 		return;
1232 	}
1233 
1234 	if (vif->nw_type == AP_NETWORK) {
1235 		datap = (struct ethhdr *) skb->data;
1236 		if (is_multicast_ether_addr(datap->h_dest))
1237 			/*
1238 			 * Bcast/Mcast frames should be sent to the
1239 			 * OS stack as well as on the air.
1240 			 */
1241 			skb1 = skb_copy(skb, GFP_ATOMIC);
1242 		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the MAC address. If found, send the
			 * frame to it on the air; else send the
			 * frame up the stack.
			 */
1249 			conn = ath6kl_find_sta(ar, datap->h_dest);
1250 
1251 			if (conn && ar->intra_bss) {
1252 				skb1 = skb;
1253 				skb = NULL;
1254 			} else if (conn && !ar->intra_bss) {
1255 				dev_kfree_skb(skb);
1256 				skb = NULL;
1257 			}
1258 		}
1259 		if (skb1)
1260 			ath6kl_data_tx(skb1, ar->net_dev);
1261 
1262 		if (skb == NULL) {
1263 			/* nothing to deliver up the stack */
1264 			return;
1265 		}
1266 	}
1267 
1268 	datap = (struct ethhdr *) skb->data;
1269 
1270 	if (is_unicast_ether_addr(datap->h_dest) &&
1271 	    aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
1272 				  is_amsdu, skb))
1273 		/* aggregation code will handle the skb */
1274 		return;
1275 
1276 	ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1277 }
1278 
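/*
 * Reorder-release timer.  When a TID has shown no progress since the timer
 * was armed, any frames still parked in its hold queue are flushed to the
 * stack (accepting the holes) so that a lost frame cannot stall delivery
 * indefinitely; the timer is re-armed while any hold queue remains
 * non-empty.
 */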
1279 static void aggr_timeout(unsigned long arg)
1280 {
1281 	u8 i, j;
1282 	struct aggr_info *p_aggr = (struct aggr_info *) arg;
1283 	struct rxtid *rxtid;
1284 	struct rxtid_stats *stats;
1285 
1286 	for (i = 0; i < NUM_OF_TIDS; i++) {
1287 		rxtid = &p_aggr->rx_tid[i];
1288 		stats = &p_aggr->stat[i];
1289 
1290 		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1291 			continue;
1292 
1293 		stats->num_timeouts++;
1294 		ath6kl_dbg(ATH6KL_DBG_AGGR,
1295 			   "aggr timeout (st %d end %d)\n",
1296 			   rxtid->seq_next,
1297 			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1298 			    ATH6KL_MAX_SEQ_NO));
1299 		aggr_deque_frms(p_aggr, i, 0, 0);
1300 	}
1301 
1302 	p_aggr->timer_scheduled = false;
1303 
1304 	for (i = 0; i < NUM_OF_TIDS; i++) {
1305 		rxtid = &p_aggr->rx_tid[i];
1306 
1307 		if (rxtid->aggr && rxtid->hold_q) {
1308 			for (j = 0; j < rxtid->hold_q_sz; j++) {
1309 				if (rxtid->hold_q[j].skb) {
1310 					p_aggr->timer_scheduled = true;
1311 					rxtid->timer_mon = true;
1312 					rxtid->progress = false;
1313 					break;
1314 				}
1315 			}
1316 
1317 			if (j >= rxtid->hold_q_sz)
1318 				rxtid->timer_mon = false;
1319 		}
1320 	}
1321 
1322 	if (p_aggr->timer_scheduled)
1323 		mod_timer(&p_aggr->timer,
1324 			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1325 }
1326 
1327 static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1328 {
1329 	struct rxtid *rxtid;
1330 	struct rxtid_stats *stats;
1331 
1332 	if (!p_aggr || tid >= NUM_OF_TIDS)
1333 		return;
1334 
1335 	rxtid = &p_aggr->rx_tid[tid];
1336 	stats = &p_aggr->stat[tid];
1337 
1338 	if (rxtid->aggr)
1339 		aggr_deque_frms(p_aggr, tid, 0, 0);
1340 
1341 	rxtid->aggr = false;
1342 	rxtid->progress = false;
1343 	rxtid->timer_mon = false;
1344 	rxtid->win_sz = 0;
1345 	rxtid->seq_next = 0;
1346 	rxtid->hold_q_sz = 0;
1347 
1348 	kfree(rxtid->hold_q);
1349 	rxtid->hold_q = NULL;
1350 
1351 	memset(stats, 0, sizeof(struct rxtid_stats));
1352 }
1353 
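/*
 * ADDBA request from the target: (re)initialise per-TID reorder state.  The
 * hold queue is sized to TID_WINDOW_SZ(win_sz) entries (twice the negotiated
 * window, per the sizing note in aggr_deque_frms()) and seq_next starts at
 * the sequence number carried in the request.
 */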
1354 void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
1355 {
1356 	struct aggr_info *p_aggr = ar->aggr_cntxt;
1357 	struct rxtid *rxtid;
1358 	struct rxtid_stats *stats;
1359 	u16 hold_q_size;
1360 
1361 	if (!p_aggr)
1362 		return;
1363 
1364 	rxtid = &p_aggr->rx_tid[tid];
1365 	stats = &p_aggr->stat[tid];
1366 
1367 	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1368 		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1369 			   __func__, win_sz, tid);
1370 
1371 	if (rxtid->aggr)
1372 		aggr_delete_tid_state(p_aggr, tid);
1373 
1374 	rxtid->seq_next = seq_no;
1375 	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1376 	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1377 	if (!rxtid->hold_q)
1378 		return;
1379 
1380 	rxtid->win_sz = win_sz;
1381 	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1382 	if (!skb_queue_empty(&rxtid->q))
1383 		return;
1384 
1385 	rxtid->aggr = true;
1386 }
1387 
1388 struct aggr_info *aggr_init(struct net_device *dev)
1389 {
1390 	struct aggr_info *p_aggr = NULL;
1391 	struct rxtid *rxtid;
1392 	u8 i;
1393 
1394 	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1395 	if (!p_aggr) {
1396 		ath6kl_err("failed to alloc memory for aggr_node\n");
1397 		return NULL;
1398 	}
1399 
1400 	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
1401 	p_aggr->dev = dev;
1402 	init_timer(&p_aggr->timer);
1403 	p_aggr->timer.function = aggr_timeout;
1404 	p_aggr->timer.data = (unsigned long) p_aggr;
1405 
1406 	p_aggr->timer_scheduled = false;
1407 	skb_queue_head_init(&p_aggr->free_q);
1408 
1409 	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
1410 
1411 	for (i = 0; i < NUM_OF_TIDS; i++) {
1412 		rxtid = &p_aggr->rx_tid[i];
1413 		rxtid->aggr = false;
1414 		rxtid->progress = false;
1415 		rxtid->timer_mon = false;
1416 		skb_queue_head_init(&rxtid->q);
1417 		spin_lock_init(&rxtid->lock);
1418 	}
1419 
1420 	return p_aggr;
1421 }
1422 
1423 void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
1424 {
1425 	struct aggr_info *p_aggr = ar->aggr_cntxt;
1426 	struct rxtid *rxtid;
1427 
1428 	if (!p_aggr)
1429 		return;
1430 
1431 	rxtid = &p_aggr->rx_tid[tid];
1432 
1433 	if (rxtid->aggr)
1434 		aggr_delete_tid_state(p_aggr, tid);
1435 }
1436 
1437 void aggr_reset_state(struct aggr_info *aggr_info)
1438 {
1439 	u8 tid;
1440 
1441 	for (tid = 0; tid < NUM_OF_TIDS; tid++)
1442 		aggr_delete_tid_state(aggr_info, tid);
1443 }
1444 
1445 /* clean up our amsdu buffer list */
1446 void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1447 {
1448 	struct htc_packet *packet, *tmp_pkt;
1449 
1450 	spin_lock_bh(&ar->lock);
1451 	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1452 		spin_unlock_bh(&ar->lock);
1453 		return;
1454 	}
1455 
1456 	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1457 				 list) {
1458 		list_del(&packet->list);
1459 		spin_unlock_bh(&ar->lock);
1460 		dev_kfree_skb(packet->pkt_cntxt);
1461 		spin_lock_bh(&ar->lock);
1462 	}
1463 
1464 	spin_unlock_bh(&ar->lock);
1465 }
1466 
1467 void aggr_module_destroy(struct aggr_info *aggr_info)
1468 {
1469 	struct rxtid *rxtid;
1470 	u8 i, k;
1471 
1472 	if (!aggr_info)
1473 		return;
1474 
1475 	if (aggr_info->timer_scheduled) {
1476 		del_timer(&aggr_info->timer);
1477 		aggr_info->timer_scheduled = false;
1478 	}
1479 
1480 	for (i = 0; i < NUM_OF_TIDS; i++) {
1481 		rxtid = &aggr_info->rx_tid[i];
1482 		if (rxtid->hold_q) {
1483 			for (k = 0; k < rxtid->hold_q_sz; k++)
1484 				dev_kfree_skb(rxtid->hold_q[k].skb);
1485 			kfree(rxtid->hold_q);
1486 		}
1487 
1488 		skb_queue_purge(&rxtid->q);
1489 	}
1490 
1491 	skb_queue_purge(&aggr_info->free_q);
1492 	kfree(aggr_info);
1493 }
1494