xref: /openbmc/linux/net/mac80211/wme.c (revision f42b3800)
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* maximum number of hardware queues we support. */
#define TC_80211_MAX_QUEUES 16

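/*
 * 802.1d user priority (0-7) to hardware queue mapping used below:
 * queue 0 is the highest priority (AC_VO) and queue 3 the lowest
 * (AC_BK), so UP 6/7 -> 0 (VO), UP 4/5 -> 1 (VI), UP 0/3 -> 2 (BE)
 * and UP 1/2 -> 3 (BK).
 */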
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

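/*
 * Per-qdisc private data: qdisc_pool tracks which hardware queues this
 * qdisc currently owns (the legacy AC queues plus any queues handed out
 * for aggregation), filter_list holds user-configured tc classifiers,
 * queues[] are the per-hardware-queue child qdiscs, and requeued[]
 * holds frames flagged for requeueing, which are served before the
 * corresponding child qdisc.
 */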
struct ieee80211_sched_data
{
	unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
	struct tcf_proto *filter_list;
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};

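/*
 * LLC/SNAP header that precedes an IPv4 payload in 802.11 data frames
 * (DSAP/SSAP 0xAA, control 0x03, OUI 00:00:00, EtherType 0x0800).
 * classify_1d() matches on it to locate the IP header for DSCP-based
 * classification.
 */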
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame, determine the 802.1p/1d tag to use. */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user-configured filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256 to 263 are magic values that
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check that a valid IP header is present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

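	/*
	 * Only "class selector" style DSCPs (low three DSCP bits zero)
	 * are honoured: the IP precedence (top three TOS bits) is used
	 * as the 802.1d priority, everything else falls back to 0.
	 * For example, TOS 0xa0 (CS5) yields priority 5 (video), while
	 * TOS 0x68 (AF31) has bits in 0x1c set and yields 0.
	 */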
	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}


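/*
 * When WMM admission control is mandatory (ACM set) on the AC a frame
 * was classified to, step the priority down one access category at a
 * time (VO -> VI -> BE -> BK) until an AC without ACM is found; if the
 * frame is already on the lowest AC, give up and let the caller drop it.
 */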
static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* A non-negative return value indicates which queue to use;
 * a negative return value indicates that the frame should be dropped. */
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* check whether this is a data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on the AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* if we are a client, verify that ACM is not set for this AC */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* no AC with lower priority has acm=0, drop the packet */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}


static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	int err, queue;
	struct sta_info *sta;
	u8 tid;

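	/*
	 * Frames flagged for requeueing already carry the hardware queue
	 * they were classified to in their cb.  If an aggregation
	 * (A-MPDU) queue has since been made available for this
	 * station/TID, divert them onto it; either way they go onto the
	 * requeued[] list so they are sent ahead of newly enqueued
	 * frames.
	 */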
	if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
		queue = pkt_data->queue;
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < local->hw.queues) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* now we know the 1d priority, fill in the QoS header if there
	 * is one */
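	/*
	 * The two-byte QoS control field sits at the end of the 802.11
	 * header: the TID goes into the low bits of the first byte, the
	 * ack-policy bits above it, and the second byte is left zero
	 * here.
	 */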
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = ack_policy | tid;
		p++;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < local->hw.queues) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}

		rcu_read_unlock();
	}

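	/*
	 * If classification picked a queue this hardware does not have
	 * (it may expose fewer queues than the four ACs), fall back to
	 * the last, lowest-priority hardware queue instead of dropping
	 * the frame.
	 */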
	if (unlikely(queue >= local->hw.queues)) {
#if 0
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s - queue=%d (hw does not "
			       "support) -> %d\n",
			       __func__, queue, local->hw.queues - 1);
		}
#endif
		queue = local->hw.queues - 1;
	}

	if (unlikely(queue < 0)) {
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		pkt_data->queue = (unsigned int) queue;
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non-zero; it should never do that. Once that is done
 * we can remove this function. */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier */
	qdisc = q->queues[pkt_data->queue];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}


static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc *qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

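	/*
	 * Queues that are stopped (XOFF), marked pending, or not owned
	 * by this qdisc (bit clear in qdisc_pool) are skipped; frames
	 * that were requeued earlier are always served before the
	 * queue's child qdisc so ordering is preserved.
	 */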
	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < hw->queues; queue++) {
		/* see if there is room in this hardware queue */
		if (test_bit(IEEE80211_LINK_STATE_XOFF,
			     &local->state[queue]) ||
		    test_bit(IEEE80211_LINK_STATE_PENDING,
			     &local->state[queue]) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try to get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}


static void wme_qdiscop_reset(struct Qdisc *qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc *qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	tcf_destroy_chain(q->filter_list);
	q->filter_list = NULL;

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}


/* called whenever parameters are updated on an existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	/* Ignore the options block for now - always use the static
	 * mapping.  If this is ever revisited, the options block should
	 * be size-checked and copied into the local structure:
	 *
	 *	struct ieee80211_sched_data *q = qdisc_priv(qd);
	 *	struct tc_ieee80211_qopt *qopt = nla_data(opt);
	 *
	 *	if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
	 *		return -EINVAL;
	 *	memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
	 */
	return 0;
}


/* called during initial creation of the qdisc on a device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	int queues;
	int err = 0, i;

	/* check that the device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	/* check that this device is an ieee80211 master type device */
	if (dev->type != ARPHRD_IEEE80211)
		return -EINVAL;

	/* check that there is no qdisc currently attached to the device;
	 * this ensures that we will be the root qdisc. (I can't find a
	 * better way to test this explicitly) */
	if (dev->qdisc_sleeping != &noop_qdisc)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	queues = local->hw.queues;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < queues; i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* reserve all legacy QoS queues */
	for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
		set_bit(i, q->qdisc_pool);

	return err;
}

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	/* Dumping the static tag-to-queue mapping is not implemented yet:
	 *
	 *	struct ieee80211_sched_data *q = qdisc_priv(qd);
	 *	unsigned char *p = skb->tail;
	 *	struct tc_ieee80211_qopt opt;
	 *
	 *	memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
	 *	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	 *	return skb->len;
	 *
	 * nla_put_failure:
	 *	skb_trim(skb, p - skb->data);
	 *	return -1;
	 */
	return skb->len;
}


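/*
 * Class operations: TC class N (minor number N) corresponds to hardware
 * queue N - 1, so userspace can attach or inspect a per-queue child
 * qdisc with something along the lines of
 * "tc qdisc add dev wmaster0 parent 8001:2 pfifo" (hypothetical example;
 * the exact device name and handle depend on the setup).
 */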
static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return NULL;

	return q->queues[queue];
}


static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= hw->queues)
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 >= hw->queues)
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from the tc command line */

	return 0;
}


/* We don't support deleting hardware queues.
 * When we add WMM-SA support, TSPECs may be deleted here. */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 >= hw->queues)
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 >= hw->queues)
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl - 1]->handle; /* do we need this? */
	return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < hw->queues; queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}


static struct tcf_proto **wme_classop_find_tcf(struct Qdisc *qd,
					       unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);

	if (cl)
		return NULL;

	return &q->filter_list;
}


/* This qdisc is classful (i.e. it has classes, some of which may have
 * leaf qdiscs attached) - these are the operations on the classes. */
static const struct Qdisc_class_ops class_ops = {
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly = {
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};


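/*
 * Attach this qdisc as the root qdisc of the mac80211 master device,
 * replacing the default noop qdisc, so that WMM classification is in
 * place before the device transmits; mac80211 is expected to call this
 * for the master (wmaster) device rather than for normal interfaces.
 */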
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}

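/*
 * Reserve a free hardware queue for an A-MPDU aggregation session on
 * the given station/TID: grab an unused bit from qdisc_pool, stop the
 * newly allocated queue so that frames already pending for this TID
 * drain from the previous queue first (HT requires strict ordering),
 * and record the queue in sta->tid_to_tx_q[] so the enqueue path can
 * use it.
 */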
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
			qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	/* try to get a free hardware queue from the pool */
	for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* if there are already packets pending on this
			 * TID, we first need to drain them on the
			 * previous queue, since HT requires strict
			 * ordering */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/* the caller needs to hold local->mdev->queue_lock */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = local->hw.queues;

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}

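/*
 * Flush the child qdisc of a (former) aggregation queue back through
 * the root qdisc: every frame is dequeued and re-enqueued via
 * wme_qdiscop_enqueue(), so it is classified again and lands on the
 * appropriate regular queue.
 */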
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
	struct Qdisc *qdisc = q->queues[queue];
	struct sk_buff *skb = NULL;
	u32 len;

	if (!qdisc || !qdisc->dequeue)
		return;

	printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
	for (len = qdisc->q.qlen; len > 0; len--) {
		skb = qdisc->dequeue(qdisc);
		root_qd->q.qlen--;
		/* the packet will be classified again and the queue
		 * recorded in its cb will be overridden if needed */
		if (skb)
			wme_qdiscop_enqueue(skb, root_qd);
	}
}
727