Lines matching refs: queue — from drivers/net/wireless/st/cw1200/queue.c (Linux cw1200 Wi-Fi driver). Matches are grouped by enclosing function below; "..." marks neighbouring lines that did not match the search.

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        if (queue->tx_locked_cnt++ == 0) {
                pr_debug("[TX] Queue %d is locked.\n",
                         queue->queue_id);
                ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
        }
}
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        BUG_ON(!queue->tx_locked_cnt);
        if (--queue->tx_locked_cnt == 0) {
                pr_debug("[TX] Queue %d is unlocked.\n",
                         queue->queue_id);
                ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
        }
}
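These two helpers form a counted (nesting) lock: ieee80211_stop_queue() is called only when tx_locked_cnt goes 0 -> 1, and ieee80211_wake_queue() only when it returns to 0, so independent code paths can stack lock/unlock requests without waking the queue early. A minimal userspace model of the idiom (struct txq and the stop/wake stubs are illustrative names, not driver API):

#include <assert.h>
#include <stdio.h>

struct txq {
        int queue_id;
        int tx_locked_cnt;      /* nesting depth of lock requests */
};

static void stop_hw_queue(int id) { printf("queue %d stopped\n", id); }
static void wake_hw_queue(int id) { printf("queue %d woken\n", id); }

static void txq_lock(struct txq *q)
{
        if (q->tx_locked_cnt++ == 0)    /* first locker stops the queue */
                stop_hw_queue(q->queue_id);
}

static void txq_unlock(struct txq *q)
{
        assert(q->tx_locked_cnt > 0);   /* mirrors the BUG_ON() above */
        if (--q->tx_locked_cnt == 0)    /* last unlocker wakes it */
                wake_hw_queue(q->queue_id);
}

int main(void)
{
        struct txq q = { .queue_id = 2 };

        txq_lock(&q);           /* prints "queue 2 stopped" */
        txq_lock(&q);           /* nested: silent */
        txq_unlock(&q);         /* still held: silent */
        txq_unlock(&q);         /* prints "queue 2 woken" */
        return 0;
}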
static void __cw1200_queue_gc(struct cw1200_queue *queue,
                              struct list_head *head, bool unlock)
{
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item = NULL, *iter, *tmp;

        list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
                if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
                        item = iter;    /* first entry that has not expired */
                        break;
                }
                --queue->num_queued;
                --queue->link_map_cache[iter->txpriv.link_id];
                ...             /* update shared stats, queue skb for dtor */
                list_move_tail(&iter->head, &queue->free_pool);
        }

        if (queue->overfull) {
                if (queue->num_queued <= (queue->capacity >> 1)) {
                        queue->overfull = false;
                        if (unlock)
                                __cw1200_queue_unlock(queue);
                } else if (item) {
                        unsigned long tmo = item->queue_timestamp + queue->ttl;
                        mod_timer(&queue->gc, tmo);
                }
        }
}
static void cw1200_queue_gc(struct timer_list *t)
{
        LIST_HEAD(list);
        struct cw1200_queue *queue =
                from_timer(queue, t, gc);

        spin_lock_bh(&queue->lock);
        __cw1200_queue_gc(queue, &list, true);
        spin_unlock_bh(&queue->lock);
        cw1200_queue_post_gc(queue->stats, &list);
}
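cw1200_queue_gc() is the expiry callback armed by timer_setup() in cw1200_queue_init() below: the timer_list is embedded in struct cw1200_queue, and from_timer() (a container_of() wrapper) recovers the owning queue from the bare timer pointer the callback receives. A sketch of that recovery in plain C (struct timer and queue_gc here are stand-ins, not kernel types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {
        void (*fn)(struct timer *t);
};

struct queue {
        int queue_id;
        struct timer gc;        /* timer embedded in its owner */
};

/* The callback receives only the timer; recover the enclosing queue,
 * just as from_timer(queue, t, gc) does in the kernel. */
static void queue_gc(struct timer *t)
{
        struct queue *q = container_of(t, struct queue, gc);

        printf("gc for queue %d\n", q->queue_id);
}

int main(void)
{
        struct queue q = { .queue_id = 1, .gc = { .fn = queue_gc } };

        q.gc.fn(&q.gc);         /* simulate the timer firing */
        return 0;
}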
int cw1200_queue_init(struct cw1200_queue *queue,
                      struct cw1200_queue_stats *stats,
                      u8 queue_id, size_t capacity, unsigned long ttl)
{
        size_t i;

        memset(queue, 0, sizeof(*queue));
        queue->stats = stats;
        queue->capacity = capacity;
        queue->queue_id = queue_id;
        queue->ttl = ttl;
        INIT_LIST_HEAD(&queue->queue);
        INIT_LIST_HEAD(&queue->pending);
        INIT_LIST_HEAD(&queue->free_pool);
        spin_lock_init(&queue->lock);
        timer_setup(&queue->gc, cw1200_queue_gc, 0);

        queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
                              GFP_KERNEL);
        if (!queue->pool)
                return -ENOMEM;

        queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!queue->link_map_cache) {
                kfree(queue->pool);
                queue->pool = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < capacity; ++i)
                list_add_tail(&queue->pool[i].head, &queue->free_pool);

        return 0;
}
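Note the unwind order in the failure path: if the second kcalloc() fails, the first allocation is freed and its pointer reset before returning, so a later cw1200_queue_deinit() sees a consistent state. The same shape in portable C (queue_init and the -1 error value are illustrative; the driver returns -ENOMEM):

#include <stdlib.h>

struct item { struct item *next; };

struct queue {
        struct item *pool;
        int *link_map_cache;
        size_t capacity;
};

static int queue_init(struct queue *q, size_t capacity, size_t map_capacity)
{
        q->pool = calloc(capacity, sizeof(*q->pool));
        if (!q->pool)
                return -1;

        q->link_map_cache = calloc(map_capacity, sizeof(int));
        if (!q->link_map_cache) {
                free(q->pool);          /* unwind the first allocation */
                q->pool = NULL;         /* leave a state safe to deinit */
                return -1;
        }
        q->capacity = capacity;
        return 0;
}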
int cw1200_queue_clear(struct cw1200_queue *queue)
{
        int i;
        LIST_HEAD(gc_list);
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item, *tmp;

        spin_lock_bh(&queue->lock);
        queue->generation++;    /* invalidates all outstanding packet IDs */
        list_splice_tail_init(&queue->queue, &queue->pending);
        list_for_each_entry_safe(item, tmp, &queue->pending, head) {
                ...             /* hand each skb over to the gc list */
                list_move_tail(&item->head, &queue->free_pool);
        }
        queue->num_queued = 0;
        queue->num_pending = 0;

        spin_lock_bh(&stats->lock);
        for (i = 0; i < stats->map_capacity; ++i) {
                stats->num_queued -= queue->link_map_cache[i];
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->lock);

        if (queue->overfull) {
                queue->overfull = false;
                __cw1200_queue_unlock(queue);
        }
        spin_unlock_bh(&queue->lock);
        ...             /* wake waiters, destroy collected skbs */
        return 0;
}
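The generation bump at the top of cw1200_queue_clear() is what retires every packet ID already handed out: the generation is packed into each packet_id (see the pack/parse sketch further down), and cw1200_queue_requeue()/cw1200_queue_remove() reject IDs whose generation no longer matches. A toy model of that invalidation (mk_id/id_valid are illustrative helpers):

#include <stdbool.h>
#include <stdio.h>

struct queue { unsigned generation; };

/* Every ID carries the generation it was minted under. */
static unsigned mk_id(const struct queue *q, unsigned slot)
{
        return (q->generation << 8) | slot;
}

static bool id_valid(const struct queue *q, unsigned id)
{
        return (id >> 8) == q->generation;
}

int main(void)
{
        struct queue q = { .generation = 0 };
        unsigned id = mk_id(&q, 5);

        q.generation++;         /* the clear: all old IDs go stale */
        printf("valid: %d\n", id_valid(&q, id));        /* valid: 0 */
        return 0;
}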
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
        cw1200_queue_clear(queue);
        del_timer_sync(&queue->gc);
        INIT_LIST_HEAD(&queue->free_pool);
        kfree(queue->pool);
        kfree(queue->link_map_cache);
        queue->pool = NULL;
        queue->link_map_cache = NULL;
        queue->capacity = 0;
}
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
                                   u32 link_id_map)
{
        size_t ret, i;
        size_t map_capacity = queue->stats->map_capacity;

        spin_lock_bh(&queue->lock);
        if (link_id_map == (u32)-1)     /* all links requested */
                ret = queue->num_queued - queue->num_pending;
        else
                for (ret = 0, i = 0; i < map_capacity; ++i)
                        if (link_id_map & BIT(i))
                                ret += queue->link_map_cache[i];
        spin_unlock_bh(&queue->lock);
        return ret;
}
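The per-link counters exist precisely for this query: with link_map_cache kept in step on every put/get/requeue, counting the backlog for a set of links is a walk over map_capacity integers instead of the whole list. A standalone sketch of the masked summation, using the same bitmask semantics:

#include <stddef.h>
#include <stdint.h>

/* Sum the queued-frame counters for every link selected in the mask. */
static size_t num_queued_for_links(const int *link_map_cache,
                                   size_t map_capacity,
                                   uint32_t link_id_map)
{
        size_t ret = 0;
        size_t i;

        for (i = 0; i < map_capacity && i < 32; ++i)
                if (link_id_map & ((uint32_t)1 << i))
                        ret += (size_t)link_map_cache[i];
        return ret;
}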
int cw1200_queue_put(struct cw1200_queue *queue,
                     struct sk_buff *skb,
                     struct cw1200_txpriv *txpriv)
{
        int ret = 0;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        if (txpriv->link_id >= queue->stats->map_capacity)
                return -EINVAL;

        spin_lock_bh(&queue->lock);
        if (!WARN_ON(list_empty(&queue->free_pool))) {
                item = list_first_entry(
                        &queue->free_pool, struct cw1200_queue_item, head);
                list_move_tail(&item->head, &queue->queue);
                item->skb = skb;
                item->txpriv = *txpriv;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                item->queue_timestamp = jiffies;
                ++queue->num_queued;
                ++queue->link_map_cache[txpriv->link_id];
                ...     /* mirror the counters into the shared stats */
                /* TX may happen in parallel sometimes.
                 * Leave extra queue slots so we don't overflow.
                 */
                if (queue->overfull == false &&
                    queue->num_queued >=
                    (queue->capacity - (num_present_cpus() - 1))) {
                        queue->overfull = true;
                        __cw1200_queue_lock(queue);
                        mod_timer(&queue->gc, jiffies);
                }
        } else {
                ret = -ENOENT;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
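The enqueue path trips the overfull state a little before the pool is actually full: capacity - (num_present_cpus() - 1) leaves one spare slot per additional CPU that may be enqueueing concurrently. Entering overfull takes the counted TX lock and fires the GC immediately; the exit condition (in __cw1200_queue_gc() and cw1200_queue_remove()) waits until the backlog drops to half capacity. The two thresholds form a hysteresis band, modeled below (check_overfull/check_relief are illustrative names):

#include <stdbool.h>
#include <stddef.h>

struct q {
        size_t num_queued;
        size_t capacity;
        bool overfull;
};

/* After enqueue: stop intake before concurrent producers overflow us. */
static bool check_overfull(struct q *q, unsigned int ncpus)
{
        if (!q->overfull && q->num_queued >= q->capacity - (ncpus - 1)) {
                q->overfull = true;
                return true;    /* caller takes the counted TX lock */
        }
        return false;
}

/* After dequeue: resume only at half capacity, so the queue does not
 * bounce on and off around a single threshold. */
static bool check_relief(struct q *q)
{
        if (q->overfull && q->num_queued <= (q->capacity >> 1)) {
                q->overfull = false;
                return true;    /* caller releases the TX lock */
        }
        return false;
}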
int cw1200_queue_get(struct cw1200_queue *queue, u32 link_id_map,
                     struct wsm_tx **tx, struct ieee80211_tx_info **tx_info,
                     const struct cw1200_txpriv **txpriv)
{
        int ret = -ENOENT;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        spin_lock_bh(&queue->lock);
        list_for_each_entry(item, &queue->queue, head) {
                if (link_id_map & BIT(item->txpriv.link_id)) {
                        ret = 0;        /* found a frame for a wanted link */
                        break;
                }
        }
        if (!WARN_ON(ret)) {
                ...     /* fill *tx, *tx_info, *txpriv from the item */
                list_move_tail(&item->head, &queue->pending);
                ++queue->num_pending;
                --queue->link_map_cache[item->txpriv.link_id];
                ...     /* update shared stats */
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);
        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;  /* queue was cleared since the frame left */
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];
                ...     /* update shared stats; re-stamp packet_id with
                         * the bumped item generation */
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}
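Requeue, remove and get_skb all start by unpacking packet_id and validating every field against the queue before trusting the item index. The driver packs four 8-bit fields into the 32-bit ID, which is what makes the generation checks above possible; a sketch of the pack/parse pair (field layout reconstructed to match the cw1200_queue_mk_packet_id() calls shown in this file):

#include <stdint.h>

/* packet_id layout, one byte per field:
 * [31:24] queue generation   [23:16] queue id
 * [15:8]  item generation    [7:0]   item index into the pool */
static uint32_t mk_packet_id(uint8_t queue_gen, uint8_t queue_id,
                             uint8_t item_gen, uint8_t item_id)
{
        return ((uint32_t)queue_gen << 24) | ((uint32_t)queue_id << 16) |
               ((uint32_t)item_gen  <<  8) |  (uint32_t)item_id;
}

static void parse_packet_id(uint32_t packet_id,
                            uint8_t *queue_gen, uint8_t *queue_id,
                            uint8_t *item_gen, uint8_t *item_id)
{
        *queue_gen = (uint8_t)(packet_id >> 24);
        *queue_id  = (uint8_t)(packet_id >> 16);
        *item_gen  = (uint8_t)(packet_id >> 8);
        *item_id   = (uint8_t)packet_id;
}

Because the item generation is bumped on every requeue, a stale confirmation for an item's previous life still parses, but fails the item-generation check and is rejected.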
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
        struct cw1200_queue_item *item, *tmp;
        struct cw1200_queue_stats *stats = queue->stats;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];
                ...     /* update shared stats */
                item->generation++;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);
        return 0;
}
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);
        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                ...     /* detach the skb for the destructor below */
                --queue->num_pending;
                --queue->num_queued;
                ++queue->num_sent;
                ++item->generation;
                list_move(&item->head, &queue->free_pool);

                if (queue->overfull &&
                    (queue->num_queued <= (queue->capacity >> 1))) {
                        queue->overfull = false;
                        __cw1200_queue_unlock(queue);
                }
        }
        spin_unlock_bh(&queue->lock);
        ...     /* call stats->skb_dtor() on the detached skb */
        return ret;
}
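A successful remove retires the item: it leaves both the pending and queued counts, bumps num_sent, and returns its slot to free_pool for the next cw1200_queue_put(). The counters obey num_pending <= num_queued <= capacity throughout; a minimal model of that lifecycle accounting (function names are illustrative):

#include <assert.h>
#include <stddef.h>

struct counters {
        size_t num_queued;      /* waiting + in flight */
        size_t num_pending;     /* handed to hardware, unconfirmed */
        size_t num_sent;        /* confirmed transmissions */
        size_t capacity;
};

static void on_put(struct counters *c)     { c->num_queued++;  }
static void on_get(struct counters *c)     { c->num_pending++; }
static void on_requeue(struct counters *c) { c->num_pending--; }

static void on_remove(struct counters *c)   /* TX confirmed */
{
        c->num_pending--;
        c->num_queued--;
        c->num_sent++;
        assert(c->num_pending <= c->num_queued);
        assert(c->num_queued <= c->capacity);
}

int main(void)
{
        struct counters c = { .capacity = 8 };

        on_put(&c);             /* frame queued */
        on_get(&c);             /* handed to hardware */
        on_requeue(&c);         /* TX failed, back on the queue */
        on_get(&c);             /* retried */
        on_remove(&c);          /* confirmed: num_sent == 1 */
        return c.num_sent == 1 ? 0 : 1;
}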
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
                         struct sk_buff **skb,
                         const struct cw1200_txpriv **txpriv)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);
        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation)
                ret = -ENOENT;
        else if (item_id >= (unsigned) queue->capacity)
                ret = -EINVAL;
        else
                ...     /* check item generation; hand out item->skb */
        spin_unlock_bh(&queue->lock);
        return ret;
}
void cw1200_queue_lock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_lock(queue);
        spin_unlock_bh(&queue->lock);
}
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_unlock(queue);
        spin_unlock_bh(&queue->lock);
}
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
                                     unsigned long *timestamp,
                                     u32 pending_frame_id)
{
        struct cw1200_queue_item *item;
        bool ret;

        spin_lock_bh(&queue->lock);
        ret = !list_empty(&queue->pending);
        if (ret)
                list_for_each_entry(item, &queue->pending, head)
                        if (item->packet_id != pending_frame_id &&
                            time_before(item->xmit_timestamp, *timestamp))
                                *timestamp = item->xmit_timestamp;
        spin_unlock_bh(&queue->lock);
        return ret;
}
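This last helper reports whether anything is still pending and lowers *timestamp to the oldest transmit timestamp it finds, which the caller can use to detect a wedged TX path. Because jiffies wrap, the comparison must be the kernel's time_before() signed-difference trick rather than a plain '<'; a standalone demonstration on a 32-bit tick counter:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is earlier than b" for a free-running 32-bit counter,
 * the same signed-difference trick as the kernel's time_before(). */
static int tick_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        /* 0xFFFFFFF0 was stamped just before the counter wrapped, 40
         * just after; a plain '<' would order them the wrong way. */
        uint32_t stamps[] = { 1000, 0xFFFFFFF0u, 40 };
        uint32_t oldest = stamps[0];
        unsigned int i;

        for (i = 1; i < sizeof(stamps) / sizeof(stamps[0]); ++i)
                if (tick_before(stamps[i], oldest))
                        oldest = stamps[i];

        printf("oldest: %u\n", (unsigned int)oldest);   /* 4294967280 */
        return 0;
}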