// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};

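/*
 * Nested TX-lock helpers: the first lock stops the matching mac80211
 * queue, the last unlock wakes it again.  Callers hold queue->lock.
 */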
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

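/*
 * A packet ID is a 32-bit cookie handed to the firmware with each TX
 * request and parsed back on confirmation.  Byte layout, LSB first:
 * item index, item generation, queue ID, queue generation.  Example:
 * queue generation 2, queue 1, item generation 0, item index 5 encode
 * to 0x02010005.
 */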
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

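/*
 * Destroy the skbs collected on a GC list.  Always called after
 * queue->lock has been dropped, so the skb destructor never runs under
 * the queue spinlock.
 */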
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}

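/*
 * Expire items at the head of the queue whose TTL has elapsed: move
 * them to the free pool and collect copies on @head for destruction by
 * cw1200_queue_post_gc().  If the queue stays overfull, re-arm the GC
 * timer for the first item that has not expired yet.
 */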
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *iter, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
		if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
			item = iter;
			break;
		}
		--queue->num_queued;
		--queue->link_map_cache[iter->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[iter->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, iter);
		iter->skb = NULL;
		list_move_tail(&iter->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

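/*
 * Per-device statistics shared by all TX queues: a global queued-frame
 * count plus a per-link map used to wait for a peer's frames to drain
 * via wait_link_id_empty.
 */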
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

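/*
 * Drop every queued and pending frame, return all items to the free
 * pool and bump the queue generation so that packet IDs still held by
 * the firmware no longer match.
 */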
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

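/*
 * Count frames that are queued but not yet fetched for transmission,
 * restricted to the links set in @link_id_map.
 */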
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

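/*
 * Queue an skb for transmission: take an item from the free pool and
 * stamp it with a fresh packet ID.  When the queue gets close to
 * capacity it is marked overfull, the mac80211 queue is stopped and
 * the GC timer fires immediately to expire stale frames.
 */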
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (!queue->overfull &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

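/*
 * Fetch the oldest queued frame for any link in @link_id_map and move
 * it to the pending list.  It stays pending until it is confirmed
 * (cw1200_queue_remove) or pushed back (cw1200_queue_requeue).
 */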
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

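/*
 * Return a pending frame to the queue after a failed transmission
 * attempt.  The item generation is bumped and a new packet ID issued,
 * so a late confirmation for the old attempt no longer matches.
 */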
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

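/*
 * Push all pending frames back to the head of the queue.  The reverse
 * walk preserves their original order ahead of frames that were never
 * fetched.
 */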
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

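/*
 * Complete a pending frame identified by @packet_id: validate the
 * generations encoded in the ID, return the item to the free pool and
 * destroy the skb after queue->lock has been dropped.
 */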
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

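/*
 * Look up the skb and txpriv of a pending frame by packet ID without
 * changing its state.
 */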
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

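/* External wrappers: take queue->lock around the nested TX-lock helpers. */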
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

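/*
 * Find the earliest xmit timestamp among pending frames, skipping
 * @pending_frame_id.  *timestamp must be pre-initialized by the caller
 * and is only ever moved earlier.  Returns true if anything is pending.
 */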
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

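/* True if no frames are waiting (fetched frames excluded) on any link
 * set in @link_id_map.
 */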
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}