/*	$NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_

/*
 * This file defines five types of data structures: singly-linked lists,
 * lists, simple queues, tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements.  New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency.  A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list.  New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list.  A simple queue may only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has more
 * complex end-of-list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 */

/*
 * Include the definition of NULL only on NetBSD because sys/null.h
 * is not available elsewhere.  This conditional makes the header
 * portable and it can simply be dropped verbatim into any system.
 * The caveat is that on other systems some other header
 * must provide NULL before the macros can be used.
 */
#ifdef __NetBSD__
#include <sys/null.h>
#endif

#if defined(QUEUEDEBUG)
# if defined(_KERNEL)
#  define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
# else
#  include <err.h>
#  define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
# endif
#endif

/*
 * Singly-linked List definitions.
 */
#define SLIST_HEAD(name, type) \
struct name { \
	struct type *slh_first;	/* first element */ \
}

#define SLIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define SLIST_ENTRY(type) \
struct { \
	struct type *sle_next;	/* next element */ \
}

/*
 * Singly-linked List access methods.
 */
#define SLIST_FIRST(head)	((head)->slh_first)
#define SLIST_END(head)		NULL
#define SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define SLIST_NEXT(elm, field)	((elm)->field.sle_next)

#define SLIST_FOREACH(var, head, field) \
	for((var) = (head)->slh_first; \
	    (var) != SLIST_END(head); \
	    (var) = (var)->field.sle_next)

#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = SLIST_FIRST((head)); \
	    (var) != SLIST_END(head) && \
	    ((tvar) = SLIST_NEXT((var), field), 1); \
	    (var) = (tvar))

/*
 * Singly-linked List functions.
 */
#define SLIST_INIT(head) do { \
	(head)->slh_first = SLIST_END(head); \
} while (/*CONSTCOND*/0)

#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	(elm)->field.sle_next = (slistelm)->field.sle_next; \
	(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)

#define SLIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.sle_next = (head)->slh_first; \
	(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)

#define SLIST_REMOVE_AFTER(slistelm, field) do { \
	(slistelm)->field.sle_next = \
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
} while (/*CONSTCOND*/0)

#define SLIST_REMOVE_HEAD(head, field) do { \
	(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)

#define SLIST_REMOVE(head, elm, type, field) do { \
	if ((head)->slh_first == (elm)) { \
		SLIST_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = (head)->slh_first; \
		while(curelm->field.sle_next != (elm)) \
			curelm = curelm->field.sle_next; \
		curelm->field.sle_next = \
		    curelm->field.sle_next->field.sle_next; \
	} \
} while (/*CONSTCOND*/0)
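
/*
 * Example (illustrative sketch only, not part of the macro API): a LIFO of
 * hypothetical "struct entry" nodes built with the SLIST_* macros above.
 * The type, field, and variable names are placeholders chosen for this
 * example; it assumes <stdlib.h> and <stdio.h>.
 *
 *	struct entry {
 *		int value;
 *		SLIST_ENTRY(entry) entries;
 *	} *e;
 *	SLIST_HEAD(entryhead, entry) head = SLIST_HEAD_INITIALIZER(head);
 *
 *	e = malloc(sizeof(*e));
 *	e->value = 1;
 *	SLIST_INSERT_HEAD(&head, e, entries);
 *	SLIST_FOREACH(e, &head, entries)
 *		printf("%d\n", e->value);
 *	while (!SLIST_EMPTY(&head)) {
 *		e = SLIST_FIRST(&head);
 *		SLIST_REMOVE_HEAD(&head, entries);
 *		free(e);
 *	}
 */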


/*
 * List definitions.
 */
#define LIST_HEAD(name, type) \
struct name { \
	struct type *lh_first;	/* first element */ \
}

#define LIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define LIST_ENTRY(type) \
struct { \
	struct type *le_next;	/* next element */ \
	struct type **le_prev;	/* address of previous next element */ \
}

/*
 * List access methods.
 */
#define LIST_FIRST(head)	((head)->lh_first)
#define LIST_END(head)		NULL
#define LIST_EMPTY(head)	((head)->lh_first == LIST_END(head))
#define LIST_NEXT(elm, field)	((elm)->field.le_next)

#define LIST_FOREACH(var, head, field) \
	for ((var) = ((head)->lh_first); \
	    (var) != LIST_END(head); \
	    (var) = ((var)->field.le_next))

#define LIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = LIST_FIRST((head)); \
	    (var) != LIST_END(head) && \
	    ((tvar) = LIST_NEXT((var), field), 1); \
	    (var) = (tvar))

#define LIST_MOVE(head1, head2) do { \
	LIST_INIT((head2)); \
	if (!LIST_EMPTY((head1))) { \
		(head2)->lh_first = (head1)->lh_first; \
		LIST_INIT((head1)); \
	} \
} while (/*CONSTCOND*/0)

/*
 * List functions.
 */
#if defined(QUEUEDEBUG)
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
	if ((head)->lh_first && \
	    (head)->lh_first->field.le_prev != &(head)->lh_first) \
		QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_OP(elm, field) \
	if ((elm)->field.le_next && \
	    (elm)->field.le_next->field.le_prev != \
	    &(elm)->field.le_next) \
		QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
		    __FILE__, __LINE__); \
	if (*(elm)->field.le_prev != (elm)) \
		QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
	(elm)->field.le_next = (void *)1L; \
	(elm)->field.le_prev = (void *)1L;
#else
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_LIST_OP(elm, field)
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define LIST_INIT(head) do { \
	(head)->lh_first = LIST_END(head); \
} while (/*CONSTCOND*/0)

#define LIST_INSERT_AFTER(listelm, elm, field) do { \
	QUEUEDEBUG_LIST_OP((listelm), field) \
	if (((elm)->field.le_next = (listelm)->field.le_next) != \
	    LIST_END(head)) \
		(listelm)->field.le_next->field.le_prev = \
		    &(elm)->field.le_next; \
	(listelm)->field.le_next = (elm); \
	(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)

#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
	QUEUEDEBUG_LIST_OP((listelm), field) \
	(elm)->field.le_prev = (listelm)->field.le_prev; \
	(elm)->field.le_next = (listelm); \
	*(listelm)->field.le_prev = (elm); \
	(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)

#define LIST_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
	if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head)) \
		(head)->lh_first->field.le_prev = &(elm)->field.le_next; \
	(head)->lh_first = (elm); \
	(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)

#define LIST_REMOVE(elm, field) do { \
	QUEUEDEBUG_LIST_OP((elm), field) \
	if ((elm)->field.le_next != NULL) \
		(elm)->field.le_next->field.le_prev = \
		    (elm)->field.le_prev; \
	*(elm)->field.le_prev = (elm)->field.le_next; \
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
} while (/*CONSTCOND*/0)

#define LIST_REPLACE(elm, elm2, field) do { \
	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
		(elm2)->field.le_next->field.le_prev = \
		    &(elm2)->field.le_next; \
	(elm2)->field.le_prev = (elm)->field.le_prev; \
	*(elm2)->field.le_prev = (elm2); \
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
} while (/*CONSTCOND*/0)
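
/*
 * Example (illustrative sketch only): a doubly-linked LIST of hypothetical
 * "struct entry" nodes.  Because each entry carries a back-pointer,
 * LIST_REMOVE needs only the element, not the head.  All names are
 * placeholders chosen for this example.
 *
 *	struct entry {
 *		int value;
 *		LIST_ENTRY(entry) entries;
 *	} *n1, *n2;
 *	LIST_HEAD(listhead, entry) head = LIST_HEAD_INITIALIZER(head);
 *
 *	n1 = malloc(sizeof(*n1));
 *	LIST_INSERT_HEAD(&head, n1, entries);
 *	n2 = malloc(sizeof(*n2));
 *	LIST_INSERT_AFTER(n1, n2, entries);
 *	LIST_REMOVE(n1, entries);
 *	free(n1);
 */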

/*
 * Simple queue definitions.
 */
#define SIMPLEQ_HEAD(name, type) \
struct name { \
	struct type *sqh_first;	/* first element */ \
	struct type **sqh_last;	/* addr of last next element */ \
}

#define SIMPLEQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).sqh_first }

#define SIMPLEQ_ENTRY(type) \
struct { \
	struct type *sqe_next;	/* next element */ \
}

/*
 * Simple queue access methods.
 */
#define SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define SIMPLEQ_END(head)		NULL
#define SIMPLEQ_EMPTY(head)		((head)->sqh_first == SIMPLEQ_END(head))
#define SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)

#define SIMPLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->sqh_first); \
	    (var) != SIMPLEQ_END(head); \
	    (var) = ((var)->field.sqe_next))

#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
	for ((var) = ((head)->sqh_first); \
	    (var) != SIMPLEQ_END(head) && \
	    ((next = ((var)->field.sqe_next)), 1); \
	    (var) = (next))

/*
 * Simple queue functions.
 */
#define SIMPLEQ_INIT(head) do { \
	(head)->sqh_first = NULL; \
	(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.sqe_next = NULL; \
	*(head)->sqh_last = (elm); \
	(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
	    == NULL) \
		(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
	if ((head)->sqh_first == (elm)) { \
		SIMPLEQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->sqh_first; \
		while (curelm->field.sqe_next != (elm)) \
			curelm = curelm->field.sqe_next; \
		if ((curelm->field.sqe_next = \
		    curelm->field.sqe_next->field.sqe_next) == NULL) \
			(head)->sqh_last = &(curelm)->field.sqe_next; \
	} \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_CONCAT(head1, head2) do { \
	if (!SIMPLEQ_EMPTY((head2))) { \
		*(head1)->sqh_last = (head2)->sqh_first; \
		(head1)->sqh_last = (head2)->sqh_last; \
		SIMPLEQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)

#define SIMPLEQ_LAST(head, type, field) \
	(SIMPLEQ_EMPTY((head)) ? \
		NULL : \
		((struct type *)(void *) \
		((char *)((head)->sqh_last) - offsetof(struct type, field))))
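
/*
 * Example (illustrative sketch only): a FIFO work queue of hypothetical
 * "struct job" nodes using the SIMPLEQ_* macros above.  All names are
 * placeholders chosen for this example.
 *
 *	struct job {
 *		int id;
 *		SIMPLEQ_ENTRY(job) jobs;
 *	} *j;
 *	SIMPLEQ_HEAD(jobhead, job) head = SIMPLEQ_HEAD_INITIALIZER(head);
 *
 *	j = malloc(sizeof(*j));
 *	j->id = 1;
 *	SIMPLEQ_INSERT_TAIL(&head, j, jobs);
 *	while (!SIMPLEQ_EMPTY(&head)) {
 *		j = SIMPLEQ_FIRST(&head);
 *		SIMPLEQ_REMOVE_HEAD(&head, jobs);
 *		free(j);
 *	}
 */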

/*
 * Tail queue definitions.
 */
#define _TAILQ_HEAD(name, type, qual) \
struct name { \
	qual type *tqh_first;		/* first element */ \
	qual type *qual *tqh_last;	/* addr of last next element */ \
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define TAILQ_HEAD_INITIALIZER(head) \
	{ TAILQ_END(head), &(head).tqh_first }

#define _TAILQ_ENTRY(type, qual) \
struct { \
	qual type *tqe_next;		/* next element */ \
	qual type *qual *tqe_prev;	/* address of previous next element */ \
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue access methods.
 */
#define TAILQ_FIRST(head)		((head)->tqh_first)
#define TAILQ_END(head)			(NULL)
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#define TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_EMPTY(head)		(TAILQ_FIRST(head) == TAILQ_END(head))


#define TAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->tqh_first); \
	    (var) != TAILQ_END(head); \
	    (var) = ((var)->field.tqe_next))

#define TAILQ_FOREACH_SAFE(var, head, field, next) \
	for ((var) = ((head)->tqh_first); \
	    (var) != TAILQ_END(head) && \
	    ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
	    (var) != TAILQ_END(head); \
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
	for ((var) = TAILQ_LAST((head), headname); \
	    (var) != TAILQ_END(head) && \
	    ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))

/*
 * Tail queue functions.
 */
#if defined(QUEUEDEBUG)
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
	if ((head)->tqh_first && \
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
		QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
	if (*(head)->tqh_last != NULL) \
		QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_OP(elm, field) \
	if ((elm)->field.tqe_next && \
	    (elm)->field.tqe_next->field.tqe_prev != \
	    &(elm)->field.tqe_next) \
		QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
		    __FILE__, __LINE__); \
	if (*(elm)->field.tqe_prev != (elm)) \
		QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
	if ((elm)->field.tqe_next == NULL && \
	    (head)->tqh_last != &(elm)->field.tqe_next) \
		QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d", \
		    (head), (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
	(elm)->field.tqe_next = (void *)1L; \
	(elm)->field.tqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define QUEUEDEBUG_TAILQ_OP(elm, field)
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define TAILQ_INIT(head) do { \
	(head)->tqh_first = TAILQ_END(head); \
	(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

#define TAILQ_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
	if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head)) \
		(head)->tqh_first->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(head)->tqh_first = (elm); \
	(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

#define TAILQ_INSERT_TAIL(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
	(elm)->field.tqe_next = TAILQ_END(head); \
	(elm)->field.tqe_prev = (head)->tqh_last; \
	*(head)->tqh_last = (elm); \
	(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	QUEUEDEBUG_TAILQ_OP((listelm), field) \
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
	    TAILQ_END(head)) \
		(elm)->field.tqe_next->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(listelm)->field.tqe_next = (elm); \
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	QUEUEDEBUG_TAILQ_OP((listelm), field) \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define TAILQ_REMOVE(head, elm, field) do { \
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
	QUEUEDEBUG_TAILQ_OP((elm), field) \
	if (((elm)->field.tqe_next) != TAILQ_END(head)) \
		(elm)->field.tqe_next->field.tqe_prev = \
		    (elm)->field.tqe_prev; \
	else \
		(head)->tqh_last = (elm)->field.tqe_prev; \
	*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
} while (/*CONSTCOND*/0)

#define TAILQ_REPLACE(head, elm, elm2, field) do { \
	if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
	    TAILQ_END(head)) \
		(elm2)->field.tqe_next->field.tqe_prev = \
		    &(elm2)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm2)->field.tqe_next; \
	(elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
	*(elm2)->field.tqe_prev = (elm2); \
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
} while (/*CONSTCOND*/0)

#define TAILQ_CONCAT(head1, head2, field) do { \
	if (!TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		TAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)
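
/*
 * Example (illustrative sketch only): a tail queue of hypothetical
 * "struct entry" nodes, traversed in both directions.  The head structure
 * must be given a tag (here "tailhead") so that TAILQ_LAST, TAILQ_PREV,
 * and the reverse iterators can cast back to it.  All names are
 * placeholders chosen for this example.
 *
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) entries;
 *	} *e;
 *	TAILQ_HEAD(tailhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
 *
 *	e = malloc(sizeof(*e));
 *	e->value = 1;
 *	TAILQ_INSERT_TAIL(&head, e, entries);
 *	TAILQ_FOREACH(e, &head, entries)
 *		printf("%d\n", e->value);
 *	TAILQ_FOREACH_REVERSE(e, &head, tailhead, entries)
 *		printf("%d\n", e->value);
 *	while ((e = TAILQ_FIRST(&head)) != NULL) {
 *		TAILQ_REMOVE(&head, e, entries);
 *		free(e);
 *	}
 */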

/*
 * Singly-linked Tail queue declarations.
 */
#define STAILQ_HEAD(name, type) \
struct name { \
	struct type *stqh_first;	/* first element */ \
	struct type **stqh_last;	/* addr of last next element */ \
}

#define STAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).stqh_first }

#define STAILQ_ENTRY(type) \
struct { \
	struct type *stqe_next;	/* next element */ \
}

/*
 * Singly-linked Tail queue access methods.
 */
#define STAILQ_FIRST(head)	((head)->stqh_first)
#define STAILQ_END(head)	NULL
#define STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
#define STAILQ_EMPTY(head)	(STAILQ_FIRST(head) == STAILQ_END(head))

/*
 * Singly-linked Tail queue functions.
 */
#define STAILQ_INIT(head) do { \
	(head)->stqh_first = NULL; \
	(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)

#define STAILQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(head)->stqh_first = (elm); \
} while (/*CONSTCOND*/0)

#define STAILQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.stqe_next = NULL; \
	*(head)->stqh_last = (elm); \
	(head)->stqh_last = &(elm)->field.stqe_next; \
} while (/*CONSTCOND*/0)

#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(listelm)->field.stqe_next = (elm); \
} while (/*CONSTCOND*/0)

#define STAILQ_REMOVE_HEAD(head, field) do { \
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)

#define STAILQ_REMOVE(head, elm, type, field) do { \
	if ((head)->stqh_first == (elm)) { \
		STAILQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->stqh_first; \
		while (curelm->field.stqe_next != (elm)) \
			curelm = curelm->field.stqe_next; \
		if ((curelm->field.stqe_next = \
		    curelm->field.stqe_next->field.stqe_next) == NULL) \
			(head)->stqh_last = &(curelm)->field.stqe_next; \
	} \
} while (/*CONSTCOND*/0)

#define STAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->stqh_first); \
	    (var); \
	    (var) = ((var)->field.stqe_next))

#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = STAILQ_FIRST((head)); \
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))

#define STAILQ_CONCAT(head1, head2) do { \
	if (!STAILQ_EMPTY((head2))) { \
		*(head1)->stqh_last = (head2)->stqh_first; \
		(head1)->stqh_last = (head2)->stqh_last; \
		STAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)

#define STAILQ_LAST(head, type, field) \
	(STAILQ_EMPTY((head)) ? \
		NULL : \
		((struct type *)(void *) \
		((char *)((head)->stqh_last) - offsetof(struct type, field))))
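
/*
 * Example (illustrative sketch only): STAILQ is used much like SIMPLEQ
 * above, differing mainly in the member and macro prefix.  All names are
 * placeholders chosen for this example.
 *
 *	struct job {
 *		int id;
 *		STAILQ_ENTRY(job) jobs;
 *	} *j;
 *	STAILQ_HEAD(jobhead, job) head = STAILQ_HEAD_INITIALIZER(head);
 *
 *	j = malloc(sizeof(*j));
 *	STAILQ_INSERT_TAIL(&head, j, jobs);
 *	while (!STAILQ_EMPTY(&head)) {
 *		j = STAILQ_FIRST(&head);
 *		STAILQ_REMOVE_HEAD(&head, jobs);
 *		free(j);
 *	}
 */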


#ifndef _KERNEL
/*
 * Circular queue definitions. Do not use. We still keep the macros
 * for compatibility, but because of pointer aliasing issues their use
 * is discouraged!
 */

/*
 * __launder_type(): We use this ugly hack to work around the compiler
 * noticing that two types may not alias each other and eliding tests in code.
 * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
 * 'struct type *' (see CIRCLEQ_HEAD()).  Modern compilers (such as GCC
 * 4.8) declare these comparisons as always false, causing the code to
 * not run as designed.
 *
 * This hack is only to be used for comparisons and thus can be fully const.
 * Do not use for assignment.
 *
 * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
 * this by changing the head/tail sentinel values, but see the note above
 * this one.
 */
static __inline const void * __launder_type(const void *);
static __inline const void *
__launder_type(const void *__x)
{
	__asm __volatile("" : "+r" (__x));
	return __x;
}

#if defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
	if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
	    (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
		QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
		    __FILE__, __LINE__); \
	if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
	    (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
		QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
		    __FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
		if ((head)->cqh_last != (elm)) \
			QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
			    (elm), __FILE__, __LINE__); \
	} else { \
		if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
			QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
			    (elm), __FILE__, __LINE__); \
	} \
	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
		if ((head)->cqh_first != (elm)) \
			QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
			    (elm), __FILE__, __LINE__); \
	} else { \
		if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
			QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
			    (elm), __FILE__, __LINE__); \
	}
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
	(elm)->field.cqe_next = (void *)1L; \
	(elm)->field.cqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define CIRCLEQ_HEAD(name, type) \
struct name { \
	struct type *cqh_first;		/* first element */ \
	struct type *cqh_last;		/* last element */ \
}

#define CIRCLEQ_HEAD_INITIALIZER(head) \
	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }

#define CIRCLEQ_ENTRY(type) \
struct { \
	struct type *cqe_next;		/* next element */ \
	struct type *cqe_prev;		/* previous element */ \
}

/*
 * Circular queue functions.
 */
#define CIRCLEQ_INIT(head) do { \
	(head)->cqh_first = CIRCLEQ_END(head); \
	(head)->cqh_last = CIRCLEQ_END(head); \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
	(elm)->field.cqe_next = (listelm)->field.cqe_next; \
	(elm)->field.cqe_prev = (listelm); \
	if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
		(head)->cqh_last = (elm); \
	else \
		(listelm)->field.cqe_next->field.cqe_prev = (elm); \
	(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
	(elm)->field.cqe_next = (listelm); \
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
	if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
		(head)->cqh_first = (elm); \
	else \
		(listelm)->field.cqe_prev->field.cqe_next = (elm); \
	(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	(elm)->field.cqe_next = (head)->cqh_first; \
	(elm)->field.cqe_prev = CIRCLEQ_END(head); \
	if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
		(head)->cqh_last = (elm); \
	else \
		(head)->cqh_first->field.cqe_prev = (elm); \
	(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	(elm)->field.cqe_next = CIRCLEQ_END(head); \
	(elm)->field.cqe_prev = (head)->cqh_last; \
	if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
		(head)->cqh_first = (elm); \
	else \
		(head)->cqh_last->field.cqe_next = (elm); \
	(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_REMOVE(head, elm, field) do { \
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
		(head)->cqh_last = (elm)->field.cqe_prev; \
	else \
		(elm)->field.cqe_next->field.cqe_prev = \
		    (elm)->field.cqe_prev; \
	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
		(head)->cqh_first = (elm)->field.cqe_next; \
	else \
		(elm)->field.cqe_prev->field.cqe_next = \
		    (elm)->field.cqe_next; \
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
} while (/*CONSTCOND*/0)

#define CIRCLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->cqh_first); \
	    (var) != CIRCLEQ_ENDC(head); \
	    (var) = ((var)->field.cqe_next))

#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
	for ((var) = ((head)->cqh_last); \
	    (var) != CIRCLEQ_ENDC(head); \
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define CIRCLEQ_LAST(head)		((head)->cqh_last)
/* For comparisons */
#define CIRCLEQ_ENDC(head)		(__launder_type(head))
/* For assignments */
#define CIRCLEQ_END(head)		((void *)(head))
#define CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
#define CIRCLEQ_EMPTY(head) \
	(CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))

#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
	(((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
	    ? ((head)->cqh_first) \
	    : (elm->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
	(((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
	    ? ((head)->cqh_last) \
	    : (elm->field.cqe_prev))
#endif /* !_KERNEL */

#endif /* !_SYS_QUEUE_H_ */