author     Timo Teräs <timo.teras@iki.fi>  2014-12-11 11:33:05 +0200
committer  Timo Teräs <timo.teras@iki.fi>  2014-12-12 11:36:49 +0200
commit     e3725c0af137717d6883265a92db3838900b5cee (patch)
tree       2268ab2e8e8481dad26e82be47f82f93276c2558 /main/libc-dev
parent     e6d8679e165be21b45d24d026d1e9327d4f3ac07 (diff)
main/libc-dev: split bsd compatibility headers to separate subpkg
also refresh sys/queue.h and add sys/tree.h
Diffstat (limited to 'main/libc-dev')
-rw-r--r--  main/libc-dev/APKBUILD     |  35
-rw-r--r--  main/libc-dev/sys-queue.h  | 643
-rw-r--r--  main/libc-dev/sys-tree.h   | 761
3 files changed, 1155 insertions, 284 deletions
diff --git a/main/libc-dev/APKBUILD b/main/libc-dev/APKBUILD
index 5d214ad605..0aef1840d8 100644
--- a/main/libc-dev/APKBUILD
+++ b/main/libc-dev/APKBUILD
@@ -1,7 +1,7 @@
# Contributor: Natanael Copa <ncopa@alpinelinux.org>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=libc-dev
-pkgver=0.6
+pkgver=0.7
pkgrel=0
pkgdesc="Meta package to pull in correct libc"
url="http://alpinelinux.org"
@@ -11,9 +11,10 @@ makedepends_build=" "
makedepends_host=" "
makedepends="$makedepends_build $makedepends_host"
depends="$CLIBC-dev"
-subpackages="libc-utils:utils"
+subpackages="libc-utils:utils bsd-compat-headers:bsdcompat"
source="sys-cdefs.h
sys-queue.h
+ sys-tree.h
"
_builddir="$srcdir"/$pkgname-$pkgver
@@ -29,13 +30,6 @@ build() {
package() {
mkdir -p "$pkgdir"
- case "$CLIBC" in
- musl)
- depends="$depends linux-headers"
- install -D "$srcdir"/sys-cdefs.h "$pkgdir"/usr/include/sys/cdefs.h
- install -D "$srcdir"/sys-queue.h "$pkgdir"/usr/include/sys/queue.h
- ;;
- esac
}
utils() {
@@ -44,9 +38,26 @@ utils() {
mkdir -p "$subpkgdir"
}
+bsdcompat() {
+ pkgdesc="BSD compatibility headers (cdefs, queue, tree)"
+ depends=""
+
+ mkdir -p "$subpkgdir"
+ case "$CLIBC" in
+ musl)
+ install -D "$srcdir"/sys-cdefs.h "$subpkgdir"/usr/include/sys/cdefs.h
+ install -D "$srcdir"/sys-queue.h "$subpkgdir"/usr/include/sys/queue.h
+ install -D "$srcdir"/sys-tree.h "$subpkgdir"/usr/include/sys/tree.h
+ ;;
+ esac
+}
+
md5sums="5b125c4b64aef6d89766f657709cf0e4 sys-cdefs.h
-555be8a6d1523a2272f4a04e4e936d5e sys-queue.h"
+c2784afaed83839546ba733e8a0435c7 sys-queue.h
+8d0bcd3f4b10dfc5c74d5bb4c0d6722b sys-tree.h"
sha256sums="30bb6d7e0e0b61fcd95d830c376c829a614bce4683c1b97e06c201ec2c6e839a sys-cdefs.h
-e4db36f500692efb12a7b97112e0f5c0985f13310a052d8e9f3a4f7648693711 sys-queue.h"
+3659cd137c320991a78413dd370a92fd18e0a8bc36d017d554f08677a37d7d5a sys-queue.h
+e1e498a79bf160a5766fa560f2b07b206fe89fe21a62600c77d72e00a6992f92 sys-tree.h"
sha512sums="8c3fddd73b696a38e633953715c79c47703739be27ee085fc6c960a57b6746ca05bf6406f7e6126cc1a13204254fd5830afb566624e1f298f4d6b58216013c28 sys-cdefs.h
-af87b810fbc4e7ed97b20412025ff5766e503fec163b27c81415dc0377b4f80f39548ebf8b6403e0dc5d35eddf8d08ba2a8dc549b3c8c1bf503c9da7c24b523b sys-queue.h"
+19cec4d39310285eb49ad1beae86884080e0152f9477757513050211b31f95ee7283a64ae95939683df09231cfb021ace816b26981ad05f4fff0a39519f935fe sys-queue.h
+07cb70f2f0ddb31e23dd913c6f561fc9885667c5803fdf3a559676c99d08834b4104589bacb5d17b4a0b379c68c81a1cf3173832b3da33a7b936fa7b93706844 sys-tree.h"
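
Illustrative note, not part of the patch: once the new bsd-compat-headers subpackage is installed on a musl system (apk add bsd-compat-headers), the BSD-style macros become available through the headers it drops into /usr/include/sys/. A minimal sketch using <sys/queue.h>; the struct and variable names are hypothetical:

#include <stdio.h>
#include <sys/queue.h>	/* installed by bsd-compat-headers on musl */

struct entry {
	const char *name;
	SLIST_ENTRY(entry) link;	/* singly-linked list linkage */
};

SLIST_HEAD(entryhead, entry);

int
main(void)
{
	struct entryhead head = SLIST_HEAD_INITIALIZER(head);
	struct entry a = { .name = "first" };
	struct entry b = { .name = "second" };
	struct entry *e;

	SLIST_INSERT_HEAD(&head, &b, link);
	SLIST_INSERT_HEAD(&head, &a, link);

	SLIST_FOREACH(e, &head, link)
		printf("%s\n", e->name);	/* prints "first", then "second" */
	return 0;
}
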
diff --git a/main/libc-dev/sys-queue.h b/main/libc-dev/sys-queue.h
index 8632eb8335..99d01a55b1 100644
--- a/main/libc-dev/sys-queue.h
+++ b/main/libc-dev/sys-queue.h
@@ -1,4 +1,4 @@
-/* $NetBSD: queue.h,v 1.54 2013/04/10 22:22:16 christos Exp $ */
+/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -34,8 +34,6 @@
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
-#include <stddef.h>
-
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
@@ -83,102 +81,24 @@
*/
/*
- * List definitions.
- */
-#define LIST_HEAD(name, type) \
-struct name { \
- struct type *lh_first; /* first element */ \
-}
-
-#define LIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define LIST_ENTRY(type) \
-struct { \
- struct type *le_next; /* next element */ \
- struct type **le_prev; /* address of previous next element */ \
-}
-
-/*
- * List functions.
+ * Include the definition of NULL only on NetBSD because sys/null.h
+ * is not available elsewhere. This conditional makes the header
+ * portable and it can simply be dropped verbatim into any system.
+ * The caveat is that on other systems some other header
+ * must provide NULL before the macros can be used.
*/
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
-#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
- if ((head)->lh_first && \
- (head)->lh_first->field.le_prev != &(head)->lh_first) \
- panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
-#define QUEUEDEBUG_LIST_OP(elm, field) \
- if ((elm)->field.le_next && \
- (elm)->field.le_next->field.le_prev != \
- &(elm)->field.le_next) \
- panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
- if (*(elm)->field.le_prev != (elm)) \
- panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
-#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
- (elm)->field.le_next = (void *)1L; \
- (elm)->field.le_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
-#define QUEUEDEBUG_LIST_OP(elm, field)
-#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
+#ifdef __NetBSD__
+#include <sys/null.h>
#endif
-#define LIST_INIT(head) do { \
- (head)->lh_first = LIST_END(head); \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_AFTER(listelm, elm, field) do { \
- QUEUEDEBUG_LIST_OP((listelm), field) \
- if (((elm)->field.le_next = (listelm)->field.le_next) != \
- LIST_END(head)) \
- (listelm)->field.le_next->field.le_prev = \
- &(elm)->field.le_next; \
- (listelm)->field.le_next = (elm); \
- (elm)->field.le_prev = &(listelm)->field.le_next; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
- QUEUEDEBUG_LIST_OP((listelm), field) \
- (elm)->field.le_prev = (listelm)->field.le_prev; \
- (elm)->field.le_next = (listelm); \
- *(listelm)->field.le_prev = (elm); \
- (listelm)->field.le_prev = &(elm)->field.le_next; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_HEAD(head, elm, field) do { \
- QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
- if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
- (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
- (head)->lh_first = (elm); \
- (elm)->field.le_prev = &(head)->lh_first; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_REMOVE(elm, field) do { \
- QUEUEDEBUG_LIST_OP((elm), field) \
- if ((elm)->field.le_next != NULL) \
- (elm)->field.le_next->field.le_prev = \
- (elm)->field.le_prev; \
- *(elm)->field.le_prev = (elm)->field.le_next; \
- QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
-} while (/*CONSTCOND*/0)
-
-#define LIST_FOREACH(var, head, field) \
- for ((var) = ((head)->lh_first); \
- (var); \
- (var) = ((var)->field.le_next))
-
-#define LIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = LIST_FIRST((head)); \
- (var) && ((tvar) = LIST_NEXT((var), field), 1); \
- (var) = (tvar))
-/*
- * List access methods.
- */
-#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
-#define LIST_END(head) NULL
-#define LIST_FIRST(head) ((head)->lh_first)
-#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-
+#if defined(QUEUEDEBUG)
+# if defined(_KERNEL)
+# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
+# else
+# include <err.h>
+# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
+# endif
+#endif
/*
* Singly-linked List definitions.
@@ -197,10 +117,29 @@ struct { \
}
/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; \
+ (var) != SLIST_END(head); \
+ (var) = (var)->field.sle_next)
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) != SLIST_END(head) && \
+ ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
* Singly-linked List functions.
*/
#define SLIST_INIT(head) do { \
- (head)->slh_first = NULL; \
+ (head)->slh_first = SLIST_END(head); \
} while (/*CONSTCOND*/0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
@@ -213,6 +152,11 @@ struct { \
(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)
+#define SLIST_REMOVE_AFTER(slistelm, field) do { \
+ (slistelm)->field.sle_next = \
+ SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
+} while (/*CONSTCOND*/0)
+
#define SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
@@ -230,119 +174,125 @@ struct { \
} \
} while (/*CONSTCOND*/0)
-#define SLIST_REMOVE_AFTER(slistelm, field) do { \
- (slistelm)->field.sle_next = \
- SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_FOREACH(var, head, field) \
- for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
-
-#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = SLIST_FIRST((head)); \
- (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-/*
- * Singly-linked List access methods.
- */
-#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
-#define SLIST_FIRST(head) ((head)->slh_first)
-#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
-
/*
- * Singly-linked Tail queue declarations.
+ * List definitions.
*/
-#define STAILQ_HEAD(name, type) \
+#define LIST_HEAD(name, type) \
struct name { \
- struct type *stqh_first; /* first element */ \
- struct type **stqh_last; /* addr of last next element */ \
+ struct type *lh_first; /* first element */ \
}
-#define STAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).stqh_first }
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
-#define STAILQ_ENTRY(type) \
+#define LIST_ENTRY(type) \
struct { \
- struct type *stqe_next; /* next element */ \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
}
/*
- * Singly-linked Tail queue functions.
+ * List access methods.
*/
-#define STAILQ_INIT(head) do { \
- (head)->stqh_first = NULL; \
- (head)->stqh_last = &(head)->stqh_first; \
-} while (/*CONSTCOND*/0)
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-#define STAILQ_INSERT_HEAD(head, elm, field) do { \
- if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
- (head)->stqh_last = &(elm)->field.stqe_next; \
- (head)->stqh_first = (elm); \
-} while (/*CONSTCOND*/0)
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var) != LIST_END(head); \
+ (var) = ((var)->field.le_next))
-#define STAILQ_INSERT_TAIL(head, elm, field) do { \
- (elm)->field.stqe_next = NULL; \
- *(head)->stqh_last = (elm); \
- (head)->stqh_last = &(elm)->field.stqe_next; \
-} while (/*CONSTCOND*/0)
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) != LIST_END(head) && \
+ ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
-#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
- if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
- (head)->stqh_last = &(elm)->field.stqe_next; \
- (listelm)->field.stqe_next = (elm); \
+#define LIST_MOVE(head1, head2) do { \
+ LIST_INIT((head2)); \
+ if (!LIST_EMPTY((head1))) { \
+ (head2)->lh_first = (head1)->lh_first; \
+ LIST_INIT((head1)); \
+ } \
} while (/*CONSTCOND*/0)
-#define STAILQ_REMOVE_HEAD(head, field) do { \
- if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
- (head)->stqh_last = &(head)->stqh_first; \
-} while (/*CONSTCOND*/0)
+/*
+ * List functions.
+ */
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
+ if ((head)->lh_first && \
+ (head)->lh_first->field.le_prev != &(head)->lh_first) \
+ QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_LIST_OP(elm, field) \
+ if ((elm)->field.le_next && \
+ (elm)->field.le_next->field.le_prev != \
+ &(elm)->field.le_next) \
+ QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
+ __FILE__, __LINE__); \
+ if (*(elm)->field.le_prev != (elm)) \
+ QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
+ (elm)->field.le_next = (void *)1L; \
+ (elm)->field.le_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
+#define QUEUEDEBUG_LIST_OP(elm, field)
+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
+#endif
-#define STAILQ_REMOVE(head, elm, type, field) do { \
- if ((head)->stqh_first == (elm)) { \
- STAILQ_REMOVE_HEAD((head), field); \
- } else { \
- struct type *curelm = (head)->stqh_first; \
- while (curelm->field.stqe_next != (elm)) \
- curelm = curelm->field.stqe_next; \
- if ((curelm->field.stqe_next = \
- curelm->field.stqe_next->field.stqe_next) == NULL) \
- (head)->stqh_last = &(curelm)->field.stqe_next; \
- } \
+#define LIST_INIT(head) do { \
+ (head)->lh_first = LIST_END(head); \
} while (/*CONSTCOND*/0)
-#define STAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->stqh_first); \
- (var); \
- (var) = ((var)->field.stqe_next))
-
-#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = STAILQ_FIRST((head)); \
- (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QUEUEDEBUG_LIST_OP((listelm), field) \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != \
+ LIST_END(head)) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
-#define STAILQ_CONCAT(head1, head2) do { \
- if (!STAILQ_EMPTY((head2))) { \
- *(head1)->stqh_last = (head2)->stqh_first; \
- (head1)->stqh_last = (head2)->stqh_last; \
- STAILQ_INIT((head2)); \
- } \
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QUEUEDEBUG_LIST_OP((listelm), field) \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
-#define STAILQ_LAST(head, type, field) \
- (STAILQ_EMPTY((head)) ? \
- NULL : \
- ((struct type *)(void *) \
- ((char *)((head)->stqh_last) - offsetof(struct type, field))))
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
+ if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
-/*
- * Singly-linked Tail queue access methods.
- */
-#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
-#define STAILQ_FIRST(head) ((head)->stqh_first)
-#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+#define LIST_REMOVE(elm, field) do { \
+ QUEUEDEBUG_LIST_OP((elm), field) \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
+} while (/*CONSTCOND*/0)
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
+} while (/*CONSTCOND*/0)
/*
* Simple queue definitions.
@@ -362,6 +312,25 @@ struct { \
}
/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = ((var)->field.sqe_next))
+
+#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) != SIMPLEQ_END(head) && \
+ ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+/*
* Simple queue functions.
*/
#define SIMPLEQ_INIT(head) do { \
@@ -392,6 +361,12 @@ struct { \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+ == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
if ((head)->sqh_first == (elm)) { \
SIMPLEQ_REMOVE_HEAD((head), field); \
@@ -405,16 +380,6 @@ struct { \
} \
} while (/*CONSTCOND*/0)
-#define SIMPLEQ_FOREACH(var, head, field) \
- for ((var) = ((head)->sqh_first); \
- (var); \
- (var) = ((var)->field.sqe_next))
-
-#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
- for ((var) = ((head)->sqh_first); \
- (var) && ((next = ((var)->field.sqe_next)), 1); \
- (var) = (next))
-
#define SIMPLEQ_CONCAT(head1, head2) do { \
if (!SIMPLEQ_EMPTY((head2))) { \
*(head1)->sqh_last = (head2)->sqh_first; \
@@ -430,14 +395,6 @@ struct { \
((char *)((head)->sqh_last) - offsetof(struct type, field))))
/*
- * Simple queue access methods.
- */
-#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
-#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
-#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
-
-
-/*
* Tail queue definitions.
*/
#define _TAILQ_HEAD(name, type, qual) \
@@ -458,28 +415,65 @@ struct { \
#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
/*
+ * Tail queue access methods.
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) (NULL)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
+
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var) != TAILQ_END(head); \
+ (var) = ((var)->field.tqe_next))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->tqh_first); \
+ (var) != TAILQ_END(head) && \
+ ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
+ (var) != TAILQ_END(head); \
+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) != TAILQ_END(head) && \
+ ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
+
+/*
* Tail queue functions.
*/
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
+#if defined(QUEUEDEBUG)
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
if ((head)->tqh_first && \
(head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
- panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
+ QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
+ __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
if (*(head)->tqh_last != NULL) \
- panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
+ QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
+ __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_OP(elm, field) \
if ((elm)->field.tqe_next && \
(elm)->field.tqe_next->field.tqe_prev != \
&(elm)->field.tqe_next) \
- panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
+ QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
+ __FILE__, __LINE__); \
if (*(elm)->field.tqe_prev != (elm)) \
- panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
+ QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
+ __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
if ((elm)->field.tqe_next == NULL && \
(head)->tqh_last != &(elm)->field.tqe_next) \
- panic("TAILQ_PREREMOVE head %p elm %p %s:%d", \
- (head), (elm), __FILE__, __LINE__);
+ QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
+ (head), (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
(elm)->field.tqe_next = (void *)1L; \
(elm)->field.tqe_prev = (void *)1L;
@@ -547,25 +541,17 @@ struct { \
QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
} while (/*CONSTCOND*/0)
-#define TAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->tqh_first); \
- (var) != TAILQ_END(head); \
- (var) = ((var)->field.tqe_next))
-
-#define TAILQ_FOREACH_SAFE(var, head, field, next) \
- for ((var) = ((head)->tqh_first); \
- (var) != TAILQ_END(head) && \
- ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
-
-#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
- for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
- (var) != TAILQ_END(head); \
- (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
-
-#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var) != TAILQ_END(head) && \
- ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
+ TAILQ_END(head)) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
+} while (/*CONSTCOND*/0)
#define TAILQ_CONCAT(head1, head2, field) do { \
if (!TAILQ_EMPTY(head2)) { \
@@ -577,50 +563,157 @@ struct { \
} while (/*CONSTCOND*/0)
/*
- * Tail queue access methods.
+ * Singly-linked Tail queue declarations.
*/
-#define TAILQ_EMPTY(head) ((head)->tqh_first == TAILQ_END(head))
-#define TAILQ_FIRST(head) ((head)->tqh_first)
-#define TAILQ_END(head) (NULL)
-#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first; /* first element */ \
+ struct type **stqh_last; /* addr of last next element */ \
+}
-#define TAILQ_LAST(head, headname) \
- (*(((struct headname *)((head)->tqh_last))->tqh_last))
-#define TAILQ_PREV(elm, headname, field) \
- (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue access methods.
+ */
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+#define STAILQ_END(head) NULL
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_INIT(head) do { \
+ (head)->stqh_first = NULL; \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (head)->stqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (/*CONSTCOND*/0)
+#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (listelm)->field.stqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->stqh_first == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->stqh_first; \
+ while (curelm->field.stqe_next != (elm)) \
+ curelm = curelm->field.stqe_next; \
+ if ((curelm->field.stqe_next = \
+ curelm->field.stqe_next->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(curelm)->field.stqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->stqh_first); \
+ (var); \
+ (var) = ((var)->field.stqe_next))
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->stqh_last) - offsetof(struct type, field))))
+
+
+#ifndef _KERNEL
/*
- * Circular queue definitions.
+ * Circular queue definitions. Do not use. We still keep the macros
+ * for compatibility but because of pointer aliasing issues their use
+ * is discouraged!
*/
-#if defined(_KERNEL) && defined(QUEUEDEBUG)
+
+/*
+ * __launder_type(): We use this ugly hack to work around the compiler
+ * noticing that two types may not alias each other and elide tests in code.
+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
+ * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
+ * 4.8) declare these comparisons as always false, causing the code to
+ * not run as designed.
+ *
+ * This hack is only to be used for comparisons and thus can be fully const.
+ * Do not use for assignment.
+ *
+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
+ * this by changing the head/tail sentinel values, but see the note above
+ * this one.
+ */
+static __inline const void * __launder_type(const void *);
+static __inline const void *
+__launder_type(const void *__x)
+{
+ __asm __volatile("" : "+r" (__x));
+ return __x;
+}
+
+#if defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
- if ((head)->cqh_first != (void *)(head) && \
- (head)->cqh_first->field.cqe_prev != (void *)(head)) \
- panic("CIRCLEQ head forw %p %s:%d", (head), \
+ if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
+ (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
__FILE__, __LINE__); \
- if ((head)->cqh_last != (void *)(head) && \
- (head)->cqh_last->field.cqe_next != (void *)(head)) \
- panic("CIRCLEQ head back %p %s:%d", (head), \
+ if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
+ (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
__FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
- if ((elm)->field.cqe_next == (void *)(head)) { \
+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
if ((head)->cqh_last != (elm)) \
- panic("CIRCLEQ elm last %p %s:%d", (elm), \
- __FILE__, __LINE__); \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
} else { \
if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
- panic("CIRCLEQ elm forw %p %s:%d", (elm), \
- __FILE__, __LINE__); \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
} \
- if ((elm)->field.cqe_prev == (void *)(head)) { \
+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
if ((head)->cqh_first != (elm)) \
- panic("CIRCLEQ elm first %p %s:%d", (elm), \
- __FILE__, __LINE__); \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
} else { \
if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
- panic("CIRCLEQ elm prev %p %s:%d", (elm), \
- __FILE__, __LINE__); \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
}
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
(elm)->field.cqe_next = (void *)1L; \
@@ -638,7 +731,7 @@ struct name { \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
- { (void *)&head, (void *)&head }
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
#define CIRCLEQ_ENTRY(type) \
struct { \
@@ -650,8 +743,8 @@ struct { \
* Circular queue functions.
*/
#define CIRCLEQ_INIT(head) do { \
- (head)->cqh_first = (void *)(head); \
- (head)->cqh_last = (void *)(head); \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
@@ -659,7 +752,7 @@ struct { \
QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
- if ((listelm)->field.cqe_next == (void *)(head)) \
+ if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
@@ -671,7 +764,7 @@ struct { \
QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
- if ((listelm)->field.cqe_prev == (void *)(head)) \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
@@ -681,8 +774,8 @@ struct { \
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
(elm)->field.cqe_next = (head)->cqh_first; \
- (elm)->field.cqe_prev = (void *)(head); \
- if ((head)->cqh_last == (void *)(head)) \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
@@ -691,9 +784,9 @@ struct { \
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- (elm)->field.cqe_next = (void *)(head); \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
- if ((head)->cqh_first == (void *)(head)) \
+ if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
@@ -703,12 +796,12 @@ struct { \
#define CIRCLEQ_REMOVE(head, elm, field) do { \
QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
- if ((elm)->field.cqe_next == (void *)(head)) \
+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
- if ((elm)->field.cqe_prev == (void *)(head)) \
+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
@@ -718,30 +811,36 @@ struct { \
#define CIRCLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->cqh_first); \
- (var) != (const void *)(head); \
+ (var) != CIRCLEQ_ENDC(head); \
(var) = ((var)->field.cqe_next))
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for ((var) = ((head)->cqh_last); \
- (var) != (const void *)(head); \
+ (var) != CIRCLEQ_ENDC(head); \
(var) = ((var)->field.cqe_prev))
/*
* Circular queue access methods.
*/
-#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+/* For comparisons */
+#define CIRCLEQ_ENDC(head) (__launder_type(head))
+/* For assignments */
+#define CIRCLEQ_END(head) ((void *)(head))
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
- (((elm)->field.cqe_next == (void *)(head)) \
+ (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
? ((head)->cqh_first) \
: (elm->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
- (((elm)->field.cqe_prev == (void *)(head)) \
+ (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
? ((head)->cqh_last) \
: (elm->field.cqe_prev))
+#endif /* !_KERNEL */
#endif /* !_SYS_QUEUE_H_ */
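
For illustration only (not part of the patch): the refreshed queue.h is used exactly like its BSD counterparts. The sketch below, with hypothetical names, walks a tail queue with the _SAFE iterator so each element can be removed and freed during traversal:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int value;
	TAILQ_ENTRY(item) entries;	/* tail queue linkage */
};

TAILQ_HEAD(itemhead, item);

int
main(void)
{
	struct itemhead head;
	struct item *it, *tmp;
	int i;

	TAILQ_INIT(&head);
	for (i = 0; i < 4; i++) {
		it = malloc(sizeof(*it));
		it->value = i;
		TAILQ_INSERT_TAIL(&head, it, entries);
	}

	/* The _SAFE form allows removing/freeing the current element. */
	TAILQ_FOREACH_SAFE(it, &head, entries, tmp) {
		printf("%d\n", it->value);
		TAILQ_REMOVE(&head, it, entries);
		free(it);
	}
	return 0;
}
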
diff --git a/main/libc-dev/sys-tree.h b/main/libc-dev/sys-tree.h
new file mode 100644
index 0000000000..eaea56aae3
--- /dev/null
+++ b/main/libc-dev/sys-tree.h
@@ -0,0 +1,761 @@
+/* $NetBSD: tree.h,v 1.20 2013/09/14 13:20:45 joerg Exp $ */
+/* $OpenBSD: tree.h,v 1.13 2011/07/09 00:19:45 pirofti Exp $ */
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline __unused struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __unused __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x) do {} while (/*CONSTCOND*/ 0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
+attr struct type *name##_RB_INSERT(struct name *, struct type *); \
+attr struct type *name##_RB_FIND(struct name *, struct type *); \
+attr struct type *name##_RB_NFIND(struct name *, struct type *); \
+attr struct type *name##_RB_NEXT(struct type *); \
+attr struct type *name##_RB_PREV(struct type *); \
+attr struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y) \
+ for ((x) = RB_MIN(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
+ for ((x) = RB_MAX(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#endif /* _SYS_TREE_H_ */
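
For illustration only (not part of the patch): the newly added sys/tree.h is consumed the same way as on the BSDs. A minimal red-black tree sketch; the type, field, and function names here are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <sys/tree.h>

struct node {
	RB_ENTRY(node) entry;	/* red-black tree linkage */
	int key;
};

static int
nodecmp(struct node *a, struct node *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

RB_HEAD(nodetree, node);
RB_PROTOTYPE(nodetree, node, entry, nodecmp)
RB_GENERATE(nodetree, node, entry, nodecmp)

int
main(void)
{
	struct nodetree head = RB_INITIALIZER(&head);
	int keys[] = { 5, 1, 3 };
	struct node *n;
	size_t i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->key = keys[i];
		RB_INSERT(nodetree, &head, n);	/* NULL on success, else the colliding node */
	}

	RB_FOREACH(n, nodetree, &head)
		printf("%d\n", n->key);		/* prints 1 3 5, in key order */
	return 0;
}
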