musl does not provide a sys/queue.h implementation. Borrow queue.h from
the NetBSD project:
http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/queue.h?rev=1.68

Upstream-Status: Inappropriate [musl specific]

Signed-off-by: Jörg Krause
Signed-off-by: Maxin B. John
---
diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c libtirpc-1.0.1/src/clnt_bcast.c
--- libtirpc-1.0.1-orig/src/clnt_bcast.c 2015-10-30 17:15:14.000000000 +0200
+++ libtirpc-1.0.1/src/clnt_bcast.c 2015-12-21 17:03:52.066008311 +0200
@@ -40,7 +40,6 @@
  */
 #include <sys/socket.h>
 #include <sys/types.h>
-#include <sys/queue.h>

 #include <net/if.h>
 #include <netinet/in.h>
@@ -62,6 +61,7 @@
 #include <err.h>
 #include <string.h>

+#include "queue.h"
 #include "rpc_com.h"
 #include "debug.h"

diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c.orig libtirpc-1.0.1/src/clnt_bcast.c.orig
--- libtirpc-1.0.1-orig/src/clnt_bcast.c.orig 1970-01-01 02:00:00.000000000 +0200
+++ libtirpc-1.0.1/src/clnt_bcast.c.orig 2015-10-30 17:15:14.000000000 +0200
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2009, Sun Microsystems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of Sun Microsystems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+/*
+ * clnt_bcast.c
+ * Client interface to broadcast service.
+ *
+ * Copyright (C) 1988, Sun Microsystems, Inc.
+ *
+ * The following is kludged-up support for simple rpc broadcasts.
+ * Someday a large, complicated system will replace these routines.
+ */
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <net/if.h>
+#include <netinet/in.h>
+#include <ifaddrs.h>
+#include <poll.h>
+#include <rpc/rpc.h>
+#ifdef PORTMAP
+#include <rpc/pmap_prot.h>
+#include <rpc/pmap_clnt.h>
+#include <rpc/pmap_rmt.h>
+#endif /* PORTMAP */
+#include <rpc/nettype.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <netdb.h>
+#include <err.h>
+#include <string.h>
+
+#include "rpc_com.h"
+#include "debug.h"
+
+#define MAXBCAST 20 /* Max no of broadcasting transports */
+#define INITTIME 4000 /* Time to wait initially */
+#define WAITTIME 8000 /* Maximum time to wait */
+
+# define POLLRDNORM 0x040 /* Normal data may be read. */
+# define POLLRDBAND 0x080 /* Priority data may be read. */
+
+
+
+/*
+ * If nettype is NULL, it broadcasts on all the available
+ * datagram_n transports. 
May potentially lead to broadacst storms + * and hence should be used with caution, care and courage. + * + * The current parameter xdr packet size is limited by the max tsdu + * size of the transport. If the max tsdu size of any transport is + * smaller than the parameter xdr packet, then broadcast is not + * sent on that transport. + * + * Also, the packet size should be less the packet size of + * the data link layer (for ethernet it is 1400 bytes). There is + * no easy way to find out the max size of the data link layer and + * we are assuming that the args would be smaller than that. + * + * The result size has to be smaller than the transport tsdu size. + * + * If PORTMAP has been defined, we send two packets for UDP, one for + * rpcbind and one for portmap. For those machines which support + * both rpcbind and portmap, it will cause them to reply twice, and + * also here it will get two responses ... inefficient and clumsy. + */ + +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_FIRST(head) ((head)->tqh_first) + + +struct broadif { + int index; + struct sockaddr_storage broadaddr; + TAILQ_ENTRY(broadif) link; +}; + +typedef TAILQ_HEAD(, broadif) broadlist_t; + +int __rpc_getbroadifs(int, int, int, broadlist_t *); +void __rpc_freebroadifs(broadlist_t *); +int __rpc_broadenable(int, int, struct broadif *); + +int __rpc_lowvers = 0; + +int +__rpc_getbroadifs(int af, int proto, int socktype, broadlist_t *list) +{ + int count = 0; + struct broadif *bip; + struct ifaddrs *ifap, *ifp; +#ifdef INET6 + struct sockaddr_in6 *sin6; +#endif + struct sockaddr_in *sin; + struct addrinfo hints, *res; + + if (getifaddrs(&ifp) < 0) + return 0; + + memset(&hints, 0, sizeof hints); + + hints.ai_family = af; + hints.ai_protocol = proto; + hints.ai_socktype = socktype; + + if (getaddrinfo(NULL, "sunrpc", &hints, &res) != 0) + return 0; + + for (ifap = ifp; ifap != NULL; ifap = ifap->ifa_next) { + if (ifap->ifa_addr == NULL || /* happens for eg tuntap devices */ + ifap->ifa_addr->sa_family != af || + !(ifap->ifa_flags & IFF_UP)) + continue; + bip = (struct broadif *)malloc(sizeof *bip); + if (bip == NULL) + break; + bip->index = if_nametoindex(ifap->ifa_name); + if ( +#ifdef INET6 + af != AF_INET6 && +#endif + (ifap->ifa_flags & IFF_BROADCAST) && + ifap->ifa_broadaddr) { + /* memcpy(&bip->broadaddr, ifap->ifa_broadaddr, + (size_t)ifap->ifa_broadaddr->sa_len);*/ + memcpy(&bip->broadaddr, ifap->ifa_broadaddr, + sizeof(bip->broadaddr)); + sin = (struct sockaddr_in *)(void *)&bip->broadaddr; + sin->sin_port = + ((struct sockaddr_in *) + (void *)res->ai_addr)->sin_port; + } else +#ifdef INET6 + if (af == AF_INET6 && (ifap->ifa_flags & IFF_MULTICAST)) { + sin6 = (struct sockaddr_in6 *)(void *)&bip->broadaddr; + inet_pton(af, RPCB_MULTICAST_ADDR, &sin6->sin6_addr); + sin6->sin6_family = af; + sin6->sin6_port = + ((struct sockaddr_in6 *) + (void *)res->ai_addr)->sin6_port; + sin6->sin6_scope_id = bip->index; + } else +#endif + { + free(bip); + continue; + } + TAILQ_INSERT_TAIL(list, bip, link); + count++; + } + freeifaddrs(ifp); + freeaddrinfo(res); + + return count; +} + +void +__rpc_freebroadifs(broadlist_t *list) +{ + struct broadif *bip, *next; + + bip = TAILQ_FIRST(list); + + while (bip != NULL) { + next = TAILQ_NEXT(bip, link); + free(bip); + bip = next; + } +} + +int +/*ARGSUSED*/ +__rpc_broadenable(int af, int s, struct broadif *bip) +{ + int o = 1; + +#if 0 + if (af == AF_INET6) { + fprintf(stderr, "set v6 multicast if to %d\n", bip->index); + if (setsockopt(s, IPPROTO_IPV6, 
IPV6_MULTICAST_IF, &bip->index, + sizeof bip->index) < 0) + return -1; + } else +#endif + if (setsockopt(s, SOL_SOCKET, SO_BROADCAST, &o, sizeof o) < 0) + return -1; + + return 0; +} + +/* + * Some rpcbind implementations use an IPv6 socket to serve both + * IPv4 and IPv6 messages, but neglect to check for the caller's + * address family when sending broadcast replies. These rpcbind + * implementations return an IPv6 address in reply to an IPv4 + * broadcast. We can either ignore them, or try to patch them up. + */ +static struct netbuf * +__ipv6v4_fixup(struct sockaddr_storage *ss, const char *uaddr) +{ + struct sockaddr_in sin; + struct netbuf *np; + + /* ss is the remote rpcbind server's address */ + if (ss->ss_family != AF_INET) + return NULL; + memcpy(&sin, ss, sizeof(sin)); + + np = __rpc_uaddr2taddr_af(AF_INET6, uaddr); + if (np == NULL) + return NULL; + + /* Overwrite the port with that of the service we + * wanted to talk to. */ + sin.sin_port = ((struct sockaddr_in6 *) np)->sin6_port; + + /* We know netbuf holds a sockaddr_in6, so it can easily + * hold a sockaddr_in as well. */ + memcpy(np->buf, &sin, sizeof(sin)); + np->len = sizeof(sin); + + return np; +} + +enum clnt_stat +rpc_broadcast_exp(prog, vers, proc, xargs, argsp, xresults, resultsp, + eachresult, inittime, waittime, nettype) + rpcprog_t prog; /* program number */ + rpcvers_t vers; /* version number */ + rpcproc_t proc; /* procedure number */ + xdrproc_t xargs; /* xdr routine for args */ + caddr_t argsp; /* pointer to args */ + xdrproc_t xresults; /* xdr routine for results */ + caddr_t resultsp; /* pointer to results */ + resultproc_t eachresult; /* call with each result obtained */ + int inittime; /* how long to wait initially */ + int waittime; /* maximum time to wait */ + const char *nettype; /* transport type */ +{ + enum clnt_stat stat = RPC_SUCCESS; /* Return status */ + XDR xdr_stream; /* XDR stream */ + XDR *xdrs = &xdr_stream; + struct rpc_msg msg; /* RPC message */ + struct timeval t; + char *outbuf = NULL; /* Broadcast msg buffer */ + char *inbuf = NULL; /* Reply buf */ + int inlen; + u_int maxbufsize = 0; + AUTH *sys_auth = authunix_create_default(); + int i; + void *handle; + char uaddress[1024]; /* A self imposed limit */ + char *uaddrp = uaddress; + int pmap_reply_flag; /* reply recvd from PORTMAP */ + /* An array of all the suitable broadcast transports */ + struct { + int fd; /* File descriptor */ + int af; + int proto; + struct netconfig *nconf; /* Netconfig structure */ + u_int asize; /* Size of the addr buf */ + u_int dsize; /* Size of the data buf */ + struct sockaddr_storage raddr; /* Remote address */ + broadlist_t nal; + } fdlist[MAXBCAST]; + struct pollfd pfd[MAXBCAST]; + size_t fdlistno = 0; + struct r_rpcb_rmtcallargs barg; /* Remote arguments */ + struct r_rpcb_rmtcallres bres; /* Remote results */ + size_t outlen; + struct netconfig *nconf; + int msec; + int pollretval; + int fds_found; + +#ifdef PORTMAP + size_t outlen_pmap = 0; + u_long port; /* Remote port number */ + int pmap_flag = 0; /* UDP exists ? */ + char *outbuf_pmap = NULL; + struct rmtcallargs barg_pmap; /* Remote arguments */ + struct rmtcallres bres_pmap; /* Remote results */ + u_int udpbufsz = 0; +#endif /* PORTMAP */ + + if (sys_auth == NULL) { + return (RPC_SYSTEMERROR); + } + /* + * initialization: create a fd, a broadcast address, and send the + * request on the broadcast transport. + * Listen on all of them and on replies, call the user supplied + * function. 
+ */ + + if (nettype == NULL) + nettype = "datagram_n"; + if ((handle = __rpc_setconf(nettype)) == NULL) { + return (RPC_UNKNOWNPROTO); + } + while ((nconf = __rpc_getconf(handle)) != NULL) { + int fd; + struct __rpc_sockinfo si; + + if (nconf->nc_semantics != NC_TPI_CLTS) + continue; + if (fdlistno >= MAXBCAST) + break; /* No more slots available */ + if (!__rpc_nconf2sockinfo(nconf, &si)) + continue; + + TAILQ_INIT(&fdlist[fdlistno].nal); + if (__rpc_getbroadifs(si.si_af, si.si_proto, si.si_socktype, + &fdlist[fdlistno].nal) == 0) + continue; + + fd = socket(si.si_af, si.si_socktype, si.si_proto); + if (fd < 0) { + stat = RPC_CANTSEND; + continue; + } + fdlist[fdlistno].af = si.si_af; + fdlist[fdlistno].proto = si.si_proto; + fdlist[fdlistno].fd = fd; + fdlist[fdlistno].nconf = nconf; + fdlist[fdlistno].asize = __rpc_get_a_size(si.si_af); + pfd[fdlistno].events = POLLIN | POLLPRI | + POLLRDNORM | POLLRDBAND; + pfd[fdlistno].fd = fdlist[fdlistno].fd = fd; + fdlist[fdlistno].dsize = __rpc_get_t_size(si.si_af, si.si_proto, + 0); + + if (maxbufsize <= fdlist[fdlistno].dsize) + maxbufsize = fdlist[fdlistno].dsize; + +#ifdef PORTMAP + if (si.si_af == AF_INET && si.si_proto == IPPROTO_UDP) { + udpbufsz = fdlist[fdlistno].dsize; + if ((outbuf_pmap = malloc(udpbufsz)) == NULL) { + close(fd); + stat = RPC_SYSTEMERROR; + goto done_broad; + } + pmap_flag = 1; + } +#endif /* PORTMAP */ + fdlistno++; + } + + if (fdlistno == 0) { + if (stat == RPC_SUCCESS) + stat = RPC_UNKNOWNPROTO; + goto done_broad; + } + if (maxbufsize == 0) { + if (stat == RPC_SUCCESS) + stat = RPC_CANTSEND; + goto done_broad; + } + inbuf = malloc(maxbufsize); + outbuf = malloc(maxbufsize); + if ((inbuf == NULL) || (outbuf == NULL)) { + stat = RPC_SYSTEMERROR; + goto done_broad; + } + + /* Serialize all the arguments which have to be sent */ + (void) gettimeofday(&t, NULL); + msg.rm_xid = __RPC_GETXID(&t); + msg.rm_direction = CALL; + msg.rm_call.cb_rpcvers = RPC_MSG_VERSION; + msg.rm_call.cb_prog = RPCBPROG; + msg.rm_call.cb_vers = RPCBVERS; + msg.rm_call.cb_proc = RPCBPROC_CALLIT; + barg.prog = prog; + barg.vers = vers; + barg.proc = proc; + barg.args.args_val = argsp; + barg.xdr_args = xargs; + bres.addr = uaddrp; + bres.results.results_val = resultsp; + bres.xdr_res = xresults; + msg.rm_call.cb_cred = sys_auth->ah_cred; + msg.rm_call.cb_verf = sys_auth->ah_verf; + xdrmem_create(xdrs, outbuf, maxbufsize, XDR_ENCODE); + if ((!xdr_callmsg(xdrs, &msg)) || + (!xdr_rpcb_rmtcallargs(xdrs, + (struct rpcb_rmtcallargs *)(void *)&barg))) { + stat = RPC_CANTENCODEARGS; + goto done_broad; + } + outlen = xdr_getpos(xdrs); + xdr_destroy(xdrs); + +#ifdef PORTMAP + /* Prepare the packet for version 2 PORTMAP */ + if (pmap_flag) { + msg.rm_xid++; /* One way to distinguish */ + msg.rm_call.cb_prog = PMAPPROG; + msg.rm_call.cb_vers = PMAPVERS; + msg.rm_call.cb_proc = PMAPPROC_CALLIT; + barg_pmap.prog = prog; + barg_pmap.vers = vers; + barg_pmap.proc = proc; + barg_pmap.args_ptr = argsp; + barg_pmap.xdr_args = xargs; + bres_pmap.port_ptr = &port; + bres_pmap.xdr_results = xresults; + bres_pmap.results_ptr = resultsp; + xdrmem_create(xdrs, outbuf_pmap, udpbufsz, XDR_ENCODE); + if ((! xdr_callmsg(xdrs, &msg)) || + (! xdr_rmtcall_args(xdrs, &barg_pmap))) { + stat = RPC_CANTENCODEARGS; + goto done_broad; + } + outlen_pmap = xdr_getpos(xdrs); + xdr_destroy(xdrs); + } +#endif /* PORTMAP */ + + /* + * Basic loop: broadcast the packets to transports which + * support data packets of size such that one can encode + * all the arguments. 
+ * Wait a while for response(s). + * The response timeout grows larger per iteration. + */ + for (msec = inittime; msec <= waittime; msec += msec) { + struct broadif *bip; + + /* Broadcast all the packets now */ + for (i = 0; i < fdlistno; i++) { + if (fdlist[i].dsize < outlen) { + stat = RPC_CANTSEND; + continue; + } + for (bip = TAILQ_FIRST(&fdlist[i].nal); bip != NULL; + bip = TAILQ_NEXT(bip, link)) { + void *addr; + + addr = &bip->broadaddr; + + __rpc_broadenable(fdlist[i].af, fdlist[i].fd, + bip); + + /* + * Only use version 3 if lowvers is not set + */ + + if (!__rpc_lowvers) + if (sendto(fdlist[i].fd, outbuf, + outlen, 0, (struct sockaddr*)addr, + (size_t)fdlist[i].asize) != + outlen) { + LIBTIRPC_DEBUG(1, + ("rpc_broadcast_exp: sendto failed: errno %d", errno)); + warnx("rpc_broadcast_exp: cannot send broadcast packet"); + stat = RPC_CANTSEND; + continue; + }; + if (!__rpc_lowvers) + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: Broadcast packet sent for %s\n", + fdlist[i].nconf->nc_netid)); +#ifdef PORTMAP + /* + * Send the version 2 packet also + * for UDP/IP + */ + if (pmap_flag && + fdlist[i].proto == IPPROTO_UDP) { + if (sendto(fdlist[i].fd, outbuf_pmap, + outlen_pmap, 0, addr, + (size_t)fdlist[i].asize) != + outlen_pmap) { + warnx("clnt_bcast: " + "Cannot send broadcast packet"); + stat = RPC_CANTSEND; + continue; + } + } + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: PMAP Broadcast packet sent for %s\n", + fdlist[i].nconf->nc_netid)); +#endif /* PORTMAP */ + } + /* End for sending all packets on this transport */ + } /* End for sending on all transports */ + + if (eachresult == NULL) { + stat = RPC_SUCCESS; + goto done_broad; + } + + /* + * Get all the replies from these broadcast requests + */ + recv_again: + + switch (pollretval = poll(pfd, fdlistno, msec)) { + case 0: /* timed out */ + stat = RPC_TIMEDOUT; + continue; + case -1: /* some kind of error - we ignore it */ + goto recv_again; + } /* end of poll results switch */ + + for (i = fds_found = 0; + i < fdlistno && fds_found < pollretval; i++) { + bool_t done = FALSE; + + if (pfd[i].revents == 0) + continue; + else if (pfd[i].revents & POLLNVAL) { + /* + * Something bad has happened to this descri- + * ptor. We can cause _poll() to ignore + * it simply by using a negative fd. We do that + * rather than compacting the pfd[] and fdlist[] + * arrays. + */ + pfd[i].fd = -1; + fds_found++; + continue; + } else + fds_found++; + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: response for %s\n", + fdlist[i].nconf->nc_netid)); + try_again: + inlen = recvfrom(fdlist[i].fd, inbuf, fdlist[i].dsize, + 0, (struct sockaddr *)(void *)&fdlist[i].raddr, + &fdlist[i].asize); + if (inlen < 0) { + if (errno == EINTR) + goto try_again; + warnx("clnt_bcast: Cannot receive reply to " + "broadcast"); + stat = RPC_CANTRECV; + continue; + } + if (inlen < sizeof (u_int32_t)) + continue; /* Drop that and go ahead */ + /* + * see if reply transaction id matches sent id. + * If so, decode the results. 
If return id is xid + 1 + * it was a PORTMAP reply + */ + if (*((u_int32_t *)(void *)(inbuf)) == + *((u_int32_t *)(void *)(outbuf))) { + pmap_reply_flag = 0; + msg.acpted_rply.ar_verf = _null_auth; + msg.acpted_rply.ar_results.where = + (caddr_t)(void *)&bres; + msg.acpted_rply.ar_results.proc = + (xdrproc_t)xdr_rpcb_rmtcallres; +#ifdef PORTMAP + } else if (pmap_flag && + *((u_int32_t *)(void *)(inbuf)) == + *((u_int32_t *)(void *)(outbuf_pmap))) { + pmap_reply_flag = 1; + msg.acpted_rply.ar_verf = _null_auth; + msg.acpted_rply.ar_results.where = + (caddr_t)(void *)&bres_pmap; + msg.acpted_rply.ar_results.proc = + (xdrproc_t)xdr_rmtcallres; +#endif /* PORTMAP */ + } else + continue; + xdrmem_create(xdrs, inbuf, (u_int)inlen, XDR_DECODE); + if (xdr_replymsg(xdrs, &msg)) { + if ((msg.rm_reply.rp_stat == MSG_ACCEPTED) && + (msg.acpted_rply.ar_stat == SUCCESS)) { + struct netbuf *np; +#ifdef PORTMAP + struct netbuf taddr; + struct sockaddr_in sin; + + if (pmap_flag && pmap_reply_flag) { + memcpy(&sin, &fdlist[i].raddr, sizeof(sin)); + sin.sin_port = htons((u_short)port); + memcpy(&fdlist[i].raddr, &sin, sizeof(sin)); + taddr.len = taddr.maxlen = + sizeof(fdlist[i].raddr); + taddr.buf = &fdlist[i].raddr; + done = (*eachresult)(resultsp, + &taddr, fdlist[i].nconf); + } else { +#endif /* PORTMAP */ + LIBTIRPC_DEBUG(3, ("rpc_broadcast_exp: uaddr %s\n", uaddrp)); + np = uaddr2taddr( + fdlist[i].nconf, uaddrp); + /* Some misguided rpcbind implemenations + * seem to return an IPv6 uaddr in IPv4 + * responses. */ + if (np == NULL) + np = __ipv6v4_fixup( + &fdlist[i].raddr, + uaddrp); + if (np != NULL) { + done = (*eachresult)(resultsp, + np, fdlist[i].nconf); + free(np); + } +#ifdef PORTMAP + } +#endif /* PORTMAP */ + } + /* otherwise, we just ignore the errors ... */ + } + /* else some kind of deserialization problem ... 
*/ + + xdrs->x_op = XDR_FREE; + msg.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void; + (void) xdr_replymsg(xdrs, &msg); + (void) (*xresults)(xdrs, resultsp); + XDR_DESTROY(xdrs); + if (done) { + stat = RPC_SUCCESS; + goto done_broad; + } else { + goto recv_again; + } + } /* The recv for loop */ + } /* The giant for loop */ + +done_broad: + if (inbuf) + (void) free(inbuf); + if (outbuf) + (void) free(outbuf); +#ifdef PORTMAP + if (outbuf_pmap) + (void) free(outbuf_pmap); +#endif /* PORTMAP */ + for (i = 0; i < fdlistno; i++) { + (void)close(fdlist[i].fd); + __rpc_freebroadifs(&fdlist[i].nal); + } + AUTH_DESTROY(sys_auth); + (void) __rpc_endconf(handle); + + return (stat); +} + + +enum clnt_stat +rpc_broadcast(prog, vers, proc, xargs, argsp, xresults, resultsp, + eachresult, nettype) + rpcprog_t prog; /* program number */ + rpcvers_t vers; /* version number */ + rpcproc_t proc; /* procedure number */ + xdrproc_t xargs; /* xdr routine for args */ + caddr_t argsp; /* pointer to args */ + xdrproc_t xresults; /* xdr routine for results */ + caddr_t resultsp; /* pointer to results */ + resultproc_t eachresult; /* call with each result obtained */ + const char *nettype; /* transport type */ +{ + enum clnt_stat dummy; + + dummy = rpc_broadcast_exp(prog, vers, proc, xargs, argsp, + xresults, resultsp, eachresult, + INITTIME, WAITTIME, nettype); + return (dummy); +} diff -Naur libtirpc-1.0.1-orig/tirpc/queue.h libtirpc-1.0.1/tirpc/queue.h --- libtirpc-1.0.1-orig/tirpc/queue.h 1970-01-01 02:00:00.000000000 +0200 +++ libtirpc-1.0.1/tirpc/queue.h 2015-12-21 17:02:44.427853905 +0200 @@ -0,0 +1,846 @@ +/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ + +/* + * This file defines five types of data structures: singly-linked lists, + * lists, simple queues, tail queues, and circular queues. 
+ * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. + * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +/* + * Include the definition of NULL only on NetBSD because sys/null.h + * is not available elsewhere. This conditional makes the header + * portable and it can simply be dropped verbatim into any system. + * The caveat is that on other systems some other header + * must provide NULL before the macros can be used. + */ +#ifdef __NetBSD__ +#include +#endif + +#if defined(QUEUEDEBUG) +# if defined(_KERNEL) +# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__) +# else +# include +# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__) +# endif +#endif + +/* + * Singly-linked List definitions. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List access methods. 
+ */ +#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_END(head) NULL +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; \ + (var) != SLIST_END(head); \ + (var) = (var)->field.sle_next) + +#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = SLIST_FIRST((head)); \ + (var) != SLIST_END(head) && \ + ((tvar) = SLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +/* + * Singly-linked List functions. + */ +#define SLIST_INIT(head) do { \ + (head)->slh_first = SLIST_END(head); \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE_AFTER(slistelm, field) do { \ + (slistelm)->field.sle_next = \ + SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = (head)->slh_first; \ + while(curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + } \ +} while (/*CONSTCOND*/0) + + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List access methods. + */ +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_END(head) NULL +#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head)) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var) != LIST_END(head); \ + (var) = ((var)->field.le_next)) + +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST((head)); \ + (var) != LIST_END(head) && \ + ((tvar) = LIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define LIST_MOVE(head1, head2) do { \ + LIST_INIT((head2)); \ + if (!LIST_EMPTY((head1))) { \ + (head2)->lh_first = (head1)->lh_first; \ + LIST_INIT((head1)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * List functions. 
+ */ +#if defined(QUEUEDEBUG) +#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \ + if ((head)->lh_first && \ + (head)->lh_first->field.le_prev != &(head)->lh_first) \ + QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_LIST_OP(elm, field) \ + if ((elm)->field.le_next && \ + (elm)->field.le_next->field.le_prev != \ + &(elm)->field.le_next) \ + QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \ + __FILE__, __LINE__); \ + if (*(elm)->field.le_prev != (elm)) \ + QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \ + (elm)->field.le_next = (void *)1L; \ + (elm)->field.le_prev = (void *)1L; +#else +#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) +#define QUEUEDEBUG_LIST_OP(elm, field) +#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) +#endif + +#define LIST_INIT(head) do { \ + (head)->lh_first = LIST_END(head); \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + QUEUEDEBUG_LIST_OP((listelm), field) \ + if (((elm)->field.le_next = (listelm)->field.le_next) != \ + LIST_END(head)) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + QUEUEDEBUG_LIST_OP((listelm), field) \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \ + if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define LIST_REMOVE(elm, field) do { \ + QUEUEDEBUG_LIST_OP((elm), field) \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ + QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \ +} while (/*CONSTCOND*/0) + +#define LIST_REPLACE(elm, elm2, field) do { \ + if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ + (elm2)->field.le_next->field.le_prev = \ + &(elm2)->field.le_next; \ + (elm2)->field.le_prev = (elm)->field.le_prev; \ + *(elm2)->field.le_prev = (elm2); \ + QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \ +} while (/*CONSTCOND*/0) + +/* + * Simple queue definitions. + */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue access methods. 
+ */ +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head)) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var) != SIMPLEQ_END(head); \ + (var) = ((var)->field.sqe_next)) + +#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \ + for ((var) = ((head)->sqh_first); \ + (var) != SIMPLEQ_END(head) && \ + ((next = ((var)->field.sqe_next)), 1); \ + (var) = (next)) + +/* + * Simple queue functions. + */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \ + if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \ + == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + SIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_CONCAT(head1, head2) do { \ + if (!SIMPLEQ_EMPTY((head2))) { \ + *(head1)->sqh_last = (head2)->sqh_first; \ + (head1)->sqh_last = (head2)->sqh_last; \ + SIMPLEQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_LAST(head, type, field) \ + (SIMPLEQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->sqh_last) - offsetof(struct type, field)))) + +/* + * Tail queue definitions. + */ +#define _TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) + +#define TAILQ_HEAD_INITIALIZER(head) \ + { TAILQ_END(head), &(head).tqh_first } + +#define _TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) + +/* + * Tail queue access methods. 
+ */ +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_END(head) (NULL) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head)) + + +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var) != TAILQ_END(head); \ + (var) = ((var)->field.tqe_next)) + +#define TAILQ_FOREACH_SAFE(var, head, field, next) \ + for ((var) = ((head)->tqh_first); \ + (var) != TAILQ_END(head) && \ + ((next) = TAILQ_NEXT(var, field), 1); (var) = (next)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\ + (var) != TAILQ_END(head); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var) != TAILQ_END(head) && \ + ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev)) + +/* + * Tail queue functions. + */ +#if defined(QUEUEDEBUG) +#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \ + if ((head)->tqh_first && \ + (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \ + QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \ + if (*(head)->tqh_last != NULL) \ + QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_TAILQ_OP(elm, field) \ + if ((elm)->field.tqe_next && \ + (elm)->field.tqe_next->field.tqe_prev != \ + &(elm)->field.tqe_next) \ + QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \ + __FILE__, __LINE__); \ + if (*(elm)->field.tqe_prev != (elm)) \ + QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \ + if ((elm)->field.tqe_next == NULL && \ + (head)->tqh_last != &(elm)->field.tqe_next) \ + QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\ + (head), (elm), __FILE__, __LINE__); +#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \ + (elm)->field.tqe_next = (void *)1L; \ + (elm)->field.tqe_prev = (void *)1L; +#else +#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) +#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) +#define QUEUEDEBUG_TAILQ_OP(elm, field) +#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) +#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) +#endif + +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = TAILQ_END(head); \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \ + if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \ + (elm)->field.tqe_next = TAILQ_END(head); \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_AFTER(head, listelm, 
elm, field) do { \ + QUEUEDEBUG_TAILQ_OP((listelm), field) \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \ + TAILQ_END(head)) \ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + QUEUEDEBUG_TAILQ_OP((listelm), field) \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \ + QUEUEDEBUG_TAILQ_OP((elm), field) \ + if (((elm)->field.tqe_next) != TAILQ_END(head)) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ + QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \ +} while (/*CONSTCOND*/0) + +#define TAILQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \ + TAILQ_END(head)) \ + (elm2)->field.tqe_next->field.tqe_prev = \ + &(elm2)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm2)->field.tqe_next; \ + (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ + *(elm2)->field.tqe_prev = (elm2); \ + QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \ +} while (/*CONSTCOND*/0) + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * Singly-linked Tail queue declarations. + */ +#define STAILQ_HEAD(name, type) \ +struct name { \ + struct type *stqh_first; /* first element */ \ + struct type **stqh_last; /* addr of last next element */ \ +} + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ +struct { \ + struct type *stqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue access methods. + */ +#define STAILQ_FIRST(head) ((head)->stqh_first) +#define STAILQ_END(head) NULL +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) +#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head)) + +/* + * Singly-linked Tail queue functions. 
+ */ +#define STAILQ_INIT(head) do { \ + (head)->stqh_first = NULL; \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (head)->stqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.stqe_next = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &(elm)->field.stqe_next; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (listelm)->field.stqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if ((head)->stqh_first == (elm)) { \ + STAILQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->stqh_first; \ + while (curelm->field.stqe_next != (elm)) \ + curelm = curelm->field.stqe_next; \ + if ((curelm->field.stqe_next = \ + curelm->field.stqe_next->field.stqe_next) == NULL) \ + (head)->stqh_last = &(curelm)->field.stqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define STAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->stqh_first); \ + (var); \ + (var) = ((var)->field.stqe_next)) + +#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = STAILQ_FIRST((head)); \ + (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define STAILQ_CONCAT(head1, head2) do { \ + if (!STAILQ_EMPTY((head2))) { \ + *(head1)->stqh_last = (head2)->stqh_first; \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +#define STAILQ_LAST(head, type, field) \ + (STAILQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->stqh_last) - offsetof(struct type, field)))) + + +#ifndef _KERNEL +/* + * Circular queue definitions. Do not use. We still keep the macros + * for compatibility but because of pointer aliasing issues their use + * is discouraged! + */ + +/* + * __launder_type(): We use this ugly hack to work around the the compiler + * noticing that two types may not alias each other and elide tests in code. + * We hit this in the CIRCLEQ macros when comparing 'struct name *' and + * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC + * 4.8) declare these comparisons as always false, causing the code to + * not run as designed. + * + * This hack is only to be used for comparisons and thus can be fully const. + * Do not use for assignment. + * + * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix + * this by changing the head/tail sentinal values, but see the note above + * this one. 
+ */ +static __inline const void * __launder_type(const void *); +static __inline const void * +__launder_type(const void *__x) +{ + __asm __volatile("" : "+r" (__x)); + return __x; +} + +#if defined(QUEUEDEBUG) +#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \ + if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \ + (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \ + QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \ + __FILE__, __LINE__); \ + if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \ + (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \ + QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \ + __FILE__, __LINE__); +#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \ + if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \ + if ((head)->cqh_last != (elm)) \ + QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \ + (elm), __FILE__, __LINE__); \ + } else { \ + if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \ + QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \ + (elm), __FILE__, __LINE__); \ + } \ + if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \ + if ((head)->cqh_first != (elm)) \ + QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \ + (elm), __FILE__, __LINE__); \ + } else { \ + if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \ + QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \ + (elm), __FILE__, __LINE__); \ + } +#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \ + (elm)->field.cqe_next = (void *)1L; \ + (elm)->field.cqe_prev = (void *)1L; +#else +#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) +#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) +#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) +#endif + +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { CIRCLEQ_END(&head), CIRCLEQ_END(&head) } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = CIRCLEQ_END(head); \ + (head)->cqh_last = CIRCLEQ_END(head); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ + QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ + QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = CIRCLEQ_END(head); \ + if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ + (elm)->field.cqe_next = CIRCLEQ_END(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \ + QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \ + if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ + QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->cqh_first); \ + (var) != CIRCLEQ_ENDC(head); \ + (var) = ((var)->field.cqe_next)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for ((var) = ((head)->cqh_last); \ + (var) != CIRCLEQ_ENDC(head); \ + (var) = ((var)->field.cqe_prev)) + +/* + * Circular queue access methods. + */ +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) +#define CIRCLEQ_LAST(head) ((head)->cqh_last) +/* For comparisons */ +#define CIRCLEQ_ENDC(head) (__launder_type(head)) +/* For assignments */ +#define CIRCLEQ_END(head) ((void *)(head)) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) +#define CIRCLEQ_EMPTY(head) \ + (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head)) + +#define CIRCLEQ_LOOP_NEXT(head, elm, field) \ + (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \ + ? ((head)->cqh_first) \ + : (elm->field.cqe_next)) +#define CIRCLEQ_LOOP_PREV(head, elm, field) \ + (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \ + ? 
((head)->cqh_last) \ + : (elm->field.cqe_prev)) +#endif /* !_KERNEL */ + +#endif /* !_SYS_QUEUE_H_ */
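
For reference only (not part of the patch): a minimal standalone sketch of the TAILQ pattern that clnt_bcast.c relies on from the borrowed header, assuming the vendored queue.h is on the include path. The struct node and nodelist names are made up for illustration; clnt_bcast.c does the same thing with struct broadif and broadlist_t, including the TAILQ_FIRST/TAILQ_NEXT teardown walk used by __rpc_freebroadifs().

#include <stdio.h>
#include <stdlib.h>
#include "queue.h" /* the NetBSD header this patch installs under tirpc/ */

/* Hypothetical element type; each element embeds its own link field. */
struct node {
	int value;
	TAILQ_ENTRY(node) link;      /* forward/back pointers live in the element */
};

TAILQ_HEAD(nodelist, node);          /* declares `struct nodelist` as the list head */

int main(void)
{
	struct nodelist list;
	struct node *np, *next;
	int i;

	TAILQ_INIT(&list);

	/* Build the list by appending at the tail. */
	for (i = 0; i < 3; i++) {
		np = malloc(sizeof(*np));
		if (np == NULL)
			abort();
		np->value = i;
		TAILQ_INSERT_TAIL(&list, np, link);
	}

	/* Forward traversal. */
	TAILQ_FOREACH(np, &list, link)
		printf("%d\n", np->value);

	/* Teardown: fetch the successor before freeing the current element. */
	np = TAILQ_FIRST(&list);
	while (np != NULL) {
		next = TAILQ_NEXT(np, link);
		free(np);
		np = next;
	}
	return 0;
}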