diff --git a/3rdparty/uthash/src/utarray.h b/3rdparty/uthash/src/utarray.h deleted file mode 100644 index 6ed0dcebcb170136209d3e35d6882694b0e5ec1f..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/utarray.h +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright (c) 2008-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -/* a dynamic array implementation using macros - */ -#ifndef UTARRAY_H -#define UTARRAY_H - -#define UTARRAY_VERSION 2.1.0 - -#include <stddef.h> /* size_t */ -#include <string.h> /* memset, etc */ -#include <stdlib.h> /* exit */ - -#ifdef __GNUC__ -#define UTARRAY_UNUSED __attribute__((__unused__)) -#else -#define UTARRAY_UNUSED -#endif - -#ifndef oom -#define oom() exit(-1) -#endif - -typedef void (ctor_f)(void *dst, const void *src); -typedef void (dtor_f)(void *elt); -typedef void (init_f)(void *elt); -typedef struct { - size_t sz; - init_f *init; - ctor_f *copy; - dtor_f *dtor; -} UT_icd; - -typedef struct { - unsigned i,n;/* i: index of next available slot, n: num slots */ - UT_icd icd; /* initializer, copy and destructor functions */ - char *d; /* n slots of size icd->sz*/ -} UT_array; - -#define utarray_init(a,_icd) do { \ - memset(a,0,sizeof(UT_array)); \ - (a)->icd = *(_icd); \ -} while(0) - -#define utarray_done(a) do { \ - if ((a)->n) { \ - if ((a)->icd.dtor) { \ - unsigned _ut_i; \ - for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ - (a)->icd.dtor(utarray_eltptr(a,_ut_i)); \ - } \ - } \ - free((a)->d); \ - } \ - (a)->n=0; \ -} while(0) - -#define utarray_new(a,_icd) do { \ - (a) = (UT_array*)malloc(sizeof(UT_array)); \ - if ((a) == NULL) oom(); \ - utarray_init(a,_icd); \ -} while(0) - -#define utarray_free(a) do { \ - utarray_done(a); \ - free(a); \ -} while(0) - -#define utarray_reserve(a,by) do { \ - if (((a)->i+(by)) > (a)->n) { \ - char *utarray_tmp; \ - while (((a)->i+(by)) > (a)->n) { (a)->n = ((a)->n ? 
(2*(a)->n) : 8); } \ - utarray_tmp=(char*)realloc((a)->d, (a)->n*(a)->icd.sz); \ - if (utarray_tmp == NULL) oom(); \ - (a)->d=utarray_tmp; \ - } \ -} while(0) - -#define utarray_push_back(a,p) do { \ - utarray_reserve(a,1); \ - if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,(a)->i++), p); } \ - else { memcpy(_utarray_eltptr(a,(a)->i++), p, (a)->icd.sz); }; \ -} while(0) - -#define utarray_pop_back(a) do { \ - if ((a)->icd.dtor) { (a)->icd.dtor( _utarray_eltptr(a,--((a)->i))); } \ - else { (a)->i--; } \ -} while(0) - -#define utarray_extend_back(a) do { \ - utarray_reserve(a,1); \ - if ((a)->icd.init) { (a)->icd.init(_utarray_eltptr(a,(a)->i)); } \ - else { memset(_utarray_eltptr(a,(a)->i),0,(a)->icd.sz); } \ - (a)->i++; \ -} while(0) - -#define utarray_len(a) ((a)->i) - -#define utarray_eltptr(a,j) (((j) < (a)->i) ? _utarray_eltptr(a,j) : NULL) -#define _utarray_eltptr(a,j) ((a)->d + ((a)->icd.sz * (j))) - -#define utarray_insert(a,p,j) do { \ - if ((j) > (a)->i) utarray_resize(a,j); \ - utarray_reserve(a,1); \ - if ((j) < (a)->i) { \ - memmove( _utarray_eltptr(a,(j)+1), _utarray_eltptr(a,j), \ - ((a)->i - (j))*((a)->icd.sz)); \ - } \ - if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,j), p); } \ - else { memcpy(_utarray_eltptr(a,j), p, (a)->icd.sz); }; \ - (a)->i++; \ -} while(0) - -#define utarray_inserta(a,w,j) do { \ - if (utarray_len(w) == 0) break; \ - if ((j) > (a)->i) utarray_resize(a,j); \ - utarray_reserve(a,utarray_len(w)); \ - if ((j) < (a)->i) { \ - memmove(_utarray_eltptr(a,(j)+utarray_len(w)), \ - _utarray_eltptr(a,j), \ - ((a)->i - (j))*((a)->icd.sz)); \ - } \ - if ((a)->icd.copy) { \ - unsigned _ut_i; \ - for(_ut_i=0;_ut_i<(w)->i;_ut_i++) { \ - (a)->icd.copy(_utarray_eltptr(a, (j) + _ut_i), _utarray_eltptr(w, _ut_i)); \ - } \ - } else { \ - memcpy(_utarray_eltptr(a,j), _utarray_eltptr(w,0), \ - utarray_len(w)*((a)->icd.sz)); \ - } \ - (a)->i += utarray_len(w); \ -} while(0) - -#define utarray_resize(dst,num) do { \ - unsigned _ut_i; \ - if ((dst)->i > (unsigned)(num)) { \ - if ((dst)->icd.dtor) { \ - for (_ut_i = (num); _ut_i < (dst)->i; ++_ut_i) { \ - (dst)->icd.dtor(_utarray_eltptr(dst, _ut_i)); \ - } \ - } \ - } else if ((dst)->i < (unsigned)(num)) { \ - utarray_reserve(dst, (num) - (dst)->i); \ - if ((dst)->icd.init) { \ - for (_ut_i = (dst)->i; _ut_i < (unsigned)(num); ++_ut_i) { \ - (dst)->icd.init(_utarray_eltptr(dst, _ut_i)); \ - } \ - } else { \ - memset(_utarray_eltptr(dst, (dst)->i), 0, (dst)->icd.sz*((num) - (dst)->i)); \ - } \ - } \ - (dst)->i = (num); \ -} while(0) - -#define utarray_concat(dst,src) do { \ - utarray_inserta(dst, src, utarray_len(dst)); \ -} while(0) - -#define utarray_erase(a,pos,len) do { \ - if ((a)->icd.dtor) { \ - unsigned _ut_i; \ - for (_ut_i = 0; _ut_i < (len); _ut_i++) { \ - (a)->icd.dtor(utarray_eltptr(a, (pos) + _ut_i)); \ - } \ - } \ - if ((a)->i > ((pos) + (len))) { \ - memmove(_utarray_eltptr(a, pos), _utarray_eltptr(a, (pos) + (len)), \ - ((a)->i - ((pos) + (len))) * (a)->icd.sz); \ - } \ - (a)->i -= (len); \ -} while(0) - -#define utarray_renew(a,u) do { \ - if (a) utarray_clear(a); \ - else utarray_new(a, u); \ -} while(0) - -#define utarray_clear(a) do { \ - if ((a)->i > 0) { \ - if ((a)->icd.dtor) { \ - unsigned _ut_i; \ - for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ - (a)->icd.dtor(_utarray_eltptr(a, _ut_i)); \ - } \ - } \ - (a)->i = 0; \ - } \ -} while(0) - -#define utarray_sort(a,cmp) do { \ - qsort((a)->d, (a)->i, (a)->icd.sz, cmp); \ -} while(0) - -#define utarray_find(a,v,cmp) 
bsearch((v),(a)->d,(a)->i,(a)->icd.sz,cmp) - -#define utarray_front(a) (((a)->i) ? (_utarray_eltptr(a,0)) : NULL) -#define utarray_next(a,e) (((e)==NULL) ? utarray_front(a) : ((((a)->i) > (utarray_eltidx(a,e)+1)) ? _utarray_eltptr(a,utarray_eltidx(a,e)+1) : NULL)) -#define utarray_prev(a,e) (((e)==NULL) ? utarray_back(a) : ((utarray_eltidx(a,e) > 0) ? _utarray_eltptr(a,utarray_eltidx(a,e)-1) : NULL)) -#define utarray_back(a) (((a)->i) ? (_utarray_eltptr(a,(a)->i-1)) : NULL) -#define utarray_eltidx(a,e) (((char*)(e) >= (a)->d) ? (((char*)(e) - (a)->d)/(a)->icd.sz) : -1) - -/* last we pre-define a few icd for common utarrays of ints and strings */ -static void utarray_str_cpy(void *dst, const void *src) { - char **_src = (char**)src, **_dst = (char**)dst; - *_dst = (*_src == NULL) ? NULL : strdup(*_src); -} -static void utarray_str_dtor(void *elt) { - char **eltc = (char**)elt; - if (*eltc != NULL) free(*eltc); -} -static const UT_icd ut_str_icd UTARRAY_UNUSED = {sizeof(char*),NULL,utarray_str_cpy,utarray_str_dtor}; -static const UT_icd ut_int_icd UTARRAY_UNUSED = {sizeof(int),NULL,NULL,NULL}; -static const UT_icd ut_ptr_icd UTARRAY_UNUSED = {sizeof(void*),NULL,NULL,NULL}; - - -#endif /* UTARRAY_H */ diff --git a/3rdparty/uthash/src/uthash.h b/3rdparty/uthash/src/uthash.h deleted file mode 100644 index 76bdca64199f11dbb6387633e2a651dcaee1bb4f..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/uthash.h +++ /dev/null @@ -1,1227 +0,0 @@ -/* -Copyright (c) 2003-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifndef UTHASH_H -#define UTHASH_H - -#define UTHASH_VERSION 2.1.0 - -#include <string.h> /* memcmp, memset, strlen */ -#include <stddef.h> /* ptrdiff_t */ -#include <stdlib.h> /* exit */ - -/* These macros use decltype or the earlier __typeof GNU extension. - As decltype is only available in newer compilers (VS2010 or gcc 4.3+ - when compiling c++ source) this code uses whatever method is needed - or, for VS2008 where neither is available, uses casting workarounds. 
*/ -#if !defined(DECLTYPE) && !defined(NO_DECLTYPE) -#if defined(_MSC_VER) /* MS compiler */ -#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ -#define DECLTYPE(x) (decltype(x)) -#else /* VS2008 or older (or VS2010 in C mode) */ -#define NO_DECLTYPE -#endif -#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) -#define NO_DECLTYPE -#else /* GNU, Sun and other compilers */ -#define DECLTYPE(x) (__typeof(x)) -#endif -#endif - -#ifdef NO_DECLTYPE -#define DECLTYPE(x) -#define DECLTYPE_ASSIGN(dst,src) \ -do { \ - char **_da_dst = (char**)(&(dst)); \ - *_da_dst = (char*)(src); \ -} while (0) -#else -#define DECLTYPE_ASSIGN(dst,src) \ -do { \ - (dst) = DECLTYPE(dst)(src); \ -} while (0) -#endif - -/* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */ -#if defined(_WIN32) -#if defined(_MSC_VER) && _MSC_VER >= 1600 -#include <stdint.h> -#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__) -#include <stdint.h> -#else -typedef unsigned int uint32_t; -typedef unsigned char uint8_t; -#endif -#elif defined(__GNUC__) && !defined(__VXWORKS__) -#include <stdint.h> -#else -typedef unsigned int uint32_t; -typedef unsigned char uint8_t; -#endif - -#ifndef uthash_malloc -#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ -#endif -#ifndef uthash_free -#define uthash_free(ptr,sz) free(ptr) /* free fcn */ -#endif -#ifndef uthash_bzero -#define uthash_bzero(a,n) memset(a,'\0',n) -#endif -#ifndef uthash_strlen -#define uthash_strlen(s) strlen(s) -#endif - -#ifdef uthash_memcmp -/* This warning will not catch programs that define uthash_memcmp AFTER including uthash.h. */ -#warning "uthash_memcmp is deprecated; please use HASH_KEYCMP instead" -#else -#define uthash_memcmp(a,b,n) memcmp(a,b,n) -#endif - -#ifndef HASH_KEYCMP -#define HASH_KEYCMP(a,b,n) uthash_memcmp(a,b,n) -#endif - -#ifndef uthash_noexpand_fyi -#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ -#endif -#ifndef uthash_expand_fyi -#define uthash_expand_fyi(tbl) /* can be defined to log expands */ -#endif - -#ifndef HASH_NONFATAL_OOM -#define HASH_NONFATAL_OOM 0 -#endif - -#if HASH_NONFATAL_OOM -/* malloc failures can be recovered from */ - -#ifndef uthash_nonfatal_oom -#define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */ -#endif - -#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0) -#define IF_HASH_NONFATAL_OOM(x) x - -#else -/* malloc failures result in lost memory, hash tables are unusable */ - -#ifndef uthash_fatal -#define uthash_fatal(msg) exit(-1) /* fatal OOM error */ -#endif - -#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory") -#define IF_HASH_NONFATAL_OOM(x) - -#endif - -/* initial number of buckets */ -#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ -#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ -#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ - -/* calculate the element whose hash handle address is hhp */ -#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) -/* calculate the hash handle from element address elp */ -#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho))) - -#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \ -do { \ - struct UT_hash_handle *_hd_hh_item = (itemptrhh); \ - unsigned _hd_bkt; \ - HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ - (head)->hh.tbl->buckets[_hd_bkt].count++; \ - 
_hd_hh_item->hh_next = NULL; \ - _hd_hh_item->hh_prev = NULL; \ -} while (0) - -#define HASH_VALUE(keyptr,keylen,hashv) \ -do { \ - HASH_FCN(keyptr, keylen, hashv); \ -} while (0) - -#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ -do { \ - (out) = NULL; \ - if (head) { \ - unsigned _hf_bkt; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \ - if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \ - HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ - } \ - } \ -} while (0) - -#define HASH_FIND(hh,head,keyptr,keylen,out) \ -do { \ - unsigned _hf_hashv; \ - HASH_VALUE(keyptr, keylen, _hf_hashv); \ - HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ -} while (0) - -#ifdef HASH_BLOOM -#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) -#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL) -#define HASH_BLOOM_MAKE(tbl,oomed) \ -do { \ - (tbl)->bloom_nbits = HASH_BLOOM; \ - (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ - if (!(tbl)->bloom_bv) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ - (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ - } \ -} while (0) - -#define HASH_BLOOM_FREE(tbl) \ -do { \ - uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ -} while (0) - -#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) -#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) - -#define HASH_BLOOM_ADD(tbl,hashv) \ - HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) - -#define HASH_BLOOM_TEST(tbl,hashv) \ - HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) - -#else -#define HASH_BLOOM_MAKE(tbl,oomed) -#define HASH_BLOOM_FREE(tbl) -#define HASH_BLOOM_ADD(tbl,hashv) -#define HASH_BLOOM_TEST(tbl,hashv) (1) -#define HASH_BLOOM_BYTELEN 0U -#endif - -#define HASH_MAKE_TABLE(hh,head,oomed) \ -do { \ - (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \ - if (!(head)->hh.tbl) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head)->hh.tbl->tail = &((head)->hh); \ - (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ - (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ - (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ - (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ - HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ - (head)->hh.tbl->signature = HASH_SIGNATURE; \ - if (!(head)->hh.tbl->buckets) { \ - HASH_RECORD_OOM(oomed); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - } else { \ - uthash_bzero((head)->hh.tbl->buckets, \ - HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ - HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (oomed) { \ - uthash_free((head)->hh.tbl->buckets, \ - HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - } \ - ) \ - } \ - } \ -} while (0) - -#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ -do { \ - (replaced) = NULL; \ - HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ - if (replaced) { \ - HASH_DELETE(hh, head, replaced); \ - } \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, 
cmpfcn); \ -} while (0) - -#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ -do { \ - (replaced) = NULL; \ - HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ - if (replaced) { \ - HASH_DELETE(hh, head, replaced); \ - } \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ -} while (0) - -#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ -do { \ - unsigned _hr_hashv; \ - HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ - HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ -} while (0) - -#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ -do { \ - unsigned _hr_hashv; \ - HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ - HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ -} while (0) - -#define HASH_APPEND_LIST(hh, head, add) \ -do { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ - (head)->hh.tbl->tail->next = (add); \ - (head)->hh.tbl->tail = &((add)->hh); \ -} while (0) - -#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ -do { \ - do { \ - if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \ - break; \ - } \ - } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ -} while (0) - -#ifdef NO_DECLTYPE -#undef HASH_AKBI_INNER_LOOP -#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ -do { \ - char *_hs_saved_head = (char*)(head); \ - do { \ - DECLTYPE_ASSIGN(head, _hs_iter); \ - if (cmpfcn(head, add) > 0) { \ - DECLTYPE_ASSIGN(head, _hs_saved_head); \ - break; \ - } \ - DECLTYPE_ASSIGN(head, _hs_saved_head); \ - } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ -} while (0) -#endif - -#if HASH_NONFATAL_OOM - -#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ -do { \ - if (!(oomed)) { \ - unsigned _ha_bkt; \ - (head)->hh.tbl->num_items++; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ - HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ - if (oomed) { \ - HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \ - HASH_DELETE_HH(hh, head, &(add)->hh); \ - (add)->hh.tbl = NULL; \ - uthash_nonfatal_oom(add); \ - } else { \ - HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ - HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ - } \ - } else { \ - (add)->hh.tbl = NULL; \ - uthash_nonfatal_oom(add); \ - } \ -} while (0) - -#else - -#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ -do { \ - unsigned _ha_bkt; \ - (head)->hh.tbl->num_items++; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ - HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ - HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ - HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ -} while (0) - -#endif - - -#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ -do { \ - IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ - (add)->hh.hashv = (hashval); \ - (add)->hh.key = (char*) (keyptr); \ - (add)->hh.keylen = (unsigned) (keylen_in); \ - if (!(head)) { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = NULL; \ - HASH_MAKE_TABLE(hh, add, _ha_oomed); \ - IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ - (head) = (add); \ - IF_HASH_NONFATAL_OOM( } ) \ - } else { \ - void *_hs_iter = (head); \ - (add)->hh.tbl = (head)->hh.tbl; \ - HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ - if 
(_hs_iter) { \ - (add)->hh.next = _hs_iter; \ - if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \ - HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \ - } else { \ - (head) = (add); \ - } \ - HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \ - } else { \ - HASH_APPEND_LIST(hh, head, add); \ - } \ - } \ - HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ - HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \ -} while (0) - -#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ -do { \ - unsigned _hs_hashv; \ - HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ -} while (0) - -#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) - -#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ - HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) - -#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ -do { \ - IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ - (add)->hh.hashv = (hashval); \ - (add)->hh.key = (char*) (keyptr); \ - (add)->hh.keylen = (unsigned) (keylen_in); \ - if (!(head)) { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = NULL; \ - HASH_MAKE_TABLE(hh, add, _ha_oomed); \ - IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ - (head) = (add); \ - IF_HASH_NONFATAL_OOM( } ) \ - } else { \ - (add)->hh.tbl = (head)->hh.tbl; \ - HASH_APPEND_LIST(hh, head, add); \ - } \ - HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ - HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \ -} while (0) - -#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ -do { \ - unsigned _ha_hashv; \ - HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ -} while (0) - -#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) - -#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ - HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) - -#define HASH_TO_BKT(hashv,num_bkts,bkt) \ -do { \ - bkt = ((hashv) & ((num_bkts) - 1U)); \ -} while (0) - -/* delete "delptr" from the hash table. - * "the usual" patch-up process for the app-order doubly-linked-list. - * The use of _hd_hh_del below deserves special explanation. - * These used to be expressed using (delptr) but that led to a bug - * if someone used the same symbol for the head and deletee, like - * HASH_DELETE(hh,users,users); - * We want that to work, but by changing the head (users) below - * we were forfeiting our ability to further refer to the deletee (users) - * in the patch-up process. Solution: use scratch space to - * copy the deletee pointer, then the latter references are via that - * scratch pointer rather than through the repointed (users) symbol. 
- */ -#define HASH_DELETE(hh,head,delptr) \ - HASH_DELETE_HH(hh, head, &(delptr)->hh) - -#define HASH_DELETE_HH(hh,head,delptrhh) \ -do { \ - struct UT_hash_handle *_hd_hh_del = (delptrhh); \ - if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \ - HASH_BLOOM_FREE((head)->hh.tbl); \ - uthash_free((head)->hh.tbl->buckets, \ - (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head) = NULL; \ - } else { \ - unsigned _hd_bkt; \ - if (_hd_hh_del == (head)->hh.tbl->tail) { \ - (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \ - } \ - if (_hd_hh_del->prev != NULL) { \ - HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \ - } else { \ - DECLTYPE_ASSIGN(head, _hd_hh_del->next); \ - } \ - if (_hd_hh_del->next != NULL) { \ - HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \ - } \ - HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ - HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ - (head)->hh.tbl->num_items--; \ - } \ - HASH_FSCK(hh, head, "HASH_DELETE_HH"); \ -} while (0) - -/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ -#define HASH_FIND_STR(head,findstr,out) \ -do { \ - unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \ - HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \ -} while (0) -#define HASH_ADD_STR(head,strfield,add) \ -do { \ - unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \ - HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \ -} while (0) -#define HASH_REPLACE_STR(head,strfield,add,replaced) \ -do { \ - unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \ - HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \ -} while (0) -#define HASH_FIND_INT(head,findint,out) \ - HASH_FIND(hh,head,findint,sizeof(int),out) -#define HASH_ADD_INT(head,intfield,add) \ - HASH_ADD(hh,head,intfield,sizeof(int),add) -#define HASH_REPLACE_INT(head,intfield,add,replaced) \ - HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) -#define HASH_FIND_PTR(head,findptr,out) \ - HASH_FIND(hh,head,findptr,sizeof(void *),out) -#define HASH_ADD_PTR(head,ptrfield,add) \ - HASH_ADD(hh,head,ptrfield,sizeof(void *),add) -#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ - HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) -#define HASH_DEL(head,delptr) \ - HASH_DELETE(hh,head,delptr) - -/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. - * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. - */ -#ifdef HASH_DEBUG -#define HASH_OOPS(...) 
do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) -#define HASH_FSCK(hh,head,where) \ -do { \ - struct UT_hash_handle *_thh; \ - if (head) { \ - unsigned _bkt_i; \ - unsigned _count = 0; \ - char *_prev; \ - for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \ - unsigned _bkt_count = 0; \ - _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ - _prev = NULL; \ - while (_thh) { \ - if (_prev != (char*)(_thh->hh_prev)) { \ - HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \ - (where), (void*)_thh->hh_prev, (void*)_prev); \ - } \ - _bkt_count++; \ - _prev = (char*)(_thh); \ - _thh = _thh->hh_next; \ - } \ - _count += _bkt_count; \ - if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ - HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \ - (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ - } \ - } \ - if (_count != (head)->hh.tbl->num_items) { \ - HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \ - (where), (head)->hh.tbl->num_items, _count); \ - } \ - _count = 0; \ - _prev = NULL; \ - _thh = &(head)->hh; \ - while (_thh) { \ - _count++; \ - if (_prev != (char*)_thh->prev) { \ - HASH_OOPS("%s: invalid prev %p, actual %p\n", \ - (where), (void*)_thh->prev, (void*)_prev); \ - } \ - _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ - _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \ - } \ - if (_count != (head)->hh.tbl->num_items) { \ - HASH_OOPS("%s: invalid app item count %u, actual %u\n", \ - (where), (head)->hh.tbl->num_items, _count); \ - } \ - } \ -} while (0) -#else -#define HASH_FSCK(hh,head,where) -#endif - -/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to - * the descriptor to which this macro is defined for tuning the hash function. - * The app can #include <unistd.h> to get the prototype for write(2). */ -#ifdef HASH_EMIT_KEYS -#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ -do { \ - unsigned _klen = fieldlen; \ - write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ - write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ -} while (0) -#else -#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) -#endif - -/* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */ -#ifdef HASH_FUNCTION -#define HASH_FCN HASH_FUNCTION -#else -#define HASH_FCN HASH_JEN -#endif - -/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. 
*/ -#define HASH_BER(key,keylen,hashv) \ -do { \ - unsigned _hb_keylen = (unsigned)keylen; \ - const unsigned char *_hb_key = (const unsigned char*)(key); \ - (hashv) = 0; \ - while (_hb_keylen-- != 0U) { \ - (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ - } \ -} while (0) - - -/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at - * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ -#define HASH_SAX(key,keylen,hashv) \ -do { \ - unsigned _sx_i; \ - const unsigned char *_hs_key = (const unsigned char*)(key); \ - hashv = 0; \ - for (_sx_i=0; _sx_i < keylen; _sx_i++) { \ - hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ - } \ -} while (0) -/* FNV-1a variation */ -#define HASH_FNV(key,keylen,hashv) \ -do { \ - unsigned _fn_i; \ - const unsigned char *_hf_key = (const unsigned char*)(key); \ - (hashv) = 2166136261U; \ - for (_fn_i=0; _fn_i < keylen; _fn_i++) { \ - hashv = hashv ^ _hf_key[_fn_i]; \ - hashv = hashv * 16777619U; \ - } \ -} while (0) - -#define HASH_OAT(key,keylen,hashv) \ -do { \ - unsigned _ho_i; \ - const unsigned char *_ho_key=(const unsigned char*)(key); \ - hashv = 0; \ - for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ - hashv += _ho_key[_ho_i]; \ - hashv += (hashv << 10); \ - hashv ^= (hashv >> 6); \ - } \ - hashv += (hashv << 3); \ - hashv ^= (hashv >> 11); \ - hashv += (hashv << 15); \ -} while (0) - -#define HASH_JEN_MIX(a,b,c) \ -do { \ - a -= b; a -= c; a ^= ( c >> 13 ); \ - b -= c; b -= a; b ^= ( a << 8 ); \ - c -= a; c -= b; c ^= ( b >> 13 ); \ - a -= b; a -= c; a ^= ( c >> 12 ); \ - b -= c; b -= a; b ^= ( a << 16 ); \ - c -= a; c -= b; c ^= ( b >> 5 ); \ - a -= b; a -= c; a ^= ( c >> 3 ); \ - b -= c; b -= a; b ^= ( a << 10 ); \ - c -= a; c -= b; c ^= ( b >> 15 ); \ -} while (0) - -#define HASH_JEN(key,keylen,hashv) \ -do { \ - unsigned _hj_i,_hj_j,_hj_k; \ - unsigned const char *_hj_key=(unsigned const char*)(key); \ - hashv = 0xfeedbeefu; \ - _hj_i = _hj_j = 0x9e3779b9u; \ - _hj_k = (unsigned)(keylen); \ - while (_hj_k >= 12U) { \ - _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ - + ( (unsigned)_hj_key[2] << 16 ) \ - + ( (unsigned)_hj_key[3] << 24 ) ); \ - _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ - + ( (unsigned)_hj_key[6] << 16 ) \ - + ( (unsigned)_hj_key[7] << 24 ) ); \ - hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ - + ( (unsigned)_hj_key[10] << 16 ) \ - + ( (unsigned)_hj_key[11] << 24 ) ); \ - \ - HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ - \ - _hj_key += 12; \ - _hj_k -= 12U; \ - } \ - hashv += (unsigned)(keylen); \ - switch ( _hj_k ) { \ - case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ - case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ - case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ - case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ - case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ - case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ - case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ - case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ - case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ - case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ - case 1: _hj_i += _hj_key[0]; \ - } \ - HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ -} while (0) - -/* The Paul Hsieh hash function */ -#undef get16bits -#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ - || defined(_MSC_VER) || defined (__BORLANDC__) || 
defined (__TURBOC__) -#define get16bits(d) (*((const uint16_t *) (d))) -#endif - -#if !defined (get16bits) -#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ - +(uint32_t)(((const uint8_t *)(d))[0]) ) -#endif -#define HASH_SFH(key,keylen,hashv) \ -do { \ - unsigned const char *_sfh_key=(unsigned const char*)(key); \ - uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \ - \ - unsigned _sfh_rem = _sfh_len & 3U; \ - _sfh_len >>= 2; \ - hashv = 0xcafebabeu; \ - \ - /* Main loop */ \ - for (;_sfh_len > 0U; _sfh_len--) { \ - hashv += get16bits (_sfh_key); \ - _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ - hashv = (hashv << 16) ^ _sfh_tmp; \ - _sfh_key += 2U*sizeof (uint16_t); \ - hashv += hashv >> 11; \ - } \ - \ - /* Handle end cases */ \ - switch (_sfh_rem) { \ - case 3: hashv += get16bits (_sfh_key); \ - hashv ^= hashv << 16; \ - hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ - hashv += hashv >> 11; \ - break; \ - case 2: hashv += get16bits (_sfh_key); \ - hashv ^= hashv << 11; \ - hashv += hashv >> 17; \ - break; \ - case 1: hashv += *_sfh_key; \ - hashv ^= hashv << 10; \ - hashv += hashv >> 1; \ - } \ - \ - /* Force "avalanching" of final 127 bits */ \ - hashv ^= hashv << 3; \ - hashv += hashv >> 5; \ - hashv ^= hashv << 4; \ - hashv += hashv >> 17; \ - hashv ^= hashv << 25; \ - hashv += hashv >> 6; \ -} while (0) - -#ifdef HASH_USING_NO_STRICT_ALIASING -/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads. - * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. - * MurmurHash uses the faster approach only on CPU's where we know it's safe. - * - * Note the preprocessor built-in defines can be emitted using: - * - * gcc -m64 -dM -E - < /dev/null (on gcc) - * cc -## a.c (where a.c is a simple test file) (Sun Studio) - */ -#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)) -#define MUR_GETBLOCK(p,i) p[i] -#else /* non intel */ -#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL) -#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL) -#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL) -#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL) -#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL)) -#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__)) -#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24)) -#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16)) -#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8)) -#else /* assume little endian non-intel */ -#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24)) -#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16)) -#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8)) -#endif -#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \ - (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \ - (MUR_PLUS2_ALIGNED(p) ? 
MUR_TWO_TWO(p) : \ - MUR_ONE_THREE(p)))) -#endif -#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) -#define MUR_FMIX(_h) \ -do { \ - _h ^= _h >> 16; \ - _h *= 0x85ebca6bu; \ - _h ^= _h >> 13; \ - _h *= 0xc2b2ae35u; \ - _h ^= _h >> 16; \ -} while (0) - -#define HASH_MUR(key,keylen,hashv) \ -do { \ - const uint8_t *_mur_data = (const uint8_t*)(key); \ - const int _mur_nblocks = (int)(keylen) / 4; \ - uint32_t _mur_h1 = 0xf88D5353u; \ - uint32_t _mur_c1 = 0xcc9e2d51u; \ - uint32_t _mur_c2 = 0x1b873593u; \ - uint32_t _mur_k1 = 0; \ - const uint8_t *_mur_tail; \ - const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \ - int _mur_i; \ - for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) { \ - _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \ - _mur_k1 *= _mur_c1; \ - _mur_k1 = MUR_ROTL32(_mur_k1,15); \ - _mur_k1 *= _mur_c2; \ - \ - _mur_h1 ^= _mur_k1; \ - _mur_h1 = MUR_ROTL32(_mur_h1,13); \ - _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \ - } \ - _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \ - _mur_k1=0; \ - switch ((keylen) & 3U) { \ - case 0: break; \ - case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \ - case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \ - case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \ - _mur_k1 *= _mur_c1; \ - _mur_k1 = MUR_ROTL32(_mur_k1,15); \ - _mur_k1 *= _mur_c2; \ - _mur_h1 ^= _mur_k1; \ - } \ - _mur_h1 ^= (uint32_t)(keylen); \ - MUR_FMIX(_mur_h1); \ - hashv = _mur_h1; \ -} while (0) -#endif /* HASH_USING_NO_STRICT_ALIASING */ - -/* iterate over items in a known bucket to find desired item */ -#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ -do { \ - if ((head).hh_head != NULL) { \ - DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ - } else { \ - (out) = NULL; \ - } \ - while ((out) != NULL) { \ - if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \ - if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \ - break; \ - } \ - } \ - if ((out)->hh.hh_next != NULL) { \ - DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \ - } else { \ - (out) = NULL; \ - } \ - } \ -} while (0) - -/* add an item to a bucket */ -#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \ -do { \ - UT_hash_bucket *_ha_head = &(head); \ - _ha_head->count++; \ - (addhh)->hh_next = _ha_head->hh_head; \ - (addhh)->hh_prev = NULL; \ - if (_ha_head->hh_head != NULL) { \ - _ha_head->hh_head->hh_prev = (addhh); \ - } \ - _ha_head->hh_head = (addhh); \ - if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \ - && !(addhh)->tbl->noexpand) { \ - HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (oomed) { \ - HASH_DEL_IN_BKT(head,addhh); \ - } \ - ) \ - } \ -} while (0) - -/* remove an item from a given bucket */ -#define HASH_DEL_IN_BKT(head,delhh) \ -do { \ - UT_hash_bucket *_hd_head = &(head); \ - _hd_head->count--; \ - if (_hd_head->hh_head == (delhh)) { \ - _hd_head->hh_head = (delhh)->hh_next; \ - } \ - if ((delhh)->hh_prev) { \ - (delhh)->hh_prev->hh_next = (delhh)->hh_next; \ - } \ - if ((delhh)->hh_next) { \ - (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \ - } \ -} while (0) - -/* Bucket expansion has the effect of doubling the number of buckets - * and redistributing the items into the new buckets. Ideally the - * items will distribute more or less evenly into the new buckets - * (the extent to which this is true is a measure of the quality of - * the hash function as it applies to the key domain). 
- * - * With the items distributed into more buckets, the chain length - * (item count) in each bucket is reduced. Thus by expanding buckets - * the hash keeps a bound on the chain length. This bounded chain - * length is the essence of how a hash provides constant time lookup. - * - * The calculation of tbl->ideal_chain_maxlen below deserves some - * explanation. First, keep in mind that we're calculating the ideal - * maximum chain length based on the *new* (doubled) bucket count. - * In fractions this is just n/b (n=number of items,b=new num buckets). - * Since the ideal chain length is an integer, we want to calculate - * ceil(n/b). We don't depend on floating point arithmetic in this - * hash, so to calculate ceil(n/b) with integers we could write - * - * ceil(n/b) = (n/b) + ((n%b)?1:0) - * - * and in fact a previous version of this hash did just that. - * But now we have improved things a bit by recognizing that b is - * always a power of two. We keep its base 2 log handy (call it lb), - * so now we can write this with a bit shift and logical AND: - * - * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) - * - */ -#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \ -do { \ - unsigned _he_bkt; \ - unsigned _he_bkt_i; \ - struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ - UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ - _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ - 2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ - if (!_he_new_buckets) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero(_he_new_buckets, \ - 2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ - (tbl)->ideal_chain_maxlen = \ - ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \ - ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ - (tbl)->nonideal_items = 0; \ - for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \ - _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \ - while (_he_thh != NULL) { \ - _he_hh_nxt = _he_thh->hh_next; \ - HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \ - _he_newbkt = &(_he_new_buckets[_he_bkt]); \ - if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \ - (tbl)->nonideal_items++; \ - if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \ - _he_newbkt->expand_mult++; \ - } \ - } \ - _he_thh->hh_prev = NULL; \ - _he_thh->hh_next = _he_newbkt->hh_head; \ - if (_he_newbkt->hh_head != NULL) { \ - _he_newbkt->hh_head->hh_prev = _he_thh; \ - } \ - _he_newbkt->hh_head = _he_thh; \ - _he_thh = _he_hh_nxt; \ - } \ - } \ - uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ - (tbl)->num_buckets *= 2U; \ - (tbl)->log2_num_buckets++; \ - (tbl)->buckets = _he_new_buckets; \ - (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \ - ((tbl)->ineff_expands+1U) : 0U; \ - if ((tbl)->ineff_expands > 1U) { \ - (tbl)->noexpand = 1; \ - uthash_noexpand_fyi(tbl); \ - } \ - uthash_expand_fyi(tbl); \ - } \ -} while (0) - - -/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ -/* Note that HASH_SORT assumes the hash handle name to be hh. - * HASH_SRT was added to allow the hash handle name to be passed in. 
*/ -#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) -#define HASH_SRT(hh,head,cmpfcn) \ -do { \ - unsigned _hs_i; \ - unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ - struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ - if (head != NULL) { \ - _hs_insize = 1; \ - _hs_looping = 1; \ - _hs_list = &((head)->hh); \ - while (_hs_looping != 0U) { \ - _hs_p = _hs_list; \ - _hs_list = NULL; \ - _hs_tail = NULL; \ - _hs_nmerges = 0; \ - while (_hs_p != NULL) { \ - _hs_nmerges++; \ - _hs_q = _hs_p; \ - _hs_psize = 0; \ - for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \ - _hs_psize++; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - if (_hs_q == NULL) { \ - break; \ - } \ - } \ - _hs_qsize = _hs_insize; \ - while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \ - if (_hs_psize == 0U) { \ - _hs_e = _hs_q; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - _hs_qsize--; \ - } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \ - _hs_e = _hs_p; \ - if (_hs_p != NULL) { \ - _hs_p = ((_hs_p->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ - } \ - _hs_psize--; \ - } else if ((cmpfcn( \ - DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \ - DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \ - )) <= 0) { \ - _hs_e = _hs_p; \ - if (_hs_p != NULL) { \ - _hs_p = ((_hs_p->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ - } \ - _hs_psize--; \ - } else { \ - _hs_e = _hs_q; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - _hs_qsize--; \ - } \ - if ( _hs_tail != NULL ) { \ - _hs_tail->next = ((_hs_e != NULL) ? \ - ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \ - } else { \ - _hs_list = _hs_e; \ - } \ - if (_hs_e != NULL) { \ - _hs_e->prev = ((_hs_tail != NULL) ? \ - ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \ - } \ - _hs_tail = _hs_e; \ - } \ - _hs_p = _hs_q; \ - } \ - if (_hs_tail != NULL) { \ - _hs_tail->next = NULL; \ - } \ - if (_hs_nmerges <= 1U) { \ - _hs_looping = 0; \ - (head)->hh.tbl->tail = _hs_tail; \ - DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ - } \ - _hs_insize *= 2U; \ - } \ - HASH_FSCK(hh, head, "HASH_SRT"); \ - } \ -} while (0) - -/* This function selects items from one hash into another hash. - * The end result is that the selected items have dual presence - * in both hashes. There is no copy of the items made; rather - * they are added into the new hash through a secondary hash - * hash handle that must be present in the structure. 
*/ -#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ -do { \ - unsigned _src_bkt, _dst_bkt; \ - void *_last_elt = NULL, *_elt; \ - UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ - ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ - if ((src) != NULL) { \ - for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ - for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ - _src_hh != NULL; \ - _src_hh = _src_hh->hh_next) { \ - _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ - if (cond(_elt)) { \ - IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \ - _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ - _dst_hh->key = _src_hh->key; \ - _dst_hh->keylen = _src_hh->keylen; \ - _dst_hh->hashv = _src_hh->hashv; \ - _dst_hh->prev = _last_elt; \ - _dst_hh->next = NULL; \ - if (_last_elt_hh != NULL) { \ - _last_elt_hh->next = _elt; \ - } \ - if ((dst) == NULL) { \ - DECLTYPE_ASSIGN(dst, _elt); \ - HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (_hs_oomed) { \ - uthash_nonfatal_oom(_elt); \ - (dst) = NULL; \ - continue; \ - } \ - ) \ - } else { \ - _dst_hh->tbl = (dst)->hh_dst.tbl; \ - } \ - HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ - HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \ - (dst)->hh_dst.tbl->num_items++; \ - IF_HASH_NONFATAL_OOM( \ - if (_hs_oomed) { \ - HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \ - HASH_DELETE_HH(hh_dst, dst, _dst_hh); \ - _dst_hh->tbl = NULL; \ - uthash_nonfatal_oom(_elt); \ - continue; \ - } \ - ) \ - HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \ - _last_elt = _elt; \ - _last_elt_hh = _dst_hh; \ - } \ - } \ - } \ - } \ - HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \ -} while (0) - -#define HASH_CLEAR(hh,head) \ -do { \ - if ((head) != NULL) { \ - HASH_BLOOM_FREE((head)->hh.tbl); \ - uthash_free((head)->hh.tbl->buckets, \ - (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head) = NULL; \ - } \ -} while (0) - -#define HASH_OVERHEAD(hh,head) \ - (((head) != NULL) ? ( \ - (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ - ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ - sizeof(UT_hash_table) + \ - (HASH_BLOOM_BYTELEN))) : 0U) - -#ifdef NO_DECLTYPE -#define HASH_ITER(hh,head,el,tmp) \ -for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ - (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) -#else -#define HASH_ITER(hh,head,el,tmp) \ -for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ - (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) -#endif - -/* obtain a count of items in the hash */ -#define HASH_COUNT(head) HASH_CNT(hh,head) -#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) - -typedef struct UT_hash_bucket { - struct UT_hash_handle *hh_head; - unsigned count; - - /* expand_mult is normally set to 0. In this situation, the max chain length - * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If - * the bucket's chain exceeds this length, bucket expansion is triggered). - * However, setting expand_mult to a non-zero value delays bucket expansion - * (that would be triggered by additions to this particular bucket) - * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. - * (The multiplier is simply expand_mult+1). 
The whole idea of this - * multiplier is to reduce bucket expansions, since they are expensive, in - * situations where we know that a particular bucket tends to be overused. - * It is better to let its chain length grow to a longer yet-still-bounded - * value, than to do an O(n) bucket expansion too often. - */ - unsigned expand_mult; - -} UT_hash_bucket; - -/* random signature used only to find hash tables in external analysis */ -#define HASH_SIGNATURE 0xa0111fe1u -#define HASH_BLOOM_SIGNATURE 0xb12220f2u - -typedef struct UT_hash_table { - UT_hash_bucket *buckets; - unsigned num_buckets, log2_num_buckets; - unsigned num_items; - struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ - ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ - - /* in an ideal situation (all buckets used equally), no bucket would have - * more than ceil(#items/#buckets) items. that's the ideal chain length. */ - unsigned ideal_chain_maxlen; - - /* nonideal_items is the number of items in the hash whose chain position - * exceeds the ideal chain maxlen. these items pay the penalty for an uneven - * hash distribution; reaching them in a chain traversal takes >ideal steps */ - unsigned nonideal_items; - - /* ineffective expands occur when a bucket doubling was performed, but - * afterward, more than half the items in the hash had nonideal chain - * positions. If this happens on two consecutive expansions we inhibit any - * further expansion, as it's not helping; this happens when the hash - * function isn't a good fit for the key domain. When expansion is inhibited - * the hash will still work, albeit no longer in constant time. */ - unsigned ineff_expands, noexpand; - - uint32_t signature; /* used only to find hash tables in external analysis */ -#ifdef HASH_BLOOM - uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ - uint8_t *bloom_bv; - uint8_t bloom_nbits; -#endif - -} UT_hash_table; - -typedef struct UT_hash_handle { - struct UT_hash_table *tbl; - void *prev; /* prev element in app order */ - void *next; /* next element in app order */ - struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ - struct UT_hash_handle *hh_next; /* next hh in bucket order */ - void *key; /* ptr to enclosing struct's key */ - unsigned keylen; /* enclosing struct's key len */ - unsigned hashv; /* result of hash-fcn(key) */ -} UT_hash_handle; - -#endif /* UTHASH_H */ diff --git a/3rdparty/uthash/src/utlist.h b/3rdparty/uthash/src/utlist.h deleted file mode 100644 index 5bb1ac9b72e556ea35d50a2fa2377bd50640e7f6..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/utlist.h +++ /dev/null @@ -1,1073 +0,0 @@ -/* -Copyright (c) 2007-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifndef UTLIST_H -#define UTLIST_H - -#define UTLIST_VERSION 2.1.0 - -#include <assert.h> - -/* - * This file contains macros to manipulate singly and doubly-linked lists. - * - * 1. LL_ macros: singly-linked lists. - * 2. DL_ macros: doubly-linked lists. - * 3. CDL_ macros: circular doubly-linked lists. - * - * To use singly-linked lists, your structure must have a "next" pointer. - * To use doubly-linked lists, your structure must "prev" and "next" pointers. - * Either way, the pointer to the head of the list must be initialized to NULL. - * - * ----------------.EXAMPLE ------------------------- - * struct item { - * int id; - * struct item *prev, *next; - * } - * - * struct item *list = NULL: - * - * int main() { - * struct item *item; - * ... allocate and populate item ... - * DL_APPEND(list, item); - * } - * -------------------------------------------------- - * - * For doubly-linked lists, the append and delete macros are O(1) - * For singly-linked lists, append and delete are O(n) but prepend is O(1) - * The sort macro is O(n log(n)) for all types of single/double/circular lists. - */ - -/* These macros use decltype or the earlier __typeof GNU extension. - As decltype is only available in newer compilers (VS2010 or gcc 4.3+ - when compiling c++ source) this code uses whatever method is needed - or, for VS2008 where neither is available, uses casting workarounds. 
*/ -#if !defined(LDECLTYPE) && !defined(NO_DECLTYPE) -#if defined(_MSC_VER) /* MS compiler */ -#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ -#define LDECLTYPE(x) decltype(x) -#else /* VS2008 or older (or VS2010 in C mode) */ -#define NO_DECLTYPE -#endif -#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) -#define NO_DECLTYPE -#else /* GNU, Sun and other compilers */ -#define LDECLTYPE(x) __typeof(x) -#endif -#endif - -/* for VS2008 we use some workarounds to get around the lack of decltype, - * namely, we always reassign our tmp variable to the list head if we need - * to dereference its prev/next pointers, and save/restore the real head.*/ -#ifdef NO_DECLTYPE -#define IF_NO_DECLTYPE(x) x -#define LDECLTYPE(x) char* -#define UTLIST_SV(elt,list) _tmp = (char*)(list); {char **_alias = (char**)&(list); *_alias = (elt); } -#define UTLIST_NEXT(elt,list,next) ((char*)((list)->next)) -#define UTLIST_NEXTASGN(elt,list,to,next) { char **_alias = (char**)&((list)->next); *_alias=(char*)(to); } -/* #define UTLIST_PREV(elt,list,prev) ((char*)((list)->prev)) */ -#define UTLIST_PREVASGN(elt,list,to,prev) { char **_alias = (char**)&((list)->prev); *_alias=(char*)(to); } -#define UTLIST_RS(list) { char **_alias = (char**)&(list); *_alias=_tmp; } -#define UTLIST_CASTASGN(a,b) { char **_alias = (char**)&(a); *_alias=(char*)(b); } -#else -#define IF_NO_DECLTYPE(x) -#define UTLIST_SV(elt,list) -#define UTLIST_NEXT(elt,list,next) ((elt)->next) -#define UTLIST_NEXTASGN(elt,list,to,next) ((elt)->next)=(to) -/* #define UTLIST_PREV(elt,list,prev) ((elt)->prev) */ -#define UTLIST_PREVASGN(elt,list,to,prev) ((elt)->prev)=(to) -#define UTLIST_RS(list) -#define UTLIST_CASTASGN(a,b) (a)=(b) -#endif - -/****************************************************************************** - * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort * - * Unwieldy variable names used here to avoid shadowing passed-in variables. 
* - *****************************************************************************/ -#define LL_SORT(list, cmp) \ - LL_SORT2(list, cmp, next) - -#define LL_SORT2(list, cmp, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - IF_NO_DECLTYPE(LDECLTYPE(list) _tmp;) \ - int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - UTLIST_CASTASGN(_ls_p,list); \ - (list) = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - UTLIST_SV(_ls_q,list); _ls_q = UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - } else if (_ls_qsize == 0 || !_ls_q) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - } else { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - } \ - if (_ls_tail) { \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,_ls_e,next); UTLIST_RS(list); \ - } else { \ - UTLIST_CASTASGN(list,_ls_e); \ - } \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - if (_ls_tail) { \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,NULL,next); UTLIST_RS(list); \ - } \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - - -#define DL_SORT(list, cmp) \ - DL_SORT2(list, cmp, prev, next) - -#define DL_SORT2(list, cmp, prev, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - IF_NO_DECLTYPE(LDECLTYPE(list) _tmp;) \ - int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - UTLIST_CASTASGN(_ls_p,list); \ - (list) = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - UTLIST_SV(_ls_q,list); _ls_q = UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while ((_ls_psize > 0) || ((_ls_qsize > 0) && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - } else if ((_ls_qsize == 0) || (!_ls_q)) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - } else { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - } \ - if (_ls_tail) { \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,_ls_e,next); UTLIST_RS(list); \ - } else { \ - 
UTLIST_CASTASGN(list,_ls_e); \ - } \ - UTLIST_SV(_ls_e,list); UTLIST_PREVASGN(_ls_e,list,_ls_tail,prev); UTLIST_RS(list); \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - UTLIST_CASTASGN((list)->prev, _ls_tail); \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,NULL,next); UTLIST_RS(list); \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - -#define CDL_SORT(list, cmp) \ - CDL_SORT2(list, cmp, prev, next) - -#define CDL_SORT2(list, cmp, prev, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - LDECLTYPE(list) _ls_oldhead; \ - LDECLTYPE(list) _tmp; \ - int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - UTLIST_CASTASGN(_ls_p,list); \ - UTLIST_CASTASGN(_ls_oldhead,list); \ - (list) = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - UTLIST_SV(_ls_q,list); \ - if (UTLIST_NEXT(_ls_q,list,next) == _ls_oldhead) { \ - _ls_q = NULL; \ - } else { \ - _ls_q = UTLIST_NEXT(_ls_q,list,next); \ - } \ - UTLIST_RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ - } else if (_ls_qsize == 0 || !_ls_q) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; UTLIST_SV(_ls_p,list); _ls_p = \ - UTLIST_NEXT(_ls_p,list,next); UTLIST_RS(list); _ls_psize--; \ - if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ - } else { \ - _ls_e = _ls_q; UTLIST_SV(_ls_q,list); _ls_q = \ - UTLIST_NEXT(_ls_q,list,next); UTLIST_RS(list); _ls_qsize--; \ - if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ - } \ - if (_ls_tail) { \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,_ls_e,next); UTLIST_RS(list); \ - } else { \ - UTLIST_CASTASGN(list,_ls_e); \ - } \ - UTLIST_SV(_ls_e,list); UTLIST_PREVASGN(_ls_e,list,_ls_tail,prev); UTLIST_RS(list); \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - UTLIST_CASTASGN((list)->prev,_ls_tail); \ - UTLIST_CASTASGN(_tmp,list); \ - UTLIST_SV(_ls_tail,list); UTLIST_NEXTASGN(_ls_tail,list,_tmp,next); UTLIST_RS(list); \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - -/****************************************************************************** - * singly linked list macros (non-circular) * - *****************************************************************************/ -#define LL_PREPEND(head,add) \ - LL_PREPEND2(head,add,next) - -#define LL_PREPEND2(head,add,next) \ -do { \ - (add)->next = (head); \ - (head) = (add); \ -} while (0) - -#define LL_CONCAT(head1,head2) \ - LL_CONCAT2(head1,head2,next) - -#define LL_CONCAT2(head1,head2,next) \ -do { \ - LDECLTYPE(head1) _tmp; \ - if (head1) { \ - _tmp = (head1); \ - while (_tmp->next) { _tmp = _tmp->next; } \ - _tmp->next=(head2); \ - } else { \ - (head1)=(head2); \ - } \ -} while (0) - -#define LL_APPEND(head,add) \ - LL_APPEND2(head,add,next) - -#define 
LL_APPEND2(head,add,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - (add)->next=NULL; \ - if (head) { \ - _tmp = (head); \ - while (_tmp->next) { _tmp = _tmp->next; } \ - _tmp->next=(add); \ - } else { \ - (head)=(add); \ - } \ -} while (0) - -#define LL_INSERT_INORDER(head,add,cmp) \ - LL_INSERT_INORDER2(head,add,cmp,next) - -#define LL_INSERT_INORDER2(head,add,cmp,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - if (head) { \ - LL_LOWER_BOUND2(head, _tmp, add, cmp, next); \ - LL_APPEND_ELEM2(head, _tmp, add, next); \ - } else { \ - (head) = (add); \ - (head)->next = NULL; \ - } \ -} while (0) - -#define LL_LOWER_BOUND(head,elt,like,cmp) \ - LL_LOWER_BOUND2(head,elt,like,cmp,next) - -#define LL_LOWER_BOUND2(head,elt,like,cmp,next) \ - do { \ - if ((head) == NULL || (cmp(head, like)) >= 0) { \ - (elt) = NULL; \ - } else { \ - for ((elt) = (head); (elt)->next != NULL; (elt) = (elt)->next) { \ - if (cmp((elt)->next, like) >= 0) { \ - break; \ - } \ - } \ - } \ - } while (0) - -#define LL_DELETE(head,del) \ - LL_DELETE2(head,del,next) - -#define LL_DELETE2(head,del,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - if ((head) == (del)) { \ - (head)=(head)->next; \ - } else { \ - _tmp = (head); \ - while (_tmp->next && (_tmp->next != (del))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = (del)->next; \ - } \ - } \ -} while (0) - -#define LL_COUNT(head,el,counter) \ - LL_COUNT2(head,el,counter,next) \ - -#define LL_COUNT2(head,el,counter,next) \ -do { \ - (counter) = 0; \ - LL_FOREACH2(head,el,next) { ++(counter); } \ -} while (0) - -#define LL_FOREACH(head,el) \ - LL_FOREACH2(head,el,next) - -#define LL_FOREACH2(head,el,next) \ - for ((el) = (head); el; (el) = (el)->next) - -#define LL_FOREACH_SAFE(head,el,tmp) \ - LL_FOREACH_SAFE2(head,el,tmp,next) - -#define LL_FOREACH_SAFE2(head,el,tmp,next) \ - for ((el) = (head); (el) && ((tmp) = (el)->next, 1); (el) = (tmp)) - -#define LL_SEARCH_SCALAR(head,out,field,val) \ - LL_SEARCH_SCALAR2(head,out,field,val,next) - -#define LL_SEARCH_SCALAR2(head,out,field,val,next) \ -do { \ - LL_FOREACH2(head,out,next) { \ - if ((out)->field == (val)) break; \ - } \ -} while (0) - -#define LL_SEARCH(head,out,elt,cmp) \ - LL_SEARCH2(head,out,elt,cmp,next) - -#define LL_SEARCH2(head,out,elt,cmp,next) \ -do { \ - LL_FOREACH2(head,out,next) { \ - if ((cmp(out,elt))==0) break; \ - } \ -} while (0) - -#define LL_REPLACE_ELEM2(head, el, add, next) \ -do { \ - LDECLTYPE(head) _tmp; \ - assert((head) != NULL); \ - assert((el) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el)->next; \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - _tmp = (head); \ - while (_tmp->next && (_tmp->next != (el))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = (add); \ - } \ - } \ -} while (0) - -#define LL_REPLACE_ELEM(head, el, add) \ - LL_REPLACE_ELEM2(head, el, add, next) - -#define LL_PREPEND_ELEM2(head, el, add, next) \ -do { \ - if (el) { \ - LDECLTYPE(head) _tmp; \ - assert((head) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el); \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - _tmp = (head); \ - while (_tmp->next && (_tmp->next != (el))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = (add); \ - } \ - } \ - } else { \ - LL_APPEND2(head, add, next); \ - } \ -} while (0) \ - -#define LL_PREPEND_ELEM(head, el, add) \ - LL_PREPEND_ELEM2(head, el, add, next) - -#define LL_APPEND_ELEM2(head, el, add, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - 
(add)->next = (el)->next; \ - (el)->next = (add); \ - } else { \ - LL_PREPEND2(head, add, next); \ - } \ -} while (0) \ - -#define LL_APPEND_ELEM(head, el, add) \ - LL_APPEND_ELEM2(head, el, add, next) - -#ifdef NO_DECLTYPE -/* Here are VS2008 / NO_DECLTYPE replacements for a few functions */ - -#undef LL_CONCAT2 -#define LL_CONCAT2(head1,head2,next) \ -do { \ - char *_tmp; \ - if (head1) { \ - _tmp = (char*)(head1); \ - while ((head1)->next) { (head1) = (head1)->next; } \ - (head1)->next = (head2); \ - UTLIST_RS(head1); \ - } else { \ - (head1)=(head2); \ - } \ -} while (0) - -#undef LL_APPEND2 -#define LL_APPEND2(head,add,next) \ -do { \ - if (head) { \ - (add)->next = head; /* use add->next as a temp variable */ \ - while ((add)->next->next) { (add)->next = (add)->next->next; } \ - (add)->next->next=(add); \ - } else { \ - (head)=(add); \ - } \ - (add)->next=NULL; \ -} while (0) - -#undef LL_INSERT_INORDER2 -#define LL_INSERT_INORDER2(head,add,cmp,next) \ -do { \ - if ((head) == NULL || (cmp(head, add)) >= 0) { \ - (add)->next = (head); \ - (head) = (add); \ - } else { \ - char *_tmp = (char*)(head); \ - while ((head)->next != NULL && (cmp((head)->next, add)) < 0) { \ - (head) = (head)->next; \ - } \ - (add)->next = (head)->next; \ - (head)->next = (add); \ - UTLIST_RS(head); \ - } \ -} while (0) - -#undef LL_DELETE2 -#define LL_DELETE2(head,del,next) \ -do { \ - if ((head) == (del)) { \ - (head)=(head)->next; \ - } else { \ - char *_tmp = (char*)(head); \ - while ((head)->next && ((head)->next != (del))) { \ - (head) = (head)->next; \ - } \ - if ((head)->next) { \ - (head)->next = ((del)->next); \ - } \ - UTLIST_RS(head); \ - } \ -} while (0) - -#undef LL_REPLACE_ELEM2 -#define LL_REPLACE_ELEM2(head, el, add, next) \ -do { \ - assert((head) != NULL); \ - assert((el) != NULL); \ - assert((add) != NULL); \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - (add)->next = head; \ - while ((add)->next->next && ((add)->next->next != (el))) { \ - (add)->next = (add)->next->next; \ - } \ - if ((add)->next->next) { \ - (add)->next->next = (add); \ - } \ - } \ - (add)->next = (el)->next; \ -} while (0) - -#undef LL_PREPEND_ELEM2 -#define LL_PREPEND_ELEM2(head, el, add, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - (add)->next = (head); \ - while ((add)->next->next && ((add)->next->next != (el))) { \ - (add)->next = (add)->next->next; \ - } \ - if ((add)->next->next) { \ - (add)->next->next = (add); \ - } \ - } \ - (add)->next = (el); \ - } else { \ - LL_APPEND2(head, add, next); \ - } \ -} while (0) \ - -#endif /* NO_DECLTYPE */ - -/****************************************************************************** - * doubly linked list macros (non-circular) * - *****************************************************************************/ -#define DL_PREPEND(head,add) \ - DL_PREPEND2(head,add,prev,next) - -#define DL_PREPEND2(head,add,prev,next) \ -do { \ - (add)->next = (head); \ - if (head) { \ - (add)->prev = (head)->prev; \ - (head)->prev = (add); \ - } else { \ - (add)->prev = (add); \ - } \ - (head) = (add); \ -} while (0) - -#define DL_APPEND(head,add) \ - DL_APPEND2(head,add,prev,next) - -#define DL_APPEND2(head,add,prev,next) \ -do { \ - if (head) { \ - (add)->prev = (head)->prev; \ - (head)->prev->next = (add); \ - (head)->prev = (add); \ - (add)->next = NULL; \ - } else { \ - (head)=(add); \ - (head)->prev = (head); \ - (head)->next = NULL; \ - } \ -} while (0) - -#define 
DL_INSERT_INORDER(head,add,cmp) \ - DL_INSERT_INORDER2(head,add,cmp,prev,next) - -#define DL_INSERT_INORDER2(head,add,cmp,prev,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - if (head) { \ - DL_LOWER_BOUND2(head, _tmp, add, cmp, next); \ - DL_APPEND_ELEM2(head, _tmp, add, prev, next); \ - } else { \ - (head) = (add); \ - (head)->prev = (head); \ - (head)->next = NULL; \ - } \ -} while (0) - -#define DL_LOWER_BOUND(head,elt,like,cmp) \ - DL_LOWER_BOUND2(head,elt,like,cmp,next) - -#define DL_LOWER_BOUND2(head,elt,like,cmp,next) \ -do { \ - if ((head) == NULL || (cmp(head, like)) >= 0) { \ - (elt) = NULL; \ - } else { \ - for ((elt) = (head); (elt)->next != NULL; (elt) = (elt)->next) { \ - if ((cmp((elt)->next, like)) >= 0) { \ - break; \ - } \ - } \ - } \ -} while (0) - -#define DL_CONCAT(head1,head2) \ - DL_CONCAT2(head1,head2,prev,next) - -#define DL_CONCAT2(head1,head2,prev,next) \ -do { \ - LDECLTYPE(head1) _tmp; \ - if (head2) { \ - if (head1) { \ - UTLIST_CASTASGN(_tmp, (head2)->prev); \ - (head2)->prev = (head1)->prev; \ - (head1)->prev->next = (head2); \ - UTLIST_CASTASGN((head1)->prev, _tmp); \ - } else { \ - (head1)=(head2); \ - } \ - } \ -} while (0) - -#define DL_DELETE(head,del) \ - DL_DELETE2(head,del,prev,next) - -#define DL_DELETE2(head,del,prev,next) \ -do { \ - assert((head) != NULL); \ - assert((del)->prev != NULL); \ - if ((del)->prev == (del)) { \ - (head)=NULL; \ - } else if ((del)==(head)) { \ - (del)->next->prev = (del)->prev; \ - (head) = (del)->next; \ - } else { \ - (del)->prev->next = (del)->next; \ - if ((del)->next) { \ - (del)->next->prev = (del)->prev; \ - } else { \ - (head)->prev = (del)->prev; \ - } \ - } \ -} while (0) - -#define DL_COUNT(head,el,counter) \ - DL_COUNT2(head,el,counter,next) \ - -#define DL_COUNT2(head,el,counter,next) \ -do { \ - (counter) = 0; \ - DL_FOREACH2(head,el,next) { ++(counter); } \ -} while (0) - -#define DL_FOREACH(head,el) \ - DL_FOREACH2(head,el,next) - -#define DL_FOREACH2(head,el,next) \ - for ((el) = (head); el; (el) = (el)->next) - -/* this version is safe for deleting the elements during iteration */ -#define DL_FOREACH_SAFE(head,el,tmp) \ - DL_FOREACH_SAFE2(head,el,tmp,next) - -#define DL_FOREACH_SAFE2(head,el,tmp,next) \ - for ((el) = (head); (el) && ((tmp) = (el)->next, 1); (el) = (tmp)) - -/* these are identical to their singly-linked list counterparts */ -#define DL_SEARCH_SCALAR LL_SEARCH_SCALAR -#define DL_SEARCH LL_SEARCH -#define DL_SEARCH_SCALAR2 LL_SEARCH_SCALAR2 -#define DL_SEARCH2 LL_SEARCH2 - -#define DL_REPLACE_ELEM2(head, el, add, prev, next) \ -do { \ - assert((head) != NULL); \ - assert((el) != NULL); \ - assert((add) != NULL); \ - if ((head) == (el)) { \ - (head) = (add); \ - (add)->next = (el)->next; \ - if ((el)->next == NULL) { \ - (add)->prev = (add); \ - } else { \ - (add)->prev = (el)->prev; \ - (add)->next->prev = (add); \ - } \ - } else { \ - (add)->next = (el)->next; \ - (add)->prev = (el)->prev; \ - (add)->prev->next = (add); \ - if ((el)->next == NULL) { \ - (head)->prev = (add); \ - } else { \ - (add)->next->prev = (add); \ - } \ - } \ -} while (0) - -#define DL_REPLACE_ELEM(head, el, add) \ - DL_REPLACE_ELEM2(head, el, add, prev, next) - -#define DL_PREPEND_ELEM2(head, el, add, prev, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el); \ - (add)->prev = (el)->prev; \ - (el)->prev = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - (add)->prev->next = (add); \ - } \ - } else { \ - DL_APPEND2(head, add, prev, next); \ - } 
\ -} while (0) \ - -#define DL_PREPEND_ELEM(head, el, add) \ - DL_PREPEND_ELEM2(head, el, add, prev, next) - -#define DL_APPEND_ELEM2(head, el, add, prev, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el)->next; \ - (add)->prev = (el); \ - (el)->next = (add); \ - if ((add)->next) { \ - (add)->next->prev = (add); \ - } else { \ - (head)->prev = (add); \ - } \ - } else { \ - DL_PREPEND2(head, add, prev, next); \ - } \ -} while (0) \ - -#define DL_APPEND_ELEM(head, el, add) \ - DL_APPEND_ELEM2(head, el, add, prev, next) - -#ifdef NO_DECLTYPE -/* Here are VS2008 / NO_DECLTYPE replacements for a few functions */ - -#undef DL_INSERT_INORDER2 -#define DL_INSERT_INORDER2(head,add,cmp,prev,next) \ -do { \ - if ((head) == NULL) { \ - (add)->prev = (add); \ - (add)->next = NULL; \ - (head) = (add); \ - } else if ((cmp(head, add)) >= 0) { \ - (add)->prev = (head)->prev; \ - (add)->next = (head); \ - (head)->prev = (add); \ - (head) = (add); \ - } else { \ - char *_tmp = (char*)(head); \ - while ((head)->next && (cmp((head)->next, add)) < 0) { \ - (head) = (head)->next; \ - } \ - (add)->prev = (head); \ - (add)->next = (head)->next; \ - (head)->next = (add); \ - UTLIST_RS(head); \ - if ((add)->next) { \ - (add)->next->prev = (add); \ - } else { \ - (head)->prev = (add); \ - } \ - } \ -} while (0) -#endif /* NO_DECLTYPE */ - -/****************************************************************************** - * circular doubly linked list macros * - *****************************************************************************/ -#define CDL_APPEND(head,add) \ - CDL_APPEND2(head,add,prev,next) - -#define CDL_APPEND2(head,add,prev,next) \ -do { \ - if (head) { \ - (add)->prev = (head)->prev; \ - (add)->next = (head); \ - (head)->prev = (add); \ - (add)->prev->next = (add); \ - } else { \ - (add)->prev = (add); \ - (add)->next = (add); \ - (head) = (add); \ - } \ -} while (0) - -#define CDL_PREPEND(head,add) \ - CDL_PREPEND2(head,add,prev,next) - -#define CDL_PREPEND2(head,add,prev,next) \ -do { \ - if (head) { \ - (add)->prev = (head)->prev; \ - (add)->next = (head); \ - (head)->prev = (add); \ - (add)->prev->next = (add); \ - } else { \ - (add)->prev = (add); \ - (add)->next = (add); \ - } \ - (head) = (add); \ -} while (0) - -#define CDL_INSERT_INORDER(head,add,cmp) \ - CDL_INSERT_INORDER2(head,add,cmp,prev,next) - -#define CDL_INSERT_INORDER2(head,add,cmp,prev,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - if (head) { \ - CDL_LOWER_BOUND2(head, _tmp, add, cmp, next); \ - CDL_APPEND_ELEM2(head, _tmp, add, prev, next); \ - } else { \ - (head) = (add); \ - (head)->next = (head); \ - (head)->prev = (head); \ - } \ -} while (0) - -#define CDL_LOWER_BOUND(head,elt,like,cmp) \ - CDL_LOWER_BOUND2(head,elt,like,cmp,next) - -#define CDL_LOWER_BOUND2(head,elt,like,cmp,next) \ -do { \ - if ((head) == NULL || (cmp(head, like)) >= 0) { \ - (elt) = NULL; \ - } else { \ - for ((elt) = (head); (elt)->next != (head); (elt) = (elt)->next) { \ - if ((cmp((elt)->next, like)) >= 0) { \ - break; \ - } \ - } \ - } \ -} while (0) - -#define CDL_DELETE(head,del) \ - CDL_DELETE2(head,del,prev,next) - -#define CDL_DELETE2(head,del,prev,next) \ -do { \ - if (((head)==(del)) && ((head)->next == (head))) { \ - (head) = NULL; \ - } else { \ - (del)->next->prev = (del)->prev; \ - (del)->prev->next = (del)->next; \ - if ((del) == (head)) (head)=(del)->next; \ - } \ -} while (0) - -#define CDL_COUNT(head,el,counter) \ - CDL_COUNT2(head,el,counter,next) \ - -#define CDL_COUNT2(head, el, 
counter,next) \ -do { \ - (counter) = 0; \ - CDL_FOREACH2(head,el,next) { ++(counter); } \ -} while (0) - -#define CDL_FOREACH(head,el) \ - CDL_FOREACH2(head,el,next) - -#define CDL_FOREACH2(head,el,next) \ - for ((el)=(head);el;(el)=(((el)->next==(head)) ? NULL : (el)->next)) - -#define CDL_FOREACH_SAFE(head,el,tmp1,tmp2) \ - CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) - -#define CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) \ - for ((el) = (head), (tmp1) = (head) ? (head)->prev : NULL; \ - (el) && ((tmp2) = (el)->next, 1); \ - (el) = ((el) == (tmp1) ? NULL : (tmp2))) - -#define CDL_SEARCH_SCALAR(head,out,field,val) \ - CDL_SEARCH_SCALAR2(head,out,field,val,next) - -#define CDL_SEARCH_SCALAR2(head,out,field,val,next) \ -do { \ - CDL_FOREACH2(head,out,next) { \ - if ((out)->field == (val)) break; \ - } \ -} while (0) - -#define CDL_SEARCH(head,out,elt,cmp) \ - CDL_SEARCH2(head,out,elt,cmp,next) - -#define CDL_SEARCH2(head,out,elt,cmp,next) \ -do { \ - CDL_FOREACH2(head,out,next) { \ - if ((cmp(out,elt))==0) break; \ - } \ -} while (0) - -#define CDL_REPLACE_ELEM2(head, el, add, prev, next) \ -do { \ - assert((head) != NULL); \ - assert((el) != NULL); \ - assert((add) != NULL); \ - if ((el)->next == (el)) { \ - (add)->next = (add); \ - (add)->prev = (add); \ - (head) = (add); \ - } else { \ - (add)->next = (el)->next; \ - (add)->prev = (el)->prev; \ - (add)->next->prev = (add); \ - (add)->prev->next = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } \ - } \ -} while (0) - -#define CDL_REPLACE_ELEM(head, el, add) \ - CDL_REPLACE_ELEM2(head, el, add, prev, next) - -#define CDL_PREPEND_ELEM2(head, el, add, prev, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el); \ - (add)->prev = (el)->prev; \ - (el)->prev = (add); \ - (add)->prev->next = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } \ - } else { \ - CDL_APPEND2(head, add, prev, next); \ - } \ -} while (0) - -#define CDL_PREPEND_ELEM(head, el, add) \ - CDL_PREPEND_ELEM2(head, el, add, prev, next) - -#define CDL_APPEND_ELEM2(head, el, add, prev, next) \ -do { \ - if (el) { \ - assert((head) != NULL); \ - assert((add) != NULL); \ - (add)->next = (el)->next; \ - (add)->prev = (el); \ - (el)->next = (add); \ - (add)->next->prev = (add); \ - } else { \ - CDL_PREPEND2(head, add, prev, next); \ - } \ -} while (0) - -#define CDL_APPEND_ELEM(head, el, add) \ - CDL_APPEND_ELEM2(head, el, add, prev, next) - -#ifdef NO_DECLTYPE -/* Here are VS2008 / NO_DECLTYPE replacements for a few functions */ - -#undef CDL_INSERT_INORDER2 -#define CDL_INSERT_INORDER2(head,add,cmp,prev,next) \ -do { \ - if ((head) == NULL) { \ - (add)->prev = (add); \ - (add)->next = (add); \ - (head) = (add); \ - } else if ((cmp(head, add)) >= 0) { \ - (add)->prev = (head)->prev; \ - (add)->next = (head); \ - (add)->prev->next = (add); \ - (head)->prev = (add); \ - (head) = (add); \ - } else { \ - char *_tmp = (char*)(head); \ - while ((char*)(head)->next != _tmp && (cmp((head)->next, add)) < 0) { \ - (head) = (head)->next; \ - } \ - (add)->prev = (head); \ - (add)->next = (head)->next; \ - (add)->next->prev = (add); \ - (head)->next = (add); \ - UTLIST_RS(head); \ - } \ -} while (0) -#endif /* NO_DECLTYPE */ - -#endif /* UTLIST_H */ diff --git a/3rdparty/uthash/src/utringbuffer.h b/3rdparty/uthash/src/utringbuffer.h deleted file mode 100644 index ce2890e60cd4a94b17424dab522b39bc0995cc7c..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/utringbuffer.h +++ /dev/null @@ -1,108 +0,0 @@ 
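For orientation, the list macros above are driven entirely by a user-defined element struct: anything with a `next` pointer works with the LL_* macros, and the DL_*/CDL_* variants additionally expect a `prev` pointer; the comparator handed to the sort macros receives two element pointers and returns a strcmp-style result. The following is a minimal usage sketch, not part of the deleted headers; the `el` struct and `by_id` comparator are illustrative assumptions.

```c
/* Hypothetical example of the utlist macros deleted above (not from the
 * original sources). Any struct with a `next` pointer can be used. */
#include <stdio.h>
#include <stdlib.h>
#include "utlist.h"

typedef struct el {
    int id;
    struct el *next;              /* required by the LL_* macros */
} el;

/* strcmp-style comparator, as expected by LL_SORT */
static int by_id(el *a, el *b) {
    return (a->id > b->id) - (a->id < b->id);
}

int main(void) {
    el *head = NULL, *item, *tmp;
    int ids[] = {3, 1, 2}, i, count;

    for (i = 0; i < 3; i++) {
        item = (el *)malloc(sizeof *item);
        item->id = ids[i];
        LL_APPEND(head, item);    /* append to the tail */
    }
    LL_SORT(head, by_id);         /* the O(n log n) mergesort shown above */
    LL_COUNT(head, item, count);
    printf("%d elements\n", count);
    LL_FOREACH_SAFE(head, item, tmp) {   /* safe to delete while iterating */
        LL_DELETE(head, item);
        free(item);
    }
    return 0;
}
```

The DL_* and CDL_* families follow the same pattern with an extra `prev` field; in the circular variants the tail's `next` points back at the head, which is why CDL_FOREACH stops once the walk returns to the head.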
-/* -Copyright (c) 2015-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -/* a ring-buffer implementation using macros - */ -#ifndef UTRINGBUFFER_H -#define UTRINGBUFFER_H - -#define UTRINGBUFFER_VERSION 2.1.0 - -#include <stdlib.h> -#include <string.h> -#include "utarray.h" // for "UT_icd" - -typedef struct { - unsigned i; /* index of next available slot; wraps at n */ - unsigned n; /* capacity */ - unsigned char f; /* full */ - UT_icd icd; /* initializer, copy and destructor functions */ - char *d; /* n slots of size icd->sz */ -} UT_ringbuffer; - -#define utringbuffer_init(a, _n, _icd) do { \ - memset(a, 0, sizeof(UT_ringbuffer)); \ - (a)->icd = *(_icd); \ - (a)->n = (_n); \ - if ((a)->n) { (a)->d = (char*)malloc((a)->n * (_icd)->sz); } \ -} while(0) - -#define utringbuffer_clear(a) do { \ - if ((a)->icd.dtor) { \ - if ((a)->f) { \ - unsigned _ut_i; \ - for (_ut_i = 0; _ut_i < (a)->n; ++_ut_i) { \ - (a)->icd.dtor(utringbuffer_eltptr(a, _ut_i)); \ - } \ - } else { \ - unsigned _ut_i; \ - for (_ut_i = 0; _ut_i < (a)->i; ++_ut_i) { \ - (a)->icd.dtor(utringbuffer_eltptr(a, _ut_i)); \ - } \ - } \ - } \ - (a)->i = 0; \ - (a)->f = 0; \ -} while(0) - -#define utringbuffer_done(a) do { \ - utringbuffer_clear(a); \ - free((a)->d); (a)->d = NULL; \ - (a)->n = 0; \ -} while(0) - -#define utringbuffer_new(a,n,_icd) do { \ - a = (UT_ringbuffer*)malloc(sizeof(UT_ringbuffer)); \ - utringbuffer_init(a, n, _icd); \ -} while(0) - -#define utringbuffer_free(a) do { \ - utringbuffer_done(a); \ - free(a); \ -} while(0) - -#define utringbuffer_push_back(a,p) do { \ - if ((a)->icd.dtor && (a)->f) { (a)->icd.dtor(_utringbuffer_internalptr(a,(a)->i)); } \ - if ((a)->icd.copy) { (a)->icd.copy( _utringbuffer_internalptr(a,(a)->i), p); } \ - else { memcpy(_utringbuffer_internalptr(a,(a)->i), p, (a)->icd.sz); }; \ - if (++(a)->i == (a)->n) { (a)->i = 0; (a)->f = 1; } \ -} while(0) - -#define utringbuffer_len(a) ((a)->f ? (a)->n : (a)->i) -#define utringbuffer_empty(a) ((a)->i == 0 && !(a)->f) -#define utringbuffer_full(a) ((a)->f != 0) - -#define _utringbuffer_real_idx(a,j) ((a)->f ? ((j) + (a)->i) % (a)->n : (j)) -#define _utringbuffer_internalptr(a,j) ((void*)((a)->d + ((a)->icd.sz * (j)))) -#define utringbuffer_eltptr(a,j) ((0 <= (j) && (j) < utringbuffer_len(a)) ? _utringbuffer_internalptr(a,_utringbuffer_real_idx(a,j)) : NULL) - -#define _utringbuffer_fake_idx(a,j) ((a)->f ? 
((j) + (a)->n - (a)->i) % (a)->n : (j)) -#define _utringbuffer_internalidx(a,e) (((char*)(e) >= (a)->d) ? (((char*)(e) - (a)->d)/(a)->icd.sz) : -1) -#define utringbuffer_eltidx(a,e) _utringbuffer_fake_idx(a, _utringbuffer_internalidx(a,e)) - -#define utringbuffer_front(a) utringbuffer_eltptr(a,0) -#define utringbuffer_next(a,e) ((e)==NULL ? utringbuffer_front(a) : utringbuffer_eltptr(a, utringbuffer_eltidx(a,e)+1)) -#define utringbuffer_prev(a,e) ((e)==NULL ? utringbuffer_back(a) : utringbuffer_eltptr(a, utringbuffer_eltidx(a,e)-1)) -#define utringbuffer_back(a) (utringbuffer_empty(a) ? NULL : utringbuffer_eltptr(a, utringbuffer_len(a) - 1)) - -#endif /* UTRINGBUFFER_H */ diff --git a/3rdparty/uthash/src/utstack.h b/3rdparty/uthash/src/utstack.h deleted file mode 100644 index 3b0c1a0dff1860a0383ecfe5946de3a378f28ffe..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/utstack.h +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright (c) 2018-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifndef UTSTACK_H -#define UTSTACK_H - -#define UTSTACK_VERSION 2.1.0 - -/* - * This file contains macros to manipulate a singly-linked list as a stack. - * - * To use utstack, your structure must have a "next" pointer. 
- * - * ----------------.EXAMPLE ------------------------- - * struct item { - * int id; - * struct item *next; - * } - * - * struct item *stack = NULL: - * - * int main() { - * int count; - * struct item *tmp; - * struct item *item = malloc(sizeof *item); - * item->id = 42; - * STACK_COUNT(stack, tmp, count); assert(count == 0); - * STACK_PUSH(stack, item); - * STACK_COUNT(stack, tmp, count); assert(count == 1); - * STACK_POP(stack, item); - * free(item); - * STACK_COUNT(stack, tmp, count); assert(count == 0); - * } - * -------------------------------------------------- - */ - -#define STACK_TOP(head) (head) - -#define STACK_EMPTY(head) (!(head)) - -#define STACK_PUSH(head,add) \ - STACK_PUSH2(head,add,next) - -#define STACK_PUSH2(head,add,next) \ -do { \ - (add)->next = (head); \ - (head) = (add); \ -} while (0) - -#define STACK_POP(head,result) \ - STACK_POP2(head,result,next) - -#define STACK_POP2(head,result,next) \ -do { \ - (result) = (head); \ - (head) = (head)->next; \ -} while (0) - -#define STACK_COUNT(head,el,counter) \ - STACK_COUNT2(head,el,counter,next) \ - -#define STACK_COUNT2(head,el,counter,next) \ -do { \ - (counter) = 0; \ - for ((el) = (head); el; (el) = (el)->next) { ++(counter); } \ -} while (0) - -#endif /* UTSTACK_H */ diff --git a/3rdparty/uthash/src/utstring.h b/3rdparty/uthash/src/utstring.h deleted file mode 100644 index ca25c902ca585720fe6427037ac5eac74f677ea5..0000000000000000000000000000000000000000 --- a/3rdparty/uthash/src/utstring.h +++ /dev/null @@ -1,398 +0,0 @@ -/* -Copyright (c) 2008-2018, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -/* a dynamic string implementation using macros - */ -#ifndef UTSTRING_H -#define UTSTRING_H - -#define UTSTRING_VERSION 2.1.0 - -#include <stdlib.h> -#include <string.h> -#include <stdio.h> -#include <stdarg.h> - -#ifdef __GNUC__ -#define UTSTRING_UNUSED __attribute__((__unused__)) -#else -#define UTSTRING_UNUSED -#endif - -#ifndef oom -#define oom() exit(-1) -#endif - -typedef struct { - char *d; /* pointer to allocated buffer */ - size_t n; /* allocated capacity */ - size_t i; /* index of first unused byte */ -} UT_string; - -#define utstring_reserve(s,amt) \ -do { \ - if (((s)->n - (s)->i) < (size_t)(amt)) { \ - char *utstring_tmp = (char*)realloc( \ - (s)->d, (s)->n + (amt)); \ - if (utstring_tmp == NULL) oom(); \ - (s)->d = utstring_tmp; \ - (s)->n += (amt); \ - } \ -} while(0) - -#define utstring_init(s) \ -do { \ - (s)->n = 0; (s)->i = 0; (s)->d = NULL; \ - utstring_reserve(s,100); \ - (s)->d[0] = '\0'; \ -} while(0) - -#define utstring_done(s) \ -do { \ - if ((s)->d != NULL) free((s)->d); \ - (s)->n = 0; \ -} while(0) - -#define utstring_free(s) \ -do { \ - utstring_done(s); \ - free(s); \ -} while(0) - -#define utstring_new(s) \ -do { \ - (s) = (UT_string*)malloc(sizeof(UT_string)); \ - if (!(s)) oom(); \ - utstring_init(s); \ -} while(0) - -#define utstring_renew(s) \ -do { \ - if (s) { \ - utstring_clear(s); \ - } else { \ - utstring_new(s); \ - } \ -} while(0) - -#define utstring_clear(s) \ -do { \ - (s)->i = 0; \ - (s)->d[0] = '\0'; \ -} while(0) - -#define utstring_bincpy(s,b,l) \ -do { \ - utstring_reserve((s),(l)+1); \ - if (l) memcpy(&(s)->d[(s)->i], b, l); \ - (s)->i += (l); \ - (s)->d[(s)->i]='\0'; \ -} while(0) - -#define utstring_concat(dst,src) \ -do { \ - utstring_reserve((dst),((src)->i)+1); \ - if ((src)->i) memcpy(&(dst)->d[(dst)->i], (src)->d, (src)->i); \ - (dst)->i += (src)->i; \ - (dst)->d[(dst)->i]='\0'; \ -} while(0) - -#define utstring_len(s) ((s)->i) - -#define utstring_body(s) ((s)->d) - -UTSTRING_UNUSED static void utstring_printf_va(UT_string *s, const char *fmt, va_list ap) { - int n; - va_list cp; - for (;;) { -#ifdef _WIN32 - cp = ap; -#else - va_copy(cp, ap); -#endif - n = vsnprintf (&s->d[s->i], s->n-s->i, fmt, cp); - va_end(cp); - - if ((n > -1) && ((size_t) n < (s->n-s->i))) { - s->i += n; - return; - } - - /* Else try again with more space. */ - if (n > -1) utstring_reserve(s,n+1); /* exact */ - else utstring_reserve(s,(s->n)*2); /* 2x */ - } -} -#ifdef __GNUC__ -/* support printf format checking (2=the format string, 3=start of varargs) */ -static void utstring_printf(UT_string *s, const char *fmt, ...) - __attribute__ (( format( printf, 2, 3) )); -#endif -UTSTRING_UNUSED static void utstring_printf(UT_string *s, const char *fmt, ...) { - va_list ap; - va_start(ap,fmt); - utstring_printf_va(s,fmt,ap); - va_end(ap); -} - -/******************************************************************************* - * begin substring search functions * - ******************************************************************************/ -/* Build KMP table from left to right. 
*/ -UTSTRING_UNUSED static void _utstring_BuildTable( - const char *P_Needle, - size_t P_NeedleLen, - long *P_KMP_Table) -{ - long i, j; - - i = 0; - j = i - 1; - P_KMP_Table[i] = j; - while (i < (long) P_NeedleLen) - { - while ( (j > -1) && (P_Needle[i] != P_Needle[j]) ) - { - j = P_KMP_Table[j]; - } - i++; - j++; - if (i < (long) P_NeedleLen) - { - if (P_Needle[i] == P_Needle[j]) - { - P_KMP_Table[i] = P_KMP_Table[j]; - } - else - { - P_KMP_Table[i] = j; - } - } - else - { - P_KMP_Table[i] = j; - } - } - - return; -} - - -/* Build KMP table from right to left. */ -UTSTRING_UNUSED static void _utstring_BuildTableR( - const char *P_Needle, - size_t P_NeedleLen, - long *P_KMP_Table) -{ - long i, j; - - i = P_NeedleLen - 1; - j = i + 1; - P_KMP_Table[i + 1] = j; - while (i >= 0) - { - while ( (j < (long) P_NeedleLen) && (P_Needle[i] != P_Needle[j]) ) - { - j = P_KMP_Table[j + 1]; - } - i--; - j--; - if (i >= 0) - { - if (P_Needle[i] == P_Needle[j]) - { - P_KMP_Table[i + 1] = P_KMP_Table[j + 1]; - } - else - { - P_KMP_Table[i + 1] = j; - } - } - else - { - P_KMP_Table[i + 1] = j; - } - } - - return; -} - - -/* Search data from left to right. ( Multiple search mode. ) */ -UTSTRING_UNUSED static long _utstring_find( - const char *P_Haystack, - size_t P_HaystackLen, - const char *P_Needle, - size_t P_NeedleLen, - long *P_KMP_Table) -{ - long i, j; - long V_FindPosition = -1; - - /* Search from left to right. */ - i = j = 0; - while ( (j < (int)P_HaystackLen) && (((P_HaystackLen - j) + i) >= P_NeedleLen) ) - { - while ( (i > -1) && (P_Needle[i] != P_Haystack[j]) ) - { - i = P_KMP_Table[i]; - } - i++; - j++; - if (i >= (int)P_NeedleLen) - { - /* Found. */ - V_FindPosition = j - i; - break; - } - } - - return V_FindPosition; -} - - -/* Search data from right to left. ( Multiple search mode. ) */ -UTSTRING_UNUSED static long _utstring_findR( - const char *P_Haystack, - size_t P_HaystackLen, - const char *P_Needle, - size_t P_NeedleLen, - long *P_KMP_Table) -{ - long i, j; - long V_FindPosition = -1; - - /* Search from right to left. */ - j = (P_HaystackLen - 1); - i = (P_NeedleLen - 1); - while ( (j >= 0) && (j >= i) ) - { - while ( (i < (int)P_NeedleLen) && (P_Needle[i] != P_Haystack[j]) ) - { - i = P_KMP_Table[i + 1]; - } - i--; - j--; - if (i < 0) - { - /* Found. */ - V_FindPosition = j + 1; - break; - } - } - - return V_FindPosition; -} - - -/* Search data from left to right. ( One time search mode. ) */ -UTSTRING_UNUSED static long utstring_find( - UT_string *s, - long P_StartPosition, /* Start from 0. -1 means last position. */ - const char *P_Needle, - size_t P_NeedleLen) -{ - long V_StartPosition; - long V_HaystackLen; - long *V_KMP_Table; - long V_FindPosition = -1; - - if (P_StartPosition < 0) - { - V_StartPosition = s->i + P_StartPosition; - } - else - { - V_StartPosition = P_StartPosition; - } - V_HaystackLen = s->i - V_StartPosition; - if ( (V_HaystackLen >= (long) P_NeedleLen) && (P_NeedleLen > 0) ) - { - V_KMP_Table = (long *)malloc(sizeof(long) * (P_NeedleLen + 1)); - if (V_KMP_Table != NULL) - { - _utstring_BuildTable(P_Needle, P_NeedleLen, V_KMP_Table); - - V_FindPosition = _utstring_find(s->d + V_StartPosition, - V_HaystackLen, - P_Needle, - P_NeedleLen, - V_KMP_Table); - if (V_FindPosition >= 0) - { - V_FindPosition += V_StartPosition; - } - - free(V_KMP_Table); - } - } - - return V_FindPosition; -} - - -/* Search data from right to left. ( One time search mode. ) */ -UTSTRING_UNUSED static long utstring_findR( - UT_string *s, - long P_StartPosition, /* Start from 0. 
-1 means last position. */ - const char *P_Needle, - size_t P_NeedleLen) -{ - long V_StartPosition; - long V_HaystackLen; - long *V_KMP_Table; - long V_FindPosition = -1; - - if (P_StartPosition < 0) - { - V_StartPosition = s->i + P_StartPosition; - } - else - { - V_StartPosition = P_StartPosition; - } - V_HaystackLen = V_StartPosition + 1; - if ( (V_HaystackLen >= (long) P_NeedleLen) && (P_NeedleLen > 0) ) - { - V_KMP_Table = (long *)malloc(sizeof(long) * (P_NeedleLen + 1)); - if (V_KMP_Table != NULL) - { - _utstring_BuildTableR(P_Needle, P_NeedleLen, V_KMP_Table); - - V_FindPosition = _utstring_findR(s->d, - V_HaystackLen, - P_Needle, - P_NeedleLen, - V_KMP_Table); - - free(V_KMP_Table); - } - } - - return V_FindPosition; -} -/******************************************************************************* - * end substring search functions * - ******************************************************************************/ - -#endif /* UTSTRING_H */ diff --git a/3rdparty/wepoll/LICENSE b/3rdparty/wepoll/LICENSE deleted file mode 100644 index 6c8b1c842b1a4c319c9398e95228994c2fbcbb6c..0000000000000000000000000000000000000000 --- a/3rdparty/wepoll/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -wepoll - epoll for Windows -https://github.com/piscisaureus/wepoll - -Copyright 2012-2019, Bert Belder <bertbelder@gmail.com> -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/3rdparty/wepoll/README.md b/3rdparty/wepoll/README.md deleted file mode 100644 index d334d0833c9fba4d564ea8bbbe7d279288cd5964..0000000000000000000000000000000000000000 --- a/3rdparty/wepoll/README.md +++ /dev/null @@ -1,202 +0,0 @@ -# wepoll - epoll for windows - -[![][ci status badge]][ci status link] - -This library implements the [epoll][man epoll] API for Windows -applications. It is fast and scalable, and it closely resembles the API -and behavior of Linux' epoll. - -## Rationale - -Unlike Linux, OS X, and many other operating systems, Windows doesn't -have a good API for receiving socket state notifications. It only -supports the `select` and `WSAPoll` APIs, but they -[don't scale][select scale] and suffer from -[other issues][wsapoll broken]. - -Using I/O completion ports isn't always practical when software is -designed to be cross-platform. 
Wepoll offers an alternative that is -much closer to a drop-in replacement for software that was designed -to run on Linux. - -## Features - -* Can poll 100000s of sockets efficiently. -* Fully thread-safe. -* Multiple threads can poll the same epoll port. -* Sockets can be added to multiple epoll sets. -* All epoll events (`EPOLLIN`, `EPOLLOUT`, `EPOLLPRI`, `EPOLLRDHUP`) - are supported. -* Level-triggered and one-shot (`EPOLLONESHOT`) modes are supported. -* Trivial to embed: you need [only two files][dist]. - -## Limitations - -* Only works with sockets. -* Edge-triggered (`EPOLLET`) mode isn't supported. - -## How to use - -The library is [distributed][dist] as a single source file -([wepoll.c][wepoll.c]) and a single header file ([wepoll.h][wepoll.h]).<br> -Compile the .c file as part of your project, and include the header wherever -needed. - -## Compatibility - -* Requires Windows Vista or higher. -* Can be compiled with recent versions of MSVC, Clang, and GCC. - -## API - -### General remarks - -* The epoll port is a `HANDLE`, not a file descriptor. -* All functions set both `errno` and `GetLastError()` on failure. -* For more extensive documentation, see the [epoll(7) man page][man epoll], - and the per-function man pages that are linked below. - -### epoll_create/epoll_create1 - -```c -HANDLE epoll_create(int size); -HANDLE epoll_create1(int flags); -``` - -* Create a new epoll instance (port). -* `size` is ignored but must be greater than zero. -* `flags` must be zero as there are no supported flags. -* Returns `NULL` on failure. -* [Linux man page][man epoll_create] - -### epoll_close - -```c -int epoll_close(HANDLE ephnd); -``` - -* Close an epoll port. -* Do not attempt to close the epoll port with `close()`, - `CloseHandle()` or `closesocket()`. - -### epoll_ctl - -```c -int epoll_ctl(HANDLE ephnd, - int op, - SOCKET sock, - struct epoll_event* event); -``` - -* Control which socket events are monitored by an epoll port. -* `ephnd` must be a HANDLE created by - [`epoll_create()`](#epoll_createepoll_create1) or - [`epoll_create1()`](#epoll_createepoll_create1). -* `op` must be one of `EPOLL_CTL_ADD`, `EPOLL_CTL_MOD`, `EPOLL_CTL_DEL`. -* `sock` must be a valid socket created by [`socket()`][msdn socket], - [`WSASocket()`][msdn wsasocket], or [`accept()`][msdn accept]. -* `event` should be a pointer to a [`struct epoll_event`](#struct-epoll_event).<br> - If `op` is `EPOLL_CTL_DEL` then the `event` parameter is ignored, and it - may be `NULL`. -* Returns 0 on success, -1 on failure. -* It is recommended to always explicitly remove a socket from its epoll - set using `EPOLL_CTL_DEL` *before* closing it.<br> - As on Linux, closed sockets are automatically removed from the epoll set, but - wepoll may not be able to detect that a socket was closed until the next call - to [`epoll_wait()`](#epoll_wait). -* [Linux man page][man epoll_ctl] - -### epoll_wait - -```c -int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout); -``` - -* Receive socket events from an epoll port. -* `events` should point to a caller-allocated array of - [`epoll_event`](#struct-epoll_event) structs, which will receive the - reported events. -* `maxevents` is the maximum number of events that will be written to the - `events` array, and must be greater than zero. -* `timeout` specifies whether to block when no events are immediately available. 
- - `<0` block indefinitely - - `0` report any events that are already waiting, but don't block - - `≥1` block for at most N milliseconds -* Return value: - - `-1` an error occurred - - `0` timed out without any events to report - - `≥1` the number of events stored in the `events` buffer -* [Linux man page][man epoll_wait] - -### struct epoll_event - -```c -typedef union epoll_data { - void* ptr; - int fd; - uint32_t u32; - uint64_t u64; - SOCKET sock; /* Windows specific */ - HANDLE hnd; /* Windows specific */ -} epoll_data_t; -``` - -```c -struct epoll_event { - uint32_t events; /* Epoll events and flags */ - epoll_data_t data; /* User data variable */ -}; -``` - -* The `events` field is a bit mask containing the events being - monitored/reported, and optional flags.<br> - Flags are accepted by [`epoll_ctl()`](#epoll_ctl), but they are not reported - back by [`epoll_wait()`](#epoll_wait). -* The `data` field can be used to associate application-specific information - with a socket; its value will be returned unmodified by - [`epoll_wait()`](#epoll_wait). -* [Linux man page][man epoll_ctl] - -| Event | Description | -|---------------|----------------------------------------------------------------------| -| `EPOLLIN` | incoming data available, or incoming connection ready to be accepted | -| `EPOLLOUT` | ready to send data, or outgoing connection successfully established | -| `EPOLLRDHUP` | remote peer initiated graceful socket shutdown | -| `EPOLLPRI` | out-of-band data available for reading | -| `EPOLLERR` | socket error<sup>1</sup> | -| `EPOLLHUP` | socket hang-up<sup>1</sup> | -| `EPOLLRDNORM` | same as `EPOLLIN` | -| `EPOLLRDBAND` | same as `EPOLLPRI` | -| `EPOLLWRNORM` | same as `EPOLLOUT` | -| `EPOLLWRBAND` | same as `EPOLLOUT` | -| `EPOLLMSG` | never reported | - -| Flag | Description | -|------------------|---------------------------| -| `EPOLLONESHOT` | report event(s) only once | -| `EPOLLET` | not supported by wepoll | -| `EPOLLEXCLUSIVE` | not supported by wepoll | -| `EPOLLWAKEUP` | not supported by wepoll | - -<sup>1</sup>: the `EPOLLERR` and `EPOLLHUP` events may always be reported by -[`epoll_wait()`](#epoll_wait), regardless of the event mask that was passed to -[`epoll_ctl()`](#epoll_ctl). 
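Pulling the calls documented above together, here is a hypothetical minimal sketch (not taken from the original README): the `poll_once` helper and the already-created socket `s` are assumptions for illustration, and error handling is kept deliberately thin.

```c
/* Illustrative only: watch one socket for incoming data with wepoll. */
#include "wepoll.h"

int poll_once(SOCKET s) {
    struct epoll_event ev, out[8];
    int n, i;
    HANDLE ephnd = epoll_create1(0);          /* returns NULL on failure */
    if (ephnd == NULL) return -1;

    ev.events = EPOLLIN | EPOLLRDHUP;         /* readable / peer shutdown */
    ev.data.sock = s;                         /* Windows-specific data field */
    if (epoll_ctl(ephnd, EPOLL_CTL_ADD, s, &ev) < 0) {
        epoll_close(ephnd);
        return -1;
    }

    n = epoll_wait(ephnd, out, 8, 1000);      /* block for at most 1000 ms */
    for (i = 0; i < n; i++) {
        /* out[i].data.sock identifies the socket, out[i].events says why */
    }

    epoll_ctl(ephnd, EPOLL_CTL_DEL, s, NULL); /* detach before closing the socket */
    epoll_close(ephnd);                       /* never CloseHandle()/closesocket() */
    return n;
}
```

Note the two rules the documentation stresses: remove a socket from the set with `EPOLL_CTL_DEL` before closing it, and close the port only with `epoll_close()`.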
- - -[ci status badge]: https://ci.appveyor.com/api/projects/status/github/piscisaureus/wepoll?branch=master&svg=true -[ci status link]: https://ci.appveyor.com/project/piscisaureus/wepoll/branch/master -[dist]: https://github.com/piscisaureus/wepoll/tree/dist -[man epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html -[man epoll_create]: http://man7.org/linux/man-pages/man2/epoll_create.2.html -[man epoll_ctl]: http://man7.org/linux/man-pages/man2/epoll_ctl.2.html -[man epoll_wait]: http://man7.org/linux/man-pages/man2/epoll_wait.2.html -[msdn accept]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms737526(v=vs.85).aspx -[msdn socket]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms740506(v=vs.85).aspx -[msdn wsasocket]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms742212(v=vs.85).aspx -[select scale]: https://daniel.haxx.se/docs/poll-vs-select.html -[wsapoll broken]: https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/ -[wepoll.c]: https://github.com/piscisaureus/wepoll/blob/dist/wepoll.c -[wepoll.h]: https://github.com/piscisaureus/wepoll/blob/dist/wepoll.h diff --git a/3rdparty/wepoll/wepoll.c b/3rdparty/wepoll/wepoll.c deleted file mode 100644 index 651673aad37227314985327a42f6d94790fdb653..0000000000000000000000000000000000000000 --- a/3rdparty/wepoll/wepoll.c +++ /dev/null @@ -1,2189 +0,0 @@ -/* - * wepoll - epoll for Windows - * https://github.com/piscisaureus/wepoll - * - * Copyright 2012-2019, Bert Belder <bertbelder@gmail.com> - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef WEPOLL_EXPORT -#define WEPOLL_EXPORT -#endif - -#include <stdint.h> - -enum EPOLL_EVENTS { - EPOLLIN = (int) (1U << 0), - EPOLLPRI = (int) (1U << 1), - EPOLLOUT = (int) (1U << 2), - EPOLLERR = (int) (1U << 3), - EPOLLHUP = (int) (1U << 4), - EPOLLRDNORM = (int) (1U << 6), - EPOLLRDBAND = (int) (1U << 7), - EPOLLWRNORM = (int) (1U << 8), - EPOLLWRBAND = (int) (1U << 9), - EPOLLMSG = (int) (1U << 10), /* Never reported. 
*/ - EPOLLRDHUP = (int) (1U << 13), - EPOLLONESHOT = (int) (1U << 31) -}; - -#define EPOLLIN (1U << 0) -#define EPOLLPRI (1U << 1) -#define EPOLLOUT (1U << 2) -#define EPOLLERR (1U << 3) -#define EPOLLHUP (1U << 4) -#define EPOLLRDNORM (1U << 6) -#define EPOLLRDBAND (1U << 7) -#define EPOLLWRNORM (1U << 8) -#define EPOLLWRBAND (1U << 9) -#define EPOLLMSG (1U << 10) -#define EPOLLRDHUP (1U << 13) -#define EPOLLONESHOT (1U << 31) - -#define EPOLL_CTL_ADD 1 -#define EPOLL_CTL_MOD 2 -#define EPOLL_CTL_DEL 3 - -typedef void* HANDLE; -typedef uintptr_t SOCKET; - -typedef union epoll_data { - void* ptr; - int fd; - uint32_t u32; - uint64_t u64; - SOCKET sock; /* Windows specific */ - HANDLE hnd; /* Windows specific */ -} epoll_data_t; - -struct epoll_event { - uint32_t events; /* Epoll events and flags */ - epoll_data_t data; /* User data variable */ -}; - -#ifdef __cplusplus -extern "C" { -#endif - -WEPOLL_EXPORT HANDLE epoll_create(int size); -WEPOLL_EXPORT HANDLE epoll_create1(int flags); - -WEPOLL_EXPORT int epoll_close(HANDLE ephnd); - -WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd, - int op, - SOCKET sock, - struct epoll_event* event); - -WEPOLL_EXPORT int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout); - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#include <malloc.h> -#include <stdlib.h> - -#define WEPOLL_INTERNAL static -#define WEPOLL_INTERNAL_VAR static - -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wreserved-id-macro" -#endif - -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif - -#define _WIN32_WINNT 0x0600 - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - -#ifndef __GNUC__ -#pragma warning(push, 1) -#endif - -#include <WS2tcpip.h> -#include <WinSock2.h> -#include <Windows.h> - -#ifndef __GNUC__ -#pragma warning(pop) -#endif - -WEPOLL_INTERNAL int nt_global_init(void); - -typedef LONG NTSTATUS; -typedef NTSTATUS* PNTSTATUS; - -#ifndef NT_SUCCESS -#define NT_SUCCESS(status) (((NTSTATUS)(status)) >= 0) -#endif - -#ifndef STATUS_SUCCESS -#define STATUS_SUCCESS ((NTSTATUS) 0x00000000L) -#endif - -#ifndef STATUS_PENDING -#define STATUS_PENDING ((NTSTATUS) 0x00000103L) -#endif - -#ifndef STATUS_CANCELLED -#define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L) -#endif - -typedef struct _IO_STATUS_BLOCK { - NTSTATUS Status; - ULONG_PTR Information; -} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK; - -typedef VOID(NTAPI* PIO_APC_ROUTINE)(PVOID ApcContext, - PIO_STATUS_BLOCK IoStatusBlock, - ULONG Reserved); - -typedef struct _UNICODE_STRING { - USHORT Length; - USHORT MaximumLength; - PWSTR Buffer; -} UNICODE_STRING, *PUNICODE_STRING; - -#define RTL_CONSTANT_STRING(s) \ - { sizeof(s) - sizeof((s)[0]), sizeof(s), s } - -typedef struct _OBJECT_ATTRIBUTES { - ULONG Length; - HANDLE RootDirectory; - PUNICODE_STRING ObjectName; - ULONG Attributes; - PVOID SecurityDescriptor; - PVOID SecurityQualityOfService; -} OBJECT_ATTRIBUTES, *POBJECT_ATTRIBUTES; - -#define RTL_CONSTANT_OBJECT_ATTRIBUTES(ObjectName, Attributes) \ - { sizeof(OBJECT_ATTRIBUTES), NULL, ObjectName, Attributes, NULL, NULL } - -#ifndef FILE_OPEN -#define FILE_OPEN 0x00000001UL -#endif - -#define KEYEDEVENT_WAIT 0x00000001UL -#define KEYEDEVENT_WAKE 0x00000002UL -#define KEYEDEVENT_ALL_ACCESS \ - (STANDARD_RIGHTS_REQUIRED | KEYEDEVENT_WAIT | KEYEDEVENT_WAKE) - -#define NT_NTDLL_IMPORT_LIST(X) \ - X(NTSTATUS, \ - NTAPI, \ - NtCreateFile, \ - (PHANDLE FileHandle, \ - ACCESS_MASK DesiredAccess, \ 
- POBJECT_ATTRIBUTES ObjectAttributes, \ - PIO_STATUS_BLOCK IoStatusBlock, \ - PLARGE_INTEGER AllocationSize, \ - ULONG FileAttributes, \ - ULONG ShareAccess, \ - ULONG CreateDisposition, \ - ULONG CreateOptions, \ - PVOID EaBuffer, \ - ULONG EaLength)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtCreateKeyedEvent, \ - (PHANDLE KeyedEventHandle, \ - ACCESS_MASK DesiredAccess, \ - POBJECT_ATTRIBUTES ObjectAttributes, \ - ULONG Flags)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtDeviceIoControlFile, \ - (HANDLE FileHandle, \ - HANDLE Event, \ - PIO_APC_ROUTINE ApcRoutine, \ - PVOID ApcContext, \ - PIO_STATUS_BLOCK IoStatusBlock, \ - ULONG IoControlCode, \ - PVOID InputBuffer, \ - ULONG InputBufferLength, \ - PVOID OutputBuffer, \ - ULONG OutputBufferLength)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtReleaseKeyedEvent, \ - (HANDLE KeyedEventHandle, \ - PVOID KeyValue, \ - BOOLEAN Alertable, \ - PLARGE_INTEGER Timeout)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtWaitForKeyedEvent, \ - (HANDLE KeyedEventHandle, \ - PVOID KeyValue, \ - BOOLEAN Alertable, \ - PLARGE_INTEGER Timeout)) \ - \ - X(ULONG, WINAPI, RtlNtStatusToDosError, (NTSTATUS Status)) - -#define X(return_type, attributes, name, parameters) \ - WEPOLL_INTERNAL_VAR return_type(attributes* name) parameters; -NT_NTDLL_IMPORT_LIST(X) -#undef X - -#include <assert.h> -#include <stddef.h> - -#ifndef _SSIZE_T_DEFINED -typedef intptr_t ssize_t; -#endif - -#define array_count(a) (sizeof(a) / (sizeof((a)[0]))) - -#define container_of(ptr, type, member) \ - ((type*) ((uintptr_t) (ptr) - offsetof(type, member))) - -#define unused_var(v) ((void) (v)) - -/* Polyfill `inline` for older versions of msvc (up to Visual Studio 2013) */ -#if defined(_MSC_VER) && _MSC_VER < 1900 -#define inline __inline -#endif - -#define AFD_POLL_RECEIVE 0x0001 -#define AFD_POLL_RECEIVE_EXPEDITED 0x0002 -#define AFD_POLL_SEND 0x0004 -#define AFD_POLL_DISCONNECT 0x0008 -#define AFD_POLL_ABORT 0x0010 -#define AFD_POLL_LOCAL_CLOSE 0x0020 -#define AFD_POLL_ACCEPT 0x0080 -#define AFD_POLL_CONNECT_FAIL 0x0100 - -typedef struct _AFD_POLL_HANDLE_INFO { - HANDLE Handle; - ULONG Events; - NTSTATUS Status; -} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO; - -typedef struct _AFD_POLL_INFO { - LARGE_INTEGER Timeout; - ULONG NumberOfHandles; - ULONG Exclusive; - AFD_POLL_HANDLE_INFO Handles[1]; -} AFD_POLL_INFO, *PAFD_POLL_INFO; - -WEPOLL_INTERNAL int afd_create_helper_handle(HANDLE iocp, - HANDLE* afd_helper_handle_out); - -WEPOLL_INTERNAL int afd_poll(HANDLE afd_helper_handle, - AFD_POLL_INFO* poll_info, - OVERLAPPED* overlapped); - -#define return_map_error(value) \ - do { \ - err_map_win_error(); \ - return (value); \ - } while (0) - -#define return_set_error(value, error) \ - do { \ - err_set_win_error(error); \ - return (value); \ - } while (0) - -WEPOLL_INTERNAL void err_map_win_error(void); -WEPOLL_INTERNAL void err_set_win_error(DWORD error); -WEPOLL_INTERNAL int err_check_handle(HANDLE handle); - -WEPOLL_INTERNAL int ws_global_init(void); -WEPOLL_INTERNAL SOCKET ws_get_base_socket(SOCKET socket); - -#define IOCTL_AFD_POLL 0x00012024 - -static UNICODE_STRING afd__helper_name = - RTL_CONSTANT_STRING(L"\\Device\\Afd\\Wepoll"); - -static OBJECT_ATTRIBUTES afd__helper_attributes = - RTL_CONSTANT_OBJECT_ATTRIBUTES(&afd__helper_name, 0); - -int afd_create_helper_handle(HANDLE iocp, HANDLE* afd_helper_handle_out) { - HANDLE afd_helper_handle; - IO_STATUS_BLOCK iosb; - NTSTATUS status; - - /* By opening \Device\Afd without specifying any extended attributes, we'll - * get a handle that lets us talk to the 
AFD driver, but that doesn't have an - * associated endpoint (so it's not a socket). */ - status = NtCreateFile(&afd_helper_handle, - SYNCHRONIZE, - &afd__helper_attributes, - &iosb, - NULL, - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE, - FILE_OPEN, - 0, - NULL, - 0); - if (status != STATUS_SUCCESS) - return_set_error(-1, RtlNtStatusToDosError(status)); - - if (CreateIoCompletionPort(afd_helper_handle, iocp, 0, 0) == NULL) - goto error; - - if (!SetFileCompletionNotificationModes(afd_helper_handle, - FILE_SKIP_SET_EVENT_ON_HANDLE)) - goto error; - - *afd_helper_handle_out = afd_helper_handle; - return 0; - -error: - CloseHandle(afd_helper_handle); - return_map_error(-1); -} - -int afd_poll(HANDLE afd_helper_handle, - AFD_POLL_INFO* poll_info, - OVERLAPPED* overlapped) { - IO_STATUS_BLOCK* iosb; - HANDLE event; - void* apc_context; - NTSTATUS status; - - /* Blocking operation is not supported. */ - assert(overlapped != NULL); - - iosb = (IO_STATUS_BLOCK*) &overlapped->Internal; - event = overlapped->hEvent; - - /* Do what other windows APIs would do: if hEvent has it's lowest bit set, - * don't post a completion to the completion port. */ - if ((uintptr_t) event & 1) { - event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1); - apc_context = NULL; - } else { - apc_context = overlapped; - } - - iosb->Status = STATUS_PENDING; - status = NtDeviceIoControlFile(afd_helper_handle, - event, - NULL, - apc_context, - iosb, - IOCTL_AFD_POLL, - poll_info, - sizeof *poll_info, - poll_info, - sizeof *poll_info); - - if (status == STATUS_SUCCESS) - return 0; - else if (status == STATUS_PENDING) - return_set_error(-1, ERROR_IO_PENDING); - else - return_set_error(-1, RtlNtStatusToDosError(status)); -} - -WEPOLL_INTERNAL int epoll_global_init(void); - -WEPOLL_INTERNAL int init(void); - -#include <stdbool.h> - -typedef struct queue_node queue_node_t; - -typedef struct queue_node { - queue_node_t* prev; - queue_node_t* next; -} queue_node_t; - -typedef struct queue { - queue_node_t head; -} queue_t; - -WEPOLL_INTERNAL void queue_init(queue_t* queue); -WEPOLL_INTERNAL void queue_node_init(queue_node_t* node); - -WEPOLL_INTERNAL queue_node_t* queue_first(const queue_t* queue); -WEPOLL_INTERNAL queue_node_t* queue_last(const queue_t* queue); - -WEPOLL_INTERNAL void queue_prepend(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_append(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_move_first(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_move_last(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_remove(queue_node_t* node); - -WEPOLL_INTERNAL bool queue_empty(const queue_t* queue); -WEPOLL_INTERNAL bool queue_enqueued(const queue_node_t* node); - -typedef struct port_state port_state_t; -typedef struct poll_group poll_group_t; - -WEPOLL_INTERNAL poll_group_t* poll_group_acquire(port_state_t* port); -WEPOLL_INTERNAL void poll_group_release(poll_group_t* poll_group); - -WEPOLL_INTERNAL void poll_group_delete(poll_group_t* poll_group); - -WEPOLL_INTERNAL poll_group_t* poll_group_from_queue_node( - queue_node_t* queue_node); -WEPOLL_INTERNAL HANDLE - poll_group_get_afd_helper_handle(poll_group_t* poll_group); - -/* N.b.: the tree functions do not set errno or LastError when they fail. Each - * of the API functions has at most one failure mode. It is up to the caller to - * set an appropriate error code when necessary. 
*/ - -typedef struct tree tree_t; -typedef struct tree_node tree_node_t; - -typedef struct tree { - tree_node_t* root; -} tree_t; - -typedef struct tree_node { - tree_node_t* left; - tree_node_t* right; - tree_node_t* parent; - uintptr_t key; - bool red; -} tree_node_t; - -WEPOLL_INTERNAL void tree_init(tree_t* tree); -WEPOLL_INTERNAL void tree_node_init(tree_node_t* node); - -WEPOLL_INTERNAL int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key); -WEPOLL_INTERNAL void tree_del(tree_t* tree, tree_node_t* node); - -WEPOLL_INTERNAL tree_node_t* tree_find(const tree_t* tree, uintptr_t key); -WEPOLL_INTERNAL tree_node_t* tree_root(const tree_t* tree); - -typedef struct port_state port_state_t; -typedef struct sock_state sock_state_t; - -WEPOLL_INTERNAL sock_state_t* sock_new(port_state_t* port_state, - SOCKET socket); -WEPOLL_INTERNAL void sock_delete(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void sock_force_delete(port_state_t* port_state, - sock_state_t* sock_state); - -WEPOLL_INTERNAL int sock_set_event(port_state_t* port_state, - sock_state_t* sock_state, - const struct epoll_event* ev); - -WEPOLL_INTERNAL int sock_update(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL int sock_feed_event(port_state_t* port_state, - OVERLAPPED* overlapped, - struct epoll_event* ev); - -WEPOLL_INTERNAL sock_state_t* sock_state_from_queue_node( - queue_node_t* queue_node); -WEPOLL_INTERNAL queue_node_t* sock_state_to_queue_node( - sock_state_t* sock_state); -WEPOLL_INTERNAL sock_state_t* sock_state_from_tree_node( - tree_node_t* tree_node); -WEPOLL_INTERNAL tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state); - -/* The reflock is a special kind of lock that normally prevents a chunk of - * memory from being freed, but does allow the chunk of memory to eventually be - * released in a coordinated fashion. - * - * Under normal operation, threads increase and decrease the reference count, - * which are wait-free operations. - * - * Exactly once during the reflock's lifecycle, a thread holding a reference to - * the lock may "destroy" the lock; this operation blocks until all other - * threads holding a reference to the lock have dereferenced it. After - * "destroy" returns, the calling thread may assume that no other threads have - * a reference to the lock. - * - * Attemmpting to lock or destroy a lock after reflock_unref_and_destroy() has - * been called is invalid and results in undefined behavior. Therefore the user - * should use another lock to guarantee that this can't happen. - */ - -typedef struct reflock { - volatile long state; /* 32-bit Interlocked APIs operate on `long` values. 
*/ -} reflock_t; - -WEPOLL_INTERNAL int reflock_global_init(void); - -WEPOLL_INTERNAL void reflock_init(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_ref(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_unref(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_unref_and_destroy(reflock_t* reflock); - -typedef struct ts_tree { - tree_t tree; - SRWLOCK lock; -} ts_tree_t; - -typedef struct ts_tree_node { - tree_node_t tree_node; - reflock_t reflock; -} ts_tree_node_t; - -WEPOLL_INTERNAL void ts_tree_init(ts_tree_t* rtl); -WEPOLL_INTERNAL void ts_tree_node_init(ts_tree_node_t* node); - -WEPOLL_INTERNAL int ts_tree_add(ts_tree_t* ts_tree, - ts_tree_node_t* node, - uintptr_t key); - -WEPOLL_INTERNAL ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, - uintptr_t key); -WEPOLL_INTERNAL ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, - uintptr_t key); - -WEPOLL_INTERNAL void ts_tree_node_unref(ts_tree_node_t* node); -WEPOLL_INTERNAL void ts_tree_node_unref_and_destroy(ts_tree_node_t* node); - -typedef struct port_state port_state_t; -typedef struct sock_state sock_state_t; - -typedef struct port_state { - HANDLE iocp; - tree_t sock_tree; - queue_t sock_update_queue; - queue_t sock_deleted_queue; - queue_t poll_group_queue; - ts_tree_node_t handle_tree_node; - CRITICAL_SECTION lock; - size_t active_poll_count; -} port_state_t; - -WEPOLL_INTERNAL port_state_t* port_new(HANDLE* iocp_out); -WEPOLL_INTERNAL int port_close(port_state_t* port_state); -WEPOLL_INTERNAL int port_delete(port_state_t* port_state); - -WEPOLL_INTERNAL int port_wait(port_state_t* port_state, - struct epoll_event* events, - int maxevents, - int timeout); - -WEPOLL_INTERNAL int port_ctl(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev); - -WEPOLL_INTERNAL int port_register_socket_handle(port_state_t* port_state, - sock_state_t* sock_state, - SOCKET socket); -WEPOLL_INTERNAL void port_unregister_socket_handle(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL sock_state_t* port_find_socket(port_state_t* port_state, - SOCKET socket); - -WEPOLL_INTERNAL void port_request_socket_update(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void port_cancel_socket_update(port_state_t* port_state, - sock_state_t* sock_state); - -WEPOLL_INTERNAL void port_add_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void port_remove_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state); - -static ts_tree_t epoll__handle_tree; - -static inline port_state_t* epoll__handle_tree_node_to_port( - ts_tree_node_t* tree_node) { - return container_of(tree_node, port_state_t, handle_tree_node); -} - -int epoll_global_init(void) { - ts_tree_init(&epoll__handle_tree); - return 0; -} - -static HANDLE epoll__create(void) { - port_state_t* port_state; - HANDLE ephnd; - - if (init() < 0) - return NULL; - - port_state = port_new(&ephnd); - if (port_state == NULL) - return NULL; - - if (ts_tree_add(&epoll__handle_tree, - &port_state->handle_tree_node, - (uintptr_t) ephnd) < 0) { - /* This should never happen. 
*/ - port_delete(port_state); - return_set_error(NULL, ERROR_ALREADY_EXISTS); - } - - return ephnd; -} - -HANDLE epoll_create(int size) { - if (size <= 0) - return_set_error(NULL, ERROR_INVALID_PARAMETER); - - return epoll__create(); -} - -HANDLE epoll_create1(int flags) { - if (flags != 0) - return_set_error(NULL, ERROR_INVALID_PARAMETER); - - return epoll__create(); -} - -int epoll_close(HANDLE ephnd) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - - if (init() < 0) - return -1; - - tree_node = ts_tree_del_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - port_close(port_state); - - ts_tree_node_unref_and_destroy(tree_node); - - return port_delete(port_state); - -err: - err_check_handle(ephnd); - return -1; -} - -int epoll_ctl(HANDLE ephnd, int op, SOCKET sock, struct epoll_event* ev) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - int r; - - if (init() < 0) - return -1; - - tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - r = port_ctl(port_state, op, sock, ev); - - ts_tree_node_unref(tree_node); - - if (r < 0) - goto err; - - return 0; - -err: - /* On Linux, in the case of epoll_ctl_mod(), EBADF takes priority over other - * errors. Wepoll mimics this behavior. */ - err_check_handle(ephnd); - err_check_handle((HANDLE) sock); - return -1; -} - -int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - int num_events; - - if (maxevents <= 0) - return_set_error(-1, ERROR_INVALID_PARAMETER); - - if (init() < 0) - return -1; - - tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - num_events = port_wait(port_state, events, maxevents, timeout); - - ts_tree_node_unref(tree_node); - - if (num_events < 0) - goto err; - - return num_events; - -err: - err_check_handle(ephnd); - return -1; -} - -#include <errno.h> - -#define ERR__ERRNO_MAPPINGS(X) \ - X(ERROR_ACCESS_DENIED, EACCES) \ - X(ERROR_ALREADY_EXISTS, EEXIST) \ - X(ERROR_BAD_COMMAND, EACCES) \ - X(ERROR_BAD_EXE_FORMAT, ENOEXEC) \ - X(ERROR_BAD_LENGTH, EACCES) \ - X(ERROR_BAD_NETPATH, ENOENT) \ - X(ERROR_BAD_NET_NAME, ENOENT) \ - X(ERROR_BAD_NET_RESP, ENETDOWN) \ - X(ERROR_BAD_PATHNAME, ENOENT) \ - X(ERROR_BROKEN_PIPE, EPIPE) \ - X(ERROR_CANNOT_MAKE, EACCES) \ - X(ERROR_COMMITMENT_LIMIT, ENOMEM) \ - X(ERROR_CONNECTION_ABORTED, ECONNABORTED) \ - X(ERROR_CONNECTION_ACTIVE, EISCONN) \ - X(ERROR_CONNECTION_REFUSED, ECONNREFUSED) \ - X(ERROR_CRC, EACCES) \ - X(ERROR_DIR_NOT_EMPTY, ENOTEMPTY) \ - X(ERROR_DISK_FULL, ENOSPC) \ - X(ERROR_DUP_NAME, EADDRINUSE) \ - X(ERROR_FILENAME_EXCED_RANGE, ENOENT) \ - X(ERROR_FILE_NOT_FOUND, ENOENT) \ - X(ERROR_GEN_FAILURE, EACCES) \ - X(ERROR_GRACEFUL_DISCONNECT, EPIPE) \ - X(ERROR_HOST_DOWN, EHOSTUNREACH) \ - X(ERROR_HOST_UNREACHABLE, EHOSTUNREACH) \ - X(ERROR_INSUFFICIENT_BUFFER, EFAULT) \ - X(ERROR_INVALID_ADDRESS, EADDRNOTAVAIL) \ - X(ERROR_INVALID_FUNCTION, EINVAL) \ - X(ERROR_INVALID_HANDLE, EBADF) \ - X(ERROR_INVALID_NETNAME, EADDRNOTAVAIL) \ - X(ERROR_INVALID_PARAMETER, EINVAL) \ - 
X(ERROR_INVALID_USER_BUFFER, EMSGSIZE) \ - X(ERROR_IO_PENDING, EINPROGRESS) \ - X(ERROR_LOCK_VIOLATION, EACCES) \ - X(ERROR_MORE_DATA, EMSGSIZE) \ - X(ERROR_NETNAME_DELETED, ECONNABORTED) \ - X(ERROR_NETWORK_ACCESS_DENIED, EACCES) \ - X(ERROR_NETWORK_BUSY, ENETDOWN) \ - X(ERROR_NETWORK_UNREACHABLE, ENETUNREACH) \ - X(ERROR_NOACCESS, EFAULT) \ - X(ERROR_NONPAGED_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_NOT_ENOUGH_MEMORY, ENOMEM) \ - X(ERROR_NOT_ENOUGH_QUOTA, ENOMEM) \ - X(ERROR_NOT_FOUND, ENOENT) \ - X(ERROR_NOT_LOCKED, EACCES) \ - X(ERROR_NOT_READY, EACCES) \ - X(ERROR_NOT_SAME_DEVICE, EXDEV) \ - X(ERROR_NOT_SUPPORTED, ENOTSUP) \ - X(ERROR_NO_MORE_FILES, ENOENT) \ - X(ERROR_NO_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_OPERATION_ABORTED, EINTR) \ - X(ERROR_OUT_OF_PAPER, EACCES) \ - X(ERROR_PAGED_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_PAGEFILE_QUOTA, ENOMEM) \ - X(ERROR_PATH_NOT_FOUND, ENOENT) \ - X(ERROR_PIPE_NOT_CONNECTED, EPIPE) \ - X(ERROR_PORT_UNREACHABLE, ECONNRESET) \ - X(ERROR_PROTOCOL_UNREACHABLE, ENETUNREACH) \ - X(ERROR_REM_NOT_LIST, ECONNREFUSED) \ - X(ERROR_REQUEST_ABORTED, EINTR) \ - X(ERROR_REQ_NOT_ACCEP, EWOULDBLOCK) \ - X(ERROR_SECTOR_NOT_FOUND, EACCES) \ - X(ERROR_SEM_TIMEOUT, ETIMEDOUT) \ - X(ERROR_SHARING_VIOLATION, EACCES) \ - X(ERROR_TOO_MANY_NAMES, ENOMEM) \ - X(ERROR_TOO_MANY_OPEN_FILES, EMFILE) \ - X(ERROR_UNEXP_NET_ERR, ECONNABORTED) \ - X(ERROR_WAIT_NO_CHILDREN, ECHILD) \ - X(ERROR_WORKING_SET_QUOTA, ENOMEM) \ - X(ERROR_WRITE_PROTECT, EACCES) \ - X(ERROR_WRONG_DISK, EACCES) \ - X(WSAEACCES, EACCES) \ - X(WSAEADDRINUSE, EADDRINUSE) \ - X(WSAEADDRNOTAVAIL, EADDRNOTAVAIL) \ - X(WSAEAFNOSUPPORT, EAFNOSUPPORT) \ - X(WSAECONNABORTED, ECONNABORTED) \ - X(WSAECONNREFUSED, ECONNREFUSED) \ - X(WSAECONNRESET, ECONNRESET) \ - X(WSAEDISCON, EPIPE) \ - X(WSAEFAULT, EFAULT) \ - X(WSAEHOSTDOWN, EHOSTUNREACH) \ - X(WSAEHOSTUNREACH, EHOSTUNREACH) \ - X(WSAEINPROGRESS, EBUSY) \ - X(WSAEINTR, EINTR) \ - X(WSAEINVAL, EINVAL) \ - X(WSAEISCONN, EISCONN) \ - X(WSAEMSGSIZE, EMSGSIZE) \ - X(WSAENETDOWN, ENETDOWN) \ - X(WSAENETRESET, EHOSTUNREACH) \ - X(WSAENETUNREACH, ENETUNREACH) \ - X(WSAENOBUFS, ENOMEM) \ - X(WSAENOTCONN, ENOTCONN) \ - X(WSAENOTSOCK, ENOTSOCK) \ - X(WSAEOPNOTSUPP, EOPNOTSUPP) \ - X(WSAEPROCLIM, ENOMEM) \ - X(WSAESHUTDOWN, EPIPE) \ - X(WSAETIMEDOUT, ETIMEDOUT) \ - X(WSAEWOULDBLOCK, EWOULDBLOCK) \ - X(WSANOTINITIALISED, ENETDOWN) \ - X(WSASYSNOTREADY, ENETDOWN) \ - X(WSAVERNOTSUPPORTED, ENOSYS) - -static errno_t err__map_win_error_to_errno(DWORD error) { - switch (error) { -#define X(error_sym, errno_sym) \ - case error_sym: \ - return errno_sym; - ERR__ERRNO_MAPPINGS(X) -#undef X - } - return EINVAL; -} - -void err_map_win_error(void) { - errno = err__map_win_error_to_errno(GetLastError()); -} - -void err_set_win_error(DWORD error) { - SetLastError(error); - errno = err__map_win_error_to_errno(error); -} - -int err_check_handle(HANDLE handle) { - DWORD flags; - - /* GetHandleInformation() succeeds when passed INVALID_HANDLE_VALUE, so check - * for this condition explicitly. */ - if (handle == INVALID_HANDLE_VALUE) - return_set_error(-1, ERROR_INVALID_HANDLE); - - if (!GetHandleInformation(handle, &flags)) - return_map_error(-1); - - return 0; -} - -static bool init__done = false; -static INIT_ONCE init__once = INIT_ONCE_STATIC_INIT; - -static BOOL CALLBACK init__once_callback(INIT_ONCE* once, - void* parameter, - void** context) { - unused_var(once); - unused_var(parameter); - unused_var(context); - - /* N.b. that initialization order matters here. 
*/ - if (ws_global_init() < 0 || nt_global_init() < 0 || - reflock_global_init() < 0 || epoll_global_init() < 0) - return FALSE; - - init__done = true; - return TRUE; -} - -int init(void) { - if (!init__done && - !InitOnceExecuteOnce(&init__once, init__once_callback, NULL, NULL)) - /* `InitOnceExecuteOnce()` itself is infallible, and it doesn't set any - * error code when the once-callback returns FALSE. We return -1 here to - * indicate that global initialization failed; the failing init function is - * resposible for setting `errno` and calling `SetLastError()`. */ - return -1; - - return 0; -} - -/* Set up a workaround for the following problem: - * FARPROC addr = GetProcAddress(...); - * MY_FUNC func = (MY_FUNC) addr; <-- GCC 8 warning/error. - * MY_FUNC func = (MY_FUNC) (void*) addr; <-- MSVC warning/error. - * To compile cleanly with either compiler, do casts with this "bridge" type: - * MY_FUNC func = (MY_FUNC) (nt__fn_ptr_cast_t) addr; */ -#ifdef __GNUC__ -typedef void* nt__fn_ptr_cast_t; -#else -typedef FARPROC nt__fn_ptr_cast_t; -#endif - -#define X(return_type, attributes, name, parameters) \ - WEPOLL_INTERNAL return_type(attributes* name) parameters = NULL; -NT_NTDLL_IMPORT_LIST(X) -#undef X - -int nt_global_init(void) { - HMODULE ntdll; - FARPROC fn_ptr; - - ntdll = GetModuleHandleW(L"ntdll.dll"); - if (ntdll == NULL) - return -1; - -#define X(return_type, attributes, name, parameters) \ - fn_ptr = GetProcAddress(ntdll, #name); \ - if (fn_ptr == NULL) \ - return -1; \ - name = (return_type(attributes*) parameters)(nt__fn_ptr_cast_t) fn_ptr; - NT_NTDLL_IMPORT_LIST(X) -#undef X - - return 0; -} - -#include <string.h> - -static const size_t POLL_GROUP__MAX_GROUP_SIZE = 32; - -typedef struct poll_group { - port_state_t* port_state; - queue_node_t queue_node; - HANDLE afd_helper_handle; - size_t group_size; -} poll_group_t; - -static poll_group_t* poll_group__new(port_state_t* port_state) { - poll_group_t* poll_group = malloc(sizeof *poll_group); - if (poll_group == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - - memset(poll_group, 0, sizeof *poll_group); - - queue_node_init(&poll_group->queue_node); - poll_group->port_state = port_state; - - if (afd_create_helper_handle(port_state->iocp, - &poll_group->afd_helper_handle) < 0) { - free(poll_group); - return NULL; - } - - queue_append(&port_state->poll_group_queue, &poll_group->queue_node); - - return poll_group; -} - -void poll_group_delete(poll_group_t* poll_group) { - assert(poll_group->group_size == 0); - CloseHandle(poll_group->afd_helper_handle); - queue_remove(&poll_group->queue_node); - free(poll_group); -} - -poll_group_t* poll_group_from_queue_node(queue_node_t* queue_node) { - return container_of(queue_node, poll_group_t, queue_node); -} - -HANDLE poll_group_get_afd_helper_handle(poll_group_t* poll_group) { - return poll_group->afd_helper_handle; -} - -poll_group_t* poll_group_acquire(port_state_t* port_state) { - queue_t* queue = &port_state->poll_group_queue; - poll_group_t* poll_group = - !queue_empty(queue) - ? 
container_of(queue_last(queue), poll_group_t, queue_node) - : NULL; - - if (poll_group == NULL || - poll_group->group_size >= POLL_GROUP__MAX_GROUP_SIZE) - poll_group = poll_group__new(port_state); - if (poll_group == NULL) - return NULL; - - if (++poll_group->group_size == POLL_GROUP__MAX_GROUP_SIZE) - queue_move_first(&port_state->poll_group_queue, &poll_group->queue_node); - - return poll_group; -} - -void poll_group_release(poll_group_t* poll_group) { - port_state_t* port_state = poll_group->port_state; - - poll_group->group_size--; - assert(poll_group->group_size < POLL_GROUP__MAX_GROUP_SIZE); - - queue_move_last(&port_state->poll_group_queue, &poll_group->queue_node); - - /* Poll groups are currently only freed when the epoll port is closed. */ -} - -#define PORT__MAX_ON_STACK_COMPLETIONS 256 - -static port_state_t* port__alloc(void) { - port_state_t* port_state = malloc(sizeof *port_state); - if (port_state == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - - return port_state; -} - -static void port__free(port_state_t* port) { - assert(port != NULL); - free(port); -} - -static HANDLE port__create_iocp(void) { - HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); - if (iocp == NULL) - return_map_error(NULL); - - return iocp; -} - -port_state_t* port_new(HANDLE* iocp_out) { - port_state_t* port_state; - HANDLE iocp; - - port_state = port__alloc(); - if (port_state == NULL) - goto err1; - - iocp = port__create_iocp(); - if (iocp == NULL) - goto err2; - - memset(port_state, 0, sizeof *port_state); - - port_state->iocp = iocp; - tree_init(&port_state->sock_tree); - queue_init(&port_state->sock_update_queue); - queue_init(&port_state->sock_deleted_queue); - queue_init(&port_state->poll_group_queue); - ts_tree_node_init(&port_state->handle_tree_node); - InitializeCriticalSection(&port_state->lock); - - *iocp_out = iocp; - return port_state; - -err2: - port__free(port_state); -err1: - return NULL; -} - -static int port__close_iocp(port_state_t* port_state) { - HANDLE iocp = port_state->iocp; - port_state->iocp = NULL; - - if (!CloseHandle(iocp)) - return_map_error(-1); - - return 0; -} - -int port_close(port_state_t* port_state) { - int result; - - EnterCriticalSection(&port_state->lock); - result = port__close_iocp(port_state); - LeaveCriticalSection(&port_state->lock); - - return result; -} - -int port_delete(port_state_t* port_state) { - tree_node_t* tree_node; - queue_node_t* queue_node; - - /* At this point the IOCP port should have been closed. */ - assert(port_state->iocp == NULL); - - while ((tree_node = tree_root(&port_state->sock_tree)) != NULL) { - sock_state_t* sock_state = sock_state_from_tree_node(tree_node); - sock_force_delete(port_state, sock_state); - } - - while ((queue_node = queue_first(&port_state->sock_deleted_queue)) != NULL) { - sock_state_t* sock_state = sock_state_from_queue_node(queue_node); - sock_force_delete(port_state, sock_state); - } - - while ((queue_node = queue_first(&port_state->poll_group_queue)) != NULL) { - poll_group_t* poll_group = poll_group_from_queue_node(queue_node); - poll_group_delete(poll_group); - } - - assert(queue_empty(&port_state->sock_update_queue)); - - DeleteCriticalSection(&port_state->lock); - - port__free(port_state); - - return 0; -} - -static int port__update_events(port_state_t* port_state) { - queue_t* sock_update_queue = &port_state->sock_update_queue; - - /* Walk the queue, submitting new poll requests for every socket that needs - * it. 
*/ - while (!queue_empty(sock_update_queue)) { - queue_node_t* queue_node = queue_first(sock_update_queue); - sock_state_t* sock_state = sock_state_from_queue_node(queue_node); - - if (sock_update(port_state, sock_state) < 0) - return -1; - - /* sock_update() removes the socket from the update queue. */ - } - - return 0; -} - -static void port__update_events_if_polling(port_state_t* port_state) { - if (port_state->active_poll_count > 0) - port__update_events(port_state); -} - -static int port__feed_events(port_state_t* port_state, - struct epoll_event* epoll_events, - OVERLAPPED_ENTRY* iocp_events, - DWORD iocp_event_count) { - int epoll_event_count = 0; - DWORD i; - - for (i = 0; i < iocp_event_count; i++) { - OVERLAPPED* overlapped = iocp_events[i].lpOverlapped; - struct epoll_event* ev = &epoll_events[epoll_event_count]; - - epoll_event_count += sock_feed_event(port_state, overlapped, ev); - } - - return epoll_event_count; -} - -static int port__poll(port_state_t* port_state, - struct epoll_event* epoll_events, - OVERLAPPED_ENTRY* iocp_events, - DWORD maxevents, - DWORD timeout) { - DWORD completion_count; - - if (port__update_events(port_state) < 0) - return -1; - - port_state->active_poll_count++; - - LeaveCriticalSection(&port_state->lock); - - BOOL r = GetQueuedCompletionStatusEx(port_state->iocp, - iocp_events, - maxevents, - &completion_count, - timeout, - FALSE); - - EnterCriticalSection(&port_state->lock); - - port_state->active_poll_count--; - - if (!r) - return_map_error(-1); - - return port__feed_events( - port_state, epoll_events, iocp_events, completion_count); -} - -int port_wait(port_state_t* port_state, - struct epoll_event* events, - int maxevents, - int timeout) { - OVERLAPPED_ENTRY stack_iocp_events[PORT__MAX_ON_STACK_COMPLETIONS]; - OVERLAPPED_ENTRY* iocp_events; - uint64_t due = 0; - DWORD gqcs_timeout; - int result; - - /* Check whether `maxevents` is in range. */ - if (maxevents <= 0) - return_set_error(-1, ERROR_INVALID_PARAMETER); - - /* Decide whether the IOCP completion list can live on the stack, or allocate - * memory for it on the heap. */ - if ((size_t) maxevents <= array_count(stack_iocp_events)) { - iocp_events = stack_iocp_events; - } else if ((iocp_events = - malloc((size_t) maxevents * sizeof *iocp_events)) == NULL) { - iocp_events = stack_iocp_events; - maxevents = array_count(stack_iocp_events); - } - - /* Compute the timeout for GetQueuedCompletionStatus, and the wait end - * time, if the user specified a timeout other than zero or infinite. */ - if (timeout > 0) { - due = GetTickCount64() + (uint64_t) timeout; - gqcs_timeout = (DWORD) timeout; - } else if (timeout == 0) { - gqcs_timeout = 0; - } else { - gqcs_timeout = INFINITE; - } - - EnterCriticalSection(&port_state->lock); - - /* Dequeue completion packets until either at least one interesting event - * has been discovered, or the timeout is reached. */ - for (;;) { - uint64_t now; - - result = port__poll( - port_state, events, iocp_events, (DWORD) maxevents, gqcs_timeout); - if (result < 0 || result > 0) - break; /* Result, error, or time-out. */ - - if (timeout < 0) - continue; /* When timeout is negative, never time out. */ - - /* Update time. */ - now = GetTickCount64(); - - /* Do not allow the due time to be in the past. */ - if (now >= due) { - SetLastError(WAIT_TIMEOUT); - break; - } - - /* Recompute time-out argument for GetQueuedCompletionStatus. 
*/ - gqcs_timeout = (DWORD)(due - now); - } - - port__update_events_if_polling(port_state); - - LeaveCriticalSection(&port_state->lock); - - if (iocp_events != stack_iocp_events) - free(iocp_events); - - if (result >= 0) - return result; - else if (GetLastError() == WAIT_TIMEOUT) - return 0; - else - return -1; -} - -static int port__ctl_add(port_state_t* port_state, - SOCKET sock, - struct epoll_event* ev) { - sock_state_t* sock_state = sock_new(port_state, sock); - if (sock_state == NULL) - return -1; - - if (sock_set_event(port_state, sock_state, ev) < 0) { - sock_delete(port_state, sock_state); - return -1; - } - - port__update_events_if_polling(port_state); - - return 0; -} - -static int port__ctl_mod(port_state_t* port_state, - SOCKET sock, - struct epoll_event* ev) { - sock_state_t* sock_state = port_find_socket(port_state, sock); - if (sock_state == NULL) - return -1; - - if (sock_set_event(port_state, sock_state, ev) < 0) - return -1; - - port__update_events_if_polling(port_state); - - return 0; -} - -static int port__ctl_del(port_state_t* port_state, SOCKET sock) { - sock_state_t* sock_state = port_find_socket(port_state, sock); - if (sock_state == NULL) - return -1; - - sock_delete(port_state, sock_state); - - return 0; -} - -static int port__ctl_op(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev) { - switch (op) { - case EPOLL_CTL_ADD: - return port__ctl_add(port_state, sock, ev); - case EPOLL_CTL_MOD: - return port__ctl_mod(port_state, sock, ev); - case EPOLL_CTL_DEL: - return port__ctl_del(port_state, sock); - default: - return_set_error(-1, ERROR_INVALID_PARAMETER); - } -} - -int port_ctl(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev) { - int result; - - EnterCriticalSection(&port_state->lock); - result = port__ctl_op(port_state, op, sock, ev); - LeaveCriticalSection(&port_state->lock); - - return result; -} - -int port_register_socket_handle(port_state_t* port_state, - sock_state_t* sock_state, - SOCKET socket) { - if (tree_add(&port_state->sock_tree, - sock_state_to_tree_node(sock_state), - socket) < 0) - return_set_error(-1, ERROR_ALREADY_EXISTS); - return 0; -} - -void port_unregister_socket_handle(port_state_t* port_state, - sock_state_t* sock_state) { - tree_del(&port_state->sock_tree, sock_state_to_tree_node(sock_state)); -} - -sock_state_t* port_find_socket(port_state_t* port_state, SOCKET socket) { - tree_node_t* tree_node = tree_find(&port_state->sock_tree, socket); - if (tree_node == NULL) - return_set_error(NULL, ERROR_NOT_FOUND); - return sock_state_from_tree_node(tree_node); -} - -void port_request_socket_update(port_state_t* port_state, - sock_state_t* sock_state) { - if (queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_append(&port_state->sock_update_queue, - sock_state_to_queue_node(sock_state)); -} - -void port_cancel_socket_update(port_state_t* port_state, - sock_state_t* sock_state) { - unused_var(port_state); - if (!queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_remove(sock_state_to_queue_node(sock_state)); -} - -void port_add_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state) { - if (queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_append(&port_state->sock_deleted_queue, - sock_state_to_queue_node(sock_state)); -} - -void port_remove_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state) { - unused_var(port_state); - if (!queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - 
queue_remove(sock_state_to_queue_node(sock_state)); -} - -void queue_init(queue_t* queue) { - queue_node_init(&queue->head); -} - -void queue_node_init(queue_node_t* node) { - node->prev = node; - node->next = node; -} - -static inline void queue__detach_node(queue_node_t* node) { - node->prev->next = node->next; - node->next->prev = node->prev; -} - -queue_node_t* queue_first(const queue_t* queue) { - return !queue_empty(queue) ? queue->head.next : NULL; -} - -queue_node_t* queue_last(const queue_t* queue) { - return !queue_empty(queue) ? queue->head.prev : NULL; -} - -void queue_prepend(queue_t* queue, queue_node_t* node) { - node->next = queue->head.next; - node->prev = &queue->head; - node->next->prev = node; - queue->head.next = node; -} - -void queue_append(queue_t* queue, queue_node_t* node) { - node->next = &queue->head; - node->prev = queue->head.prev; - node->prev->next = node; - queue->head.prev = node; -} - -void queue_move_first(queue_t* queue, queue_node_t* node) { - queue__detach_node(node); - queue_prepend(queue, node); -} - -void queue_move_last(queue_t* queue, queue_node_t* node) { - queue__detach_node(node); - queue_append(queue, node); -} - -void queue_remove(queue_node_t* node) { - queue__detach_node(node); - queue_node_init(node); -} - -bool queue_empty(const queue_t* queue) { - return !queue_enqueued(&queue->head); -} - -bool queue_enqueued(const queue_node_t* node) { - return node->prev != node; -} - -static const long REFLOCK__REF = (long) 0x00000001; -static const long REFLOCK__REF_MASK = (long) 0x0fffffff; -static const long REFLOCK__DESTROY = (long) 0x10000000; -static const long REFLOCK__DESTROY_MASK = (long) 0xf0000000; -static const long REFLOCK__POISON = (long) 0x300dead0; - -static HANDLE reflock__keyed_event = NULL; - -int reflock_global_init(void) { - NTSTATUS status = - NtCreateKeyedEvent(&reflock__keyed_event, KEYEDEVENT_ALL_ACCESS, NULL, 0); - if (status != STATUS_SUCCESS) - return_set_error(-1, RtlNtStatusToDosError(status)); - return 0; -} - -void reflock_init(reflock_t* reflock) { - reflock->state = 0; -} - -static void reflock__signal_event(void* address) { - NTSTATUS status = - NtReleaseKeyedEvent(reflock__keyed_event, address, FALSE, NULL); - if (status != STATUS_SUCCESS) - abort(); -} - -static void reflock__await_event(void* address) { - NTSTATUS status = - NtWaitForKeyedEvent(reflock__keyed_event, address, FALSE, NULL); - if (status != STATUS_SUCCESS) - abort(); -} - -void reflock_ref(reflock_t* reflock) { - long state = InterlockedAdd(&reflock->state, REFLOCK__REF); - - /* Verify that the counter didn't overflow and the lock isn't destroyed. */ - assert((state & REFLOCK__DESTROY_MASK) == 0); - unused_var(state); -} - -void reflock_unref(reflock_t* reflock) { - long state = InterlockedAdd(&reflock->state, -REFLOCK__REF); - - /* Verify that the lock was referenced and not already destroyed. */ - assert((state & REFLOCK__DESTROY_MASK & ~REFLOCK__DESTROY) == 0); - - if (state == REFLOCK__DESTROY) - reflock__signal_event(reflock); -} - -void reflock_unref_and_destroy(reflock_t* reflock) { - long state = - InterlockedAdd(&reflock->state, REFLOCK__DESTROY - REFLOCK__REF); - long ref_count = state & REFLOCK__REF_MASK; - - /* Verify that the lock was referenced and not already destroyed. 
*/ - assert((state & REFLOCK__DESTROY_MASK) == REFLOCK__DESTROY); - - if (ref_count != 0) - reflock__await_event(reflock); - - state = InterlockedExchange(&reflock->state, REFLOCK__POISON); - assert(state == REFLOCK__DESTROY); -} - -static const uint32_t SOCK__KNOWN_EPOLL_EVENTS = - EPOLLIN | EPOLLPRI | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | - EPOLLRDBAND | EPOLLWRNORM | EPOLLWRBAND | EPOLLMSG | EPOLLRDHUP; - -typedef enum sock__poll_status { - SOCK__POLL_IDLE = 0, - SOCK__POLL_PENDING, - SOCK__POLL_CANCELLED -} sock__poll_status_t; - -typedef struct sock_state { - OVERLAPPED overlapped; - AFD_POLL_INFO poll_info; - queue_node_t queue_node; - tree_node_t tree_node; - poll_group_t* poll_group; - SOCKET base_socket; - epoll_data_t user_data; - uint32_t user_events; - uint32_t pending_events; - sock__poll_status_t poll_status; - bool delete_pending; -} sock_state_t; - -static inline sock_state_t* sock__alloc(void) { - sock_state_t* sock_state = malloc(sizeof *sock_state); - if (sock_state == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - return sock_state; -} - -static inline void sock__free(sock_state_t* sock_state) { - free(sock_state); -} - -static int sock__cancel_poll(sock_state_t* sock_state) { - HANDLE afd_helper_handle = - poll_group_get_afd_helper_handle(sock_state->poll_group); - assert(sock_state->poll_status == SOCK__POLL_PENDING); - - /* CancelIoEx() may fail with ERROR_NOT_FOUND if the overlapped operation has - * already completed. This is not a problem and we proceed normally. */ - if (!HasOverlappedIoCompleted(&sock_state->overlapped) && - !CancelIoEx(afd_helper_handle, &sock_state->overlapped) && - GetLastError() != ERROR_NOT_FOUND) - return_map_error(-1); - - sock_state->poll_status = SOCK__POLL_CANCELLED; - sock_state->pending_events = 0; - return 0; -} - -sock_state_t* sock_new(port_state_t* port_state, SOCKET socket) { - SOCKET base_socket; - poll_group_t* poll_group; - sock_state_t* sock_state; - - if (socket == 0 || socket == INVALID_SOCKET) - return_set_error(NULL, ERROR_INVALID_HANDLE); - - base_socket = ws_get_base_socket(socket); - if (base_socket == INVALID_SOCKET) - return NULL; - - poll_group = poll_group_acquire(port_state); - if (poll_group == NULL) - return NULL; - - sock_state = sock__alloc(); - if (sock_state == NULL) - goto err1; - - memset(sock_state, 0, sizeof *sock_state); - - sock_state->base_socket = base_socket; - sock_state->poll_group = poll_group; - - tree_node_init(&sock_state->tree_node); - queue_node_init(&sock_state->queue_node); - - if (port_register_socket_handle(port_state, sock_state, socket) < 0) - goto err2; - - return sock_state; - -err2: - sock__free(sock_state); -err1: - poll_group_release(poll_group); - - return NULL; -} - -static int sock__delete(port_state_t* port_state, - sock_state_t* sock_state, - bool force) { - if (!sock_state->delete_pending) { - if (sock_state->poll_status == SOCK__POLL_PENDING) - sock__cancel_poll(sock_state); - - port_cancel_socket_update(port_state, sock_state); - port_unregister_socket_handle(port_state, sock_state); - - sock_state->delete_pending = true; - } - - /* If the poll request still needs to complete, the sock_state object can't - * be free()d yet. `sock_feed_event()` or `port_close()` will take care - * of this later. */ - if (force || sock_state->poll_status == SOCK__POLL_IDLE) { - /* Free the sock_state now. */ - port_remove_deleted_socket(port_state, sock_state); - poll_group_release(sock_state->poll_group); - sock__free(sock_state); - } else { - /* Free the socket later. 
*/ - port_add_deleted_socket(port_state, sock_state); - } - - return 0; -} - -void sock_delete(port_state_t* port_state, sock_state_t* sock_state) { - sock__delete(port_state, sock_state, false); -} - -void sock_force_delete(port_state_t* port_state, sock_state_t* sock_state) { - sock__delete(port_state, sock_state, true); -} - -int sock_set_event(port_state_t* port_state, - sock_state_t* sock_state, - const struct epoll_event* ev) { - /* EPOLLERR and EPOLLHUP are always reported, even when not requested by the - * caller. However they are disabled after a event has been reported for a - * socket for which the EPOLLONESHOT flag as set. */ - uint32_t events = ev->events | EPOLLERR | EPOLLHUP; - - sock_state->user_events = events; - sock_state->user_data = ev->data; - - if ((events & SOCK__KNOWN_EPOLL_EVENTS & ~sock_state->pending_events) != 0) - port_request_socket_update(port_state, sock_state); - - return 0; -} - -static inline DWORD sock__epoll_events_to_afd_events(uint32_t epoll_events) { - /* Always monitor for AFD_POLL_LOCAL_CLOSE, which is triggered when the - * socket is closed with closesocket() or CloseHandle(). */ - DWORD afd_events = AFD_POLL_LOCAL_CLOSE; - - if (epoll_events & (EPOLLIN | EPOLLRDNORM)) - afd_events |= AFD_POLL_RECEIVE | AFD_POLL_ACCEPT; - if (epoll_events & (EPOLLPRI | EPOLLRDBAND)) - afd_events |= AFD_POLL_RECEIVE_EXPEDITED; - if (epoll_events & (EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND)) - afd_events |= AFD_POLL_SEND; - if (epoll_events & (EPOLLIN | EPOLLRDNORM | EPOLLRDHUP)) - afd_events |= AFD_POLL_DISCONNECT; - if (epoll_events & EPOLLHUP) - afd_events |= AFD_POLL_ABORT; - if (epoll_events & EPOLLERR) - afd_events |= AFD_POLL_CONNECT_FAIL; - - return afd_events; -} - -static inline uint32_t sock__afd_events_to_epoll_events(DWORD afd_events) { - uint32_t epoll_events = 0; - - if (afd_events & (AFD_POLL_RECEIVE | AFD_POLL_ACCEPT)) - epoll_events |= EPOLLIN | EPOLLRDNORM; - if (afd_events & AFD_POLL_RECEIVE_EXPEDITED) - epoll_events |= EPOLLPRI | EPOLLRDBAND; - if (afd_events & AFD_POLL_SEND) - epoll_events |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; - if (afd_events & AFD_POLL_DISCONNECT) - epoll_events |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; - if (afd_events & AFD_POLL_ABORT) - epoll_events |= EPOLLHUP; - if (afd_events & AFD_POLL_CONNECT_FAIL) - /* Linux reports all these events after connect() has failed. */ - epoll_events |= - EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLRDNORM | EPOLLWRNORM | EPOLLRDHUP; - - return epoll_events; -} - -int sock_update(port_state_t* port_state, sock_state_t* sock_state) { - assert(!sock_state->delete_pending); - - if ((sock_state->poll_status == SOCK__POLL_PENDING) && - (sock_state->user_events & SOCK__KNOWN_EPOLL_EVENTS & - ~sock_state->pending_events) == 0) { - /* All the events the user is interested in are already being monitored by - * the pending poll operation. It might spuriously complete because of an - * event that we're no longer interested in; when that happens we'll submit - * a new poll operation with the updated event mask. */ - - } else if (sock_state->poll_status == SOCK__POLL_PENDING) { - /* A poll operation is already pending, but it's not monitoring for all the - * events that the user is interested in. Therefore, cancel the pending - * poll operation; when we receive it's completion package, a new poll - * operation will be submitted with the correct event mask. 
*/ - if (sock__cancel_poll(sock_state) < 0) - return -1; - - } else if (sock_state->poll_status == SOCK__POLL_CANCELLED) { - /* The poll operation has already been cancelled, we're still waiting for - * it to return. For now, there's nothing that needs to be done. */ - - } else if (sock_state->poll_status == SOCK__POLL_IDLE) { - /* No poll operation is pending; start one. */ - sock_state->poll_info.Exclusive = FALSE; - sock_state->poll_info.NumberOfHandles = 1; - sock_state->poll_info.Timeout.QuadPart = INT64_MAX; - sock_state->poll_info.Handles[0].Handle = (HANDLE) sock_state->base_socket; - sock_state->poll_info.Handles[0].Status = 0; - sock_state->poll_info.Handles[0].Events = - sock__epoll_events_to_afd_events(sock_state->user_events); - - memset(&sock_state->overlapped, 0, sizeof sock_state->overlapped); - - if (afd_poll(poll_group_get_afd_helper_handle(sock_state->poll_group), - &sock_state->poll_info, - &sock_state->overlapped) < 0) { - switch (GetLastError()) { - case ERROR_IO_PENDING: - /* Overlapped poll operation in progress; this is expected. */ - break; - case ERROR_INVALID_HANDLE: - /* Socket closed; it'll be dropped from the epoll set. */ - return sock__delete(port_state, sock_state, false); - default: - /* Other errors are propagated to the caller. */ - return_map_error(-1); - } - } - - /* The poll request was successfully submitted. */ - sock_state->poll_status = SOCK__POLL_PENDING; - sock_state->pending_events = sock_state->user_events; - - } else { - /* Unreachable. */ - assert(false); - } - - port_cancel_socket_update(port_state, sock_state); - return 0; -} - -int sock_feed_event(port_state_t* port_state, - OVERLAPPED* overlapped, - struct epoll_event* ev) { - sock_state_t* sock_state = - container_of(overlapped, sock_state_t, overlapped); - AFD_POLL_INFO* poll_info = &sock_state->poll_info; - uint32_t epoll_events = 0; - - sock_state->poll_status = SOCK__POLL_IDLE; - sock_state->pending_events = 0; - - if (sock_state->delete_pending) { - /* Socket has been deleted earlier and can now be freed. */ - return sock__delete(port_state, sock_state, false); - - } else if ((NTSTATUS) overlapped->Internal == STATUS_CANCELLED) { - /* The poll request was cancelled by CancelIoEx. */ - - } else if (!NT_SUCCESS(overlapped->Internal)) { - /* The overlapped request itself failed in an unexpected way. */ - epoll_events = EPOLLERR; - - } else if (poll_info->NumberOfHandles < 1) { - /* This poll operation succeeded but didn't report any socket events. */ - - } else if (poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) { - /* The poll operation reported that the socket was closed. */ - return sock__delete(port_state, sock_state, false); - - } else { - /* Events related to our socket were reported. */ - epoll_events = - sock__afd_events_to_epoll_events(poll_info->Handles[0].Events); - } - - /* Requeue the socket so a new poll request will be submitted. */ - port_request_socket_update(port_state, sock_state); - - /* Filter out events that the user didn't ask for. */ - epoll_events &= sock_state->user_events; - - /* Return if there are no epoll events to report. */ - if (epoll_events == 0) - return 0; - - /* If the the socket has the EPOLLONESHOT flag set, unmonitor all events, - * even EPOLLERR and EPOLLHUP. But always keep looking for closed sockets. 
*/ - if (sock_state->user_events & EPOLLONESHOT) - sock_state->user_events = 0; - - ev->data = sock_state->user_data; - ev->events = epoll_events; - return 1; -} - -queue_node_t* sock_state_to_queue_node(sock_state_t* sock_state) { - return &sock_state->queue_node; -} - -sock_state_t* sock_state_from_tree_node(tree_node_t* tree_node) { - return container_of(tree_node, sock_state_t, tree_node); -} - -tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state) { - return &sock_state->tree_node; -} - -sock_state_t* sock_state_from_queue_node(queue_node_t* queue_node) { - return container_of(queue_node, sock_state_t, queue_node); -} - -void ts_tree_init(ts_tree_t* ts_tree) { - tree_init(&ts_tree->tree); - InitializeSRWLock(&ts_tree->lock); -} - -void ts_tree_node_init(ts_tree_node_t* node) { - tree_node_init(&node->tree_node); - reflock_init(&node->reflock); -} - -int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key) { - int r; - - AcquireSRWLockExclusive(&ts_tree->lock); - r = tree_add(&ts_tree->tree, &node->tree_node, key); - ReleaseSRWLockExclusive(&ts_tree->lock); - - return r; -} - -static inline ts_tree_node_t* ts_tree__find_node(ts_tree_t* ts_tree, - uintptr_t key) { - tree_node_t* tree_node = tree_find(&ts_tree->tree, key); - if (tree_node == NULL) - return NULL; - - return container_of(tree_node, ts_tree_node_t, tree_node); -} - -ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, uintptr_t key) { - ts_tree_node_t* ts_tree_node; - - AcquireSRWLockExclusive(&ts_tree->lock); - - ts_tree_node = ts_tree__find_node(ts_tree, key); - if (ts_tree_node != NULL) { - tree_del(&ts_tree->tree, &ts_tree_node->tree_node); - reflock_ref(&ts_tree_node->reflock); - } - - ReleaseSRWLockExclusive(&ts_tree->lock); - - return ts_tree_node; -} - -ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, uintptr_t key) { - ts_tree_node_t* ts_tree_node; - - AcquireSRWLockShared(&ts_tree->lock); - - ts_tree_node = ts_tree__find_node(ts_tree, key); - if (ts_tree_node != NULL) - reflock_ref(&ts_tree_node->reflock); - - ReleaseSRWLockShared(&ts_tree->lock); - - return ts_tree_node; -} - -void ts_tree_node_unref(ts_tree_node_t* node) { - reflock_unref(&node->reflock); -} - -void ts_tree_node_unref_and_destroy(ts_tree_node_t* node) { - reflock_unref_and_destroy(&node->reflock); -} - -void tree_init(tree_t* tree) { - memset(tree, 0, sizeof *tree); -} - -void tree_node_init(tree_node_t* node) { - memset(node, 0, sizeof *node); -} - -#define TREE__ROTATE(cis, trans) \ - tree_node_t* p = node; \ - tree_node_t* q = node->trans; \ - tree_node_t* parent = p->parent; \ - \ - if (parent) { \ - if (parent->left == p) \ - parent->left = q; \ - else \ - parent->right = q; \ - } else { \ - tree->root = q; \ - } \ - \ - q->parent = parent; \ - p->parent = q; \ - p->trans = q->cis; \ - if (p->trans) \ - p->trans->parent = p; \ - q->cis = p; - -static inline void tree__rotate_left(tree_t* tree, tree_node_t* node) { - TREE__ROTATE(left, right) -} - -static inline void tree__rotate_right(tree_t* tree, tree_node_t* node) { - TREE__ROTATE(right, left) -} - -#define TREE__INSERT_OR_DESCEND(side) \ - if (parent->side) { \ - parent = parent->side; \ - } else { \ - parent->side = node; \ - break; \ - } - -#define TREE__FIXUP_AFTER_INSERT(cis, trans) \ - tree_node_t* grandparent = parent->parent; \ - tree_node_t* uncle = grandparent->trans; \ - \ - if (uncle && uncle->red) { \ - parent->red = uncle->red = false; \ - grandparent->red = true; \ - node = grandparent; \ - } else { \ - if (node == parent->trans) { \ - 
tree__rotate_##cis(tree, parent); \ - node = parent; \ - parent = node->parent; \ - } \ - parent->red = false; \ - grandparent->red = true; \ - tree__rotate_##trans(tree, grandparent); \ - } - -int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) { - tree_node_t* parent; - - parent = tree->root; - if (parent) { - for (;;) { - if (key < parent->key) { - TREE__INSERT_OR_DESCEND(left) - } else if (key > parent->key) { - TREE__INSERT_OR_DESCEND(right) - } else { - return -1; - } - } - } else { - tree->root = node; - } - - node->key = key; - node->left = node->right = NULL; - node->parent = parent; - node->red = true; - - for (; parent && parent->red; parent = node->parent) { - if (parent == parent->parent->left) { - TREE__FIXUP_AFTER_INSERT(left, right) - } else { - TREE__FIXUP_AFTER_INSERT(right, left) - } - } - tree->root->red = false; - - return 0; -} - -#define TREE__FIXUP_AFTER_REMOVE(cis, trans) \ - tree_node_t* sibling = parent->trans; \ - \ - if (sibling->red) { \ - sibling->red = false; \ - parent->red = true; \ - tree__rotate_##cis(tree, parent); \ - sibling = parent->trans; \ - } \ - if ((sibling->left && sibling->left->red) || \ - (sibling->right && sibling->right->red)) { \ - if (!sibling->trans || !sibling->trans->red) { \ - sibling->cis->red = false; \ - sibling->red = true; \ - tree__rotate_##trans(tree, sibling); \ - sibling = parent->trans; \ - } \ - sibling->red = parent->red; \ - parent->red = sibling->trans->red = false; \ - tree__rotate_##cis(tree, parent); \ - node = tree->root; \ - break; \ - } \ - sibling->red = true; - -void tree_del(tree_t* tree, tree_node_t* node) { - tree_node_t* parent = node->parent; - tree_node_t* left = node->left; - tree_node_t* right = node->right; - tree_node_t* next; - bool red; - - if (!left) { - next = right; - } else if (!right) { - next = left; - } else { - next = right; - while (next->left) - next = next->left; - } - - if (parent) { - if (parent->left == node) - parent->left = next; - else - parent->right = next; - } else { - tree->root = next; - } - - if (left && right) { - red = next->red; - next->red = node->red; - next->left = left; - left->parent = next; - if (next != right) { - parent = next->parent; - next->parent = node->parent; - node = next->right; - parent->left = node; - next->right = right; - right->parent = next; - } else { - next->parent = parent; - parent = next; - node = next->right; - } - } else { - red = node->red; - node = next; - } - - if (node) - node->parent = parent; - if (red) - return; - if (node && node->red) { - node->red = false; - return; - } - - do { - if (node == tree->root) - break; - if (node == parent->left) { - TREE__FIXUP_AFTER_REMOVE(left, right) - } else { - TREE__FIXUP_AFTER_REMOVE(right, left) - } - node = parent; - parent = parent->parent; - } while (!node->red); - - if (node) - node->red = false; -} - -tree_node_t* tree_find(const tree_t* tree, uintptr_t key) { - tree_node_t* node = tree->root; - while (node) { - if (key < node->key) - node = node->left; - else if (key > node->key) - node = node->right; - else - return node; - } - return NULL; -} - -tree_node_t* tree_root(const tree_t* tree) { - return tree->root; -} - -#ifndef SIO_BASE_HANDLE -#define SIO_BASE_HANDLE 0x48000022 -#endif - -int ws_global_init(void) { - int r; - WSADATA wsa_data; - - r = WSAStartup(MAKEWORD(2, 2), &wsa_data); - if (r != 0) - return_set_error(-1, (DWORD) r); - - return 0; -} - -SOCKET ws_get_base_socket(SOCKET socket) { - SOCKET base_socket; - DWORD bytes; - - if (WSAIoctl(socket, - SIO_BASE_HANDLE, - 
NULL, - 0, - &base_socket, - sizeof base_socket, - &bytes, - NULL, - NULL) == SOCKET_ERROR) - return_map_error(INVALID_SOCKET); - - return base_socket; -} diff --git a/3rdparty/wepoll/wepoll.h b/3rdparty/wepoll/wepoll.h deleted file mode 100644 index eebde2111fe1afaa3c75b8d19ada8b9ba5345c06..0000000000000000000000000000000000000000 --- a/3rdparty/wepoll/wepoll.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * wepoll - epoll for Windows - * https://github.com/piscisaureus/wepoll - * - * Copyright 2012-2019, Bert Belder <bertbelder@gmail.com> - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef WEPOLL_H_ -#define WEPOLL_H_ - -#ifndef WEPOLL_EXPORT -#define WEPOLL_EXPORT -#endif - -#include <stdint.h> - -enum EPOLL_EVENTS { - EPOLLIN = (int) (1U << 0), - EPOLLPRI = (int) (1U << 1), - EPOLLOUT = (int) (1U << 2), - EPOLLERR = (int) (1U << 3), - EPOLLHUP = (int) (1U << 4), - EPOLLRDNORM = (int) (1U << 6), - EPOLLRDBAND = (int) (1U << 7), - EPOLLWRNORM = (int) (1U << 8), - EPOLLWRBAND = (int) (1U << 9), - EPOLLMSG = (int) (1U << 10), /* Never reported. 
*/ - EPOLLRDHUP = (int) (1U << 13), - EPOLLONESHOT = (int) (1U << 31) -}; - -#define EPOLLIN (1U << 0) -#define EPOLLPRI (1U << 1) -#define EPOLLOUT (1U << 2) -#define EPOLLERR (1U << 3) -#define EPOLLHUP (1U << 4) -#define EPOLLRDNORM (1U << 6) -#define EPOLLRDBAND (1U << 7) -#define EPOLLWRNORM (1U << 8) -#define EPOLLWRBAND (1U << 9) -#define EPOLLMSG (1U << 10) -#define EPOLLRDHUP (1U << 13) -#define EPOLLONESHOT (1U << 31) - -#define EPOLL_CTL_ADD 1 -#define EPOLL_CTL_MOD 2 -#define EPOLL_CTL_DEL 3 - -typedef void* HANDLE; -typedef uintptr_t SOCKET; - -typedef union epoll_data { - void* ptr; - int fd; - uint32_t u32; - uint64_t u64; - SOCKET sock; /* Windows specific */ - HANDLE hnd; /* Windows specific */ -} epoll_data_t; - -struct epoll_event { - uint32_t events; /* Epoll events and flags */ - epoll_data_t data; /* User data variable */ -}; - -#ifdef __cplusplus -extern "C" { -#endif - -WEPOLL_EXPORT HANDLE epoll_create(int size); -WEPOLL_EXPORT HANDLE epoll_create1(int flags); - -WEPOLL_EXPORT int epoll_close(HANDLE ephnd); - -WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd, - int op, - SOCKET sock, - struct epoll_event* event); - -WEPOLL_EXPORT int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout); - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#endif /* WEPOLL_H_ */ diff --git a/cellframe-sdk b/cellframe-sdk index 45adfabbe017096a61a5fe97ae0e4bb05792e3ef..37d1bf8f4830a798592cd5c0a83e8a6f8a2e30e6 160000 --- a/cellframe-sdk +++ b/cellframe-sdk @@ -1 +1 @@ -Subproject commit 45adfabbe017096a61a5fe97ae0e4bb05792e3ef +Subproject commit 37d1bf8f4830a798592cd5c0a83e8a6f8a2e30e6 diff --git a/sources/wepoll/LICENSE b/sources/wepoll/LICENSE deleted file mode 100644 index 6c8b1c842b1a4c319c9398e95228994c2fbcbb6c..0000000000000000000000000000000000000000 --- a/sources/wepoll/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -wepoll - epoll for Windows -https://github.com/piscisaureus/wepoll - -Copyright 2012-2019, Bert Belder <bertbelder@gmail.com> -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
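
The header deleted above was the entire public surface of the vendored wepoll copy. A minimal sketch of how a consumer calls that API, assuming `wepoll.c`/`wepoll.h` are still compiled into the project and the build links against `Ws2_32`; the plain TCP socket below is illustrative only and not taken from this repository:

```c
/* Minimal usage sketch for the API declared in the removed wepoll.h.
 * The socket is illustrative; a real program would connect() or listen()
 * before expecting any readiness events. */
#include <stdio.h>
#include <winsock2.h>
#include "wepoll.h"

int main(void) {
  WSADATA wsa_data;
  if (WSAStartup(MAKEWORD(2, 2), &wsa_data) != 0)
    return 1;

  SOCKET sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  HANDLE ephnd = epoll_create1(0); /* returns NULL on failure */
  if (sock == INVALID_SOCKET || ephnd == NULL)
    return 1;

  struct epoll_event ev;
  ev.events = EPOLLIN | EPOLLRDHUP; /* EPOLLERR/EPOLLHUP are always reported */
  ev.data.sock = sock;              /* echoed back unmodified by epoll_wait() */
  if (epoll_ctl(ephnd, EPOLL_CTL_ADD, sock, &ev) < 0)
    return 1;

  struct epoll_event reported[8];
  int n = epoll_wait(ephnd, reported, 8, 1000); /* -1 error, 0 timeout, >0 count */
  printf("epoll_wait returned %d\n", n);

  epoll_ctl(ephnd, EPOLL_CTL_DEL, sock, NULL);  /* detach before closesocket() */
  closesocket(sock);
  epoll_close(ephnd);                           /* never CloseHandle() the port */
  WSACleanup();
  return 0;
}
```

All four calls report failure through both `errno` and `GetLastError()`, which is what the error-mapping table in the removed wepoll.c above exists for.
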
diff --git a/sources/wepoll/README.md b/sources/wepoll/README.md deleted file mode 100644 index d334d0833c9fba4d564ea8bbbe7d279288cd5964..0000000000000000000000000000000000000000 --- a/sources/wepoll/README.md +++ /dev/null @@ -1,202 +0,0 @@ -# wepoll - epoll for windows - -[![][ci status badge]][ci status link] - -This library implements the [epoll][man epoll] API for Windows -applications. It is fast and scalable, and it closely resembles the API -and behavior of Linux' epoll. - -## Rationale - -Unlike Linux, OS X, and many other operating systems, Windows doesn't -have a good API for receiving socket state notifications. It only -supports the `select` and `WSAPoll` APIs, but they -[don't scale][select scale] and suffer from -[other issues][wsapoll broken]. - -Using I/O completion ports isn't always practical when software is -designed to be cross-platform. Wepoll offers an alternative that is -much closer to a drop-in replacement for software that was designed -to run on Linux. - -## Features - -* Can poll 100000s of sockets efficiently. -* Fully thread-safe. -* Multiple threads can poll the same epoll port. -* Sockets can be added to multiple epoll sets. -* All epoll events (`EPOLLIN`, `EPOLLOUT`, `EPOLLPRI`, `EPOLLRDHUP`) - are supported. -* Level-triggered and one-shot (`EPOLLONESTHOT`) modes are supported -* Trivial to embed: you need [only two files][dist]. - -## Limitations - -* Only works with sockets. -* Edge-triggered (`EPOLLET`) mode isn't supported. - -## How to use - -The library is [distributed][dist] as a single source file -([wepoll.c][wepoll.c]) and a single header file ([wepoll.h][wepoll.h]).<br> -Compile the .c file as part of your project, and include the header wherever -needed. - -## Compatibility - -* Requires Windows Vista or higher. -* Can be compiled with recent versions of MSVC, Clang, and GCC. - -## API - -### General remarks - -* The epoll port is a `HANDLE`, not a file descriptor. -* All functions set both `errno` and `GetLastError()` on failure. -* For more extensive documentation, see the [epoll(7) man page][man epoll], - and the per-function man pages that are linked below. - -### epoll_create/epoll_create1 - -```c -HANDLE epoll_create(int size); -HANDLE epoll_create1(int flags); -``` - -* Create a new epoll instance (port). -* `size` is ignored but most be greater than zero. -* `flags` must be zero as there are no supported flags. -* Returns `NULL` on failure. -* [Linux man page][man epoll_create] - -### epoll_close - -```c -int epoll_close(HANDLE ephnd); -``` - -* Close an epoll port. -* Do not attempt to close the epoll port with `close()`, - `CloseHandle()` or `closesocket()`. - -### epoll_ctl - -```c -int epoll_ctl(HANDLE ephnd, - int op, - SOCKET sock, - struct epoll_event* event); -``` - -* Control which socket events are monitored by an epoll port. -* `ephnd` must be a HANDLE created by - [`epoll_create()`](#epoll_createepoll_create1) or - [`epoll_create1()`](#epoll_createepoll_create1). -* `op` must be one of `EPOLL_CTL_ADD`, `EPOLL_CTL_MOD`, `EPOLL_CTL_DEL`. -* `sock` must be a valid socket created by [`socket()`][msdn socket], - [`WSASocket()`][msdn wsasocket], or [`accept()`][msdn accept]. -* `event` should be a pointer to a [`struct epoll_event`](#struct-epoll_event).<br> - If `op` is `EPOLL_CTL_DEL` then the `event` parameter is ignored, and it - may be `NULL`. -* Returns 0 on success, -1 on failure. 
-* It is recommended to always explicitly remove a socket from its epoll - set using `EPOLL_CTL_DEL` *before* closing it.<br> - As on Linux, closed sockets are automatically removed from the epoll set, but - wepoll may not be able to detect that a socket was closed until the next call - to [`epoll_wait()`](#epoll_wait). -* [Linux man page][man epoll_ctl] - -### epoll_wait - -```c -int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout); -``` - -* Receive socket events from an epoll port. -* `events` should point to a caller-allocated array of - [`epoll_event`](#struct-epoll_event) structs, which will receive the - reported events. -* `maxevents` is the maximum number of events that will be written to the - `events` array, and must be greater than zero. -* `timeout` specifies whether to block when no events are immediately available. - - `<0` block indefinitely - - `0` report any events that are already waiting, but don't block - - `≥1` block for at most N milliseconds -* Return value: - - `-1` an error occurred - - `0` timed out without any events to report - - `≥1` the number of events stored in the `events` buffer -* [Linux man page][man epoll_wait] - -### struct epoll_event - -```c -typedef union epoll_data { - void* ptr; - int fd; - uint32_t u32; - uint64_t u64; - SOCKET sock; /* Windows specific */ - HANDLE hnd; /* Windows specific */ -} epoll_data_t; -``` - -```c -struct epoll_event { - uint32_t events; /* Epoll events and flags */ - epoll_data_t data; /* User data variable */ -}; -``` - -* The `events` field is a bit mask containing the events being - monitored/reported, and optional flags.<br> - Flags are accepted by [`epoll_ctl()`](#epoll_ctl), but they are not reported - back by [`epoll_wait()`](#epoll_wait). -* The `data` field can be used to associate application-specific information - with a socket; its value will be returned unmodified by - [`epoll_wait()`](#epoll_wait). -* [Linux man page][man epoll_ctl] - -| Event | Description | -|---------------|----------------------------------------------------------------------| -| `EPOLLIN` | incoming data available, or incoming connection ready to be accepted | -| `EPOLLOUT` | ready to send data, or outgoing connection successfully established | -| `EPOLLRDHUP` | remote peer initiated graceful socket shutdown | -| `EPOLLPRI` | out-of-band data available for reading | -| `EPOLLERR` | socket error<sup>1</sup> | -| `EPOLLHUP` | socket hang-up<sup>1</sup> | -| `EPOLLRDNORM` | same as `EPOLLIN` | -| `EPOLLRDBAND` | same as `EPOLLPRI` | -| `EPOLLWRNORM` | same as `EPOLLOUT` | -| `EPOLLWRBAND` | same as `EPOLLOUT` | -| `EPOLLMSG` | never reported | - -| Flag | Description | -|------------------|---------------------------| -| `EPOLLONESHOT` | report event(s) only once | -| `EPOLLET` | not supported by wepoll | -| `EPOLLEXCLUSIVE` | not supported by wepoll | -| `EPOLLWAKEUP` | not supported by wepoll | - -<sup>1</sup>: the `EPOLLERR` and `EPOLLHUP` events may always be reported by -[`epoll_wait()`](#epoll_wait), regardless of the event mask that was passed to -[`epoll_ctl()`](#epoll_ctl). 
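
The API reference above stops short of a worked loop. As a non-authoritative sketch of the timeout and `EPOLLONESHOT` semantics it documents (the `on_readable()` handler and the 64-entry batch are invented for the example, not part of wepoll):

```c
/* Sketch of a wait loop following the documented semantics. */
#include "wepoll.h"

/* Hypothetical application callback; a real program would recv() here. */
static void on_readable(SOCKET sock) { (void) sock; }

static void poll_loop(HANDLE ephnd, SOCKET sock) {
  struct epoll_event ev;
  ev.events = EPOLLIN | EPOLLONESHOT; /* report once, then disarm the socket */
  ev.data.sock = sock;
  epoll_ctl(ephnd, EPOLL_CTL_ADD, sock, &ev);

  for (;;) {
    struct epoll_event batch[64];
    int n = epoll_wait(ephnd, batch, 64, 5000); /* block for at most 5 seconds */
    if (n < 0)
      break;    /* error: errno and GetLastError() are both set */
    if (n == 0)
      continue; /* timed out with nothing to report */

    for (int i = 0; i < n; i++) {
      if (batch[i].events & (EPOLLERR | EPOLLHUP)) {
        epoll_ctl(ephnd, EPOLL_CTL_DEL, batch[i].data.sock, NULL);
        continue;
      }
      if (batch[i].events & EPOLLIN)
        on_readable(batch[i].data.sock);

      /* EPOLLONESHOT cleared the interest set; re-arm with EPOLL_CTL_MOD. */
      ev.events = EPOLLIN | EPOLLONESHOT;
      ev.data.sock = batch[i].data.sock;
      epoll_ctl(ephnd, EPOLL_CTL_MOD, batch[i].data.sock, &ev);
    }
  }
}
```

Re-arming with `EPOLL_CTL_MOD` is needed because `EPOLLONESHOT` stops monitoring for every event on the socket after the first report, including `EPOLLERR` and `EPOLLHUP`.
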
- - -[ci status badge]: https://ci.appveyor.com/api/projects/status/github/piscisaureus/wepoll?branch=master&svg=true -[ci status link]: https://ci.appveyor.com/project/piscisaureus/wepoll/branch/master -[dist]: https://github.com/piscisaureus/wepoll/tree/dist -[man epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html -[man epoll_create]: http://man7.org/linux/man-pages/man2/epoll_create.2.html -[man epoll_ctl]: http://man7.org/linux/man-pages/man2/epoll_ctl.2.html -[man epoll_wait]: http://man7.org/linux/man-pages/man2/epoll_wait.2.html -[msdn accept]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms737526(v=vs.85).aspx -[msdn socket]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms740506(v=vs.85).aspx -[msdn wsasocket]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms742212(v=vs.85).aspx -[select scale]: https://daniel.haxx.se/docs/poll-vs-select.html -[wsapoll broken]: https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/ -[wepoll.c]: https://github.com/piscisaureus/wepoll/blob/dist/wepoll.c -[wepoll.h]: https://github.com/piscisaureus/wepoll/blob/dist/wepoll.h diff --git a/sources/wepoll/wepoll.c b/sources/wepoll/wepoll.c deleted file mode 100644 index 651673aad37227314985327a42f6d94790fdb653..0000000000000000000000000000000000000000 --- a/sources/wepoll/wepoll.c +++ /dev/null @@ -1,2189 +0,0 @@ -/* - * wepoll - epoll for Windows - * https://github.com/piscisaureus/wepoll - * - * Copyright 2012-2019, Bert Belder <bertbelder@gmail.com> - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef WEPOLL_EXPORT -#define WEPOLL_EXPORT -#endif - -#include <stdint.h> - -enum EPOLL_EVENTS { - EPOLLIN = (int) (1U << 0), - EPOLLPRI = (int) (1U << 1), - EPOLLOUT = (int) (1U << 2), - EPOLLERR = (int) (1U << 3), - EPOLLHUP = (int) (1U << 4), - EPOLLRDNORM = (int) (1U << 6), - EPOLLRDBAND = (int) (1U << 7), - EPOLLWRNORM = (int) (1U << 8), - EPOLLWRBAND = (int) (1U << 9), - EPOLLMSG = (int) (1U << 10), /* Never reported. 
*/ - EPOLLRDHUP = (int) (1U << 13), - EPOLLONESHOT = (int) (1U << 31) -}; - -#define EPOLLIN (1U << 0) -#define EPOLLPRI (1U << 1) -#define EPOLLOUT (1U << 2) -#define EPOLLERR (1U << 3) -#define EPOLLHUP (1U << 4) -#define EPOLLRDNORM (1U << 6) -#define EPOLLRDBAND (1U << 7) -#define EPOLLWRNORM (1U << 8) -#define EPOLLWRBAND (1U << 9) -#define EPOLLMSG (1U << 10) -#define EPOLLRDHUP (1U << 13) -#define EPOLLONESHOT (1U << 31) - -#define EPOLL_CTL_ADD 1 -#define EPOLL_CTL_MOD 2 -#define EPOLL_CTL_DEL 3 - -typedef void* HANDLE; -typedef uintptr_t SOCKET; - -typedef union epoll_data { - void* ptr; - int fd; - uint32_t u32; - uint64_t u64; - SOCKET sock; /* Windows specific */ - HANDLE hnd; /* Windows specific */ -} epoll_data_t; - -struct epoll_event { - uint32_t events; /* Epoll events and flags */ - epoll_data_t data; /* User data variable */ -}; - -#ifdef __cplusplus -extern "C" { -#endif - -WEPOLL_EXPORT HANDLE epoll_create(int size); -WEPOLL_EXPORT HANDLE epoll_create1(int flags); - -WEPOLL_EXPORT int epoll_close(HANDLE ephnd); - -WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd, - int op, - SOCKET sock, - struct epoll_event* event); - -WEPOLL_EXPORT int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout); - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#include <malloc.h> -#include <stdlib.h> - -#define WEPOLL_INTERNAL static -#define WEPOLL_INTERNAL_VAR static - -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wreserved-id-macro" -#endif - -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif - -#define _WIN32_WINNT 0x0600 - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - -#ifndef __GNUC__ -#pragma warning(push, 1) -#endif - -#include <WS2tcpip.h> -#include <WinSock2.h> -#include <Windows.h> - -#ifndef __GNUC__ -#pragma warning(pop) -#endif - -WEPOLL_INTERNAL int nt_global_init(void); - -typedef LONG NTSTATUS; -typedef NTSTATUS* PNTSTATUS; - -#ifndef NT_SUCCESS -#define NT_SUCCESS(status) (((NTSTATUS)(status)) >= 0) -#endif - -#ifndef STATUS_SUCCESS -#define STATUS_SUCCESS ((NTSTATUS) 0x00000000L) -#endif - -#ifndef STATUS_PENDING -#define STATUS_PENDING ((NTSTATUS) 0x00000103L) -#endif - -#ifndef STATUS_CANCELLED -#define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L) -#endif - -typedef struct _IO_STATUS_BLOCK { - NTSTATUS Status; - ULONG_PTR Information; -} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK; - -typedef VOID(NTAPI* PIO_APC_ROUTINE)(PVOID ApcContext, - PIO_STATUS_BLOCK IoStatusBlock, - ULONG Reserved); - -typedef struct _UNICODE_STRING { - USHORT Length; - USHORT MaximumLength; - PWSTR Buffer; -} UNICODE_STRING, *PUNICODE_STRING; - -#define RTL_CONSTANT_STRING(s) \ - { sizeof(s) - sizeof((s)[0]), sizeof(s), s } - -typedef struct _OBJECT_ATTRIBUTES { - ULONG Length; - HANDLE RootDirectory; - PUNICODE_STRING ObjectName; - ULONG Attributes; - PVOID SecurityDescriptor; - PVOID SecurityQualityOfService; -} OBJECT_ATTRIBUTES, *POBJECT_ATTRIBUTES; - -#define RTL_CONSTANT_OBJECT_ATTRIBUTES(ObjectName, Attributes) \ - { sizeof(OBJECT_ATTRIBUTES), NULL, ObjectName, Attributes, NULL, NULL } - -#ifndef FILE_OPEN -#define FILE_OPEN 0x00000001UL -#endif - -#define KEYEDEVENT_WAIT 0x00000001UL -#define KEYEDEVENT_WAKE 0x00000002UL -#define KEYEDEVENT_ALL_ACCESS \ - (STANDARD_RIGHTS_REQUIRED | KEYEDEVENT_WAIT | KEYEDEVENT_WAKE) - -#define NT_NTDLL_IMPORT_LIST(X) \ - X(NTSTATUS, \ - NTAPI, \ - NtCreateFile, \ - (PHANDLE FileHandle, \ - ACCESS_MASK DesiredAccess, \ 
- POBJECT_ATTRIBUTES ObjectAttributes, \ - PIO_STATUS_BLOCK IoStatusBlock, \ - PLARGE_INTEGER AllocationSize, \ - ULONG FileAttributes, \ - ULONG ShareAccess, \ - ULONG CreateDisposition, \ - ULONG CreateOptions, \ - PVOID EaBuffer, \ - ULONG EaLength)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtCreateKeyedEvent, \ - (PHANDLE KeyedEventHandle, \ - ACCESS_MASK DesiredAccess, \ - POBJECT_ATTRIBUTES ObjectAttributes, \ - ULONG Flags)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtDeviceIoControlFile, \ - (HANDLE FileHandle, \ - HANDLE Event, \ - PIO_APC_ROUTINE ApcRoutine, \ - PVOID ApcContext, \ - PIO_STATUS_BLOCK IoStatusBlock, \ - ULONG IoControlCode, \ - PVOID InputBuffer, \ - ULONG InputBufferLength, \ - PVOID OutputBuffer, \ - ULONG OutputBufferLength)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtReleaseKeyedEvent, \ - (HANDLE KeyedEventHandle, \ - PVOID KeyValue, \ - BOOLEAN Alertable, \ - PLARGE_INTEGER Timeout)) \ - \ - X(NTSTATUS, \ - NTAPI, \ - NtWaitForKeyedEvent, \ - (HANDLE KeyedEventHandle, \ - PVOID KeyValue, \ - BOOLEAN Alertable, \ - PLARGE_INTEGER Timeout)) \ - \ - X(ULONG, WINAPI, RtlNtStatusToDosError, (NTSTATUS Status)) - -#define X(return_type, attributes, name, parameters) \ - WEPOLL_INTERNAL_VAR return_type(attributes* name) parameters; -NT_NTDLL_IMPORT_LIST(X) -#undef X - -#include <assert.h> -#include <stddef.h> - -#ifndef _SSIZE_T_DEFINED -typedef intptr_t ssize_t; -#endif - -#define array_count(a) (sizeof(a) / (sizeof((a)[0]))) - -#define container_of(ptr, type, member) \ - ((type*) ((uintptr_t) (ptr) - offsetof(type, member))) - -#define unused_var(v) ((void) (v)) - -/* Polyfill `inline` for older versions of msvc (up to Visual Studio 2013) */ -#if defined(_MSC_VER) && _MSC_VER < 1900 -#define inline __inline -#endif - -#define AFD_POLL_RECEIVE 0x0001 -#define AFD_POLL_RECEIVE_EXPEDITED 0x0002 -#define AFD_POLL_SEND 0x0004 -#define AFD_POLL_DISCONNECT 0x0008 -#define AFD_POLL_ABORT 0x0010 -#define AFD_POLL_LOCAL_CLOSE 0x0020 -#define AFD_POLL_ACCEPT 0x0080 -#define AFD_POLL_CONNECT_FAIL 0x0100 - -typedef struct _AFD_POLL_HANDLE_INFO { - HANDLE Handle; - ULONG Events; - NTSTATUS Status; -} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO; - -typedef struct _AFD_POLL_INFO { - LARGE_INTEGER Timeout; - ULONG NumberOfHandles; - ULONG Exclusive; - AFD_POLL_HANDLE_INFO Handles[1]; -} AFD_POLL_INFO, *PAFD_POLL_INFO; - -WEPOLL_INTERNAL int afd_create_helper_handle(HANDLE iocp, - HANDLE* afd_helper_handle_out); - -WEPOLL_INTERNAL int afd_poll(HANDLE afd_helper_handle, - AFD_POLL_INFO* poll_info, - OVERLAPPED* overlapped); - -#define return_map_error(value) \ - do { \ - err_map_win_error(); \ - return (value); \ - } while (0) - -#define return_set_error(value, error) \ - do { \ - err_set_win_error(error); \ - return (value); \ - } while (0) - -WEPOLL_INTERNAL void err_map_win_error(void); -WEPOLL_INTERNAL void err_set_win_error(DWORD error); -WEPOLL_INTERNAL int err_check_handle(HANDLE handle); - -WEPOLL_INTERNAL int ws_global_init(void); -WEPOLL_INTERNAL SOCKET ws_get_base_socket(SOCKET socket); - -#define IOCTL_AFD_POLL 0x00012024 - -static UNICODE_STRING afd__helper_name = - RTL_CONSTANT_STRING(L"\\Device\\Afd\\Wepoll"); - -static OBJECT_ATTRIBUTES afd__helper_attributes = - RTL_CONSTANT_OBJECT_ATTRIBUTES(&afd__helper_name, 0); - -int afd_create_helper_handle(HANDLE iocp, HANDLE* afd_helper_handle_out) { - HANDLE afd_helper_handle; - IO_STATUS_BLOCK iosb; - NTSTATUS status; - - /* By opening \Device\Afd without specifying any extended attributes, we'll - * get a handle that lets us talk to the 
AFD driver, but that doesn't have an - * associated endpoint (so it's not a socket). */ - status = NtCreateFile(&afd_helper_handle, - SYNCHRONIZE, - &afd__helper_attributes, - &iosb, - NULL, - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE, - FILE_OPEN, - 0, - NULL, - 0); - if (status != STATUS_SUCCESS) - return_set_error(-1, RtlNtStatusToDosError(status)); - - if (CreateIoCompletionPort(afd_helper_handle, iocp, 0, 0) == NULL) - goto error; - - if (!SetFileCompletionNotificationModes(afd_helper_handle, - FILE_SKIP_SET_EVENT_ON_HANDLE)) - goto error; - - *afd_helper_handle_out = afd_helper_handle; - return 0; - -error: - CloseHandle(afd_helper_handle); - return_map_error(-1); -} - -int afd_poll(HANDLE afd_helper_handle, - AFD_POLL_INFO* poll_info, - OVERLAPPED* overlapped) { - IO_STATUS_BLOCK* iosb; - HANDLE event; - void* apc_context; - NTSTATUS status; - - /* Blocking operation is not supported. */ - assert(overlapped != NULL); - - iosb = (IO_STATUS_BLOCK*) &overlapped->Internal; - event = overlapped->hEvent; - - /* Do what other windows APIs would do: if hEvent has it's lowest bit set, - * don't post a completion to the completion port. */ - if ((uintptr_t) event & 1) { - event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1); - apc_context = NULL; - } else { - apc_context = overlapped; - } - - iosb->Status = STATUS_PENDING; - status = NtDeviceIoControlFile(afd_helper_handle, - event, - NULL, - apc_context, - iosb, - IOCTL_AFD_POLL, - poll_info, - sizeof *poll_info, - poll_info, - sizeof *poll_info); - - if (status == STATUS_SUCCESS) - return 0; - else if (status == STATUS_PENDING) - return_set_error(-1, ERROR_IO_PENDING); - else - return_set_error(-1, RtlNtStatusToDosError(status)); -} - -WEPOLL_INTERNAL int epoll_global_init(void); - -WEPOLL_INTERNAL int init(void); - -#include <stdbool.h> - -typedef struct queue_node queue_node_t; - -typedef struct queue_node { - queue_node_t* prev; - queue_node_t* next; -} queue_node_t; - -typedef struct queue { - queue_node_t head; -} queue_t; - -WEPOLL_INTERNAL void queue_init(queue_t* queue); -WEPOLL_INTERNAL void queue_node_init(queue_node_t* node); - -WEPOLL_INTERNAL queue_node_t* queue_first(const queue_t* queue); -WEPOLL_INTERNAL queue_node_t* queue_last(const queue_t* queue); - -WEPOLL_INTERNAL void queue_prepend(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_append(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_move_first(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_move_last(queue_t* queue, queue_node_t* node); -WEPOLL_INTERNAL void queue_remove(queue_node_t* node); - -WEPOLL_INTERNAL bool queue_empty(const queue_t* queue); -WEPOLL_INTERNAL bool queue_enqueued(const queue_node_t* node); - -typedef struct port_state port_state_t; -typedef struct poll_group poll_group_t; - -WEPOLL_INTERNAL poll_group_t* poll_group_acquire(port_state_t* port); -WEPOLL_INTERNAL void poll_group_release(poll_group_t* poll_group); - -WEPOLL_INTERNAL void poll_group_delete(poll_group_t* poll_group); - -WEPOLL_INTERNAL poll_group_t* poll_group_from_queue_node( - queue_node_t* queue_node); -WEPOLL_INTERNAL HANDLE - poll_group_get_afd_helper_handle(poll_group_t* poll_group); - -/* N.b.: the tree functions do not set errno or LastError when they fail. Each - * of the API functions has at most one failure mode. It is up to the caller to - * set an appropriate error code when necessary. 
*/ - -typedef struct tree tree_t; -typedef struct tree_node tree_node_t; - -typedef struct tree { - tree_node_t* root; -} tree_t; - -typedef struct tree_node { - tree_node_t* left; - tree_node_t* right; - tree_node_t* parent; - uintptr_t key; - bool red; -} tree_node_t; - -WEPOLL_INTERNAL void tree_init(tree_t* tree); -WEPOLL_INTERNAL void tree_node_init(tree_node_t* node); - -WEPOLL_INTERNAL int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key); -WEPOLL_INTERNAL void tree_del(tree_t* tree, tree_node_t* node); - -WEPOLL_INTERNAL tree_node_t* tree_find(const tree_t* tree, uintptr_t key); -WEPOLL_INTERNAL tree_node_t* tree_root(const tree_t* tree); - -typedef struct port_state port_state_t; -typedef struct sock_state sock_state_t; - -WEPOLL_INTERNAL sock_state_t* sock_new(port_state_t* port_state, - SOCKET socket); -WEPOLL_INTERNAL void sock_delete(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void sock_force_delete(port_state_t* port_state, - sock_state_t* sock_state); - -WEPOLL_INTERNAL int sock_set_event(port_state_t* port_state, - sock_state_t* sock_state, - const struct epoll_event* ev); - -WEPOLL_INTERNAL int sock_update(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL int sock_feed_event(port_state_t* port_state, - OVERLAPPED* overlapped, - struct epoll_event* ev); - -WEPOLL_INTERNAL sock_state_t* sock_state_from_queue_node( - queue_node_t* queue_node); -WEPOLL_INTERNAL queue_node_t* sock_state_to_queue_node( - sock_state_t* sock_state); -WEPOLL_INTERNAL sock_state_t* sock_state_from_tree_node( - tree_node_t* tree_node); -WEPOLL_INTERNAL tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state); - -/* The reflock is a special kind of lock that normally prevents a chunk of - * memory from being freed, but does allow the chunk of memory to eventually be - * released in a coordinated fashion. - * - * Under normal operation, threads increase and decrease the reference count, - * which are wait-free operations. - * - * Exactly once during the reflock's lifecycle, a thread holding a reference to - * the lock may "destroy" the lock; this operation blocks until all other - * threads holding a reference to the lock have dereferenced it. After - * "destroy" returns, the calling thread may assume that no other threads have - * a reference to the lock. - * - * Attemmpting to lock or destroy a lock after reflock_unref_and_destroy() has - * been called is invalid and results in undefined behavior. Therefore the user - * should use another lock to guarantee that this can't happen. - */ - -typedef struct reflock { - volatile long state; /* 32-bit Interlocked APIs operate on `long` values. 
*/ -} reflock_t; - -WEPOLL_INTERNAL int reflock_global_init(void); - -WEPOLL_INTERNAL void reflock_init(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_ref(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_unref(reflock_t* reflock); -WEPOLL_INTERNAL void reflock_unref_and_destroy(reflock_t* reflock); - -typedef struct ts_tree { - tree_t tree; - SRWLOCK lock; -} ts_tree_t; - -typedef struct ts_tree_node { - tree_node_t tree_node; - reflock_t reflock; -} ts_tree_node_t; - -WEPOLL_INTERNAL void ts_tree_init(ts_tree_t* rtl); -WEPOLL_INTERNAL void ts_tree_node_init(ts_tree_node_t* node); - -WEPOLL_INTERNAL int ts_tree_add(ts_tree_t* ts_tree, - ts_tree_node_t* node, - uintptr_t key); - -WEPOLL_INTERNAL ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, - uintptr_t key); -WEPOLL_INTERNAL ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, - uintptr_t key); - -WEPOLL_INTERNAL void ts_tree_node_unref(ts_tree_node_t* node); -WEPOLL_INTERNAL void ts_tree_node_unref_and_destroy(ts_tree_node_t* node); - -typedef struct port_state port_state_t; -typedef struct sock_state sock_state_t; - -typedef struct port_state { - HANDLE iocp; - tree_t sock_tree; - queue_t sock_update_queue; - queue_t sock_deleted_queue; - queue_t poll_group_queue; - ts_tree_node_t handle_tree_node; - CRITICAL_SECTION lock; - size_t active_poll_count; -} port_state_t; - -WEPOLL_INTERNAL port_state_t* port_new(HANDLE* iocp_out); -WEPOLL_INTERNAL int port_close(port_state_t* port_state); -WEPOLL_INTERNAL int port_delete(port_state_t* port_state); - -WEPOLL_INTERNAL int port_wait(port_state_t* port_state, - struct epoll_event* events, - int maxevents, - int timeout); - -WEPOLL_INTERNAL int port_ctl(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev); - -WEPOLL_INTERNAL int port_register_socket_handle(port_state_t* port_state, - sock_state_t* sock_state, - SOCKET socket); -WEPOLL_INTERNAL void port_unregister_socket_handle(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL sock_state_t* port_find_socket(port_state_t* port_state, - SOCKET socket); - -WEPOLL_INTERNAL void port_request_socket_update(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void port_cancel_socket_update(port_state_t* port_state, - sock_state_t* sock_state); - -WEPOLL_INTERNAL void port_add_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state); -WEPOLL_INTERNAL void port_remove_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state); - -static ts_tree_t epoll__handle_tree; - -static inline port_state_t* epoll__handle_tree_node_to_port( - ts_tree_node_t* tree_node) { - return container_of(tree_node, port_state_t, handle_tree_node); -} - -int epoll_global_init(void) { - ts_tree_init(&epoll__handle_tree); - return 0; -} - -static HANDLE epoll__create(void) { - port_state_t* port_state; - HANDLE ephnd; - - if (init() < 0) - return NULL; - - port_state = port_new(&ephnd); - if (port_state == NULL) - return NULL; - - if (ts_tree_add(&epoll__handle_tree, - &port_state->handle_tree_node, - (uintptr_t) ephnd) < 0) { - /* This should never happen. 
*/ - port_delete(port_state); - return_set_error(NULL, ERROR_ALREADY_EXISTS); - } - - return ephnd; -} - -HANDLE epoll_create(int size) { - if (size <= 0) - return_set_error(NULL, ERROR_INVALID_PARAMETER); - - return epoll__create(); -} - -HANDLE epoll_create1(int flags) { - if (flags != 0) - return_set_error(NULL, ERROR_INVALID_PARAMETER); - - return epoll__create(); -} - -int epoll_close(HANDLE ephnd) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - - if (init() < 0) - return -1; - - tree_node = ts_tree_del_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - port_close(port_state); - - ts_tree_node_unref_and_destroy(tree_node); - - return port_delete(port_state); - -err: - err_check_handle(ephnd); - return -1; -} - -int epoll_ctl(HANDLE ephnd, int op, SOCKET sock, struct epoll_event* ev) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - int r; - - if (init() < 0) - return -1; - - tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - r = port_ctl(port_state, op, sock, ev); - - ts_tree_node_unref(tree_node); - - if (r < 0) - goto err; - - return 0; - -err: - /* On Linux, in the case of epoll_ctl_mod(), EBADF takes priority over other - * errors. Wepoll mimics this behavior. */ - err_check_handle(ephnd); - err_check_handle((HANDLE) sock); - return -1; -} - -int epoll_wait(HANDLE ephnd, - struct epoll_event* events, - int maxevents, - int timeout) { - ts_tree_node_t* tree_node; - port_state_t* port_state; - int num_events; - - if (maxevents <= 0) - return_set_error(-1, ERROR_INVALID_PARAMETER); - - if (init() < 0) - return -1; - - tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd); - if (tree_node == NULL) { - err_set_win_error(ERROR_INVALID_PARAMETER); - goto err; - } - - port_state = epoll__handle_tree_node_to_port(tree_node); - num_events = port_wait(port_state, events, maxevents, timeout); - - ts_tree_node_unref(tree_node); - - if (num_events < 0) - goto err; - - return num_events; - -err: - err_check_handle(ephnd); - return -1; -} - -#include <errno.h> - -#define ERR__ERRNO_MAPPINGS(X) \ - X(ERROR_ACCESS_DENIED, EACCES) \ - X(ERROR_ALREADY_EXISTS, EEXIST) \ - X(ERROR_BAD_COMMAND, EACCES) \ - X(ERROR_BAD_EXE_FORMAT, ENOEXEC) \ - X(ERROR_BAD_LENGTH, EACCES) \ - X(ERROR_BAD_NETPATH, ENOENT) \ - X(ERROR_BAD_NET_NAME, ENOENT) \ - X(ERROR_BAD_NET_RESP, ENETDOWN) \ - X(ERROR_BAD_PATHNAME, ENOENT) \ - X(ERROR_BROKEN_PIPE, EPIPE) \ - X(ERROR_CANNOT_MAKE, EACCES) \ - X(ERROR_COMMITMENT_LIMIT, ENOMEM) \ - X(ERROR_CONNECTION_ABORTED, ECONNABORTED) \ - X(ERROR_CONNECTION_ACTIVE, EISCONN) \ - X(ERROR_CONNECTION_REFUSED, ECONNREFUSED) \ - X(ERROR_CRC, EACCES) \ - X(ERROR_DIR_NOT_EMPTY, ENOTEMPTY) \ - X(ERROR_DISK_FULL, ENOSPC) \ - X(ERROR_DUP_NAME, EADDRINUSE) \ - X(ERROR_FILENAME_EXCED_RANGE, ENOENT) \ - X(ERROR_FILE_NOT_FOUND, ENOENT) \ - X(ERROR_GEN_FAILURE, EACCES) \ - X(ERROR_GRACEFUL_DISCONNECT, EPIPE) \ - X(ERROR_HOST_DOWN, EHOSTUNREACH) \ - X(ERROR_HOST_UNREACHABLE, EHOSTUNREACH) \ - X(ERROR_INSUFFICIENT_BUFFER, EFAULT) \ - X(ERROR_INVALID_ADDRESS, EADDRNOTAVAIL) \ - X(ERROR_INVALID_FUNCTION, EINVAL) \ - X(ERROR_INVALID_HANDLE, EBADF) \ - X(ERROR_INVALID_NETNAME, EADDRNOTAVAIL) \ - X(ERROR_INVALID_PARAMETER, EINVAL) \ - 
X(ERROR_INVALID_USER_BUFFER, EMSGSIZE) \ - X(ERROR_IO_PENDING, EINPROGRESS) \ - X(ERROR_LOCK_VIOLATION, EACCES) \ - X(ERROR_MORE_DATA, EMSGSIZE) \ - X(ERROR_NETNAME_DELETED, ECONNABORTED) \ - X(ERROR_NETWORK_ACCESS_DENIED, EACCES) \ - X(ERROR_NETWORK_BUSY, ENETDOWN) \ - X(ERROR_NETWORK_UNREACHABLE, ENETUNREACH) \ - X(ERROR_NOACCESS, EFAULT) \ - X(ERROR_NONPAGED_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_NOT_ENOUGH_MEMORY, ENOMEM) \ - X(ERROR_NOT_ENOUGH_QUOTA, ENOMEM) \ - X(ERROR_NOT_FOUND, ENOENT) \ - X(ERROR_NOT_LOCKED, EACCES) \ - X(ERROR_NOT_READY, EACCES) \ - X(ERROR_NOT_SAME_DEVICE, EXDEV) \ - X(ERROR_NOT_SUPPORTED, ENOTSUP) \ - X(ERROR_NO_MORE_FILES, ENOENT) \ - X(ERROR_NO_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_OPERATION_ABORTED, EINTR) \ - X(ERROR_OUT_OF_PAPER, EACCES) \ - X(ERROR_PAGED_SYSTEM_RESOURCES, ENOMEM) \ - X(ERROR_PAGEFILE_QUOTA, ENOMEM) \ - X(ERROR_PATH_NOT_FOUND, ENOENT) \ - X(ERROR_PIPE_NOT_CONNECTED, EPIPE) \ - X(ERROR_PORT_UNREACHABLE, ECONNRESET) \ - X(ERROR_PROTOCOL_UNREACHABLE, ENETUNREACH) \ - X(ERROR_REM_NOT_LIST, ECONNREFUSED) \ - X(ERROR_REQUEST_ABORTED, EINTR) \ - X(ERROR_REQ_NOT_ACCEP, EWOULDBLOCK) \ - X(ERROR_SECTOR_NOT_FOUND, EACCES) \ - X(ERROR_SEM_TIMEOUT, ETIMEDOUT) \ - X(ERROR_SHARING_VIOLATION, EACCES) \ - X(ERROR_TOO_MANY_NAMES, ENOMEM) \ - X(ERROR_TOO_MANY_OPEN_FILES, EMFILE) \ - X(ERROR_UNEXP_NET_ERR, ECONNABORTED) \ - X(ERROR_WAIT_NO_CHILDREN, ECHILD) \ - X(ERROR_WORKING_SET_QUOTA, ENOMEM) \ - X(ERROR_WRITE_PROTECT, EACCES) \ - X(ERROR_WRONG_DISK, EACCES) \ - X(WSAEACCES, EACCES) \ - X(WSAEADDRINUSE, EADDRINUSE) \ - X(WSAEADDRNOTAVAIL, EADDRNOTAVAIL) \ - X(WSAEAFNOSUPPORT, EAFNOSUPPORT) \ - X(WSAECONNABORTED, ECONNABORTED) \ - X(WSAECONNREFUSED, ECONNREFUSED) \ - X(WSAECONNRESET, ECONNRESET) \ - X(WSAEDISCON, EPIPE) \ - X(WSAEFAULT, EFAULT) \ - X(WSAEHOSTDOWN, EHOSTUNREACH) \ - X(WSAEHOSTUNREACH, EHOSTUNREACH) \ - X(WSAEINPROGRESS, EBUSY) \ - X(WSAEINTR, EINTR) \ - X(WSAEINVAL, EINVAL) \ - X(WSAEISCONN, EISCONN) \ - X(WSAEMSGSIZE, EMSGSIZE) \ - X(WSAENETDOWN, ENETDOWN) \ - X(WSAENETRESET, EHOSTUNREACH) \ - X(WSAENETUNREACH, ENETUNREACH) \ - X(WSAENOBUFS, ENOMEM) \ - X(WSAENOTCONN, ENOTCONN) \ - X(WSAENOTSOCK, ENOTSOCK) \ - X(WSAEOPNOTSUPP, EOPNOTSUPP) \ - X(WSAEPROCLIM, ENOMEM) \ - X(WSAESHUTDOWN, EPIPE) \ - X(WSAETIMEDOUT, ETIMEDOUT) \ - X(WSAEWOULDBLOCK, EWOULDBLOCK) \ - X(WSANOTINITIALISED, ENETDOWN) \ - X(WSASYSNOTREADY, ENETDOWN) \ - X(WSAVERNOTSUPPORTED, ENOSYS) - -static errno_t err__map_win_error_to_errno(DWORD error) { - switch (error) { -#define X(error_sym, errno_sym) \ - case error_sym: \ - return errno_sym; - ERR__ERRNO_MAPPINGS(X) -#undef X - } - return EINVAL; -} - -void err_map_win_error(void) { - errno = err__map_win_error_to_errno(GetLastError()); -} - -void err_set_win_error(DWORD error) { - SetLastError(error); - errno = err__map_win_error_to_errno(error); -} - -int err_check_handle(HANDLE handle) { - DWORD flags; - - /* GetHandleInformation() succeeds when passed INVALID_HANDLE_VALUE, so check - * for this condition explicitly. */ - if (handle == INVALID_HANDLE_VALUE) - return_set_error(-1, ERROR_INVALID_HANDLE); - - if (!GetHandleInformation(handle, &flags)) - return_map_error(-1); - - return 0; -} - -static bool init__done = false; -static INIT_ONCE init__once = INIT_ONCE_STATIC_INIT; - -static BOOL CALLBACK init__once_callback(INIT_ONCE* once, - void* parameter, - void** context) { - unused_var(once); - unused_var(parameter); - unused_var(context); - - /* N.b. that initialization order matters here. 
*/ - if (ws_global_init() < 0 || nt_global_init() < 0 || - reflock_global_init() < 0 || epoll_global_init() < 0) - return FALSE; - - init__done = true; - return TRUE; -} - -int init(void) { - if (!init__done && - !InitOnceExecuteOnce(&init__once, init__once_callback, NULL, NULL)) - /* `InitOnceExecuteOnce()` itself is infallible, and it doesn't set any - * error code when the once-callback returns FALSE. We return -1 here to - * indicate that global initialization failed; the failing init function is - * resposible for setting `errno` and calling `SetLastError()`. */ - return -1; - - return 0; -} - -/* Set up a workaround for the following problem: - * FARPROC addr = GetProcAddress(...); - * MY_FUNC func = (MY_FUNC) addr; <-- GCC 8 warning/error. - * MY_FUNC func = (MY_FUNC) (void*) addr; <-- MSVC warning/error. - * To compile cleanly with either compiler, do casts with this "bridge" type: - * MY_FUNC func = (MY_FUNC) (nt__fn_ptr_cast_t) addr; */ -#ifdef __GNUC__ -typedef void* nt__fn_ptr_cast_t; -#else -typedef FARPROC nt__fn_ptr_cast_t; -#endif - -#define X(return_type, attributes, name, parameters) \ - WEPOLL_INTERNAL return_type(attributes* name) parameters = NULL; -NT_NTDLL_IMPORT_LIST(X) -#undef X - -int nt_global_init(void) { - HMODULE ntdll; - FARPROC fn_ptr; - - ntdll = GetModuleHandleW(L"ntdll.dll"); - if (ntdll == NULL) - return -1; - -#define X(return_type, attributes, name, parameters) \ - fn_ptr = GetProcAddress(ntdll, #name); \ - if (fn_ptr == NULL) \ - return -1; \ - name = (return_type(attributes*) parameters)(nt__fn_ptr_cast_t) fn_ptr; - NT_NTDLL_IMPORT_LIST(X) -#undef X - - return 0; -} - -#include <string.h> - -static const size_t POLL_GROUP__MAX_GROUP_SIZE = 32; - -typedef struct poll_group { - port_state_t* port_state; - queue_node_t queue_node; - HANDLE afd_helper_handle; - size_t group_size; -} poll_group_t; - -static poll_group_t* poll_group__new(port_state_t* port_state) { - poll_group_t* poll_group = malloc(sizeof *poll_group); - if (poll_group == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - - memset(poll_group, 0, sizeof *poll_group); - - queue_node_init(&poll_group->queue_node); - poll_group->port_state = port_state; - - if (afd_create_helper_handle(port_state->iocp, - &poll_group->afd_helper_handle) < 0) { - free(poll_group); - return NULL; - } - - queue_append(&port_state->poll_group_queue, &poll_group->queue_node); - - return poll_group; -} - -void poll_group_delete(poll_group_t* poll_group) { - assert(poll_group->group_size == 0); - CloseHandle(poll_group->afd_helper_handle); - queue_remove(&poll_group->queue_node); - free(poll_group); -} - -poll_group_t* poll_group_from_queue_node(queue_node_t* queue_node) { - return container_of(queue_node, poll_group_t, queue_node); -} - -HANDLE poll_group_get_afd_helper_handle(poll_group_t* poll_group) { - return poll_group->afd_helper_handle; -} - -poll_group_t* poll_group_acquire(port_state_t* port_state) { - queue_t* queue = &port_state->poll_group_queue; - poll_group_t* poll_group = - !queue_empty(queue) - ? 
container_of(queue_last(queue), poll_group_t, queue_node) - : NULL; - - if (poll_group == NULL || - poll_group->group_size >= POLL_GROUP__MAX_GROUP_SIZE) - poll_group = poll_group__new(port_state); - if (poll_group == NULL) - return NULL; - - if (++poll_group->group_size == POLL_GROUP__MAX_GROUP_SIZE) - queue_move_first(&port_state->poll_group_queue, &poll_group->queue_node); - - return poll_group; -} - -void poll_group_release(poll_group_t* poll_group) { - port_state_t* port_state = poll_group->port_state; - - poll_group->group_size--; - assert(poll_group->group_size < POLL_GROUP__MAX_GROUP_SIZE); - - queue_move_last(&port_state->poll_group_queue, &poll_group->queue_node); - - /* Poll groups are currently only freed when the epoll port is closed. */ -} - -#define PORT__MAX_ON_STACK_COMPLETIONS 256 - -static port_state_t* port__alloc(void) { - port_state_t* port_state = malloc(sizeof *port_state); - if (port_state == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - - return port_state; -} - -static void port__free(port_state_t* port) { - assert(port != NULL); - free(port); -} - -static HANDLE port__create_iocp(void) { - HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); - if (iocp == NULL) - return_map_error(NULL); - - return iocp; -} - -port_state_t* port_new(HANDLE* iocp_out) { - port_state_t* port_state; - HANDLE iocp; - - port_state = port__alloc(); - if (port_state == NULL) - goto err1; - - iocp = port__create_iocp(); - if (iocp == NULL) - goto err2; - - memset(port_state, 0, sizeof *port_state); - - port_state->iocp = iocp; - tree_init(&port_state->sock_tree); - queue_init(&port_state->sock_update_queue); - queue_init(&port_state->sock_deleted_queue); - queue_init(&port_state->poll_group_queue); - ts_tree_node_init(&port_state->handle_tree_node); - InitializeCriticalSection(&port_state->lock); - - *iocp_out = iocp; - return port_state; - -err2: - port__free(port_state); -err1: - return NULL; -} - -static int port__close_iocp(port_state_t* port_state) { - HANDLE iocp = port_state->iocp; - port_state->iocp = NULL; - - if (!CloseHandle(iocp)) - return_map_error(-1); - - return 0; -} - -int port_close(port_state_t* port_state) { - int result; - - EnterCriticalSection(&port_state->lock); - result = port__close_iocp(port_state); - LeaveCriticalSection(&port_state->lock); - - return result; -} - -int port_delete(port_state_t* port_state) { - tree_node_t* tree_node; - queue_node_t* queue_node; - - /* At this point the IOCP port should have been closed. */ - assert(port_state->iocp == NULL); - - while ((tree_node = tree_root(&port_state->sock_tree)) != NULL) { - sock_state_t* sock_state = sock_state_from_tree_node(tree_node); - sock_force_delete(port_state, sock_state); - } - - while ((queue_node = queue_first(&port_state->sock_deleted_queue)) != NULL) { - sock_state_t* sock_state = sock_state_from_queue_node(queue_node); - sock_force_delete(port_state, sock_state); - } - - while ((queue_node = queue_first(&port_state->poll_group_queue)) != NULL) { - poll_group_t* poll_group = poll_group_from_queue_node(queue_node); - poll_group_delete(poll_group); - } - - assert(queue_empty(&port_state->sock_update_queue)); - - DeleteCriticalSection(&port_state->lock); - - port__free(port_state); - - return 0; -} - -static int port__update_events(port_state_t* port_state) { - queue_t* sock_update_queue = &port_state->sock_update_queue; - - /* Walk the queue, submitting new poll requests for every socket that needs - * it. 
*/ - while (!queue_empty(sock_update_queue)) { - queue_node_t* queue_node = queue_first(sock_update_queue); - sock_state_t* sock_state = sock_state_from_queue_node(queue_node); - - if (sock_update(port_state, sock_state) < 0) - return -1; - - /* sock_update() removes the socket from the update queue. */ - } - - return 0; -} - -static void port__update_events_if_polling(port_state_t* port_state) { - if (port_state->active_poll_count > 0) - port__update_events(port_state); -} - -static int port__feed_events(port_state_t* port_state, - struct epoll_event* epoll_events, - OVERLAPPED_ENTRY* iocp_events, - DWORD iocp_event_count) { - int epoll_event_count = 0; - DWORD i; - - for (i = 0; i < iocp_event_count; i++) { - OVERLAPPED* overlapped = iocp_events[i].lpOverlapped; - struct epoll_event* ev = &epoll_events[epoll_event_count]; - - epoll_event_count += sock_feed_event(port_state, overlapped, ev); - } - - return epoll_event_count; -} - -static int port__poll(port_state_t* port_state, - struct epoll_event* epoll_events, - OVERLAPPED_ENTRY* iocp_events, - DWORD maxevents, - DWORD timeout) { - DWORD completion_count; - - if (port__update_events(port_state) < 0) - return -1; - - port_state->active_poll_count++; - - LeaveCriticalSection(&port_state->lock); - - BOOL r = GetQueuedCompletionStatusEx(port_state->iocp, - iocp_events, - maxevents, - &completion_count, - timeout, - FALSE); - - EnterCriticalSection(&port_state->lock); - - port_state->active_poll_count--; - - if (!r) - return_map_error(-1); - - return port__feed_events( - port_state, epoll_events, iocp_events, completion_count); -} - -int port_wait(port_state_t* port_state, - struct epoll_event* events, - int maxevents, - int timeout) { - OVERLAPPED_ENTRY stack_iocp_events[PORT__MAX_ON_STACK_COMPLETIONS]; - OVERLAPPED_ENTRY* iocp_events; - uint64_t due = 0; - DWORD gqcs_timeout; - int result; - - /* Check whether `maxevents` is in range. */ - if (maxevents <= 0) - return_set_error(-1, ERROR_INVALID_PARAMETER); - - /* Decide whether the IOCP completion list can live on the stack, or allocate - * memory for it on the heap. */ - if ((size_t) maxevents <= array_count(stack_iocp_events)) { - iocp_events = stack_iocp_events; - } else if ((iocp_events = - malloc((size_t) maxevents * sizeof *iocp_events)) == NULL) { - iocp_events = stack_iocp_events; - maxevents = array_count(stack_iocp_events); - } - - /* Compute the timeout for GetQueuedCompletionStatus, and the wait end - * time, if the user specified a timeout other than zero or infinite. */ - if (timeout > 0) { - due = GetTickCount64() + (uint64_t) timeout; - gqcs_timeout = (DWORD) timeout; - } else if (timeout == 0) { - gqcs_timeout = 0; - } else { - gqcs_timeout = INFINITE; - } - - EnterCriticalSection(&port_state->lock); - - /* Dequeue completion packets until either at least one interesting event - * has been discovered, or the timeout is reached. */ - for (;;) { - uint64_t now; - - result = port__poll( - port_state, events, iocp_events, (DWORD) maxevents, gqcs_timeout); - if (result < 0 || result > 0) - break; /* Result, error, or time-out. */ - - if (timeout < 0) - continue; /* When timeout is negative, never time out. */ - - /* Update time. */ - now = GetTickCount64(); - - /* Do not allow the due time to be in the past. */ - if (now >= due) { - SetLastError(WAIT_TIMEOUT); - break; - } - - /* Recompute time-out argument for GetQueuedCompletionStatus. 
*/ - gqcs_timeout = (DWORD)(due - now); - } - - port__update_events_if_polling(port_state); - - LeaveCriticalSection(&port_state->lock); - - if (iocp_events != stack_iocp_events) - free(iocp_events); - - if (result >= 0) - return result; - else if (GetLastError() == WAIT_TIMEOUT) - return 0; - else - return -1; -} - -static int port__ctl_add(port_state_t* port_state, - SOCKET sock, - struct epoll_event* ev) { - sock_state_t* sock_state = sock_new(port_state, sock); - if (sock_state == NULL) - return -1; - - if (sock_set_event(port_state, sock_state, ev) < 0) { - sock_delete(port_state, sock_state); - return -1; - } - - port__update_events_if_polling(port_state); - - return 0; -} - -static int port__ctl_mod(port_state_t* port_state, - SOCKET sock, - struct epoll_event* ev) { - sock_state_t* sock_state = port_find_socket(port_state, sock); - if (sock_state == NULL) - return -1; - - if (sock_set_event(port_state, sock_state, ev) < 0) - return -1; - - port__update_events_if_polling(port_state); - - return 0; -} - -static int port__ctl_del(port_state_t* port_state, SOCKET sock) { - sock_state_t* sock_state = port_find_socket(port_state, sock); - if (sock_state == NULL) - return -1; - - sock_delete(port_state, sock_state); - - return 0; -} - -static int port__ctl_op(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev) { - switch (op) { - case EPOLL_CTL_ADD: - return port__ctl_add(port_state, sock, ev); - case EPOLL_CTL_MOD: - return port__ctl_mod(port_state, sock, ev); - case EPOLL_CTL_DEL: - return port__ctl_del(port_state, sock); - default: - return_set_error(-1, ERROR_INVALID_PARAMETER); - } -} - -int port_ctl(port_state_t* port_state, - int op, - SOCKET sock, - struct epoll_event* ev) { - int result; - - EnterCriticalSection(&port_state->lock); - result = port__ctl_op(port_state, op, sock, ev); - LeaveCriticalSection(&port_state->lock); - - return result; -} - -int port_register_socket_handle(port_state_t* port_state, - sock_state_t* sock_state, - SOCKET socket) { - if (tree_add(&port_state->sock_tree, - sock_state_to_tree_node(sock_state), - socket) < 0) - return_set_error(-1, ERROR_ALREADY_EXISTS); - return 0; -} - -void port_unregister_socket_handle(port_state_t* port_state, - sock_state_t* sock_state) { - tree_del(&port_state->sock_tree, sock_state_to_tree_node(sock_state)); -} - -sock_state_t* port_find_socket(port_state_t* port_state, SOCKET socket) { - tree_node_t* tree_node = tree_find(&port_state->sock_tree, socket); - if (tree_node == NULL) - return_set_error(NULL, ERROR_NOT_FOUND); - return sock_state_from_tree_node(tree_node); -} - -void port_request_socket_update(port_state_t* port_state, - sock_state_t* sock_state) { - if (queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_append(&port_state->sock_update_queue, - sock_state_to_queue_node(sock_state)); -} - -void port_cancel_socket_update(port_state_t* port_state, - sock_state_t* sock_state) { - unused_var(port_state); - if (!queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_remove(sock_state_to_queue_node(sock_state)); -} - -void port_add_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state) { - if (queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - queue_append(&port_state->sock_deleted_queue, - sock_state_to_queue_node(sock_state)); -} - -void port_remove_deleted_socket(port_state_t* port_state, - sock_state_t* sock_state) { - unused_var(port_state); - if (!queue_enqueued(sock_state_to_queue_node(sock_state))) - return; - 
queue_remove(sock_state_to_queue_node(sock_state)); -} - -void queue_init(queue_t* queue) { - queue_node_init(&queue->head); -} - -void queue_node_init(queue_node_t* node) { - node->prev = node; - node->next = node; -} - -static inline void queue__detach_node(queue_node_t* node) { - node->prev->next = node->next; - node->next->prev = node->prev; -} - -queue_node_t* queue_first(const queue_t* queue) { - return !queue_empty(queue) ? queue->head.next : NULL; -} - -queue_node_t* queue_last(const queue_t* queue) { - return !queue_empty(queue) ? queue->head.prev : NULL; -} - -void queue_prepend(queue_t* queue, queue_node_t* node) { - node->next = queue->head.next; - node->prev = &queue->head; - node->next->prev = node; - queue->head.next = node; -} - -void queue_append(queue_t* queue, queue_node_t* node) { - node->next = &queue->head; - node->prev = queue->head.prev; - node->prev->next = node; - queue->head.prev = node; -} - -void queue_move_first(queue_t* queue, queue_node_t* node) { - queue__detach_node(node); - queue_prepend(queue, node); -} - -void queue_move_last(queue_t* queue, queue_node_t* node) { - queue__detach_node(node); - queue_append(queue, node); -} - -void queue_remove(queue_node_t* node) { - queue__detach_node(node); - queue_node_init(node); -} - -bool queue_empty(const queue_t* queue) { - return !queue_enqueued(&queue->head); -} - -bool queue_enqueued(const queue_node_t* node) { - return node->prev != node; -} - -static const long REFLOCK__REF = (long) 0x00000001; -static const long REFLOCK__REF_MASK = (long) 0x0fffffff; -static const long REFLOCK__DESTROY = (long) 0x10000000; -static const long REFLOCK__DESTROY_MASK = (long) 0xf0000000; -static const long REFLOCK__POISON = (long) 0x300dead0; - -static HANDLE reflock__keyed_event = NULL; - -int reflock_global_init(void) { - NTSTATUS status = - NtCreateKeyedEvent(&reflock__keyed_event, KEYEDEVENT_ALL_ACCESS, NULL, 0); - if (status != STATUS_SUCCESS) - return_set_error(-1, RtlNtStatusToDosError(status)); - return 0; -} - -void reflock_init(reflock_t* reflock) { - reflock->state = 0; -} - -static void reflock__signal_event(void* address) { - NTSTATUS status = - NtReleaseKeyedEvent(reflock__keyed_event, address, FALSE, NULL); - if (status != STATUS_SUCCESS) - abort(); -} - -static void reflock__await_event(void* address) { - NTSTATUS status = - NtWaitForKeyedEvent(reflock__keyed_event, address, FALSE, NULL); - if (status != STATUS_SUCCESS) - abort(); -} - -void reflock_ref(reflock_t* reflock) { - long state = InterlockedAdd(&reflock->state, REFLOCK__REF); - - /* Verify that the counter didn't overflow and the lock isn't destroyed. */ - assert((state & REFLOCK__DESTROY_MASK) == 0); - unused_var(state); -} - -void reflock_unref(reflock_t* reflock) { - long state = InterlockedAdd(&reflock->state, -REFLOCK__REF); - - /* Verify that the lock was referenced and not already destroyed. */ - assert((state & REFLOCK__DESTROY_MASK & ~REFLOCK__DESTROY) == 0); - - if (state == REFLOCK__DESTROY) - reflock__signal_event(reflock); -} - -void reflock_unref_and_destroy(reflock_t* reflock) { - long state = - InterlockedAdd(&reflock->state, REFLOCK__DESTROY - REFLOCK__REF); - long ref_count = state & REFLOCK__REF_MASK; - - /* Verify that the lock was referenced and not already destroyed. 
*/ - assert((state & REFLOCK__DESTROY_MASK) == REFLOCK__DESTROY); - - if (ref_count != 0) - reflock__await_event(reflock); - - state = InterlockedExchange(&reflock->state, REFLOCK__POISON); - assert(state == REFLOCK__DESTROY); -} - -static const uint32_t SOCK__KNOWN_EPOLL_EVENTS = - EPOLLIN | EPOLLPRI | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | - EPOLLRDBAND | EPOLLWRNORM | EPOLLWRBAND | EPOLLMSG | EPOLLRDHUP; - -typedef enum sock__poll_status { - SOCK__POLL_IDLE = 0, - SOCK__POLL_PENDING, - SOCK__POLL_CANCELLED -} sock__poll_status_t; - -typedef struct sock_state { - OVERLAPPED overlapped; - AFD_POLL_INFO poll_info; - queue_node_t queue_node; - tree_node_t tree_node; - poll_group_t* poll_group; - SOCKET base_socket; - epoll_data_t user_data; - uint32_t user_events; - uint32_t pending_events; - sock__poll_status_t poll_status; - bool delete_pending; -} sock_state_t; - -static inline sock_state_t* sock__alloc(void) { - sock_state_t* sock_state = malloc(sizeof *sock_state); - if (sock_state == NULL) - return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY); - return sock_state; -} - -static inline void sock__free(sock_state_t* sock_state) { - free(sock_state); -} - -static int sock__cancel_poll(sock_state_t* sock_state) { - HANDLE afd_helper_handle = - poll_group_get_afd_helper_handle(sock_state->poll_group); - assert(sock_state->poll_status == SOCK__POLL_PENDING); - - /* CancelIoEx() may fail with ERROR_NOT_FOUND if the overlapped operation has - * already completed. This is not a problem and we proceed normally. */ - if (!HasOverlappedIoCompleted(&sock_state->overlapped) && - !CancelIoEx(afd_helper_handle, &sock_state->overlapped) && - GetLastError() != ERROR_NOT_FOUND) - return_map_error(-1); - - sock_state->poll_status = SOCK__POLL_CANCELLED; - sock_state->pending_events = 0; - return 0; -} - -sock_state_t* sock_new(port_state_t* port_state, SOCKET socket) { - SOCKET base_socket; - poll_group_t* poll_group; - sock_state_t* sock_state; - - if (socket == 0 || socket == INVALID_SOCKET) - return_set_error(NULL, ERROR_INVALID_HANDLE); - - base_socket = ws_get_base_socket(socket); - if (base_socket == INVALID_SOCKET) - return NULL; - - poll_group = poll_group_acquire(port_state); - if (poll_group == NULL) - return NULL; - - sock_state = sock__alloc(); - if (sock_state == NULL) - goto err1; - - memset(sock_state, 0, sizeof *sock_state); - - sock_state->base_socket = base_socket; - sock_state->poll_group = poll_group; - - tree_node_init(&sock_state->tree_node); - queue_node_init(&sock_state->queue_node); - - if (port_register_socket_handle(port_state, sock_state, socket) < 0) - goto err2; - - return sock_state; - -err2: - sock__free(sock_state); -err1: - poll_group_release(poll_group); - - return NULL; -} - -static int sock__delete(port_state_t* port_state, - sock_state_t* sock_state, - bool force) { - if (!sock_state->delete_pending) { - if (sock_state->poll_status == SOCK__POLL_PENDING) - sock__cancel_poll(sock_state); - - port_cancel_socket_update(port_state, sock_state); - port_unregister_socket_handle(port_state, sock_state); - - sock_state->delete_pending = true; - } - - /* If the poll request still needs to complete, the sock_state object can't - * be free()d yet. `sock_feed_event()` or `port_close()` will take care - * of this later. */ - if (force || sock_state->poll_status == SOCK__POLL_IDLE) { - /* Free the sock_state now. */ - port_remove_deleted_socket(port_state, sock_state); - poll_group_release(sock_state->poll_group); - sock__free(sock_state); - } else { - /* Free the socket later. 
*/ - port_add_deleted_socket(port_state, sock_state); - } - - return 0; -} - -void sock_delete(port_state_t* port_state, sock_state_t* sock_state) { - sock__delete(port_state, sock_state, false); -} - -void sock_force_delete(port_state_t* port_state, sock_state_t* sock_state) { - sock__delete(port_state, sock_state, true); -} - -int sock_set_event(port_state_t* port_state, - sock_state_t* sock_state, - const struct epoll_event* ev) { - /* EPOLLERR and EPOLLHUP are always reported, even when not requested by the - * caller. However they are disabled after a event has been reported for a - * socket for which the EPOLLONESHOT flag as set. */ - uint32_t events = ev->events | EPOLLERR | EPOLLHUP; - - sock_state->user_events = events; - sock_state->user_data = ev->data; - - if ((events & SOCK__KNOWN_EPOLL_EVENTS & ~sock_state->pending_events) != 0) - port_request_socket_update(port_state, sock_state); - - return 0; -} - -static inline DWORD sock__epoll_events_to_afd_events(uint32_t epoll_events) { - /* Always monitor for AFD_POLL_LOCAL_CLOSE, which is triggered when the - * socket is closed with closesocket() or CloseHandle(). */ - DWORD afd_events = AFD_POLL_LOCAL_CLOSE; - - if (epoll_events & (EPOLLIN | EPOLLRDNORM)) - afd_events |= AFD_POLL_RECEIVE | AFD_POLL_ACCEPT; - if (epoll_events & (EPOLLPRI | EPOLLRDBAND)) - afd_events |= AFD_POLL_RECEIVE_EXPEDITED; - if (epoll_events & (EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND)) - afd_events |= AFD_POLL_SEND; - if (epoll_events & (EPOLLIN | EPOLLRDNORM | EPOLLRDHUP)) - afd_events |= AFD_POLL_DISCONNECT; - if (epoll_events & EPOLLHUP) - afd_events |= AFD_POLL_ABORT; - if (epoll_events & EPOLLERR) - afd_events |= AFD_POLL_CONNECT_FAIL; - - return afd_events; -} - -static inline uint32_t sock__afd_events_to_epoll_events(DWORD afd_events) { - uint32_t epoll_events = 0; - - if (afd_events & (AFD_POLL_RECEIVE | AFD_POLL_ACCEPT)) - epoll_events |= EPOLLIN | EPOLLRDNORM; - if (afd_events & AFD_POLL_RECEIVE_EXPEDITED) - epoll_events |= EPOLLPRI | EPOLLRDBAND; - if (afd_events & AFD_POLL_SEND) - epoll_events |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; - if (afd_events & AFD_POLL_DISCONNECT) - epoll_events |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; - if (afd_events & AFD_POLL_ABORT) - epoll_events |= EPOLLHUP; - if (afd_events & AFD_POLL_CONNECT_FAIL) - /* Linux reports all these events after connect() has failed. */ - epoll_events |= - EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLRDNORM | EPOLLWRNORM | EPOLLRDHUP; - - return epoll_events; -} - -int sock_update(port_state_t* port_state, sock_state_t* sock_state) { - assert(!sock_state->delete_pending); - - if ((sock_state->poll_status == SOCK__POLL_PENDING) && - (sock_state->user_events & SOCK__KNOWN_EPOLL_EVENTS & - ~sock_state->pending_events) == 0) { - /* All the events the user is interested in are already being monitored by - * the pending poll operation. It might spuriously complete because of an - * event that we're no longer interested in; when that happens we'll submit - * a new poll operation with the updated event mask. */ - - } else if (sock_state->poll_status == SOCK__POLL_PENDING) { - /* A poll operation is already pending, but it's not monitoring for all the - * events that the user is interested in. Therefore, cancel the pending - * poll operation; when we receive it's completion package, a new poll - * operation will be submitted with the correct event mask. 
*/ - if (sock__cancel_poll(sock_state) < 0) - return -1; - - } else if (sock_state->poll_status == SOCK__POLL_CANCELLED) { - /* The poll operation has already been cancelled, we're still waiting for - * it to return. For now, there's nothing that needs to be done. */ - - } else if (sock_state->poll_status == SOCK__POLL_IDLE) { - /* No poll operation is pending; start one. */ - sock_state->poll_info.Exclusive = FALSE; - sock_state->poll_info.NumberOfHandles = 1; - sock_state->poll_info.Timeout.QuadPart = INT64_MAX; - sock_state->poll_info.Handles[0].Handle = (HANDLE) sock_state->base_socket; - sock_state->poll_info.Handles[0].Status = 0; - sock_state->poll_info.Handles[0].Events = - sock__epoll_events_to_afd_events(sock_state->user_events); - - memset(&sock_state->overlapped, 0, sizeof sock_state->overlapped); - - if (afd_poll(poll_group_get_afd_helper_handle(sock_state->poll_group), - &sock_state->poll_info, - &sock_state->overlapped) < 0) { - switch (GetLastError()) { - case ERROR_IO_PENDING: - /* Overlapped poll operation in progress; this is expected. */ - break; - case ERROR_INVALID_HANDLE: - /* Socket closed; it'll be dropped from the epoll set. */ - return sock__delete(port_state, sock_state, false); - default: - /* Other errors are propagated to the caller. */ - return_map_error(-1); - } - } - - /* The poll request was successfully submitted. */ - sock_state->poll_status = SOCK__POLL_PENDING; - sock_state->pending_events = sock_state->user_events; - - } else { - /* Unreachable. */ - assert(false); - } - - port_cancel_socket_update(port_state, sock_state); - return 0; -} - -int sock_feed_event(port_state_t* port_state, - OVERLAPPED* overlapped, - struct epoll_event* ev) { - sock_state_t* sock_state = - container_of(overlapped, sock_state_t, overlapped); - AFD_POLL_INFO* poll_info = &sock_state->poll_info; - uint32_t epoll_events = 0; - - sock_state->poll_status = SOCK__POLL_IDLE; - sock_state->pending_events = 0; - - if (sock_state->delete_pending) { - /* Socket has been deleted earlier and can now be freed. */ - return sock__delete(port_state, sock_state, false); - - } else if ((NTSTATUS) overlapped->Internal == STATUS_CANCELLED) { - /* The poll request was cancelled by CancelIoEx. */ - - } else if (!NT_SUCCESS(overlapped->Internal)) { - /* The overlapped request itself failed in an unexpected way. */ - epoll_events = EPOLLERR; - - } else if (poll_info->NumberOfHandles < 1) { - /* This poll operation succeeded but didn't report any socket events. */ - - } else if (poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) { - /* The poll operation reported that the socket was closed. */ - return sock__delete(port_state, sock_state, false); - - } else { - /* Events related to our socket were reported. */ - epoll_events = - sock__afd_events_to_epoll_events(poll_info->Handles[0].Events); - } - - /* Requeue the socket so a new poll request will be submitted. */ - port_request_socket_update(port_state, sock_state); - - /* Filter out events that the user didn't ask for. */ - epoll_events &= sock_state->user_events; - - /* Return if there are no epoll events to report. */ - if (epoll_events == 0) - return 0; - - /* If the the socket has the EPOLLONESHOT flag set, unmonitor all events, - * even EPOLLERR and EPOLLHUP. But always keep looking for closed sockets. 
*/ - if (sock_state->user_events & EPOLLONESHOT) - sock_state->user_events = 0; - - ev->data = sock_state->user_data; - ev->events = epoll_events; - return 1; -} - -queue_node_t* sock_state_to_queue_node(sock_state_t* sock_state) { - return &sock_state->queue_node; -} - -sock_state_t* sock_state_from_tree_node(tree_node_t* tree_node) { - return container_of(tree_node, sock_state_t, tree_node); -} - -tree_node_t* sock_state_to_tree_node(sock_state_t* sock_state) { - return &sock_state->tree_node; -} - -sock_state_t* sock_state_from_queue_node(queue_node_t* queue_node) { - return container_of(queue_node, sock_state_t, queue_node); -} - -void ts_tree_init(ts_tree_t* ts_tree) { - tree_init(&ts_tree->tree); - InitializeSRWLock(&ts_tree->lock); -} - -void ts_tree_node_init(ts_tree_node_t* node) { - tree_node_init(&node->tree_node); - reflock_init(&node->reflock); -} - -int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key) { - int r; - - AcquireSRWLockExclusive(&ts_tree->lock); - r = tree_add(&ts_tree->tree, &node->tree_node, key); - ReleaseSRWLockExclusive(&ts_tree->lock); - - return r; -} - -static inline ts_tree_node_t* ts_tree__find_node(ts_tree_t* ts_tree, - uintptr_t key) { - tree_node_t* tree_node = tree_find(&ts_tree->tree, key); - if (tree_node == NULL) - return NULL; - - return container_of(tree_node, ts_tree_node_t, tree_node); -} - -ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, uintptr_t key) { - ts_tree_node_t* ts_tree_node; - - AcquireSRWLockExclusive(&ts_tree->lock); - - ts_tree_node = ts_tree__find_node(ts_tree, key); - if (ts_tree_node != NULL) { - tree_del(&ts_tree->tree, &ts_tree_node->tree_node); - reflock_ref(&ts_tree_node->reflock); - } - - ReleaseSRWLockExclusive(&ts_tree->lock); - - return ts_tree_node; -} - -ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, uintptr_t key) { - ts_tree_node_t* ts_tree_node; - - AcquireSRWLockShared(&ts_tree->lock); - - ts_tree_node = ts_tree__find_node(ts_tree, key); - if (ts_tree_node != NULL) - reflock_ref(&ts_tree_node->reflock); - - ReleaseSRWLockShared(&ts_tree->lock); - - return ts_tree_node; -} - -void ts_tree_node_unref(ts_tree_node_t* node) { - reflock_unref(&node->reflock); -} - -void ts_tree_node_unref_and_destroy(ts_tree_node_t* node) { - reflock_unref_and_destroy(&node->reflock); -} - -void tree_init(tree_t* tree) { - memset(tree, 0, sizeof *tree); -} - -void tree_node_init(tree_node_t* node) { - memset(node, 0, sizeof *node); -} - -#define TREE__ROTATE(cis, trans) \ - tree_node_t* p = node; \ - tree_node_t* q = node->trans; \ - tree_node_t* parent = p->parent; \ - \ - if (parent) { \ - if (parent->left == p) \ - parent->left = q; \ - else \ - parent->right = q; \ - } else { \ - tree->root = q; \ - } \ - \ - q->parent = parent; \ - p->parent = q; \ - p->trans = q->cis; \ - if (p->trans) \ - p->trans->parent = p; \ - q->cis = p; - -static inline void tree__rotate_left(tree_t* tree, tree_node_t* node) { - TREE__ROTATE(left, right) -} - -static inline void tree__rotate_right(tree_t* tree, tree_node_t* node) { - TREE__ROTATE(right, left) -} - -#define TREE__INSERT_OR_DESCEND(side) \ - if (parent->side) { \ - parent = parent->side; \ - } else { \ - parent->side = node; \ - break; \ - } - -#define TREE__FIXUP_AFTER_INSERT(cis, trans) \ - tree_node_t* grandparent = parent->parent; \ - tree_node_t* uncle = grandparent->trans; \ - \ - if (uncle && uncle->red) { \ - parent->red = uncle->red = false; \ - grandparent->red = true; \ - node = grandparent; \ - } else { \ - if (node == parent->trans) { \ - 
-
-int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
-  tree_node_t* parent;
-
-  parent = tree->root;
-  if (parent) {
-    for (;;) {
-      if (key < parent->key) {
-        TREE__INSERT_OR_DESCEND(left)
-      } else if (key > parent->key) {
-        TREE__INSERT_OR_DESCEND(right)
-      } else {
-        return -1;
-      }
-    }
-  } else {
-    tree->root = node;
-  }
-
-  node->key = key;
-  node->left = node->right = NULL;
-  node->parent = parent;
-  node->red = true;
-
-  for (; parent && parent->red; parent = node->parent) {
-    if (parent == parent->parent->left) {
-      TREE__FIXUP_AFTER_INSERT(left, right)
-    } else {
-      TREE__FIXUP_AFTER_INSERT(right, left)
-    }
-  }
-  tree->root->red = false;
-
-  return 0;
-}
-
-#define TREE__FIXUP_AFTER_REMOVE(cis, trans) \
-  tree_node_t* sibling = parent->trans; \
- \
-  if (sibling->red) { \
-    sibling->red = false; \
-    parent->red = true; \
-    tree__rotate_##cis(tree, parent); \
-    sibling = parent->trans; \
-  } \
-  if ((sibling->left && sibling->left->red) || \
-      (sibling->right && sibling->right->red)) { \
-    if (!sibling->trans || !sibling->trans->red) { \
-      sibling->cis->red = false; \
-      sibling->red = true; \
-      tree__rotate_##trans(tree, sibling); \
-      sibling = parent->trans; \
-    } \
-    sibling->red = parent->red; \
-    parent->red = sibling->trans->red = false; \
-    tree__rotate_##cis(tree, parent); \
-    node = tree->root; \
-    break; \
-  } \
-  sibling->red = true;
-
-void tree_del(tree_t* tree, tree_node_t* node) {
-  tree_node_t* parent = node->parent;
-  tree_node_t* left = node->left;
-  tree_node_t* right = node->right;
-  tree_node_t* next;
-  bool red;
-
-  if (!left) {
-    next = right;
-  } else if (!right) {
-    next = left;
-  } else {
-    next = right;
-    while (next->left)
-      next = next->left;
-  }
-
-  if (parent) {
-    if (parent->left == node)
-      parent->left = next;
-    else
-      parent->right = next;
-  } else {
-    tree->root = next;
-  }
-
-  if (left && right) {
-    red = next->red;
-    next->red = node->red;
-    next->left = left;
-    left->parent = next;
-    if (next != right) {
-      parent = next->parent;
-      next->parent = node->parent;
-      node = next->right;
-      parent->left = node;
-      next->right = right;
-      right->parent = next;
-    } else {
-      next->parent = parent;
-      parent = next;
-      node = next->right;
-    }
-  } else {
-    red = node->red;
-    node = next;
-  }
-
-  if (node)
-    node->parent = parent;
-  if (red)
-    return;
-  if (node && node->red) {
-    node->red = false;
-    return;
-  }
-
-  do {
-    if (node == tree->root)
-      break;
-    if (node == parent->left) {
-      TREE__FIXUP_AFTER_REMOVE(left, right)
-    } else {
-      TREE__FIXUP_AFTER_REMOVE(right, left)
-    }
-    node = parent;
-    parent = parent->parent;
-  } while (!node->red);
-
-  if (node)
-    node->red = false;
-}
-
-tree_node_t* tree_find(const tree_t* tree, uintptr_t key) {
-  tree_node_t* node = tree->root;
-  while (node) {
-    if (key < node->key)
-      node = node->left;
-    else if (key > node->key)
-      node = node->right;
-    else
-      return node;
-  }
-  return NULL;
-}
-
-tree_node_t* tree_root(const tree_t* tree) {
-  return tree->root;
-}
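The tree code above is an intrusive red-black tree: callers embed a tree_node_t in their own struct, key it with a uintptr_t, and map a found node back to the enclosing object with container_of(). A minimal usage sketch, assuming the internal tree.h declarations and the same container_of() helper used above (the session type, field names, and header names are illustrative):

#include <stdint.h>
#include "tree.h" /* internal wepoll header; name assumed */
#include "util.h" /* for container_of(); assumed helper */

typedef struct session {
  tree_node_t tree_node; /* intrusive node embedded in the caller's struct */
  uint64_t bytes_seen;
} session_t;

static tree_t sessions; /* initialize once with tree_init(&sessions) */

static int session_track(session_t* s, uintptr_t key) {
  tree_node_init(&s->tree_node);
  /* tree_add() returns -1 for a duplicate key, which callers can use to
   * detect a key that is already registered. */
  return tree_add(&sessions, &s->tree_node, key);
}

static session_t* session_lookup(uintptr_t key) {
  tree_node_t* node = tree_find(&sessions, key);
  return node != NULL ? container_of(node, session_t, tree_node) : NULL;
}

static void session_untrack(session_t* s) {
  tree_del(&sessions, &s->tree_node);
}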
-
-#ifndef SIO_BASE_HANDLE
-#define SIO_BASE_HANDLE 0x48000022
-#endif
-
-int ws_global_init(void) {
-  int r;
-  WSADATA wsa_data;
-
-  r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
-  if (r != 0)
-    return_set_error(-1, (DWORD) r);
-
-  return 0;
-}
-
-SOCKET ws_get_base_socket(SOCKET socket) {
-  SOCKET base_socket;
-  DWORD bytes;
-
-  if (WSAIoctl(socket,
-               SIO_BASE_HANDLE,
-               NULL,
-               0,
-               &base_socket,
-               sizeof base_socket,
-               &bytes,
-               NULL,
-               NULL) == SOCKET_ERROR)
-    return_map_error(INVALID_SOCKET);
-
-  return base_socket;
-}
diff --git a/sources/wepoll/wepoll.h b/sources/wepoll/wepoll.h
deleted file mode 100644
index eebde2111fe1afaa3c75b8d19ada8b9ba5345c06..0000000000000000000000000000000000000000
--- a/sources/wepoll/wepoll.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * wepoll - epoll for Windows
- * https://github.com/piscisaureus/wepoll
- *
- * Copyright 2012-2019, Bert Belder <bertbelder@gmail.com>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef WEPOLL_H_
-#define WEPOLL_H_
-
-#ifndef WEPOLL_EXPORT
-#define WEPOLL_EXPORT
-#endif
-
-#include <stdint.h>
-
-enum EPOLL_EVENTS {
-  EPOLLIN = (int) (1U << 0),
-  EPOLLPRI = (int) (1U << 1),
-  EPOLLOUT = (int) (1U << 2),
-  EPOLLERR = (int) (1U << 3),
-  EPOLLHUP = (int) (1U << 4),
-  EPOLLRDNORM = (int) (1U << 6),
-  EPOLLRDBAND = (int) (1U << 7),
-  EPOLLWRNORM = (int) (1U << 8),
-  EPOLLWRBAND = (int) (1U << 9),
-  EPOLLMSG = (int) (1U << 10), /* Never reported. */
-  EPOLLRDHUP = (int) (1U << 13),
-  EPOLLONESHOT = (int) (1U << 31)
-};
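These bit values are what epoll_wait() hands back in epoll_event.events, and they are the same flags that sock_feed_event() produced from AFD poll results earlier in this diff. A small illustrative helper for decomposing a reported mask, assuming only this header (the function name and messages are made up for the example):

#include <stdio.h>
#include "wepoll.h"

/* Illustrative only: turn a mask reported by epoll_wait() into log lines. */
static void log_reported_events(uint32_t events) {
  if (events & EPOLLIN)
    printf("  readable (EPOLLIN)\n");
  if (events & EPOLLOUT)
    printf("  writable (EPOLLOUT)\n");
  if (events & EPOLLRDHUP)
    printf("  peer closed its write side (EPOLLRDHUP)\n");
  if (events & (EPOLLERR | EPOLLHUP))
    printf("  error or hang-up (EPOLLERR | EPOLLHUP)\n");
  /* EPOLLMSG is omitted: the header marks it as never reported. */
}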
-
-#define EPOLLIN (1U << 0)
-#define EPOLLPRI (1U << 1)
-#define EPOLLOUT (1U << 2)
-#define EPOLLERR (1U << 3)
-#define EPOLLHUP (1U << 4)
-#define EPOLLRDNORM (1U << 6)
-#define EPOLLRDBAND (1U << 7)
-#define EPOLLWRNORM (1U << 8)
-#define EPOLLWRBAND (1U << 9)
-#define EPOLLMSG (1U << 10)
-#define EPOLLRDHUP (1U << 13)
-#define EPOLLONESHOT (1U << 31)
-
-#define EPOLL_CTL_ADD 1
-#define EPOLL_CTL_MOD 2
-#define EPOLL_CTL_DEL 3
-
-typedef void* HANDLE;
-typedef uintptr_t SOCKET;
-
-typedef union epoll_data {
-  void* ptr;
-  int fd;
-  uint32_t u32;
-  uint64_t u64;
-  SOCKET sock; /* Windows specific */
-  HANDLE hnd; /* Windows specific */
-} epoll_data_t;
-
-struct epoll_event {
-  uint32_t events; /* Epoll events and flags */
-  epoll_data_t data; /* User data variable */
-};
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-WEPOLL_EXPORT HANDLE epoll_create(int size);
-WEPOLL_EXPORT HANDLE epoll_create1(int flags);
-
-WEPOLL_EXPORT int epoll_close(HANDLE ephnd);
-
-WEPOLL_EXPORT int epoll_ctl(HANDLE ephnd,
-                            int op,
-                            SOCKET sock,
-                            struct epoll_event* event);
-
-WEPOLL_EXPORT int epoll_wait(HANDLE ephnd,
-                             struct epoll_event* events,
-                             int maxevents,
-                             int timeout);
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* WEPOLL_H_ */
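The declarations above are the complete public surface of the bundled wepoll copy removed by this diff. For reference, a minimal end-to-end sketch of how a caller drives that API; the listener setup, port number, and timeout are illustrative, and the program must be linked against ws2_32 with wepoll built alongside it:

#include <winsock2.h>
#include <stdio.h>
#include <string.h>
#include "wepoll.h"

int main(void) {
  WSADATA wsa_data;
  SOCKET listener;
  HANDLE ephnd;
  struct epoll_event ev, ready;
  struct sockaddr_in addr;

  if (WSAStartup(MAKEWORD(2, 2), &wsa_data) != 0)
    return 1;

  listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  memset(&addr, 0, sizeof addr);
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = htons(12345); /* illustrative port */
  if (listener == INVALID_SOCKET ||
      bind(listener, (struct sockaddr*) &addr, (int) sizeof addr) != 0 ||
      listen(listener, SOMAXCONN) != 0)
    return 1;

  /* epoll_create1() returns NULL on failure. */
  ephnd = epoll_create1(0);
  if (ephnd == NULL)
    return 1;

  ev.events = EPOLLIN;
  ev.data.sock = listener;
  if (epoll_ctl(ephnd, EPOLL_CTL_ADD, listener, &ev) < 0)
    return 1;

  /* Block for up to five seconds waiting for a connection attempt. */
  if (epoll_wait(ephnd, &ready, 1, 5000) == 1 && (ready.events & EPOLLIN))
    printf("socket %llu is readable\n", (unsigned long long) ready.data.sock);

  epoll_ctl(ephnd, EPOLL_CTL_DEL, listener, &ev);
  epoll_close(ephnd);
  closesocket(listener);
  WSACleanup();
  return 0;
}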