From 02badb3c4c7658056e2529b8d7169f8f763b05ad Mon Sep 17 00:00:00 2001 From: bol-van Date: Tue, 27 May 2025 16:03:28 +0300 Subject: [PATCH] nfqws,tpws: move --ipset to kavl --- docs/changes.txt | 4 + nfq/helpers.c | 10 ++ nfq/helpers.h | 2 + nfq/kavl.h | 400 +++++++++++++++++++++++++++++++++++++++++++++++ nfq/pools.c | 217 +++++++++++++------------ nfq/pools.h | 43 ++--- tpws/helpers.c | 11 ++ tpws/helpers.h | 2 + tpws/kavl.h | 400 +++++++++++++++++++++++++++++++++++++++++++++++ tpws/pools.c | 219 ++++++++++++++------------ tpws/pools.h | 43 ++--- 11 files changed, 1118 insertions(+), 233 deletions(-) create mode 100644 nfq/kavl.h create mode 100644 tpws/kavl.h diff --git a/docs/changes.txt b/docs/changes.txt index 299f5b24..446adb41 100644 --- a/docs/changes.txt +++ b/docs/changes.txt @@ -502,3 +502,7 @@ init.d, blockcheck: drop time exceeded icmp for nfqws-related connections blockcheck: some dup and orig-ttl mods blockcheck: PKTWS_EXTRA_PRE blockcheck: report test function and domain every test + +v71.1 + +nfqws,tpws: much faster ipset implementation. 
move from hash to avl tree diff --git a/nfq/helpers.c b/nfq/helpers.c index b0670b8c..aec2e9a3 100644 --- a/nfq/helpers.c +++ b/nfq/helpers.c @@ -113,6 +113,16 @@ bool append_to_list_file(const char *filename, const char *s) return bOK; } +void expand_bits(void *target, const void *source, unsigned int source_bitlen, unsigned int target_bytelen) +{ + unsigned int target_bitlen = target_bytelen<<3; + unsigned int bitlen = target_bitlen>3; + + if ((target_bytelen-bytelen)>=1) memset(target+bytelen,0,target_bytelen-bytelen); + memcpy(target,source,bytelen); + if ((bitlen &= 7)) ((uint8_t*)target)[bytelen] = ((uint8_t*)source)[bytelen] & (~((1 << (8-bitlen)) - 1)); +} void ntop46(const struct sockaddr *sa, char *str, size_t len) { diff --git a/nfq/helpers.h b/nfq/helpers.h index 9f4aa2dd..833fe7d5 100644 --- a/nfq/helpers.h +++ b/nfq/helpers.h @@ -31,6 +31,8 @@ bool load_file_nonempty(const char *filename,void *buffer,size_t *buffer_size); bool save_file(const char *filename, const void *buffer, size_t buffer_size); bool append_to_list_file(const char *filename, const char *s); +void expand_bits(void *target, const void *source, unsigned int source_bitlen, unsigned int target_bytelen); + void print_sockaddr(const struct sockaddr *sa); void ntop46(const struct sockaddr *sa, char *str, size_t len); void ntop46_port(const struct sockaddr *sa, char *str, size_t len); diff --git a/nfq/kavl.h b/nfq/kavl.h new file mode 100644 index 00000000..f3520fd7 --- /dev/null +++ b/nfq/kavl.h @@ -0,0 +1,400 @@ +/* The MIT License + + Copyright (c) 2018 by Attractive Chaos + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + 
the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ + +/* An example: + +#include +#include +#include +#include "kavl.h" + +struct my_node { + char key; + KAVL_HEAD(struct my_node) head; +}; +#define my_cmp(p, q) (((q)->key < (p)->key) - ((p)->key < (q)->key)) +KAVL_INIT(my, struct my_node, head, my_cmp) + +int main(void) { + const char *str = "MNOLKQOPHIA"; // from wiki, except a duplicate + struct my_node *root = 0; + int i, l = strlen(str); + for (i = 0; i < l; ++i) { // insert in the input order + struct my_node *q, *p = malloc(sizeof(*p)); + p->key = str[i]; + q = kavl_insert(my, &root, p, 0); + if (p != q) free(p); // if already present, free + } + kavl_itr_t(my) itr; + kavl_itr_first(my, root, &itr); // place at first + do { // traverse + const struct my_node *p = kavl_at(&itr); + putchar(p->key); + free((void*)p); // free node + } while (kavl_itr_next(my, &itr)); + putchar('\n'); + return 0; +} +*/ + +#ifndef KAVL_H +#define KAVL_H + +#ifdef __STRICT_ANSI__ +#define inline __inline__ +#endif + +#define KAVL_MAX_DEPTH 64 + +#define kavl_size(head, p) ((p)? (p)->head.size : 0) +#define kavl_size_child(head, q, i) ((q)->head.p[(i)]? 
(q)->head.p[(i)]->head.size : 0) + +#define KAVL_HEAD(__type) \ + struct { \ + __type *p[2]; \ + signed char balance; /* balance factor */ \ + unsigned size; /* #elements in subtree */ \ + } + +#define __KAVL_FIND(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_find_##suf(const __type *root, const __type *x, unsigned *cnt_) { \ + const __type *p = root; \ + unsigned cnt = 0; \ + while (p != 0) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + if (cmp < 0) p = p->__head.p[0]; \ + else if (cmp > 0) p = p->__head.p[1]; \ + else break; \ + } \ + if (cnt_) *cnt_ = cnt; \ + return (__type*)p; \ + } + +#define __KAVL_ROTATE(suf, __type, __head) \ + /* one rotation: (a,(b,c)q)p => ((a,b)p,c)q */ \ + static inline __type *kavl_rotate1_##suf(__type *p, int dir) { /* dir=0 to left; dir=1 to right */ \ + int opp = 1 - dir; /* opposite direction */ \ + __type *q = p->__head.p[opp]; \ + unsigned size_p = p->__head.size; \ + p->__head.size -= q->__head.size - kavl_size_child(__head, q, dir); \ + q->__head.size = size_p; \ + p->__head.p[opp] = q->__head.p[dir]; \ + q->__head.p[dir] = p; \ + return q; \ + } \ + /* two consecutive rotations: (a,((b,c)r,d)q)p => ((a,b)p,(c,d)q)r */ \ + static inline __type *kavl_rotate2_##suf(__type *p, int dir) { \ + int b1, opp = 1 - dir; \ + __type *q = p->__head.p[opp], *r = q->__head.p[dir]; \ + unsigned size_x_dir = kavl_size_child(__head, r, dir); \ + r->__head.size = p->__head.size; \ + p->__head.size -= q->__head.size - size_x_dir; \ + q->__head.size -= size_x_dir + 1; \ + p->__head.p[opp] = r->__head.p[dir]; \ + r->__head.p[dir] = p; \ + q->__head.p[dir] = r->__head.p[opp]; \ + r->__head.p[opp] = q; \ + b1 = dir == 0? 
+1 : -1; \ + if (r->__head.balance == b1) q->__head.balance = 0, p->__head.balance = -b1; \ + else if (r->__head.balance == 0) q->__head.balance = p->__head.balance = 0; \ + else q->__head.balance = b1, p->__head.balance = 0; \ + r->__head.balance = 0; \ + return r; \ + } + +#define __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_insert_##suf(__type **root_, __type *x, unsigned *cnt_) { \ + unsigned char stack[KAVL_MAX_DEPTH]; \ + __type *path[KAVL_MAX_DEPTH]; \ + __type *bp, *bq; \ + __type *p, *q, *r = 0; /* _r_ is potentially the new root */ \ + int i, which = 0, top, b1, path_len; \ + unsigned cnt = 0; \ + bp = *root_, bq = 0; \ + /* find the insertion location */ \ + for (p = bp, q = bq, top = path_len = 0; p; q = p, p = p->__head.p[which]) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + if (cmp == 0) { \ + if (cnt_) *cnt_ = cnt; \ + return p; \ + } \ + if (p->__head.balance != 0) \ + bq = q, bp = p, top = 0; \ + stack[top++] = which = (cmp > 0); \ + path[path_len++] = p; \ + } \ + if (cnt_) *cnt_ = cnt; \ + x->__head.balance = 0, x->__head.size = 1, x->__head.p[0] = x->__head.p[1] = 0; \ + if (q == 0) *root_ = x; \ + else q->__head.p[which] = x; \ + if (bp == 0) return x; \ + for (i = 0; i < path_len; ++i) ++path[i]->__head.size; \ + for (p = bp, top = 0; p != x; p = p->__head.p[stack[top]], ++top) /* update balance factors */ \ + if (stack[top] == 0) --p->__head.balance; \ + else ++p->__head.balance; \ + if (bp->__head.balance > -2 && bp->__head.balance < 2) return x; /* no re-balance needed */ \ + /* re-balance */ \ + which = (bp->__head.balance < 0); \ + b1 = which == 0? 
+1 : -1; \ + q = bp->__head.p[1 - which]; \ + if (q->__head.balance == b1) { \ + r = kavl_rotate1_##suf(bp, which); \ + q->__head.balance = bp->__head.balance = 0; \ + } else r = kavl_rotate2_##suf(bp, which); \ + if (bq == 0) *root_ = r; \ + else bq->__head.p[bp != bq->__head.p[0]] = r; \ + return x; \ + } + +#define __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_erase_##suf(__type **root_, const __type *x, unsigned *cnt_) { \ + __type *p, *path[KAVL_MAX_DEPTH], fake; \ + unsigned char dir[KAVL_MAX_DEPTH]; \ + int i, d = 0, cmp; \ + unsigned cnt = 0; \ + fake.__head.p[0] = *root_, fake.__head.p[1] = 0; \ + if (cnt_) *cnt_ = 0; \ + if (x) { \ + for (cmp = -1, p = &fake; cmp; cmp = __cmp(x, p)) { \ + int which = (cmp > 0); \ + if (cmp > 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + dir[d] = which; \ + path[d++] = p; \ + p = p->__head.p[which]; \ + if (p == 0) { \ + if (cnt_) *cnt_ = 0; \ + return 0; \ + } \ + } \ + cnt += kavl_size_child(__head, p, 0) + 1; /* because p==x is not counted */ \ + } else { \ + for (p = &fake, cnt = 1; p; p = p->__head.p[0]) \ + dir[d] = 0, path[d++] = p; \ + p = path[--d]; \ + } \ + if (cnt_) *cnt_ = cnt; \ + for (i = 1; i < d; ++i) --path[i]->__head.size; \ + if (p->__head.p[1] == 0) { /* ((1,.)2,3)4 => (1,3)4; p=2 */ \ + path[d-1]->__head.p[dir[d-1]] = p->__head.p[0]; \ + } else { \ + __type *q = p->__head.p[1]; \ + if (q->__head.p[0] == 0) { /* ((1,2)3,4)5 => ((1)2,4)5; p=3 */ \ + q->__head.p[0] = p->__head.p[0]; \ + q->__head.balance = p->__head.balance; \ + path[d-1]->__head.p[dir[d-1]] = q; \ + path[d] = q, dir[d++] = 1; \ + q->__head.size = p->__head.size - 1; \ + } else { /* ((1,((.,2)3,4)5)6,7)8 => ((1,(2,4)5)3,7)8; p=6 */ \ + __type *r; \ + int e = d++; /* backup _d_ */\ + for (;;) { \ + dir[d] = 0; \ + path[d++] = q; \ + r = q->__head.p[0]; \ + if (r->__head.p[0] == 0) break; \ + q = r; \ + } \ + r->__head.p[0] = p->__head.p[0]; \ + q->__head.p[0] = r->__head.p[1]; \ + r->__head.p[1] = 
p->__head.p[1]; \ + r->__head.balance = p->__head.balance; \ + path[e-1]->__head.p[dir[e-1]] = r; \ + path[e] = r, dir[e] = 1; \ + for (i = e + 1; i < d; ++i) --path[i]->__head.size; \ + r->__head.size = p->__head.size - 1; \ + } \ + } \ + while (--d > 0) { \ + __type *q = path[d]; \ + int which, other, b1 = 1, b2 = 2; \ + which = dir[d], other = 1 - which; \ + if (which) b1 = -b1, b2 = -b2; \ + q->__head.balance += b1; \ + if (q->__head.balance == b1) break; \ + else if (q->__head.balance == b2) { \ + __type *r = q->__head.p[other]; \ + if (r->__head.balance == -b1) { \ + path[d-1]->__head.p[dir[d-1]] = kavl_rotate2_##suf(q, which); \ + } else { \ + path[d-1]->__head.p[dir[d-1]] = kavl_rotate1_##suf(q, which); \ + if (r->__head.balance == 0) { \ + r->__head.balance = -b1; \ + q->__head.balance = b1; \ + break; \ + } else r->__head.balance = q->__head.balance = 0; \ + } \ + } \ + } \ + *root_ = fake.__head.p[0]; \ + return p; \ + } + +#define kavl_free(__type, __head, __root, __free) do { \ + __type *_p, *_q; \ + for (_p = __root; _p; _p = _q) { \ + if (_p->__head.p[0] == 0) { \ + _q = _p->__head.p[1]; \ + __free(_p); \ + } else { \ + _q = _p->__head.p[0]; \ + _p->__head.p[0] = _q->__head.p[1]; \ + _q->__head.p[1] = _p; \ + } \ + } \ + } while (0) + +#define __KAVL_ITR(suf, __scope, __type, __head, __cmp) \ + struct kavl_itr_##suf { \ + const __type *stack[KAVL_MAX_DEPTH], **top, *right; /* _right_ points to the right child of *top */ \ + }; \ + __scope void kavl_itr_first_##suf(const __type *root, struct kavl_itr_##suf *itr) { \ + const __type *p; \ + for (itr->top = itr->stack - 1, p = root; p; p = p->__head.p[0]) \ + *++itr->top = p; \ + itr->right = (*itr->top)->__head.p[1]; \ + } \ + __scope int kavl_itr_find_##suf(const __type *root, const __type *x, struct kavl_itr_##suf *itr) { \ + const __type *p = root; \ + itr->top = itr->stack - 1; \ + while (p != 0) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp < 0) *++itr->top = p, p = p->__head.p[0]; \ + else if 
(cmp > 0) p = p->__head.p[1]; \ + else break; \ + } \ + if (p) { \ + *++itr->top = p; \ + itr->right = p->__head.p[1]; \ + return 1; \ + } else if (itr->top >= itr->stack) { \ + itr->right = (*itr->top)->__head.p[1]; \ + return 0; \ + } else return 0; \ + } \ + __scope int kavl_itr_next_##suf(struct kavl_itr_##suf *itr) { \ + for (;;) { \ + const __type *p; \ + for (p = itr->right, --itr->top; p; p = p->__head.p[0]) \ + *++itr->top = p; \ + if (itr->top < itr->stack) return 0; \ + itr->right = (*itr->top)->__head.p[1]; \ + return 1; \ + } \ + } + +/** + * Insert a node to the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param proot pointer to the root of the tree (in/out: root may change) + * @param x node to insert (in) + * @param cnt number of nodes smaller than or equal to _x_; can be NULL (out) + * + * @return _x_ if not present in the tree, or the node equal to x. + */ +#define kavl_insert(suf, proot, x, cnt) kavl_insert_##suf(proot, x, cnt) + +/** + * Find a node in the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param x node value to find (in) + * @param cnt number of nodes smaller than or equal to _x_; can be NULL (out) + * + * @return node equal to _x_ if present, or NULL if absent + */ +#define kavl_find(suf, root, x, cnt) kavl_find_##suf(root, x, cnt) + +/** + * Delete a node from the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param proot pointer to the root of the tree (in/out: root may change) + * @param x node value to delete; if NULL, delete the first node (in) + * + * @return node removed from the tree if present, or NULL if absent + */ +#define kavl_erase(suf, proot, x, cnt) kavl_erase_##suf(proot, x, cnt) +#define kavl_erase_first(suf, proot) kavl_erase_##suf(proot, 0, 0) + +#define kavl_itr_t(suf) struct kavl_itr_##suf + +/** + * Place the iterator at the smallest object + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param 
itr iterator + */ +#define kavl_itr_first(suf, root, itr) kavl_itr_first_##suf(root, itr) + +/** + * Place the iterator at the object equal to or greater than the query + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param x query (in) + * @param itr iterator (out) + * + * @return 1 if find; 0 otherwise. kavl_at(itr) is NULL if and only if query is + * larger than all objects in the tree + */ +#define kavl_itr_find(suf, root, x, itr) kavl_itr_find_##suf(root, x, itr) + +/** + * Move to the next object in order + * + * @param itr iterator (modified) + * + * @return 1 if there is a next object; 0 otherwise + */ +#define kavl_itr_next(suf, itr) kavl_itr_next_##suf(itr) + +/** + * Return the pointer at the iterator + * + * @param itr iterator + * + * @return pointer if present; NULL otherwise + */ +#define kavl_at(itr) ((itr)->top < (itr)->stack? 0 : *(itr)->top) + +#define KAVL_INIT2(suf, __scope, __type, __head, __cmp) \ + __KAVL_FIND(suf, __scope, __type, __head, __cmp) \ + __KAVL_ROTATE(suf, __type, __head) \ + __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \ + __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \ + __KAVL_ITR(suf, __scope, __type, __head, __cmp) + +#define KAVL_INIT(suf, __type, __head, __cmp) \ + KAVL_INIT2(suf,, __type, __head, __cmp) + +#endif diff --git a/nfq/pools.c b/nfq/pools.c index ed104074..2ec4cee9 100644 --- a/nfq/pools.c +++ b/nfq/pools.c @@ -260,121 +260,146 @@ bool hostlist_collection_is_empty(const struct hostlist_collection_head *head) } -void ipset4Destroy(ipset4 **ipset) +static int kavl_bit_cmp(const struct kavl_bit_elem *p, const struct kavl_bit_elem *q) { - ipset4 *elem, *tmp; - HASH_ITER(hh, *ipset, elem, tmp) + unsigned int bitlen = q->bitlen < p->bitlen ? 
q->bitlen : p->bitlen; + unsigned int df = bitlen & 7, bytes = bitlen >> 3; + int cmp = memcmp(p->data, q->data, bytes); + + if (cmp || !df) return cmp; + + uint8_t c1 = p->data[bytes] >> (8 - df); + uint8_t c2 = q->data[bytes] >> (8 - df); + return c1data); + free(e); } } -bool ipset4Check(ipset4 *ipset, const struct in_addr *a, uint8_t preflen) +void kavl_bit_delete(struct kavl_bit_elem **hdr, const void *data, unsigned int bitlen) { - uint32_t ip = ntohl(a->s_addr); - struct cidr4 cidr; - ipset4 *ips_found; - - // zero alignment bytes - memset(&cidr,0,sizeof(cidr)); - cidr.preflen = preflen+1; - do - { - cidr.preflen--; - cidr.addr.s_addr = htonl(ip & mask_from_preflen(cidr.preflen)); - HASH_FIND(hh, ipset, &cidr, sizeof(cidr), ips_found); - if (ips_found) return true; - } while(cidr.preflen); - - return false; + struct kavl_bit_elem temp = { + .bitlen = bitlen, .data = (uint8_t*)data + }; + kavl_bit_destroy_elem(kavl_erase(kavl_bit, hdr, &temp, 0)); } -bool ipset4Add(ipset4 **ipset, const struct in_addr *a, uint8_t preflen) +void kavl_bit_destroy(struct kavl_bit_elem **hdr) +{ + while (*hdr) + { + struct kavl_bit_elem *e = kavl_erase_first(kavl_bit, hdr); + if (!e) break; + kavl_bit_destroy_elem(e); + } + free(*hdr); +} +struct kavl_bit_elem *kavl_bit_add(struct kavl_bit_elem **hdr, void *data, unsigned int bitlen, size_t struct_size) +{ + if (!struct_size) struct_size=sizeof(struct kavl_bit_elem); + + struct kavl_bit_elem *v, *e = calloc(1, struct_size); + if (!e) return 0; + + e->bitlen = bitlen; + e->data = data; + + v = kavl_insert(kavl_bit, hdr, e, 0); + while (e != v && e->bitlen < v->bitlen) + { + kavl_bit_delete(hdr, v->data, v->bitlen); + v = kavl_insert(kavl_bit, hdr, e, 0); + } + if (e != v) kavl_bit_destroy_elem(e); + return v; +} +struct kavl_bit_elem *kavl_bit_get(const struct kavl_bit_elem *hdr, const void *data, unsigned int bitlen) +{ + struct kavl_bit_elem temp = { + .bitlen = bitlen, .data = (uint8_t*)data + }; + return kavl_find(kavl_bit, 
hdr, &temp, 0); +} + +static bool ipset_kavl_add(struct kavl_bit_elem **ipset, const void *a, uint8_t preflen) +{ + uint8_t bytelen = (preflen+7)>>3; + uint8_t *abuf = malloc(bytelen); + if (!abuf) return false; + memcpy(abuf,a,bytelen); + if (!kavl_bit_add(ipset,abuf,preflen,0)) + { + free(abuf); + return false; + } + return true; +} + + +bool ipset4Check(const struct kavl_bit_elem *ipset, const struct in_addr *a, uint8_t preflen) +{ + return !!kavl_bit_get(ipset,a,preflen); +} +bool ipset4Add(struct kavl_bit_elem **ipset, const struct in_addr *a, uint8_t preflen) { if (preflen>32) return false; - - // avoid dups - if (ipset4Check(*ipset, a, preflen)) return true; // already included - - struct ipset4 *entry = calloc(1,sizeof(ipset4)); - if (!entry) return false; - - entry->cidr.addr.s_addr = htonl(ntohl(a->s_addr) & mask_from_preflen(preflen)); - entry->cidr.preflen = preflen; - oom = false; - HASH_ADD(hh, *ipset, cidr, sizeof(entry->cidr), entry); - if (oom) { free(entry); return false; } - - return true; + return ipset_kavl_add(ipset,a,preflen); } -void ipset4Print(ipset4 *ipset) +void ipset4Print(struct kavl_bit_elem *ipset) { - ipset4 *ips, *tmp; - HASH_ITER(hh, ipset , ips, tmp) - { - print_cidr4(&ips->cidr); - printf("\n"); - } -} + if (!ipset) return; -void ipset6Destroy(ipset6 **ipset) -{ - ipset6 *elem, *tmp; - HASH_ITER(hh, *ipset, elem, tmp) - { - HASH_DEL(*ipset, elem); - free(elem); - } -} -bool ipset6Check(ipset6 *ipset, const struct in6_addr *a, uint8_t preflen) -{ - struct cidr6 cidr; - ipset6 *ips_found; - - // zero alignment bytes - memset(&cidr,0,sizeof(cidr)); - cidr.preflen = preflen+1; + struct cidr4 c; + const struct kavl_bit_elem *elem; + kavl_itr_t(kavl_bit) itr; + kavl_itr_first(kavl_bit, ipset, &itr); do { - cidr.preflen--; - ip6_and(a, mask_from_preflen6(cidr.preflen), &cidr.addr); - HASH_FIND(hh, ipset, &cidr, sizeof(cidr), ips_found); - if (ips_found) return true; - } while(cidr.preflen); - - return false; -} -bool ipset6Add(ipset6 
**ipset, const struct in6_addr *a, uint8_t preflen) -{ - if (preflen>128) return false; - - // avoid dups - if (ipset6Check(*ipset, a, preflen)) return true; // already included - - struct ipset6 *entry = calloc(1,sizeof(ipset6)); - if (!entry) return false; - - ip6_and(a, mask_from_preflen6(preflen), &entry->cidr.addr); - entry->cidr.preflen = preflen; - oom = false; - HASH_ADD(hh, *ipset, cidr, sizeof(entry->cidr), entry); - if (oom) { free(entry); return false; } - - return true; -} -void ipset6Print(ipset6 *ipset) -{ - ipset6 *ips, *tmp; - HASH_ITER(hh, ipset , ips, tmp) - { - print_cidr6(&ips->cidr); + elem = kavl_at(&itr); + c.preflen = elem->bitlen; + expand_bits(&c.addr, elem->data, elem->bitlen, sizeof(c.addr)); + print_cidr4(&c); printf("\n"); } + while (kavl_itr_next(kavl_bit, &itr)); +} + +bool ipset6Check(const struct kavl_bit_elem *ipset, const struct in6_addr *a, uint8_t preflen) +{ + return !!kavl_bit_get(ipset,a,preflen); +} +bool ipset6Add(struct kavl_bit_elem **ipset, const struct in6_addr *a, uint8_t preflen) +{ + if (preflen>128) return false; + return ipset_kavl_add(ipset,a,preflen); +} +void ipset6Print(struct kavl_bit_elem *ipset) +{ + if (!ipset) return; + + struct cidr6 c; + const struct kavl_bit_elem *elem; + kavl_itr_t(kavl_bit) itr; + kavl_itr_first(kavl_bit, ipset, &itr); + do + { + elem = kavl_at(&itr); + c.preflen = elem->bitlen; + expand_bits(&c.addr, elem->data, elem->bitlen, sizeof(c.addr)); + print_cidr6(&c); + printf("\n"); + } + while (kavl_itr_next(kavl_bit, &itr)); } void ipsetDestroy(ipset *ipset) { - ipset4Destroy(&ipset->ips4); - ipset6Destroy(&ipset->ips6); + kavl_bit_destroy(&ipset->ips4); + kavl_bit_destroy(&ipset->ips6); } void ipsetPrint(ipset *ipset) { diff --git a/nfq/pools.h b/nfq/pools.h index f038c5e2..5adaf7f6 100644 --- a/nfq/pools.h +++ b/nfq/pools.h @@ -13,6 +13,8 @@ #define HASH_FUNCTION HASH_BER #include "uthash.h" +#include "kavl.h" + #define HOSTLIST_POOL_FLAG_STRICT_MATCH 1 typedef struct hostlist_pool { 
@@ -76,39 +78,40 @@ struct hostlist_item *hostlist_collection_search(struct hostlist_collection_head bool hostlist_collection_is_empty(const struct hostlist_collection_head *head); -typedef struct ipset4 { - struct cidr4 cidr; /* key */ - UT_hash_handle hh; /* makes this structure hashable */ -} ipset4; -typedef struct ipset6 { - struct cidr6 cidr; /* key */ - UT_hash_handle hh; /* makes this structure hashable */ -} ipset6; +struct kavl_bit_elem +{ + unsigned int bitlen; + uint8_t *data; + KAVL_HEAD(struct kavl_bit_elem) head; +}; + +struct kavl_bit_elem *kavl_bit_get(const struct kavl_bit_elem *hdr, const void *data, unsigned int bitlen); +struct kavl_bit_elem *kavl_bit_add(struct kavl_bit_elem **hdr, void *data, unsigned int bitlen, size_t struct_size); +void kavl_bit_delete(struct kavl_bit_elem **hdr, const void *data, unsigned int bitlen); +void kavl_bit_destroy(struct kavl_bit_elem **hdr); + // combined ipset ipv4 and ipv6 typedef struct ipset { - ipset4 *ips4; - ipset6 *ips6; + struct kavl_bit_elem *ips4,*ips6; } ipset; #define IPSET_EMPTY(ips) (!(ips)->ips4 && !(ips)->ips6) -void ipset4Destroy(ipset4 **ipset); -bool ipset4Add(ipset4 **ipset, const struct in_addr *a, uint8_t preflen); -static inline bool ipset4AddCidr(ipset4 **ipset, const struct cidr4 *cidr) +bool ipset4Add(struct kavl_bit_elem **ipset, const struct in_addr *a, uint8_t preflen); +static inline bool ipset4AddCidr(struct kavl_bit_elem **ipset, const struct cidr4 *cidr) { return ipset4Add(ipset,&cidr->addr,cidr->preflen); } -bool ipset4Check(ipset4 *ipset, const struct in_addr *a, uint8_t preflen); -void ipset4Print(ipset4 *ipset); +bool ipset4Check(const struct kavl_bit_elem *ipset, const struct in_addr *a, uint8_t preflen); +void ipset4Print(struct kavl_bit_elem *ipset); -void ipset6Destroy(ipset6 **ipset); -bool ipset6Add(ipset6 **ipset, const struct in6_addr *a, uint8_t preflen); -static inline bool ipset6AddCidr(ipset6 **ipset, const struct cidr6 *cidr) +bool ipset6Add(struct 
kavl_bit_elem **ipset, const struct in6_addr *a, uint8_t preflen); +static inline bool ipset6AddCidr(struct kavl_bit_elem **ipset, const struct cidr6 *cidr) { return ipset6Add(ipset,&cidr->addr,cidr->preflen); } -bool ipset6Check(ipset6 *ipset, const struct in6_addr *a, uint8_t preflen); -void ipset6Print(ipset6 *ipset); +bool ipset6Check(const struct kavl_bit_elem *ipset, const struct in6_addr *a, uint8_t preflen); +void ipset6Print(struct kavl_bit_elem *ipset); void ipsetDestroy(ipset *ipset); void ipsetPrint(ipset *ipset); diff --git a/tpws/helpers.c b/tpws/helpers.c index 23b5273d..60328411 100644 --- a/tpws/helpers.c +++ b/tpws/helpers.c @@ -112,6 +112,17 @@ bool append_to_list_file(const char *filename, const char *s) return bOK; } +void expand_bits(void *target, const void *source, unsigned int source_bitlen, unsigned int target_bytelen) +{ + unsigned int target_bitlen = target_bytelen<<3; + unsigned int bitlen = target_bitlen>3; + + if ((target_bytelen-bytelen)>=1) memset(target+bytelen,0,target_bytelen-bytelen); + memcpy(target,source,bytelen); + if ((bitlen &= 7)) ((uint8_t*)target)[bytelen] = ((uint8_t*)source)[bytelen] & (~((1 << (8-bitlen)) - 1)); +} + void ntop46(const struct sockaddr *sa, char *str, size_t len) { if (!len) return; diff --git a/tpws/helpers.h b/tpws/helpers.h index e2f19658..fd57de47 100644 --- a/tpws/helpers.h +++ b/tpws/helpers.h @@ -29,6 +29,8 @@ bool str_ends_with(const char *s, const char *suffix); bool load_file(const char *filename,void *buffer,size_t *buffer_size); bool append_to_list_file(const char *filename, const char *s); +void expand_bits(void *target, const void *source, unsigned int source_bitlen, unsigned int target_bytelen); + void ntop46(const struct sockaddr *sa, char *str, size_t len); void ntop46_port(const struct sockaddr *sa, char *str, size_t len); void print_sockaddr(const struct sockaddr *sa); diff --git a/tpws/kavl.h b/tpws/kavl.h new file mode 100644 index 00000000..f3520fd7 --- /dev/null +++ b/tpws/kavl.h 
@@ -0,0 +1,400 @@ +/* The MIT License + + Copyright (c) 2018 by Attractive Chaos + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
+*/ + +/* An example: + +#include +#include +#include +#include "kavl.h" + +struct my_node { + char key; + KAVL_HEAD(struct my_node) head; +}; +#define my_cmp(p, q) (((q)->key < (p)->key) - ((p)->key < (q)->key)) +KAVL_INIT(my, struct my_node, head, my_cmp) + +int main(void) { + const char *str = "MNOLKQOPHIA"; // from wiki, except a duplicate + struct my_node *root = 0; + int i, l = strlen(str); + for (i = 0; i < l; ++i) { // insert in the input order + struct my_node *q, *p = malloc(sizeof(*p)); + p->key = str[i]; + q = kavl_insert(my, &root, p, 0); + if (p != q) free(p); // if already present, free + } + kavl_itr_t(my) itr; + kavl_itr_first(my, root, &itr); // place at first + do { // traverse + const struct my_node *p = kavl_at(&itr); + putchar(p->key); + free((void*)p); // free node + } while (kavl_itr_next(my, &itr)); + putchar('\n'); + return 0; +} +*/ + +#ifndef KAVL_H +#define KAVL_H + +#ifdef __STRICT_ANSI__ +#define inline __inline__ +#endif + +#define KAVL_MAX_DEPTH 64 + +#define kavl_size(head, p) ((p)? (p)->head.size : 0) +#define kavl_size_child(head, q, i) ((q)->head.p[(i)]? 
(q)->head.p[(i)]->head.size : 0) + +#define KAVL_HEAD(__type) \ + struct { \ + __type *p[2]; \ + signed char balance; /* balance factor */ \ + unsigned size; /* #elements in subtree */ \ + } + +#define __KAVL_FIND(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_find_##suf(const __type *root, const __type *x, unsigned *cnt_) { \ + const __type *p = root; \ + unsigned cnt = 0; \ + while (p != 0) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + if (cmp < 0) p = p->__head.p[0]; \ + else if (cmp > 0) p = p->__head.p[1]; \ + else break; \ + } \ + if (cnt_) *cnt_ = cnt; \ + return (__type*)p; \ + } + +#define __KAVL_ROTATE(suf, __type, __head) \ + /* one rotation: (a,(b,c)q)p => ((a,b)p,c)q */ \ + static inline __type *kavl_rotate1_##suf(__type *p, int dir) { /* dir=0 to left; dir=1 to right */ \ + int opp = 1 - dir; /* opposite direction */ \ + __type *q = p->__head.p[opp]; \ + unsigned size_p = p->__head.size; \ + p->__head.size -= q->__head.size - kavl_size_child(__head, q, dir); \ + q->__head.size = size_p; \ + p->__head.p[opp] = q->__head.p[dir]; \ + q->__head.p[dir] = p; \ + return q; \ + } \ + /* two consecutive rotations: (a,((b,c)r,d)q)p => ((a,b)p,(c,d)q)r */ \ + static inline __type *kavl_rotate2_##suf(__type *p, int dir) { \ + int b1, opp = 1 - dir; \ + __type *q = p->__head.p[opp], *r = q->__head.p[dir]; \ + unsigned size_x_dir = kavl_size_child(__head, r, dir); \ + r->__head.size = p->__head.size; \ + p->__head.size -= q->__head.size - size_x_dir; \ + q->__head.size -= size_x_dir + 1; \ + p->__head.p[opp] = r->__head.p[dir]; \ + r->__head.p[dir] = p; \ + q->__head.p[dir] = r->__head.p[opp]; \ + r->__head.p[opp] = q; \ + b1 = dir == 0? 
+1 : -1; \ + if (r->__head.balance == b1) q->__head.balance = 0, p->__head.balance = -b1; \ + else if (r->__head.balance == 0) q->__head.balance = p->__head.balance = 0; \ + else q->__head.balance = b1, p->__head.balance = 0; \ + r->__head.balance = 0; \ + return r; \ + } + +#define __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_insert_##suf(__type **root_, __type *x, unsigned *cnt_) { \ + unsigned char stack[KAVL_MAX_DEPTH]; \ + __type *path[KAVL_MAX_DEPTH]; \ + __type *bp, *bq; \ + __type *p, *q, *r = 0; /* _r_ is potentially the new root */ \ + int i, which = 0, top, b1, path_len; \ + unsigned cnt = 0; \ + bp = *root_, bq = 0; \ + /* find the insertion location */ \ + for (p = bp, q = bq, top = path_len = 0; p; q = p, p = p->__head.p[which]) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + if (cmp == 0) { \ + if (cnt_) *cnt_ = cnt; \ + return p; \ + } \ + if (p->__head.balance != 0) \ + bq = q, bp = p, top = 0; \ + stack[top++] = which = (cmp > 0); \ + path[path_len++] = p; \ + } \ + if (cnt_) *cnt_ = cnt; \ + x->__head.balance = 0, x->__head.size = 1, x->__head.p[0] = x->__head.p[1] = 0; \ + if (q == 0) *root_ = x; \ + else q->__head.p[which] = x; \ + if (bp == 0) return x; \ + for (i = 0; i < path_len; ++i) ++path[i]->__head.size; \ + for (p = bp, top = 0; p != x; p = p->__head.p[stack[top]], ++top) /* update balance factors */ \ + if (stack[top] == 0) --p->__head.balance; \ + else ++p->__head.balance; \ + if (bp->__head.balance > -2 && bp->__head.balance < 2) return x; /* no re-balance needed */ \ + /* re-balance */ \ + which = (bp->__head.balance < 0); \ + b1 = which == 0? 
+1 : -1; \ + q = bp->__head.p[1 - which]; \ + if (q->__head.balance == b1) { \ + r = kavl_rotate1_##suf(bp, which); \ + q->__head.balance = bp->__head.balance = 0; \ + } else r = kavl_rotate2_##suf(bp, which); \ + if (bq == 0) *root_ = r; \ + else bq->__head.p[bp != bq->__head.p[0]] = r; \ + return x; \ + } + +#define __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \ + __scope __type *kavl_erase_##suf(__type **root_, const __type *x, unsigned *cnt_) { \ + __type *p, *path[KAVL_MAX_DEPTH], fake; \ + unsigned char dir[KAVL_MAX_DEPTH]; \ + int i, d = 0, cmp; \ + unsigned cnt = 0; \ + fake.__head.p[0] = *root_, fake.__head.p[1] = 0; \ + if (cnt_) *cnt_ = 0; \ + if (x) { \ + for (cmp = -1, p = &fake; cmp; cmp = __cmp(x, p)) { \ + int which = (cmp > 0); \ + if (cmp > 0) cnt += kavl_size_child(__head, p, 0) + 1; \ + dir[d] = which; \ + path[d++] = p; \ + p = p->__head.p[which]; \ + if (p == 0) { \ + if (cnt_) *cnt_ = 0; \ + return 0; \ + } \ + } \ + cnt += kavl_size_child(__head, p, 0) + 1; /* because p==x is not counted */ \ + } else { \ + for (p = &fake, cnt = 1; p; p = p->__head.p[0]) \ + dir[d] = 0, path[d++] = p; \ + p = path[--d]; \ + } \ + if (cnt_) *cnt_ = cnt; \ + for (i = 1; i < d; ++i) --path[i]->__head.size; \ + if (p->__head.p[1] == 0) { /* ((1,.)2,3)4 => (1,3)4; p=2 */ \ + path[d-1]->__head.p[dir[d-1]] = p->__head.p[0]; \ + } else { \ + __type *q = p->__head.p[1]; \ + if (q->__head.p[0] == 0) { /* ((1,2)3,4)5 => ((1)2,4)5; p=3 */ \ + q->__head.p[0] = p->__head.p[0]; \ + q->__head.balance = p->__head.balance; \ + path[d-1]->__head.p[dir[d-1]] = q; \ + path[d] = q, dir[d++] = 1; \ + q->__head.size = p->__head.size - 1; \ + } else { /* ((1,((.,2)3,4)5)6,7)8 => ((1,(2,4)5)3,7)8; p=6 */ \ + __type *r; \ + int e = d++; /* backup _d_ */\ + for (;;) { \ + dir[d] = 0; \ + path[d++] = q; \ + r = q->__head.p[0]; \ + if (r->__head.p[0] == 0) break; \ + q = r; \ + } \ + r->__head.p[0] = p->__head.p[0]; \ + q->__head.p[0] = r->__head.p[1]; \ + r->__head.p[1] = 
p->__head.p[1]; \ + r->__head.balance = p->__head.balance; \ + path[e-1]->__head.p[dir[e-1]] = r; \ + path[e] = r, dir[e] = 1; \ + for (i = e + 1; i < d; ++i) --path[i]->__head.size; \ + r->__head.size = p->__head.size - 1; \ + } \ + } \ + while (--d > 0) { \ + __type *q = path[d]; \ + int which, other, b1 = 1, b2 = 2; \ + which = dir[d], other = 1 - which; \ + if (which) b1 = -b1, b2 = -b2; \ + q->__head.balance += b1; \ + if (q->__head.balance == b1) break; \ + else if (q->__head.balance == b2) { \ + __type *r = q->__head.p[other]; \ + if (r->__head.balance == -b1) { \ + path[d-1]->__head.p[dir[d-1]] = kavl_rotate2_##suf(q, which); \ + } else { \ + path[d-1]->__head.p[dir[d-1]] = kavl_rotate1_##suf(q, which); \ + if (r->__head.balance == 0) { \ + r->__head.balance = -b1; \ + q->__head.balance = b1; \ + break; \ + } else r->__head.balance = q->__head.balance = 0; \ + } \ + } \ + } \ + *root_ = fake.__head.p[0]; \ + return p; \ + } + +#define kavl_free(__type, __head, __root, __free) do { \ + __type *_p, *_q; \ + for (_p = __root; _p; _p = _q) { \ + if (_p->__head.p[0] == 0) { \ + _q = _p->__head.p[1]; \ + __free(_p); \ + } else { \ + _q = _p->__head.p[0]; \ + _p->__head.p[0] = _q->__head.p[1]; \ + _q->__head.p[1] = _p; \ + } \ + } \ + } while (0) + +#define __KAVL_ITR(suf, __scope, __type, __head, __cmp) \ + struct kavl_itr_##suf { \ + const __type *stack[KAVL_MAX_DEPTH], **top, *right; /* _right_ points to the right child of *top */ \ + }; \ + __scope void kavl_itr_first_##suf(const __type *root, struct kavl_itr_##suf *itr) { \ + const __type *p; \ + for (itr->top = itr->stack - 1, p = root; p; p = p->__head.p[0]) \ + *++itr->top = p; \ + itr->right = (*itr->top)->__head.p[1]; \ + } \ + __scope int kavl_itr_find_##suf(const __type *root, const __type *x, struct kavl_itr_##suf *itr) { \ + const __type *p = root; \ + itr->top = itr->stack - 1; \ + while (p != 0) { \ + int cmp; \ + cmp = __cmp(x, p); \ + if (cmp < 0) *++itr->top = p, p = p->__head.p[0]; \ + else if 
(cmp > 0) p = p->__head.p[1]; \ + else break; \ + } \ + if (p) { \ + *++itr->top = p; \ + itr->right = p->__head.p[1]; \ + return 1; \ + } else if (itr->top >= itr->stack) { \ + itr->right = (*itr->top)->__head.p[1]; \ + return 0; \ + } else return 0; \ + } \ + __scope int kavl_itr_next_##suf(struct kavl_itr_##suf *itr) { \ + for (;;) { \ + const __type *p; \ + for (p = itr->right, --itr->top; p; p = p->__head.p[0]) \ + *++itr->top = p; \ + if (itr->top < itr->stack) return 0; \ + itr->right = (*itr->top)->__head.p[1]; \ + return 1; \ + } \ + } + +/** + * Insert a node to the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param proot pointer to the root of the tree (in/out: root may change) + * @param x node to insert (in) + * @param cnt number of nodes smaller than or equal to _x_; can be NULL (out) + * + * @return _x_ if not present in the tree, or the node equal to x. + */ +#define kavl_insert(suf, proot, x, cnt) kavl_insert_##suf(proot, x, cnt) + +/** + * Find a node in the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param x node value to find (in) + * @param cnt number of nodes smaller than or equal to _x_; can be NULL (out) + * + * @return node equal to _x_ if present, or NULL if absent + */ +#define kavl_find(suf, root, x, cnt) kavl_find_##suf(root, x, cnt) + +/** + * Delete a node from the tree + * + * @param suf name suffix used in KAVL_INIT() + * @param proot pointer to the root of the tree (in/out: root may change) + * @param x node value to delete; if NULL, delete the first node (in) + * + * @return node removed from the tree if present, or NULL if absent + */ +#define kavl_erase(suf, proot, x, cnt) kavl_erase_##suf(proot, x, cnt) +#define kavl_erase_first(suf, proot) kavl_erase_##suf(proot, 0, 0) + +#define kavl_itr_t(suf) struct kavl_itr_##suf + +/** + * Place the iterator at the smallest object + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param 
itr iterator + */ +#define kavl_itr_first(suf, root, itr) kavl_itr_first_##suf(root, itr) + +/** + * Place the iterator at the object equal to or greater than the query + * + * @param suf name suffix used in KAVL_INIT() + * @param root root of the tree + * @param x query (in) + * @param itr iterator (out) + * + * @return 1 if find; 0 otherwise. kavl_at(itr) is NULL if and only if query is + * larger than all objects in the tree + */ +#define kavl_itr_find(suf, root, x, itr) kavl_itr_find_##suf(root, x, itr) + +/** + * Move to the next object in order + * + * @param itr iterator (modified) + * + * @return 1 if there is a next object; 0 otherwise + */ +#define kavl_itr_next(suf, itr) kavl_itr_next_##suf(itr) + +/** + * Return the pointer at the iterator + * + * @param itr iterator + * + * @return pointer if present; NULL otherwise + */ +#define kavl_at(itr) ((itr)->top < (itr)->stack? 0 : *(itr)->top) + +#define KAVL_INIT2(suf, __scope, __type, __head, __cmp) \ + __KAVL_FIND(suf, __scope, __type, __head, __cmp) \ + __KAVL_ROTATE(suf, __type, __head) \ + __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \ + __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \ + __KAVL_ITR(suf, __scope, __type, __head, __cmp) + +#define KAVL_INIT(suf, __type, __head, __cmp) \ + KAVL_INIT2(suf,, __type, __head, __cmp) + +#endif diff --git a/tpws/pools.c b/tpws/pools.c index 4b19ac75..3cd03341 100644 --- a/tpws/pools.c +++ b/tpws/pools.c @@ -260,121 +260,146 @@ bool hostlist_collection_is_empty(const struct hostlist_collection_head *head) } -void ipset4Destroy(ipset4 **ipset) +static int kavl_bit_cmp(const struct kavl_bit_elem *p, const struct kavl_bit_elem *q) { - ipset4 *elem, *tmp; - HASH_ITER(hh, *ipset, elem, tmp) + unsigned int bitlen = q->bitlen < p->bitlen ? 
q->bitlen : p->bitlen;
+	unsigned int df = bitlen & 7, bytes = bitlen >> 3;
+	int cmp = memcmp(p->data, q->data, bytes);
+
+	if (cmp || !df) return cmp;
+
+	uint8_t c1 = p->data[bytes] >> (8 - df);
+	uint8_t c2 = q->data[bytes] >> (8 - df);
+	return c1 < c2 ? -1 : c1 > c2 ? 1 : 0;
+}
+KAVL_INIT(kavl_bit, struct kavl_bit_elem, head, kavl_bit_cmp)
+
+static void kavl_bit_destroy_elem(struct kavl_bit_elem *e)
+{
+	if (e)
 	{
-		HASH_DEL(*ipset, elem);
-		free(elem);
+		free(e->data);
+		free(e);
 	}
 }
-bool ipset4Check(ipset4 *ipset, const struct in_addr *a, uint8_t preflen)
+void kavl_bit_delete(struct kavl_bit_elem **hdr, const void *data, unsigned int bitlen)
 {
-	uint32_t ip = ntohl(a->s_addr);
-	struct cidr4 cidr;
-	ipset4 *ips_found;
-
-	// zero alignment bytes
-	memset(&cidr,0,sizeof(cidr));
-	cidr.preflen = preflen+1;
-	do
-	{
-		cidr.preflen--;
-		cidr.addr.s_addr = htonl(ip & mask_from_preflen(cidr.preflen));
-		HASH_FIND(hh, ipset, &cidr, sizeof(cidr), ips_found);
-		if (ips_found) return true;
-	} while(cidr.preflen);
-
-	return false;
+	struct kavl_bit_elem temp = {
+		.bitlen = bitlen, .data = (uint8_t*)data
+	};
+	kavl_bit_destroy_elem(kavl_erase(kavl_bit, hdr, &temp, 0));
 }
-bool ipset4Add(ipset4 **ipset, const struct in_addr *a, uint8_t preflen)
+void kavl_bit_destroy(struct kavl_bit_elem **hdr)
+{
+	while (*hdr)
+	{
+		struct kavl_bit_elem *e = kavl_erase_first(kavl_bit, hdr);
+		if (!e) break;
+		kavl_bit_destroy_elem(e);
+	}
+	free(*hdr);
+}
+struct kavl_bit_elem *kavl_bit_add(struct kavl_bit_elem **hdr, void *data, unsigned int bitlen, size_t struct_size)
+{
+	if (!struct_size) struct_size=sizeof(struct kavl_bit_elem);
+
+	struct kavl_bit_elem *v, *e = calloc(1, struct_size);
+	if (!e) return 0;
+
+	e->bitlen = bitlen;
+	e->data = data;
+
+	v = kavl_insert(kavl_bit, hdr, e, 0);
+	while (e != v && e->bitlen < v->bitlen)
+	{
+		kavl_bit_delete(hdr, v->data, v->bitlen);
+		v = kavl_insert(kavl_bit, hdr, e, 0);
+	}
+	if (e != v) kavl_bit_destroy_elem(e);
+	return v;
+}
+struct kavl_bit_elem *kavl_bit_get(const struct kavl_bit_elem *hdr, const void *data, unsigned int bitlen)
+{
+	struct kavl_bit_elem temp = {
+		.bitlen = bitlen, .data = (uint8_t*)data
+	};
+	return kavl_find(kavl_bit,
hdr, &temp, 0); +} + +static bool ipset_kavl_add(struct kavl_bit_elem **ipset, const void *a, uint8_t preflen) +{ + uint8_t bytelen = (preflen+7)>>3; + uint8_t *abuf = malloc(bytelen); + if (!abuf) return false; + memcpy(abuf,a,bytelen); + if (!kavl_bit_add(ipset,abuf,preflen,0)) + { + free(abuf); + return false; + } + return true; +} + + +bool ipset4Check(const struct kavl_bit_elem *ipset, const struct in_addr *a, uint8_t preflen) +{ + return !!kavl_bit_get(ipset,a,preflen); +} +bool ipset4Add(struct kavl_bit_elem **ipset, const struct in_addr *a, uint8_t preflen) { if (preflen>32) return false; - - // avoid dups - if (ipset4Check(*ipset, a, preflen)) return true; // already included - - struct ipset4 *entry = calloc(1,sizeof(ipset4)); - if (!entry) return false; - - entry->cidr.addr.s_addr = htonl(ntohl(a->s_addr) & mask_from_preflen(preflen)); - entry->cidr.preflen = preflen; - oom = false; - HASH_ADD(hh, *ipset, cidr, sizeof(entry->cidr), entry); - if (oom) { free(entry); return false; } - - return true; + return ipset_kavl_add(ipset,a,preflen); } -void ipset4Print(ipset4 *ipset) +void ipset4Print(struct kavl_bit_elem *ipset) { - ipset4 *ips, *tmp; - HASH_ITER(hh, ipset , ips, tmp) - { - print_cidr4(&ips->cidr); - printf("\n"); - } -} + if (!ipset) return; -void ipset6Destroy(ipset6 **ipset) -{ - ipset6 *elem, *tmp; - HASH_ITER(hh, *ipset, elem, tmp) - { - HASH_DEL(*ipset, elem); - free(elem); - } -} -bool ipset6Check(ipset6 *ipset, const struct in6_addr *a, uint8_t preflen) -{ - struct cidr6 cidr; - ipset6 *ips_found; - - // zero alignment bytes - memset(&cidr,0,sizeof(cidr)); - cidr.preflen = preflen+1; + struct cidr4 c; + const struct kavl_bit_elem *elem; + kavl_itr_t(kavl_bit) itr; + kavl_itr_first(kavl_bit, ipset, &itr); do { - cidr.preflen--; - ip6_and(a, mask_from_preflen6(cidr.preflen), &cidr.addr); - HASH_FIND(hh, ipset, &cidr, sizeof(cidr), ips_found); - if (ips_found) return true; - } while(cidr.preflen); - - return false; -} -bool ipset6Add(ipset6 
**ipset, const struct in6_addr *a, uint8_t preflen) -{ - if (preflen>128) return false; - - // avoid dups - if (ipset6Check(*ipset, a, preflen)) return true; // already included - - struct ipset6 *entry = calloc(1,sizeof(ipset6)); - if (!entry) return false; - - ip6_and(a, mask_from_preflen6(preflen), &entry->cidr.addr); - entry->cidr.preflen = preflen; - oom = false; - HASH_ADD(hh, *ipset, cidr, sizeof(entry->cidr), entry); - if (oom) { free(entry); return false; } - - return true; -} -void ipset6Print(ipset6 *ipset) -{ - ipset6 *ips, *tmp; - HASH_ITER(hh, ipset , ips, tmp) - { - print_cidr6(&ips->cidr); + elem = kavl_at(&itr); + c.preflen = elem->bitlen; + expand_bits(&c.addr, elem->data, elem->bitlen, sizeof(c.addr)); + print_cidr4(&c); printf("\n"); } + while (kavl_itr_next(kavl_bit, &itr)); +} + +bool ipset6Check(const struct kavl_bit_elem *ipset, const struct in6_addr *a, uint8_t preflen) +{ + return !!kavl_bit_get(ipset,a,preflen); +} +bool ipset6Add(struct kavl_bit_elem **ipset, const struct in6_addr *a, uint8_t preflen) +{ + if (preflen>128) return false; + return ipset_kavl_add(ipset,a,preflen); +} +void ipset6Print(struct kavl_bit_elem *ipset) +{ + if (!ipset) return; + + struct cidr6 c; + const struct kavl_bit_elem *elem; + kavl_itr_t(kavl_bit) itr; + kavl_itr_first(kavl_bit, ipset, &itr); + do + { + elem = kavl_at(&itr); + c.preflen = elem->bitlen; + expand_bits(&c.addr, elem->data, elem->bitlen, sizeof(c.addr)); + print_cidr6(&c); + printf("\n"); + } + while (kavl_itr_next(kavl_bit, &itr)); } void ipsetDestroy(ipset *ipset) { - ipset4Destroy(&ipset->ips4); - ipset6Destroy(&ipset->ips6); + kavl_bit_destroy(&ipset->ips4); + kavl_bit_destroy(&ipset->ips6); } void ipsetPrint(ipset *ipset) { @@ -520,7 +545,7 @@ bool port_filters_deny_if_empty(struct port_filters_head *head) } - + struct blob_item *blob_collection_add(struct blob_collection_head *head) { struct blob_item *entry = calloc(1,sizeof(struct blob_item)); diff --git a/tpws/pools.h b/tpws/pools.h 
index e127a048..67e16123 100644 --- a/tpws/pools.h +++ b/tpws/pools.h @@ -13,6 +13,8 @@ #define HASH_FUNCTION HASH_BER #include "uthash.h" +#include "kavl.h" + #define HOSTLIST_POOL_FLAG_STRICT_MATCH 1 typedef struct hostlist_pool { @@ -76,39 +78,40 @@ struct hostlist_item *hostlist_collection_search(struct hostlist_collection_head bool hostlist_collection_is_empty(const struct hostlist_collection_head *head); -typedef struct ipset4 { - struct cidr4 cidr; /* key */ - UT_hash_handle hh; /* makes this structure hashable */ -} ipset4; -typedef struct ipset6 { - struct cidr6 cidr; /* key */ - UT_hash_handle hh; /* makes this structure hashable */ -} ipset6; +struct kavl_bit_elem +{ + unsigned int bitlen; + uint8_t *data; + KAVL_HEAD(struct kavl_bit_elem) head; +}; + +struct kavl_bit_elem *kavl_bit_get(const struct kavl_bit_elem *hdr, const void *data, unsigned int bitlen); +struct kavl_bit_elem *kavl_bit_add(struct kavl_bit_elem **hdr, void *data, unsigned int bitlen, size_t struct_size); +void kavl_bit_delete(struct kavl_bit_elem **hdr, const void *data, unsigned int bitlen); +void kavl_bit_destroy(struct kavl_bit_elem **hdr); + // combined ipset ipv4 and ipv6 typedef struct ipset { - ipset4 *ips4; - ipset6 *ips6; + struct kavl_bit_elem *ips4,*ips6; } ipset; #define IPSET_EMPTY(ips) (!(ips)->ips4 && !(ips)->ips6) -void ipset4Destroy(ipset4 **ipset); -bool ipset4Add(ipset4 **ipset, const struct in_addr *a, uint8_t preflen); -static inline bool ipset4AddCidr(ipset4 **ipset, const struct cidr4 *cidr) +bool ipset4Add(struct kavl_bit_elem **ipset, const struct in_addr *a, uint8_t preflen); +static inline bool ipset4AddCidr(struct kavl_bit_elem **ipset, const struct cidr4 *cidr) { return ipset4Add(ipset,&cidr->addr,cidr->preflen); } -bool ipset4Check(ipset4 *ipset, const struct in_addr *a, uint8_t preflen); -void ipset4Print(ipset4 *ipset); +bool ipset4Check(const struct kavl_bit_elem *ipset, const struct in_addr *a, uint8_t preflen); +void ipset4Print(struct kavl_bit_elem 
*ipset); -void ipset6Destroy(ipset6 **ipset); -bool ipset6Add(ipset6 **ipset, const struct in6_addr *a, uint8_t preflen); -static inline bool ipset6AddCidr(ipset6 **ipset, const struct cidr6 *cidr) +bool ipset6Add(struct kavl_bit_elem **ipset, const struct in6_addr *a, uint8_t preflen); +static inline bool ipset6AddCidr(struct kavl_bit_elem **ipset, const struct cidr6 *cidr) { return ipset6Add(ipset,&cidr->addr,cidr->preflen); } -bool ipset6Check(ipset6 *ipset, const struct in6_addr *a, uint8_t preflen); -void ipset6Print(ipset6 *ipset); +bool ipset6Check(const struct kavl_bit_elem *ipset, const struct in6_addr *a, uint8_t preflen); +void ipset6Print(struct kavl_bit_elem *ipset); void ipsetDestroy(ipset *ipset); void ipsetPrint(ipset *ipset);