refactor: enable formatting for files under lib

Dundar Göc
2021-10-12 17:23:50 +02:00
parent 29b718d04c
commit f98b8d2d44
6 changed files with 986 additions and 964 deletions

src/nvim/lib/kbtree.h

@@ -1,5 +1,3 @@
-// uncrustify:off
 /*-
  * Copyright 1997-1999, 2001, John-Mark Gurney.
  *           2008-2009, Attractive Chaos <attractor@live.co.uk>
@@ -36,386 +34,419 @@
#ifndef NVIM_LIB_KBTREE_H
#define NVIM_LIB_KBTREE_H

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "nvim/memory.h"

#define KB_MAX_DEPTH 64

#define __KB_KEY(type, x) (x->key)
#define __KB_PTR(btr, x) (x->ptr)

#define __KB_TREE_T(name, key_t, T) \
  typedef struct kbnode_##name##_s kbnode_##name##_t; \
  struct kbnode_##name##_s { \
    int32_t n; \
    bool is_internal; \
    key_t key[2*T-1]; \
    kbnode_##name##_t *ptr[]; \
  } ; \
  \
  typedef struct { \
    kbnode_##name##_t *root; \
    int n_keys, n_nodes; \
  } kbtree_##name##_t; \
  \
  typedef struct { \
    kbnode_##name##_t *x; \
    int i; \
  } kbpos_##name##_t; \
  typedef struct { \
    kbpos_##name##_t stack[KB_MAX_DEPTH], *p; \
  } kbitr_##name##_t; \

#define __kb_destroy(kbnode_t, b) do { \
    int i; \
    unsigned int max = 8; \
    kbnode_t *x, **top, **stack = 0; \
    if (b->root) { \
      top = stack = (kbnode_t **)xcalloc(max, sizeof(kbnode_t *)); \
      *top++ = (b)->root; \
      while (top != stack) { \
        x = *--top; \
        if (x->is_internal == 0) { XFREE_CLEAR(x); continue; } \
        for (i = 0; i <= x->n; ++i) \
          if (__KB_PTR(b, x)[i]) { \
            if (top - stack == (int)max) { \
              max <<= 1; \
              stack = (kbnode_t **)xrealloc(stack, max * sizeof(kbnode_t *)); \
              top = stack + (max>>1); \
            } \
            *top++ = __KB_PTR(b, x)[i]; \
          } \
        XFREE_CLEAR(x); \
      } \
    } \
    XFREE_CLEAR(stack); \
  } while (0)

#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, \
                                         int *r) \
  { \
    int tr, *rr, begin = 0, end = x->n; \
    if (x->n == 0) return -1; \
    rr = r? r : &tr; \
    while (begin < end) { \
      int mid = (begin + end) >> 1; \
      if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
      else end = mid; \
    } \
    if (begin == x->n) { *rr = 1; return x->n - 1; } \
    if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
    return begin; \
  }

#define __KB_GET(name, key_t, kbnode_t) \
  static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      return 0; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
      if (x->is_internal == 0) return 0; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
    return 0; \
  } \
  static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_getp_##name(b, &k); \
  }

#define __KB_INTERVAL(name, key_t, kbnode_t) \
  static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, \
                                         key_t **upper) \
  { \
    if (!b->root) { \
      return; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    *lower = *upper = 0; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) { \
        *lower = *upper = &__KB_KEY(key_t, x)[i]; \
        return; \
      } \
      if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
      if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
      if (x->is_internal == 0) return; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
  } \
  static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
  { \
    kb_intervalp_##name(b, &k, lower, upper); \
  }

#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  /* x must be an internal node */ \
  static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
  { \
    kbnode_t *z; \
    z = (kbnode_t *)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
    ++b->n_nodes; \
    z->is_internal = y->is_internal; \
    z->n = T - 1; \
    memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
    if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void *) * T); \
    y->n = T - 1; \
    memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, \
                                              x)[i + 1], sizeof(void *) * (unsigned int)(x->n - i)); \
    __KB_PTR(b, x)[i + 1] = z; \
    memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], \
            sizeof(key_t) * (unsigned int)(x->n - i)); \
    __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
    ++x->n; \
  } \
  static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
  { \
    int i = x->n - 1; \
    key_t *ret; \
    if (x->is_internal == 0) { \
      i = __kb_getp_aux_##name(x, k, 0); \
      if (i != x->n - 1) \
        memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, \
                                                      x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      ret = &__KB_KEY(key_t, x)[i + 1]; \
      *ret = *k; \
      ++x->n; \
    } else { \
      i = __kb_getp_aux_##name(x, k, 0) + 1; \
      if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
        __kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
        if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
      } \
      ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
    } \
    return ret; \
  } \
  static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      b->root = (kbnode_t *)xcalloc(1, ILEN); \
      ++b->n_nodes; \
    } \
    kbnode_t *r, *s; \
    ++b->n_keys; \
    r = b->root; \
    if (r->n == 2 * T - 1) { \
      ++b->n_nodes; \
      s = (kbnode_t *)xcalloc(1, ILEN); \
      b->root = s; s->is_internal = 1; s->n = 0; \
      __KB_PTR(b, s)[0] = r; \
      __kb_split_##name(b, s, 0, r); \
      r = s; \
    } \
    return __kb_putp_aux_##name(b, r, k); \
  } \
  static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
  { \
    kb_putp_##name(b, &k); \
  }

#define __KB_DEL(name, key_t, kbnode_t, T) \
  static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, \
                                           int s) \
  { \
    int yn, zn, i, r = 0; \
    kbnode_t *xp, *y, *z; \
    key_t kp; \
    if (x == 0) return *k; \
    if (s) { /* s can only be 0, 1 or 2 */ \
      r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
      i = s == 1? x->n - 1 : -1; \
    } else i = __kb_getp_aux_##name(x, k, &r); \
    if (x->is_internal == 0) { \
      if (s == 2) ++i; \
      kp = __KB_KEY(key_t, x)[i]; \
      memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                x)[i + 1], \
              (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      --x->n; \
      return kp; \
    } \
    if (r == 0) { \
      if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
        xp = __KB_PTR(b, x)[i]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
        return kp; \
      } else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
        xp = __KB_PTR(b, x)[i + 1]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
        return kp; \
      } else if (yn == T - 1 && zn == T - 1) { \
        y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
        __KB_KEY(key_t, y)[y->n++] = *k; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
                                                                    z), \
                                    (unsigned int)(z->n + 1) * sizeof(void *)); \
        y->n += z->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                  x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
                                                  x)[i + 2], \
                (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(z); \
        return __kb_delp_aux_##name(b, y, k, s); \
      } \
    } \
    ++i; \
    if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
      if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
        memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, \
                                                                   xp), \
                                     (unsigned int)(xp->n + 1) * sizeof(void *)); \
        __KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
        __KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
        if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
        --y->n; ++xp->n; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
        if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
        --y->n; \
        memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
        if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, \
                                                              y)[1], \
                                    (unsigned int)(y->n + 1) * sizeof(void *)); \
      } else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
        __KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), \
                (unsigned int)xp->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
                                                                    xp), \
                                    (unsigned int)(xp->n + 1) * sizeof(void *)); \
        y->n += xp->n; \
        memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, \
                                                      x)[i], \
                (unsigned int)(x->n - i) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, \
                                              x)[i + 1], (unsigned int)(x->n - i) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(xp); \
        xp = y; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), \
                (unsigned int)y->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), \
                                     (unsigned int)(y->n + 1) * sizeof(void *)); \
        xp->n += y->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                  x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
                                                  x)[i + 2], \
                (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(y); \
      } \
    } \
    return __kb_delp_aux_##name(b, xp, k, s); \
  } \
  static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    kbnode_t *x; \
    key_t ret; \
    ret = __kb_delp_aux_##name(b, b->root, k, 0); \
    --b->n_keys; \
    if (b->root->n == 0 && b->root->is_internal) { \
      --b->n_nodes; \
      x = b->root; \
      b->root = __KB_PTR(b, x)[0]; \
      XFREE_CLEAR(x); \
    } \
    return ret; \
  } \
  static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_delp_##name(b, &k); \
  }

#define __KB_ITR(name, key_t, kbnode_t) \
  static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    itr->p = NULL; \
    if (b->n_keys == 0) return; \
    itr->p = itr->stack; \
    itr->p->x = b->root; itr->p->i = 0; \
    while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
      kbnode_t *x = itr->p->x; \
      ++itr->p; \
      itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
    } \
  } \
  static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      ++itr->p->i; \
      assert(itr->p->i <= 21); \
      while (itr->p->x && itr->p->i <= itr->p->x->n) { \
        itr->p[1].i = 0; \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
    } \
  } \
  static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      while (itr->p->x && itr->p->i >= 0) { \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      --itr->p->i; \
      if (itr->p->x && itr->p->i >= 0) return 1; \
    } \
  } \
  static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, \
                                       kbitr_##name##_t *itr) \
  { \
    if (b->n_keys == 0) { \
      itr->p = NULL; \
      return 0; \
    } \
    int i, r = 0; \
    itr->p = itr->stack; \
    itr->p->x = b->root; \
    while (itr->p->x) { \
      i = __kb_getp_aux_##name(itr->p->x, k, &r); \
      itr->p->i = i; \
      if (i >= 0 && r == 0) return 1; \
      ++itr->p->i; \
      assert(itr->p->i <= 21); \
      itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
      ++itr->p; \
    } \
    itr->p->i = 0; \
    return 0; \
  } \
  static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
  { \
    return kb_itr_getp_##name(b, &k, itr); \
  } \
  static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    key_t k = kb_itr_key(itr); \
    kb_delp_##name(b, &k); \
    kb_itr_getp_##name(b, &k, itr); \
  }

#define KBTREE_INIT(name, key_t, __cmp, T) \
  KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, \
                   (sizeof(kbnode_##name##_t)+(2*T)*sizeof(void *)))

#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_TREE_T(name, key_t, T) \
  __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  __KB_GET(name, key_t, kbnode_t) \
  __KB_INTERVAL(name, key_t, kbnode_t) \
  __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_DEL(name, key_t, kbnode_t, T) \
  __KB_ITR(name, key_t, kbnode_t)

#define KB_DEFAULT_SIZE 512
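
As a point of reference for the macros above — not part of this commit — here is a minimal usage sketch. The tree name "ii", the int_cmp comparator, the bucket size 10 and the include path "nvim/lib/kbtree.h" are illustrative assumptions; kb_itr_key() is defined elsewhere in this header (it is what kb_del_itr_##name uses above).

#include <stdio.h>

#include "nvim/lib/kbtree.h"

// strcmp()-style three-way comparison for int keys (illustrative helper).
#define int_cmp(a, b) (((a) > (b)) - ((a) < (b)))

// Generates kbtree_ii_t, kbitr_ii_t and kb_put_ii()/kb_get_ii()/kb_itr_*_ii().
KBTREE_INIT(ii, int, int_cmp, 10)

int main(void)
{
  kbtree_ii_t tree = { 0 };   // empty tree: NULL root, zero keys and nodes
  kbtree_ii_t *b = &tree;

  for (int k = 5; k >= 1; k--) {   // insert 50, 40, ..., 10
    kb_put_ii(b, k * 10);
  }

  int *hit = kb_get_ii(b, 30);     // NULL when the key is absent
  printf("30 is %s\n", hit ? "present" : "absent");

  // In-order traversal: keys come back sorted regardless of insertion order.
  kbitr_ii_t itr;
  kb_itr_first_ii(b, &itr);
  do {
    printf("%d ", kb_itr_key(&itr));
  } while (kb_itr_next_ii(b, &itr));
  printf("\n");

  __kb_destroy(kbnode_ii_t, b);    // frees every node reachable from the root
  return 0;
}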

src/nvim/lib/khash.h

@@ -1,5 +1,3 @@
// uncrustify:off
/* The MIT License /* The MIT License
Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk> Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
@@ -23,14 +21,14 @@
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
/* /*
Example: Example:
#include "nvim/khash.h" #include "nvim/khash.h"
KHASH_MAP_INIT_INT(32, char) KHASH_MAP_INIT_INT(32, char)
int main() { int main() {
int ret, is_missing; int ret, is_missing;
khiter_t k; khiter_t k;
khash_t(32) *h = kh_init(32); khash_t(32) *h = kh_init(32);
@@ -44,99 +42,98 @@ int main() {
if (kh_exist(h, k)) kh_value(h, k) = 1; if (kh_exist(h, k)) kh_value(h, k) = 1;
kh_destroy(32, h); kh_destroy(32, h);
return 0; return 0;
} }
*/ */
/* /*
2013-05-02 (0.2.8): 2013-05-02 (0.2.8):
* Use quadratic probing. When the capacity is power of 2, stepping function * Use quadratic probing. When the capacity is power of 2, stepping function
i*(i+1)/2 guarantees to traverse each bucket. It is better than double i*(i+1)/2 guarantees to traverse each bucket. It is better than double
hashing on cache performance and is more robust than linear probing. hashing on cache performance and is more robust than linear probing.
In theory, double hashing should be more robust than quadratic probing. In theory, double hashing should be more robust than quadratic probing.
However, my implementation is probably not for large hash tables, because However, my implementation is probably not for large hash tables, because
the second hash function is closely tied to the first hash function, the second hash function is closely tied to the first hash function,
which reduce the effectiveness of double hashing. which reduce the effectiveness of double hashing.
Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
2011-12-29 (0.2.7): 2011-12-29 (0.2.7):
* Minor code clean up; no actual effect. * Minor code clean up; no actual effect.
2011-09-16 (0.2.6): 2011-09-16 (0.2.6):
* The capacity is a power of 2. This seems to dramatically improve the * The capacity is a power of 2. This seems to dramatically improve the
speed for simple keys. Thank Zilong Tan for the suggestion. Reference: speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
- http://code.google.com/p/ulib/ - http://code.google.com/p/ulib/
- http://nothings.org/computer/judy/ - http://nothings.org/computer/judy/
* Allow to optionally use linear probing which usually has better * Allow to optionally use linear probing which usually has better
performance for random input. Double hashing is still the default as it performance for random input. Double hashing is still the default as it
is more robust to certain non-random input. is more robust to certain non-random input.
* Added Wang's integer hash function (not used by default). This hash * Added Wang's integer hash function (not used by default). This hash
function is more robust to certain non-random input. function is more robust to certain non-random input.
2011-02-14 (0.2.5): 2011-02-14 (0.2.5):
* Allow to declare global functions. * Allow to declare global functions.
2009-09-26 (0.2.4): 2009-09-26 (0.2.4):
* Improve portability * Improve portability
2008-09-19 (0.2.3): 2008-09-19 (0.2.3):
* Corrected the example * Corrected the example
* Improved interfaces * Improved interfaces
2008-09-11 (0.2.2): 2008-09-11 (0.2.2):
* Improved speed a little in kh_put() * Improved speed a little in kh_put()
2008-09-10 (0.2.1): 2008-09-10 (0.2.1):
* Added kh_clear() * Added kh_clear()
* Fixed a compiling error * Fixed a compiling error
2008-09-02 (0.2.0): 2008-09-02 (0.2.0):
* Changed to token concatenation which increases flexibility. * Changed to token concatenation which increases flexibility.
2008-08-31 (0.1.2): 2008-08-31 (0.1.2):
* Fixed a bug in kh_get(), which has not been tested previously. * Fixed a bug in kh_get(), which has not been tested previously.
2008-08-31 (0.1.1): 2008-08-31 (0.1.1):
* Added destructor * Added destructor
*/ */
#ifndef NVIM_LIB_KHASH_H #ifndef NVIM_LIB_KHASH_H
#define NVIM_LIB_KHASH_H #define NVIM_LIB_KHASH_H
/*! /*!
@header @header
Generic hash table library. Generic hash table library.
*/ */
#define AC_VERSION_KHASH_H "0.2.8" #define AC_VERSION_KHASH_H "0.2.8"
#include <stdlib.h>
#include <string.h>
#include <limits.h> #include <limits.h>
#include <stdint.h> #include <stdint.h>
#include <stdlib.h>
#include "nvim/memory.h" #include <string.h>
#include "nvim/func_attr.h" #include "nvim/func_attr.h"
#include "nvim/memory.h"
/* compiler specific configuration */ // compiler specific configuration
#if UINT_MAX == 0xffffffffu #if UINT_MAX == 0xffffffffu
typedef unsigned int khint32_t; typedef unsigned int khint32_t;
@@ -151,9 +148,9 @@ typedef unsigned long long khint64_t;
#endif #endif
#ifdef _MSC_VER #ifdef _MSC_VER
#define kh_inline __inline # define kh_inline __inline
#else #else
#define kh_inline inline # define kh_inline inline
#endif #endif
typedef khint32_t khint_t; typedef khint32_t khint_t;
@@ -170,31 +167,32 @@ typedef khint_t khiter_t;
#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4) #define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
#ifndef kroundup32 #ifndef kroundup32
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) # define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, \
++(x))
#endif #endif
#ifndef kcalloc #ifndef kcalloc
#define kcalloc(N,Z) xcalloc(N,Z) # define kcalloc(N, Z) xcalloc(N, Z)
#endif #endif
#ifndef kmalloc #ifndef kmalloc
#define kmalloc(Z) xmalloc(Z) # define kmalloc(Z) xmalloc(Z)
#endif #endif
#ifndef krealloc #ifndef krealloc
#define krealloc(P,Z) xrealloc(P,Z) # define krealloc(P, Z) xrealloc(P, Z)
#endif #endif
#ifndef kfree #ifndef kfree
#define kfree(P) XFREE_CLEAR(P) # define kfree(P) XFREE_CLEAR(P)
#endif #endif
#define __ac_HASH_UPPER 0.77 #define __ac_HASH_UPPER 0.77
#define __KHASH_TYPE(name, khkey_t, khval_t) \ #define __KHASH_TYPE(name, khkey_t, khval_t) \
typedef struct { \ typedef struct { \
khint_t n_buckets, size, n_occupied, upper_bound; \ khint_t n_buckets, size, n_occupied, upper_bound; \
khint32_t *flags; \ khint32_t *flags; \
khkey_t *keys; \ khkey_t *keys; \
khval_t *vals; \ khval_t *vals; \
} kh_##name##_t; } kh_##name##_t;
#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \ #define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
extern kh_##name##_t *kh_init_##name(void); \ extern kh_##name##_t *kh_init_##name(void); \
@@ -209,12 +207,12 @@ typedef khint_t khiter_t;
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, \ #define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, \
__hash_equal) \ __hash_equal) \
SCOPE kh_##name##_t *kh_init_##name(void) \ SCOPE kh_##name##_t *kh_init_##name(void) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE kh_##name##_t *kh_init_##name(void) { \ SCOPE kh_##name##_t *kh_init_##name(void) { \
return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \ return (kh_##name##_t *)kcalloc(1, sizeof(kh_##name##_t)); \
} \ } \
SCOPE void kh_dealloc_##name(kh_##name##_t *h) \ SCOPE void kh_dealloc_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE void kh_dealloc_##name(kh_##name##_t *h) \ SCOPE void kh_dealloc_##name(kh_##name##_t *h) \
{ \ { \
kfree(h->keys); \ kfree(h->keys); \
@@ -222,7 +220,7 @@ typedef khint_t khiter_t;
kfree(h->vals); \ kfree(h->vals); \
} \ } \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \ SCOPE void kh_destroy_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \ SCOPE void kh_destroy_##name(kh_##name##_t *h) \
{ \ { \
if (h) { \ if (h) { \
@@ -231,7 +229,7 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
SCOPE void kh_clear_##name(kh_##name##_t *h) \ SCOPE void kh_clear_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE void kh_clear_##name(kh_##name##_t *h) \ SCOPE void kh_clear_##name(kh_##name##_t *h) \
{ \ { \
if (h && h->flags) { \ if (h && h->flags) { \
@@ -240,7 +238,7 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
{ \ { \
if (h->n_buckets) { \ if (h->n_buckets) { \
@@ -261,10 +259,10 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
{ /* This function uses 0.25*n_buckets bytes of working space instead of */ \ { /* This function uses 0.25*n_buckets bytes of working space instead of */ \
/* [sizeof(key_t+val_t)+.25]*n_buckets. */ \ /* [sizeof(key_t+val_t)+.25]*n_buckets. */ \
khint32_t *new_flags = 0; \ khint32_t *new_flags = 0; \
khint_t j = 1; \ khint_t j = 1; \
{ \ { \
@@ -275,24 +273,23 @@ typedef khint_t khiter_t;
/* requested size is too small */ \ /* requested size is too small */ \
if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) { \ if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) { \
j = 0; \ j = 0; \
} else { /* hash table size to be changed (shrink or expand); rehash */ \ } else { /* hash table size to be changed (shrink or expand); rehash */ \
new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) \ new_flags = (khint32_t *)kmalloc(__ac_fsize(new_n_buckets) \
* sizeof(khint32_t)); \ * sizeof(khint32_t)); \
memset(new_flags, 0xaa, \ memset(new_flags, 0xaa, \
__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \ if (h->n_buckets < new_n_buckets) { /* expand */ \
khkey_t *new_keys = (khkey_t*)krealloc( \ khkey_t *new_keys = (khkey_t *)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
(void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
h->keys = new_keys; \ h->keys = new_keys; \
if (kh_is_map) { \ if (kh_is_map) { \
khval_t *new_vals = (khval_t*)krealloc( \ khval_t *new_vals = \
(void *)h->vals, new_n_buckets * sizeof(khval_t)); \ (khval_t *)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
h->vals = new_vals; \ h->vals = new_vals; \
} \ } \
} /* otherwise shrink */ \ } /* otherwise shrink */ \
} \ } \
} \ } \
if (j) { /* rehashing is needed */ \ if (j) { /* rehashing is needed */ \
for (j = 0; j != h->n_buckets; ++j) { \ for (j = 0; j != h->n_buckets; ++j) { \
if (__ac_iseither(h->flags, j) == 0) { \ if (__ac_iseither(h->flags, j) == 0) { \
khkey_t key = h->keys[j]; \ khkey_t key = h->keys[j]; \
@@ -326,7 +323,7 @@ typedef khint_t khiter_t;
} \ } \
/* mark it as deleted in the old hash table */ \ /* mark it as deleted in the old hash table */ \
__ac_set_isdel_true(h->flags, i); \ __ac_set_isdel_true(h->flags, i); \
} else { /* write the element and jump out of the loop */ \ } else { /* write the element and jump out of the loop */ \
h->keys[i] = key; \ h->keys[i] = key; \
if (kh_is_map) { \ if (kh_is_map) { \
h->vals[i] = val; \ h->vals[i] = val; \
@@ -336,15 +333,15 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
} \ } \
if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \ if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
h->keys = (khkey_t*)krealloc((void *)h->keys, \ h->keys = (khkey_t *)krealloc((void *)h->keys, \
new_n_buckets * sizeof(khkey_t)); \ new_n_buckets * sizeof(khkey_t)); \
if (kh_is_map) { \ if (kh_is_map) { \
h->vals = (khval_t*)krealloc((void *)h->vals, \ h->vals = (khval_t *)krealloc((void *)h->vals, \
new_n_buckets * sizeof(khval_t)); \ new_n_buckets * sizeof(khval_t)); \
} \ } \
} \ } \
kfree(h->flags); /* free the working space */ \ kfree(h->flags); /* free the working space */ \
h->flags = new_flags; \ h->flags = new_flags; \
h->n_buckets = new_n_buckets; \ h->n_buckets = new_n_buckets; \
h->n_occupied = h->size; \ h->n_occupied = h->size; \
@@ -352,25 +349,25 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
{ \ { \
khint_t x; \ khint_t x; \
if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \ if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
if (h->n_buckets > (h->size << 1)) { \ if (h->n_buckets > (h->size << 1)) { \
kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \ kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \
} else { \ } else { \
kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \ kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \
} \ } \
} /* TODO: implement automatically shrinking; */ \ } /* TODO: implement automatically shrinking; */ \
/* resize() already support shrinking */ \ /* resize() already support shrinking */ \
{ \ { \
khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \ khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
x = site = h->n_buckets; \ x = site = h->n_buckets; \
k = __hash_func(key); \ k = __hash_func(key); \
i = k & mask; \ i = k & mask; \
if (__ac_isempty(h->flags, i)) { \ if (__ac_isempty(h->flags, i)) { \
x = i; /* for speed up */ \ x = i; /* for speed up */ \
} else { \ } else { \
last = i; \ last = i; \
while (!__ac_isempty(h->flags, i) \ while (!__ac_isempty(h->flags, i) \
@@ -394,24 +391,24 @@ typedef khint_t khiter_t;
} \ } \
} \ } \
} \ } \
if (__ac_isempty(h->flags, x)) { /* not present at all */ \ if (__ac_isempty(h->flags, x)) { /* not present at all */ \
h->keys[x] = key; \ h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \ __ac_set_isboth_false(h->flags, x); \
h->size++; \ h->size++; \
h->n_occupied++; \ h->n_occupied++; \
*ret = 1; \ *ret = 1; \
} else if (__ac_isdel(h->flags, x)) { /* deleted */ \ } else if (__ac_isdel(h->flags, x)) { /* deleted */ \
h->keys[x] = key; \ h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \ __ac_set_isboth_false(h->flags, x); \
h->size++; \ h->size++; \
*ret = 2; \ *ret = 2; \
} else { \ } else { \
*ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \ *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
} \ } \
return x; \ return x; \
} \ } \
SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \ SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
REAL_FATTR_UNUSED; \ REAL_FATTR_UNUSED; \
SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \ SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
{ \ { \
if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \ if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
@@ -420,240 +417,242 @@ typedef khint_t khiter_t;
} \ } \
} }
#define KHASH_DECLARE(name, khkey_t, khval_t) \ #define KHASH_DECLARE(name, khkey_t, khval_t) \
__KHASH_TYPE(name, khkey_t, khval_t) \ __KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_PROTOTYPES(name, khkey_t, khval_t) __KHASH_PROTOTYPES(name, khkey_t, khval_t)
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ #define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
__KHASH_TYPE(name, khkey_t, khval_t) \ __KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ #define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
KHASH_INIT2(name, static kh_inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) KHASH_INIT2(name, static kh_inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
/* --- BEGIN OF HASH FUNCTIONS --- */ // --- BEGIN OF HASH FUNCTIONS ---
/*! @function /*! @function
@abstract Integer hash function @abstract Integer hash function
@param key The integer [khint32_t] @param key The integer [khint32_t]
@return The hash value [khint_t] @return The hash value [khint_t]
*/ */
#define kh_int_hash_func(key) (khint32_t)(key) #define kh_int_hash_func(key) (khint32_t)(key)
/*! @function /*! @function
@abstract Integer comparison function @abstract Integer comparison function
*/ */
#define kh_int_hash_equal(a, b) ((a) == (b)) #define kh_int_hash_equal(a, b) ((a) == (b))
/*! @function /*! @function
@abstract 64-bit integer hash function @abstract 64-bit integer hash function
@param key The integer [khint64_t] @param key The integer [khint64_t]
@return The hash value [khint_t] @return The hash value [khint_t]
*/ */
#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) #define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
/*! @function /*! @function
@abstract 64-bit integer comparison function @abstract 64-bit integer comparison function
*/ */
#define kh_int64_hash_equal(a, b) ((a) == (b)) #define kh_int64_hash_equal(a, b) ((a) == (b))
/*! @function /*! @function
@abstract const char* hash function @abstract const char* hash function
@param s Pointer to a null terminated string @param s Pointer to a null terminated string
@return The hash value @return The hash value
*/ */
static kh_inline khint_t __ac_X31_hash_string(const char *s) static kh_inline khint_t __ac_X31_hash_string(const char *s)
{ {
khint_t h = (khint_t)*s; khint_t h = (khint_t)*s;
if (h) for (++s ; *s; ++s) h = (h << 5) - h + (uint8_t)*s; if (h) {
return h; for (++s ; *s; ++s) { h = (h << 5) - h + (uint8_t)*s; }
}
return h;
} }
/*! @function /*! @function
@abstract Another interface to const char* hash function @abstract Another interface to const char* hash function
@param key Pointer to a null terminated string [const char*] @param key Pointer to a null terminated string [const char*]
@return The hash value [khint_t] @return The hash value [khint_t]
*/ */
#define kh_str_hash_func(key) __ac_X31_hash_string(key) #define kh_str_hash_func(key) __ac_X31_hash_string(key)
/*! @function /*! @function
@abstract Const char* comparison function @abstract Const char* comparison function
*/ */
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0) #define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
static kh_inline khint_t __ac_Wang_hash(khint_t key) static kh_inline khint_t __ac_Wang_hash(khint_t key)
{ {
key += ~(key << 15); key += ~(key << 15);
key ^= (key >> 10); key ^= (key >> 10);
key += (key << 3); key += (key << 3);
key ^= (key >> 6); key ^= (key >> 6);
key += ~(key << 11); key += ~(key << 11);
key ^= (key >> 16); key ^= (key >> 16);
return key; return key;
} }
#define kh_int_hash_func2(k) __ac_Wang_hash((khint_t)key) #define kh_int_hash_func2(k) __ac_Wang_hash((khint_t)key)
/* --- END OF HASH FUNCTIONS --- */ // --- END OF HASH FUNCTIONS ---
/* Other convenient macros... */ // Other convenient macros...
/*! /*!
@abstract Type of the hash table. @abstract Type of the hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
*/ */
#define khash_t(name) kh_##name##_t #define khash_t(name) kh_##name##_t
/*! @function /*! @function
@abstract Initiate a hash table. @abstract Initiate a hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@return Pointer to the hash table [khash_t(name)*] @return Pointer to the hash table [khash_t(name)*]
*/ */
#define kh_init(name) kh_init_##name() #define kh_init(name) kh_init_##name()
/*! @function /*! @function
@abstract Destroy a hash table. @abstract Destroy a hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
*/ */
#define kh_destroy(name, h) kh_destroy_##name(h) #define kh_destroy(name, h) kh_destroy_##name(h)
/*! @function /*! @function
@abstract Free memory referenced directly inside a hash table. @abstract Free memory referenced directly inside a hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
*/ */
#define kh_dealloc(name, h) kh_dealloc_##name(h) #define kh_dealloc(name, h) kh_dealloc_##name(h)
/*! @function /*! @function
@abstract Reset a hash table without deallocating memory. @abstract Reset a hash table without deallocating memory.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
*/ */
#define kh_clear(name, h) kh_clear_##name(h) #define kh_clear(name, h) kh_clear_##name(h)
/*! @function /*! @function
@abstract Resize a hash table. @abstract Resize a hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param s New size [khint_t] @param s New size [khint_t]
*/ */
#define kh_resize(name, h, s) kh_resize_##name(h, s) #define kh_resize(name, h, s) kh_resize_##name(h, s)
/*! @function /*! @function
@abstract Insert a key to the hash table. @abstract Insert a key to the hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys] @param k Key [type of keys]
@param r Extra return code: -1 if the operation failed; @param r Extra return code: -1 if the operation failed;
0 if the key is present in the hash table; 0 if the key is present in the hash table;
1 if the bucket is empty (never used); 2 if the element in 1 if the bucket is empty (never used); 2 if the element in
the bucket has been deleted [int*] the bucket has been deleted [int*]
@return Iterator to the inserted element [khint_t] @return Iterator to the inserted element [khint_t]
*/ */
#define kh_put(name, h, k, r) kh_put_##name(h, k, r) #define kh_put(name, h, k, r) kh_put_##name(h, k, r)
/*! @function /*! @function
@abstract Retrieve a key from the hash table. @abstract Retrieve a key from the hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys] @param k Key [type of keys]
@return Iterator to the found element, or kh_end(h) if the element is absent [khint_t] @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
*/ */
#define kh_get(name, h, k) kh_get_##name(h, k) #define kh_get(name, h, k) kh_get_##name(h, k)
/*! @function /*! @function
@abstract Remove a key from the hash table. @abstract Remove a key from the hash table.
@param name Name of the hash table [symbol] @param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param k Iterator to the element to be deleted [khint_t] @param k Iterator to the element to be deleted [khint_t]
*/ */
#define kh_del(name, h, k) kh_del_##name(h, k) #define kh_del(name, h, k) kh_del_##name(h, k)
/*! @function /*! @function
@abstract Test whether a bucket contains data. @abstract Test whether a bucket contains data.
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t] @param x Iterator to the bucket [khint_t]
@return 1 if containing data; 0 otherwise [int] @return 1 if containing data; 0 otherwise [int]
*/ */
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x))) #define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
/*! @function /*! @function
@abstract Get key given an iterator @abstract Get key given an iterator
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t] @param x Iterator to the bucket [khint_t]
@return Key [type of keys] @return Key [type of keys]
*/ */
#define kh_key(h, x) ((h)->keys[x]) #define kh_key(h, x) ((h)->keys[x])
/*! @function /*! @function
@abstract Get value given an iterator @abstract Get value given an iterator
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t] @param x Iterator to the bucket [khint_t]
@return Value [type of values] @return Value [type of values]
@discussion For hash sets, calling this results in segfault. @discussion For hash sets, calling this results in segfault.
*/ */
#define kh_val(h, x) ((h)->vals[x]) #define kh_val(h, x) ((h)->vals[x])
/*! @function /*! @function
@abstract Alias of kh_val() @abstract Alias of kh_val()
*/ */
#define kh_value(h, x) ((h)->vals[x]) #define kh_value(h, x) ((h)->vals[x])
/*! @function /*! @function
@abstract Get the start iterator @abstract Get the start iterator
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@return The start iterator [khint_t] @return The start iterator [khint_t]
*/ */
#define kh_begin(h) (khint_t)(0) #define kh_begin(h) (khint_t)(0)
/*! @function /*! @function
@abstract Get the end iterator @abstract Get the end iterator
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@return The end iterator [khint_t] @return The end iterator [khint_t]
*/ */
#define kh_end(h) ((h)->n_buckets) #define kh_end(h) ((h)->n_buckets)
/*! @function /*! @function
@abstract Get the number of elements in the hash table @abstract Get the number of elements in the hash table
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@return Number of elements in the hash table [khint_t] @return Number of elements in the hash table [khint_t]
*/ */
#define kh_size(h) ((h)->size) #define kh_size(h) ((h)->size)
/*! @function /*! @function
@abstract Get the number of buckets in the hash table @abstract Get the number of buckets in the hash table
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@return Number of buckets in the hash table [khint_t] @return Number of buckets in the hash table [khint_t]
*/ */
#define kh_n_buckets(h) ((h)->n_buckets) #define kh_n_buckets(h) ((h)->n_buckets)
/*! @function /*! @function
@abstract Iterate over the entries in the hash table @abstract Iterate over the entries in the hash table
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param kvar Variable to which key will be assigned @param kvar Variable to which key will be assigned
@param vvar Variable to which value will be assigned @param vvar Variable to which value will be assigned
@param code Block of code to execute @param code Block of code to execute
*/ */
#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \ #define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h,__i)) continue; \ if (!kh_exist(h, __i)) continue; \
(kvar) = kh_key(h,__i); \ (kvar) = kh_key(h, __i); \
(vvar) = kh_val(h,__i); \ (vvar) = kh_val(h, __i); \
code; \ code; \
} } } }
/*! @function /*! @function
@abstract Iterate over the values in the hash table @abstract Iterate over the values in the hash table
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param vvar Variable to which value will be assigned @param vvar Variable to which value will be assigned
@param code Block of code to execute @param code Block of code to execute
*/ */
#define kh_foreach_value(h, vvar, code) { khint_t __i; \ #define kh_foreach_value(h, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h,__i)) continue; \ if (!kh_exist(h, __i)) continue; \
(vvar) = kh_val(h,__i); \ (vvar) = kh_val(h, __i); \
code; \ code; \
} } } }
/*! @function /*! @function
@abstract Iterate over the keys in the hash table @abstract Iterate over the keys in the hash table
@param h Pointer to the hash table [khash_t(name)*] @param h Pointer to the hash table [khash_t(name)*]
@param kvar Variable to which value will be assigned @param kvar Variable to which value will be assigned
@param code Block of code to execute @param code Block of code to execute
*/ */
#define kh_foreach_key(h, kvar, code) \ #define kh_foreach_key(h, kvar, code) \
{ \ { \
@@ -667,57 +666,57 @@ static kh_inline khint_t __ac_Wang_hash(khint_t key)
} \ } \
} }
// More convenient interfaces
/*! @function
  @abstract     Instantiate a hash set containing integer keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_INT(name) \
  KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
/*! @function
  @abstract     Instantiate a hash map containing integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_INT(name, khval_t) \
  KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
/*! @function
  @abstract     Instantiate a hash set containing 64-bit integer keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_INT64(name) \
  KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
/*! @function
  @abstract     Instantiate a hash map containing 64-bit integer keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_INT64(name, khval_t) \
  KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
typedef const char *kh_cstr_t;
/*! @function
  @abstract     Instantiate a hash set containing const char* keys
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_SET_INIT_STR(name) \
  KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
/*! @function
  @abstract     Instantiate a hash map containing const char* keys
  @param  name  Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#define KHASH_MAP_INIT_STR(name, khval_t) \
  KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
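For orientation only (this block is not part of the diff; the table name `env` and the demo function are invented), the convenience instantiators above are typically used like this:

#include <stdio.h>
// Illustrative sketch, not from this commit: a const char* -> int map named "env".
// Assumes this header is included; only documented khash macros are used.
KHASH_MAP_INIT_STR(env, int)

static void khash_demo(void)
{
  khash_t(env) *h = kh_init(env);
  int absent;
  khiter_t k = kh_put(env, h, "answer", &absent);  // insert key, get its bucket
  kh_val(h, k) = 42;                               // store the value in that bucket
  k = kh_get(env, h, "answer");                    // look the key up again
  if (k != kh_end(h)) {                            // kh_end() doubles as "not found"
    printf("answer = %d\n", kh_val(h, k));
  }
  int v;
  kh_foreach_value(h, v, { printf("value: %d\n", v); });  // macro shown above
  kh_destroy(env, h);
}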
/*! @function
  @abstract     Return a literal for an empty hash table.
  @param  name  Name of the hash table [symbol]
 */
#define KHASH_EMPTY_TABLE(name) \
  ((kh_##name##_t) { \


@@ -1,5 +1,3 @@
// uncrustify:off
/* The MIT License
   Copyright (c) 2008-2009, by Attractive Chaos <attractor@live.co.uk>
@@ -23,50 +21,50 @@
   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE.
*/
#ifndef _AC_KLIST_H
#define _AC_KLIST_H

#include <assert.h>
#include <stdlib.h>

#include "nvim/func_attr.h"
#include "nvim/memory.h"
#define KMEMPOOL_INIT(name, kmptype_t, kmpfree_f) \
  typedef struct { \
    size_t cnt, n, max; \
    kmptype_t **buf; \
  } kmp_##name##_t; \
  static inline kmp_##name##_t *kmp_init_##name(void) { \
    return xcalloc(1, sizeof(kmp_##name##_t)); \
  } \
  static inline void kmp_destroy_##name(kmp_##name##_t *mp) \
    REAL_FATTR_UNUSED; \
  static inline void kmp_destroy_##name(kmp_##name##_t *mp) { \
    size_t k; \
    for (k = 0; k < mp->n; k++) { \
      kmpfree_f(mp->buf[k]); XFREE_CLEAR(mp->buf[k]); \
    } \
    XFREE_CLEAR(mp->buf); XFREE_CLEAR(mp); \
  } \
  static inline kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
    mp->cnt++; \
    if (mp->n == 0) { \
      return xcalloc(1, sizeof(kmptype_t)); \
    } \
    return mp->buf[--mp->n]; \
  } \
  static inline void kmp_free_##name(kmp_##name##_t *mp, kmptype_t *p) { \
    mp->cnt--; \
    if (mp->n == mp->max) { \
      mp->max = mp->max ? (mp->max << 1) : 16; \
      mp->buf = xrealloc(mp->buf, sizeof(kmptype_t *) * mp->max); \
    } \
    mp->buf[mp->n++] = p; \
  }
#define kmempool_t(name) kmp_##name##_t
#define kmp_init(name) kmp_init_##name()
@@ -75,55 +73,55 @@
#define kmp_free(name, mp, p) kmp_free_##name(mp, p)
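To make the mempool contract concrete (illustrative only; `job_T` and `job_nofree` are invented, and the one-line kmp_alloc()/kmp_destroy() wrappers sit in the hunk elided above but are used by KLIST_INIT below):

typedef struct { int id; } job_T;  // hypothetical element type
#define job_nofree(x)              // items hold no heap memory: per-item destructor is a no-op
KMEMPOOL_INIT(job, job_T, job_nofree)

static void mempool_demo(void)
{
  kmempool_t(job) *mp = kmp_init(job);
  job_T *j = kmp_alloc(job, mp);  // pool is empty: falls back to xcalloc
  j->id = 1;
  kmp_free(job, mp, j);           // not released, just cached in mp->buf
  job_T *k = kmp_alloc(job, mp);  // same block handed back (not re-zeroed)
  kmp_free(job, mp, k);
  kmp_destroy(job, mp);           // runs job_nofree on cached items, then frees the pool
}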
#define KLIST_INIT(name, kltype_t, kmpfree_t) \
  struct __kl1_##name { \
    kltype_t data; \
    struct __kl1_##name *next; \
  }; \
  typedef struct __kl1_##name kl1_##name; \
  KMEMPOOL_INIT(name, kl1_##name, kmpfree_t) \
  typedef struct { \
    kl1_##name *head, *tail; \
    kmp_##name##_t *mp; \
    size_t size; \
  } kl_##name##_t; \
  static inline kl_##name##_t *kl_init_##name(void) { \
    kl_##name##_t *kl = xcalloc(1, sizeof(kl_##name##_t)); \
    kl->mp = kmp_init(name); \
    kl->head = kl->tail = kmp_alloc(name, kl->mp); \
    kl->head->next = 0; \
    return kl; \
  } \
  static inline void kl_destroy_##name(kl_##name##_t *kl) \
    REAL_FATTR_UNUSED; \
  static inline void kl_destroy_##name(kl_##name##_t *kl) { \
    kl1_##name *p; \
    for (p = kl->head; p != kl->tail; p = p->next) { \
      kmp_free(name, kl->mp, p); \
    } \
    kmp_free(name, kl->mp, p); \
    kmp_destroy(name, kl->mp); \
    XFREE_CLEAR(kl); \
  } \
  static inline void kl_push_##name(kl_##name##_t *kl, kltype_t d) { \
    kl1_##name *q, *p = kmp_alloc(name, kl->mp); \
    q = kl->tail; p->next = 0; kl->tail->next = p; kl->tail = p; \
    kl->size++; \
    q->data = d; \
  } \
  static inline kltype_t kl_shift_at_##name(kl_##name##_t *kl, \
                                            kl1_##name **n) { \
    assert((*n)->next); \
    kl1_##name *p; \
    kl->size--; \
    p = *n; \
    *n = (*n)->next; \
    if (p == kl->head) { \
      kl->head = *n; \
    } \
    kltype_t d = p->data; \
    kmp_free(name, kl->mp, p); \
    return d; \
  }
#define kliter_t(name) kl1_##name
#define klist_t(name) kl_##name##_t
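A hedged sketch of the generated list API (names invented; the kl_*() convenience wrappers are not shown in this hunk, so the generated inline functions are called directly):

#define int_nofree(x)  // list of plain ints: nothing to free per node
KLIST_INIT(ilist, int, int_nofree)

static void klist_demo(void)
{
  klist_t(ilist) *kl = kl_init_ilist();  // head/tail start as one sentinel node
  kl_push_ilist(kl, 10);
  kl_push_ilist(kl, 20);
  while (kl->size > 0) {
    int v = kl_shift_at_ilist(kl, &kl->head);  // pop from the head, node returns to the pool
    (void)v;
  }
  kl_destroy_ilist(kl);  // frees remaining nodes, the mempool and the list struct
}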


@@ -1,5 +1,3 @@
// uncrustify:off
// The MIT License
//
// Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
@@ -46,25 +44,25 @@
#include "nvim/os/os_defs.h" #include "nvim/os/os_defs.h"
#define kv_roundup32(x) \ #define kv_roundup32(x) \
((--(x)), \ ((--(x)), \
((x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16), \ ((x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16), \
(++(x))) (++(x)))
#define KV_INITIAL_VALUE { .size = 0, .capacity = 0, .items = NULL } #define KV_INITIAL_VALUE { .size = 0, .capacity = 0, .items = NULL }
#define kvec_t(type) \ #define kvec_t(type) \
struct { \ struct { \
size_t size; \ size_t size; \
size_t capacity; \ size_t capacity; \
type *items; \ type *items; \
} }
#define kv_init(v) ((v).size = (v).capacity = 0, (v).items = 0) #define kv_init(v) ((v).size = (v).capacity = 0, (v).items = 0)
#define kv_destroy(v) \ #define kv_destroy(v) \
do { \ do { \
xfree((v).items); \ xfree((v).items); \
kv_init(v); \ kv_init(v); \
} while (0) } while (0)
#define kv_A(v, i) ((v).items[(i)]) #define kv_A(v, i) ((v).items[(i)])
#define kv_pop(v) ((v).items[--(v).size]) #define kv_pop(v) ((v).items[--(v).size])
#define kv_size(v) ((v).size) #define kv_size(v) ((v).size)
@@ -81,34 +79,34 @@
#define kv_drop(v, n) ((v).size -= (n))
#define kv_resize(v, s) \
  ((v).capacity = (s), \
   (v).items = xrealloc((v).items, sizeof((v).items[0]) * (v).capacity))
#define kv_resize_full(v) \
  kv_resize(v, (v).capacity ? (v).capacity << 1 : 8)
#define kv_copy(v1, v0) \
  do { \
    if ((v1).capacity < (v0).size) { \
      kv_resize(v1, (v0).size); \
    } \
    (v1).size = (v0).size; \
    memcpy((v1).items, (v0).items, sizeof((v1).items[0]) * (v0).size); \
  } while (0)
#define kv_pushp(v) \
  ((((v).size == (v).capacity) ? (kv_resize_full(v), 0) : 0), \
   ((v).items + ((v).size++)))
#define kv_push(v, x) \
  (*kv_pushp(v) = (x))
#define kv_a(v, i) \
  (*(((v).capacity <= (size_t)(i) \
      ? ((v).capacity = (v).size = (i) + 1, \
         kv_roundup32((v).capacity), \
         kv_resize((v), (v).capacity), 0UL) \
      : ((v).size <= (size_t)(i) \
         ? (v).size = (i) + 1 \
         : 0UL)), \
    &(v).items[(i)]))
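For context, a minimal usage sketch of the growable vector (not part of the change; the variable names are made up):

static void kvec_demo(void)
{
  kvec_t(int) lines = KV_INITIAL_VALUE;  // or: kv_init(lines);
  for (int i = 0; i < 100; i++) {
    kv_push(lines, i);                   // doubles capacity as needed (8, 16, 32, ...)
  }
  int last = kv_pop(lines);              // 99; size shrinks, capacity stays
  (void)last;
  for (size_t i = 0; i < kv_size(lines); i++) {
    kv_A(lines, i) *= 2;                 // unchecked indexed access
  }
  kv_destroy(lines);                     // frees items and re-initializes the struct
}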
@@ -122,24 +120,23 @@
/// @param[in]  type  Type of vector elements.
/// @param[in]  init_size  Number of the elements in the initial array.
#define kvec_withinit_t(type, INIT_SIZE) \
  struct { \
    size_t size; \
    size_t capacity; \
    type *items; \
    type init_array[INIT_SIZE]; \
  }
/// Initialize vector with preallocated array
///
/// @param[out]  v  Vector to initialize.
#define kvi_init(v) \
  ((v).capacity = ARRAY_SIZE((v).init_array), \
   (v).size = 0, \
   (v).items = (v).init_array)
/// Move data to a new destination and free source
static inline void *_memcpy_free(void *const restrict dest, void *const restrict src,
                                 const size_t size)
  FUNC_ATTR_NONNULL_ALL FUNC_ATTR_NONNULL_RET FUNC_ATTR_ALWAYS_INLINE
{
@@ -158,15 +155,15 @@ static inline void *_memcpy_free(void *const restrict dest,
/// @param[out]  v  Vector to resize.
/// @param[in]  s  New size.
#define kvi_resize(v, s) \
  ((v).capacity = ((s) > ARRAY_SIZE((v).init_array) \
                   ? (s) \
                   : ARRAY_SIZE((v).init_array)), \
   (v).items = ((v).capacity == ARRAY_SIZE((v).init_array) \
                ? ((v).items == (v).init_array \
                   ? (v).items \
                   : _memcpy_free((v).init_array, (v).items, \
                                  (v).size * sizeof((v).items[0]))) \
                : ((v).items == (v).init_array \
                   ? memcpy(xmalloc((v).capacity * sizeof((v).items[0])), \
                            (v).items, \
                            (v).size * sizeof((v).items[0])) \
@@ -177,13 +174,13 @@ static inline void *_memcpy_free(void *const restrict dest,
///
/// @param[out]  v  Vector to resize.
#define kvi_resize_full(v) \
  /* ARRAY_SIZE((v).init_array) is the minimal capacity of this vector. */ \
  /* Thus when vector is full capacity may not be zero and it is safe */ \
  /* not to bother with checking whether (v).capacity is 0. But now */ \
  /* capacity is not guaranteed to have size that is a power of 2, it is */ \
  /* hard to fix this here and is not very necessary if users will use */ \
  /* 2^x initial array size. */ \
  kvi_resize(v, (v).capacity << 1)
/// Get location where to store new element to a vector with preallocated array
///
@@ -191,24 +188,24 @@ static inline void *_memcpy_free(void *const restrict dest,
///
/// @return Pointer to the place where new value should be stored.
#define kvi_pushp(v) \
  ((((v).size == (v).capacity) ? (kvi_resize_full(v), 0) : 0), \
   ((v).items + ((v).size++)))
/// Push value to a vector with preallocated array
///
/// @param[out]  v  Vector to push to.
/// @param[in]  x  Value to push.
#define kvi_push(v, x) \
  (*kvi_pushp(v) = (x))
/// Free array of elements of a vector with preallocated array if needed
///
/// @param[out]  v  Vector to free.
#define kvi_destroy(v) \
  do { \
    if ((v).items != (v).init_array) { \
      XFREE_CLEAR((v).items); \
    } \
  } while (0)
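A short sketch of the preallocated-array variant (illustrative; the 16-slot size and names are invented), which stays on the inline array until it overflows:

static void kvi_demo(void)
{
  kvec_withinit_t(int, 16) buf;
  kvi_init(buf);                       // items points at the inline init_array
  for (int i = 0; i < 64; i++) {
    kvi_push(buf, i);                  // spills to xmalloc'd storage past 16 elements
  }
  // kv_A/kv_size work unchanged, since the layout is a superset of kvec_t.
  int sum = 0;
  for (size_t i = 0; i < kv_size(buf); i++) {
    sum += kv_A(buf, i);
  }
  (void)sum;
  kvi_destroy(buf);                    // frees only if items moved off init_array
}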
#endif // NVIM_LIB_KVEC_H


@@ -1,5 +1,3 @@
// uncrustify:off
// Queue implemented by circularly-linked list.
//
// Adapted from libuv. Simpler and more efficient than klist.h for implementing
@@ -43,7 +41,7 @@ typedef struct _queue {
while((q) != (h)) { \
  QUEUE *next = q->next; \
  code \
  (q) = next; \
}


@@ -1,5 +1,3 @@
// uncrustify:off
/// Macros-based ring buffer implementation.
///
/// Supported functions:
@@ -17,24 +15,29 @@
#ifndef NVIM_LIB_RINGBUF_H
#define NVIM_LIB_RINGBUF_H

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "nvim/func_attr.h"
#include "nvim/memory.h"
#define _RINGBUF_LENGTH(rb) \
  ((rb)->first == NULL ? 0 \
   : ((rb)->next == (rb)->first) ? (size_t)((rb)->buf_end - (rb)->buf) + 1 \
   : ((rb)->next > (rb)->first) ? (size_t)((rb)->next - (rb)->first) \
   : (size_t)((rb)->next - (rb)->buf + (rb)->buf_end - (rb)->first + 1))
#define _RINGBUF_NEXT(rb, var) \
  ((var) == (rb)->buf_end ? (rb)->buf : (var) + 1)
#define _RINGBUF_PREV(rb, var) \
  ((var) == (rb)->buf ? (rb)->buf_end : (var) - 1)
/// Iterate over all ringbuf values
///
@@ -42,11 +45,11 @@
/// @param RBType Type of the ring buffer element.
/// @param varname Variable name.
#define RINGBUF_FORALL(rb, RBType, varname) \
  size_t varname##_length_fa_ = _RINGBUF_LENGTH(rb); \
  for (RBType *varname = ((rb)->first == NULL ? (rb)->next : (rb)->first); \
       varname##_length_fa_; \
       (varname = _RINGBUF_NEXT(rb, varname)), \
       varname##_length_fa_--)
/// Iterate over all ringbuf values, from end to the beginning
///
@@ -57,11 +60,11 @@
/// @param RBType Type of the ring buffer element.
/// @param varname Variable name.
#define RINGBUF_ITER_BACK(rb, RBType, varname) \
  size_t varname##_length_ib_ = _RINGBUF_LENGTH(rb); \
  for (varname = ((rb)->next == (rb)->buf ? (rb)->buf_end : (rb)->next - 1); \
       varname##_length_ib_; \
       (varname = _RINGBUF_PREV(rb, varname)), \
       varname##_length_ib_--)
/// Define a ring buffer structure
///
@@ -69,12 +72,12 @@
/// `{TypeName}RingBuffer`.
/// @param RBType Type of the single ring buffer element.
#define RINGBUF_TYPEDEF(TypeName, RBType) \
  typedef struct { \
    RBType *buf; \
    RBType *next; \
    RBType *first; \
    RBType *buf_end; \
  } TypeName##RingBuffer;
/// Dummy item free macros, for use in RINGBUF_INIT
///
@@ -94,13 +97,13 @@ typedef struct { \
/// @param varname Variable name.
/// @param rbsize Ring buffer size.
#define RINGBUF_STATIC(scope, TypeName, RBType, varname, rbsize) \
  static RBType _##varname##_buf[rbsize]; \
  scope TypeName##RingBuffer varname = { \
    .buf = _##varname##_buf, \
    .next = _##varname##_buf, \
    .first = NULL, \
    .buf_end = _##varname##_buf + rbsize - 1, \
  };
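As a rough illustration (hypothetical names, not from the commit), a file-scope ring buffer would combine the two macros above; its accessor functions come from RINGBUF_INIT further down:

typedef struct { int level; char text[80]; } Msg;  // invented element type

RINGBUF_TYPEDEF(Msg, Msg)
RINGBUF_STATIC(static, Msg, Msg, recent_msgs, 50)
// recent_msgs now wraps a static Msg[50]; .first == NULL marks it as empty.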
/// Initialize a new ring buffer
///
@@ -114,195 +117,191 @@ scope TypeName##RingBuffer varname = { \
///
/// Intended function signature: `void *rbfree(RBType *)`;
#define RINGBUF_INIT(TypeName, funcprefix, RBType, rbfree) \
  static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
    REAL_FATTR_WARN_UNUSED_RESULT; \
  static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
  { \
    assert(size != 0); \
    RBType *buf = xmalloc(size * sizeof(RBType)); \
    return (TypeName##RingBuffer) { \
      .buf = buf, \
      .next = buf, \
      .first = NULL, \
      .buf_end = buf + size - 1, \
    }; \
  } \
  \
  static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
    REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
  { \
    if (rb == NULL) { \
      return; \
    } \
    RINGBUF_FORALL(rb, RBType, rbitem) { \
      rbfree(rbitem); \
    } \
    XFREE_CLEAR(rb->buf); \
  } \
  \
  static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
    REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
  { \
    XFREE_CLEAR(rb->buf); \
  } \
  \
  static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
                                          RBType item) \
    REAL_FATTR_NONNULL_ARG(1); \
  static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
                                          RBType item) \
  { \
    if (rb->next == rb->first) { \
      rbfree(rb->first); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } else if (rb->first == NULL) { \
      rb->first = rb->next; \
    } \
    *rb->next = item; \
    rb->next = _RINGBUF_NEXT(rb, rb->next); \
  } \
  \
  static inline ptrdiff_t funcprefix##_rb_find_idx(const TypeName##RingBuffer *const rb, \
                                                   const RBType *const item_p) \
    REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
  static inline ptrdiff_t funcprefix##_rb_find_idx(const TypeName##RingBuffer *const rb, \
                                                   const RBType *const item_p) \
  { \
    assert(rb->buf <= item_p); \
    assert(rb->buf_end >= item_p); \
    if (rb->first == NULL) { \
      return -1; \
    } else if (item_p >= rb->first) { \
      return item_p - rb->first; \
    } else { \
      return item_p - rb->buf + rb->buf_end - rb->first + 1; \
    } \
  } \
  \
  static inline size_t funcprefix##_rb_size(const TypeName##RingBuffer *const rb) \
    REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline size_t funcprefix##_rb_size(const TypeName##RingBuffer *const rb) \
  { \
    return (size_t)(rb->buf_end - rb->buf) + 1; \
  } \
  \
  static inline size_t funcprefix##_rb_length(const TypeName##RingBuffer *const rb) \
    REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline size_t funcprefix##_rb_length(const TypeName##RingBuffer *const rb) \
  { \
    return _RINGBUF_LENGTH(rb); \
  } \
  \
  static inline RBType *funcprefix##_rb_idx_p(const TypeName##RingBuffer *const rb, \
                                              const size_t idx) \
    REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline RBType *funcprefix##_rb_idx_p(const TypeName##RingBuffer *const rb, \
                                              const size_t idx) \
  { \
    assert(idx <= funcprefix##_rb_size(rb)); \
    assert(idx <= funcprefix##_rb_length(rb)); \
    if (rb->first + idx > rb->buf_end) { \
      return rb->buf + ((rb->first + idx) - (rb->buf_end + 1)); \
    } else { \
      return rb->first + idx; \
    } \
  } \
  \
  static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
                                           const size_t idx) \
    REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
  static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
                                           const size_t idx) \
  { \
    return *funcprefix##_rb_idx_p(rb, idx); \
  } \
  \
  static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
                                            const size_t idx, \
                                            RBType item) \
    REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
                                            const size_t idx, \
                                            RBType item) \
  { \
    assert(idx <= funcprefix##_rb_size(rb)); \
    assert(idx <= funcprefix##_rb_length(rb)); \
    const size_t length = funcprefix##_rb_length(rb); \
    if (idx == length) { \
      funcprefix##_rb_push(rb, item); \
      return; \
    } \
    RBType *const insertpos = funcprefix##_rb_idx_p(rb, idx); \
    if (insertpos == rb->next) { \
      funcprefix##_rb_push(rb, item); \
      return; \
    } \
    if (length == funcprefix##_rb_size(rb)) { \
      rbfree(rb->first); \
    } \
    if (insertpos < rb->next) { \
      memmove(insertpos + 1, insertpos, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)insertpos)); \
    } else { \
      assert(insertpos > rb->first); \
      assert(rb->next <= rb->first); \
      memmove(rb->buf + 1, rb->buf, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)rb->buf)); \
      *rb->buf = *rb->buf_end; \
      memmove(insertpos + 1, insertpos, \
              (size_t)((uintptr_t)(rb->buf_end + 1) - (uintptr_t)insertpos)); \
    } \
    *insertpos = item; \
    if (length == funcprefix##_rb_size(rb)) { \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } \
    rb->next = _RINGBUF_NEXT(rb, rb->next); \
  } \
  \
  static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
                                            const size_t idx) \
    REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
                                            const size_t idx) \
  { \
    assert(idx < funcprefix##_rb_size(rb)); \
    assert(idx < funcprefix##_rb_length(rb)); \
    RBType *const rmpos = funcprefix##_rb_idx_p(rb, idx); \
    rbfree(rmpos); \
    if (rmpos == rb->next - 1) { \
      rb->next--; \
      if (rb->first == rb->next) { \
        rb->first = NULL; \
        rb->next = rb->buf; \
      } \
    } else if (rmpos == rb->first) { \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
      if (rb->first == rb->next) { \
        rb->first = NULL; \
        rb->next = rb->buf; \
      } \
    } else if (rb->first < rb->next || rb->next == rb->buf) { \
      assert(rmpos > rb->first); \
      assert(rmpos <= _RINGBUF_PREV(rb, rb->next)); \
      memmove(rb->first + 1, rb->first, \
              (size_t)((uintptr_t)rmpos - (uintptr_t)rb->first)); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } else if (rmpos < rb->next) { \
      memmove(rmpos, rmpos + 1, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)rmpos)); \
      rb->next = _RINGBUF_PREV(rb, rb->next); \
    } else { \
      assert(rb->first < rb->buf_end); \
      memmove(rb->first + 1, rb->first, \
              (size_t)((uintptr_t)rmpos - (uintptr_t)rb->first)); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } \
  }
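Finally, a hedged end-to-end sketch of the generated ring-buffer API (names invented; the no-op `hist_nofree` macro stands in for the dummy-free helper whose definition falls in an elided hunk):

#define hist_nofree(x)                  // int elements own no resources

RINGBUF_TYPEDEF(Hist, int)
RINGBUF_INIT(Hist, hist, int, hist_nofree)

static void ringbuf_demo(void)
{
  HistRingBuffer rb = hist_rb_new(4);   // heap-backed buffer of 4 ints
  for (int i = 0; i < 6; i++) {
    hist_rb_push(&rb, i);               // 4 and 5 overwrite the two oldest entries
  }
  // Length is the number of live items, size the fixed capacity.
  assert(hist_rb_length(&rb) == 4 && hist_rb_size(&rb) == 4);
  int first = hist_rb_idx(&rb, 0);      // oldest surviving element: 2
  (void)first;
  RINGBUF_FORALL(&rb, int, item) {      // iterates 2, 3, 4, 5 in insertion order
    (void)*item;
  }
  hist_rb_free(&rb);                    // runs hist_nofree per item, then frees the buffer
}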
#endif // NVIM_LIB_RINGBUF_H