improve paging logic, remove unused map code
This commit is contained in:
parent
242041da34
commit
5b9254211f
4 changed files with 31 additions and 190 deletions
95
src/map.c
95
src/map.c
|
@ -199,67 +199,6 @@ int hashmap_set(hashmap *m, const void *key, size_t ksize, uintptr_t val,
|
|||
return 0;
|
||||
}
|
||||
|
||||
// Looks up `key`. On a hit, writes the stored value to *out_in and
// returns 1. On a miss, inserts *out_in as the value for `key` and
// returns 0. Returns -1 if a required resize fails.
// NOTE(review): the map stores the caller's key pointer directly —
// the caller must keep the key memory alive for the map's lifetime.
int hashmap_get_set(hashmap *m, const void *key, size_t ksize,
                    uintptr_t *out_in) {
  // grow up front so the insert path below always has room
  if (m->count + 1 > HASHMAP_MAX_LOAD * m->capacity &&
      hashmap_resize(m) == -1)
    return -1;

  uint32_t h = hash_data(key, ksize);
  struct bucket *b = find_entry(m, key, ksize, h);

  if (b->key != NULL) {
    // hit: report the existing value, leave the map untouched
    *out_in = b->value;
    return 1;
  }

  // miss: link the bucket onto the insertion-order list
  m->last->next = b;
  m->last = b;
  b->next = NULL;

  ++m->count;

  b->value = *out_in;
  b->key = key;
  b->ksize = ksize;
  b->hash = h;

  return 0;
}
|
||||
|
||||
int hashmap_set_free(hashmap *m, const void *key, size_t ksize, uintptr_t val,
|
||||
hashmap_callback c, void *usr) {
|
||||
if (m->count + 1 > HASHMAP_MAX_LOAD * m->capacity) {
|
||||
if (hashmap_resize(m) == -1)
|
||||
return -1;
|
||||
}
|
||||
|
||||
uint32_t hash = hash_data(key, ksize);
|
||||
struct bucket *entry = find_entry(m, key, ksize, hash);
|
||||
if (entry->key == NULL) {
|
||||
m->last->next = entry;
|
||||
m->last = entry;
|
||||
entry->next = NULL;
|
||||
|
||||
++m->count;
|
||||
|
||||
entry->key = key;
|
||||
entry->ksize = ksize;
|
||||
entry->hash = hash;
|
||||
entry->value = val;
|
||||
// there was no overwrite, exit the function.
|
||||
return 0;
|
||||
}
|
||||
// allow the callback to free entry data.
|
||||
// use old key and value so the callback can free them.
|
||||
// the old key and value will be overwritten after this call.
|
||||
int error = c(entry->key, ksize, entry->value, usr);
|
||||
|
||||
// overwrite the old key pointer in case the callback frees it.
|
||||
entry->key = key;
|
||||
entry->value = val;
|
||||
return error;
|
||||
}
|
||||
|
||||
int hashmap_get(hashmap *m, const void *key, size_t ksize, uintptr_t *out_val) {
|
||||
uint32_t hash = hash_data(key, ksize);
|
||||
struct bucket *entry = find_entry(m, key, ksize, hash);
|
||||
|
@ -270,40 +209,6 @@ int hashmap_get(hashmap *m, const void *key, size_t ksize, uintptr_t *out_val) {
|
|||
return entry->key != NULL ? 1 : 0;
|
||||
}
|
||||
|
||||
// doesn't "remove" the element per se, but it will be ignored.
|
||||
// the element will eventually be removed when the map is resized.
|
||||
// Logically removes `key` by turning its bucket into a tombstone.
// The slot is not reclaimed here; it is swept up when the map resizes.
void hashmap_remove(hashmap *m, const void *key, size_t ksize) {
  uint32_t h = hash_data(key, ksize);
  struct bucket *b = find_entry(m, key, ksize, h);

  if (b->key == NULL)
    return; // key not present — nothing to do

  // a tombstone is signified by a NULL key with a nonzero value;
  // removal is lazy because of the overhead of tombstone checks
  b->key = NULL;
  b->value = 0xDEAD; // I mean, it's a tombstone...

  ++m->tombstone_count;
}
|
||||
|
||||
void hashmap_remove_free(hashmap *m, const void *key, size_t ksize,
|
||||
hashmap_callback c, void *usr) {
|
||||
uint32_t hash = hash_data(key, ksize);
|
||||
struct bucket *entry = find_entry(m, key, ksize, hash);
|
||||
|
||||
if (entry->key != NULL) {
|
||||
c(entry->key, entry->ksize, entry->value, usr);
|
||||
|
||||
// "tombstone" entry is signified by a NULL key with a nonzero value
|
||||
// element removal is optional because of the overhead of tombstone checks
|
||||
entry->key = NULL;
|
||||
entry->value = 0xDEAD; // I mean, it's a tombstone...
|
||||
|
||||
++m->tombstone_count;
|
||||
}
|
||||
}
|
||||
|
||||
// Number of live entries: slots ever used minus tombstoned removals.
int hashmap_size(hashmap *m) {
  return m->count - m->tombstone_count;
}
|
||||
|
||||
int hashmap_iterate(hashmap *m, hashmap_callback c, void *user_ptr) {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue