// Smallest table size (in entries) a hash record is ever allocated with.
#define MIN_HASH_SIZE 4

// Key equality: bit-identical refs short-circuit the slower naEqual() call.
#define EQUAL(a, b) (IDENTICAL(a, b) || naEqual(a, b))

// 2654435769 == 2^32 / golden ratio: multiplicative (Fibonacci) hashing
// constant used to spread hash codes across table columns.
#define HASH_MAGIC 2654435769u
// Prepends a (key, val) pair to bucket chain `hcol` of HashRec `hh`,
// drawing the node from the record's preallocated nodes[] pool.
// Note that size is incremented even when the pool is exhausted and the
// insert is silently dropped (iidx test fails); callers are expected to
// grow the table first with a size >= 1<<lgalloced check and resize().
#define INSERT(hh, hkey, hval, hcol) do { \
    unsigned int cc = (hcol), iidx=(hh)->size++; \
    if(iidx < (1<<(hh)->lgalloced)) { \
        struct HashNode* hnn = &(hh)->nodes[iidx]; \
        hnn->key = (hkey); hnn->val = (hval); \
        hnn->next = (hh)->table[cc]; \
        (hh)->table[cc] = hnn; \
// Computes a hash code for a given scalar (a number or a string naRef).
static unsigned int hashcode(naRef r)
    // Numbers get the number as a hash. Just use the bits and
    // xor them together. Note assumption that sizeof(double) >=
    // 2*sizeof(unsigned int).
    // NOTE(review): reading a double through an unsigned int* violates
    // strict aliasing; a memcpy into a temporary would be safer.
    unsigned int* p = (unsigned int*)&(r.num);
} else if(PTR(r).str->hashcode) {
    // String already carries a cached (nonzero) hash code: reuse it.
    return PTR(r).str->hashcode;
    // This is Daniel Bernstein's djb2 hash function that I found
    // on the web somewhere. It appears to work pretty well.
    // NOTE(review): canonical djb2 seeds with 5381; "5831" looks like a
    // digit transposition. It still hashes fine, but do not change it
    // without confirming no precomputed hashcode elsewhere depends on it.
    unsigned int i, hash = 5831;
    for(i=0; i<PTR(r).str->len; i++)
        hash = (hash * 33) ^ PTR(r).str->data[i];
    // Cache the result on the string so subsequent lookups skip the loop.
    PTR(r).str->hashcode = hash;
// Which column in a given hash does the key correspond to.
static unsigned int hashcolumn(struct HashRec* h, naRef key)
    // Multiply by a big number, and take the top N bits. Note
    // assumption that sizeof(unsigned int) == 4.
    // (Knuth-style multiplicative hashing: the top lgalloced bits of the
    // 32-bit product select one of the 1<<lgalloced columns.)
    return (HASH_MAGIC * hashcode(key)) >> (32 - h->lgalloced);
// Allocates a fresh HashRec sized for the current live entry count
// (size - dels), rehashes every surviving node into it, and publishes it
// over hash->rec via naGC_swapfree().
static struct HashRec* resize(struct naHash* hash)
    struct HashRec *h, *h0 = hash->rec;
    int lga, cols, need = h0 ? h0->size - h0->dels : MIN_HASH_SIZE;
    if(need < MIN_HASH_SIZE) need = MIN_HASH_SIZE;
    // Find the smallest power of two strictly greater than need
    // (cols is presumably set to 1<<lga on an elided line).
    for(lga=0; 1<<lga <= need; lga++);
    // Single allocation: the header, then cols bucket-head pointers,
    // then the node pool itself.
    h = naAlloc(sizeof(struct HashRec) +
        cols * (sizeof(struct HashNode*) + sizeof(struct HashNode)));
    // Zero only the header and bucket table; pool nodes are fully
    // written by INSERT before use.
    naBZero(h, sizeof(struct HashRec) + cols * sizeof(struct HashNode*));
    // Node pool lives immediately after the bucket-pointer table.
    h->nodes = (struct HashNode*)(((char*)h)
        + sizeof(struct HashRec)
        + cols * sizeof(struct HashNode*));
    // Rehash every old column's chain into the new record (lga is
    // reused here as the old-table column index).
    for(lga=0; h0 != 0 && lga<(1<<h0->lgalloced); lga++) {
        struct HashNode* hn = h0->table[lga];
        INSERT(h, hn->key, hn->val, hashcolumn(h, hn->key));
    // Swap the new record in; the GC frees the old one when unreferenced.
    naGC_swapfree((void**)&hash->rec, h);
// Special, optimized version of naHash_get for the express purpose of
// looking up symbols in the local variables hash (OP_LOCAL is by far
// the most common opcode and deserves some special case
// optimization). Elides all the typing checks that are normally
// required, presumes that the key is a string and has had its
// hashcode precomputed, checks only for object identity, and inlines
// the column computation.
int naHash_sym(struct naHash* hash, struct naStr* sym, naRef* out)
    struct HashRec* h = hash->rec;
    // Inlined hashcolumn() using the string's precomputed hashcode.
    int col = (HASH_MAGIC * sym->hashcode) >> (32 - h->lgalloced);
    struct HashNode* hn = h->table[col];
    // Identity (same naStr pointer) comparison only -- see note above.
    if(PTR(hn->key).str == sym) {
// Walks the key's bucket chain and returns the node whose key matches
// per EQUAL() (identity or naEqual); the not-found path is elided here.
static struct HashNode* find(struct naHash* hash, naRef key)
    struct HashRec* h = hash->rec;
    for(hn = h->table[hashcolumn(h, key)]; hn; hn = hn->next)
        if(EQUAL(key, hn->key))
// Make a temporary string on the stack
// Points str->data at the caller's C string (no copy, no allocation) and
// measures its length; the resulting ref in *out must not outlive `key`
// or `str`.
static void tmpStr(naRef* out, struct naStr* str, const char* key)
    str->data = (unsigned char*)key;
    // Hand-rolled strlen; str->len is presumably zeroed on an elided
    // line before this loop -- TODO confirm.
    while(key[str->len]) str->len++;
// naMember_get() convenience wrapper taking a C-string field name:
// wraps `field` in a stack-temporary string ref and delegates.
int naMember_cget(naRef obj, const char* field, naRef* out)
    tmpStr(&key, &str, field);
    return naMember_get(obj, key, out);
// naHash_get() convenience wrapper taking a C-string key; the miss
// return value is on an elided line (presumably naNil -- TODO confirm).
naRef naHash_cget(naRef hash, char* key)
    tmpStr(&key2, &str, key);
    if(naHash_get(hash, key2, &result))
// naHash_tryset() convenience wrapper taking a C-string key.
// NOTE(review): uses tryset, so this only overwrites existing keys;
// the insert-if-missing path (if any) is elided from this view.
void naHash_cset(naRef hash, char* key, naRef val)
    tmpStr(&key2, &str, key);
    naHash_tryset(hash, key2, val);
// Looks up key in hash; on a hit copies the value into *out and
// returns 1 (the miss path is elided from this view).
int naHash_get(naRef hash, naRef key, naRef* out)
    struct HashNode* n = find(PTR(hash).hash, key);
    if(n) { *out = n->val; return 1; }
// Simpler version. Don't create a new node if the value isn't there
// (i.e. overwrite-only: the update/return logic follows on elided lines).
int naHash_tryset(naRef hash, naRef key, naRef val)
    struct HashNode* n = find(PTR(hash).hash, key);
// Special purpose optimization for use in function call setups. Sets
// a value that is known *not* to be present in the hash table. As
// for naHash_sym, the key must be a string with a precomputed hash
// code (skipping the duplicate-key search a general set would need).
void naHash_newsym(struct naHash* hash, naRef* sym, naRef* val)
    struct HashRec* h = hash->rec;
    // Grow until the node pool has room; INSERT itself never allocates.
    while(!h || h->size >= 1<<h->lgalloced)
    // Inlined hashcolumn() on the precomputed string hash code.
    col = (HASH_MAGIC * PTR(*sym).str->hashcode) >> (32 - h->lgalloced);
    INSERT(h, *sym, *val, col);
184 // The cycle check is an integrity requirement for multithreading,
185 // where raced inserts can potentially cause cycles. This ensures
186 // that the "last" thread to hold a reference to an inserted node
187 // breaks any cycles that might have happened (at the expense of
188 // potentially dropping items out of the hash). Under normal
189 // circumstances, chains will be very short and this will be fast.
// Truncates node's chain if it is longer than `count` links: a chain
// longer than the live-entry count must contain a cycle (see comment
// above), so break it at `node` rather than loop forever.
static void chkcycle(struct HashNode* node, int count)
    struct HashNode* hn = node;
    // Walk at most `count` successors; hitting the budget implies a cycle.
    while(hn && (hn = hn->next) != 0)
        if(count-- <= 0) { node->next = 0; return; }
197 void naHash_set(naRef hash, naRef key, naRef val)
202 if(!IS_HASH(hash)) return;
203 if((n = find(PTR(hash).hash, key))) { n->val = val; return; }
204 h = PTR(hash).hash->rec;
205 while(!h || h->size >= 1<<h->lgalloced)
206 h = resize(PTR(hash).hash);
207 col = hashcolumn(h, key);
208 INSERT(h, key, val, hashcolumn(h, key));
209 chkcycle(h->table[col], h->size - h->dels);
// Removes the first node in key's bucket chain whose key matches,
// unlinking it from the chain (deleted nodes stay in the pool; the
// dels counter is presumably bumped on an elided line -- TODO confirm).
void naHash_delete(naRef hash, naRef key)
    // NOTE(review): PTR(hash).hash is dereferenced before the IS_HASH
    // guard below -- relies on PTR() being safe on non-hash refs.
    struct HashRec* h = PTR(hash).hash->rec;
    struct HashNode *last=0, *hn;
    if(!IS_HASH(hash) || !h) return;
    col = hashcolumn(h, key);
    // (chain-walk loop elided) last trails hn through the bucket chain.
    if(EQUAL(hn->key, key)) {
        // Unlink: either a new chain head or splice around hn.
        if(last == 0) h->table[col] = hn->next;
        else last->next = hn->next;
// Appends every key stored in hash to the vector dst, walking each
// bucket chain of the current record.
void naHash_keys(naRef dst, naRef hash)
    struct HashRec* h = PTR(hash).hash->rec;
    if(!IS_HASH(hash) || !h) return;
    for(i=0; i<(1<<h->lgalloced); i++) {
        struct HashNode* hn = h->table[i];
        naVec_append(dst, hn->key);
// Number of live entries: total inserts minus deletions (0 for a
// non-hash ref or a hash with no record yet).
int naHash_size(naRef hash)
    struct HashRec* h = PTR(hash).hash->rec;
    if(!IS_HASH(hash) || !h) return 0;
    return h->size - h->dels;
253 void naHash_gcclean(struct naHash* h)