#define MIN_BLOCK_SIZE 256

static void reap(struct naPool* p);
static void mark(naRef r);
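
// One contiguous slab of object storage.  A pool keeps a linked list
// of these; this declaration is inferred from its uses in poolsize()
// and newBlock() below.
struct Block {
    int   size;
    char* block;
    struct Block* next;
};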

// Must be called with the giant exclusive lock!
static void freeDead()
{
    int i;
    for(i=0; i<globals->ndead; i++)
        naFree(globals->deadBlocks[i]);
    globals->ndead = 0;
}
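
// Marks the objects on a context's temporary list: objects that have
// been allocated but are not yet reachable from any other GC root.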
static void marktemps(struct Context* c)
{
    int i;
    naRef r = naNil();
    for(i=0; i<c->ntemps; i++) {
        r.ref.ptr.obj = c->temps[i];
        mark(r);
    }
}
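
// The collector proper: marks everything reachable from each
// context's stacks and the global roots, then reaps every pool.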
// Must be called with the big lock!
static void garbageCollect()
{
    int i;
    struct Context* c;
    globals->allocCount = 0;
    c = globals->allContexts;
    while(c) {
        for(i=0; i<NUM_NASAL_TYPES; i++)
            c->nfree[i] = 0;    // reset the context's free-object cache
        for(i=0; i < c->fTop; i++) {
            mark(c->fStack[i].func);
            mark(c->fStack[i].locals);
        }
        for(i=0; i < c->opTop; i++)
            mark(c->opStack[i]);
        marktemps(c);
        c = c->nextAll;
    }

    mark(globals->symbols);
    mark(globals->argRef);
    mark(globals->parentsRef);

    // Finally collect all the freed objects
    for(i=0; i<NUM_NASAL_TYPES; i++)
        reap(&(globals->pools[i]));

    // Make enough space for the dead blocks we need to free during
    // execution.  This works out to 1 spot for every 2 live objects,
    // which should limit the number of bottleneck operations
    // without imposing an undue burden of extra "freeable" memory.
    if(globals->deadsz < globals->allocCount) {
        globals->deadsz = globals->allocCount;
        if(globals->deadsz < 256) globals->deadsz = 256;
        naFree(globals->deadBlocks);
        globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
    }
    globals->needGC = 0;
}

// Must be called with the main lock.  Engages the "bottleneck", where
// all threads will block so that one (the last one to call this
// function) can run alone.  This is done for GC, and also to free the
// list of "dead" blocks when it gets full (which is part of GC, if
// you think about it).
static void bottleneck()
{
    struct Globals* g = globals;
    g->bottleneck = 1;
    while(g->bottleneck && g->waitCount < g->nThreads - 1) {
        g->waitCount++;
        UNLOCK(); naSemDown(g->sem); LOCK();
        g->waitCount--;
    }
    if(g->waitCount >= g->nThreads - 1) {
        freeDead();
        if(g->needGC) garbageCollect();
        if(g->waitCount) naSemUpAll(g->sem, g->waitCount);
        g->bottleneck = 0;
    }
}
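
// Public hook: if another thread has engaged the bottleneck, take the
// lock and join it (waiting, or doing the work if we are the last one in).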
void naCheckBottleneck()
{
    if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
}
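
// Type-specific cleanup: free any malloc'd storage hanging off a code
// object before its slot is recycled.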
static void naCode_gcclean(struct naCode* o)
{
    naFree(o->byteCode);   o->byteCode = 0;
    naFree(o->constants);  o->constants = 0;
    naFree(o->argSyms);    o->argSyms = 0;
    naFree(o->optArgSyms); o->optArgSyms = 0;
}

static void naGhost_gcclean(struct naGhost* g)
{
    if(g->ptr) g->gtype->destroy(g->ptr);
    g->ptr = 0;
}
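
// Returns a dead object to its pool's free list.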
static void freeelem(struct naPool* p, struct naObj* o)
{
    // Free any intrinsic (i.e. non-garbage collected) storage the
    // object might have
    switch(o->type) {
    case T_STR:   naStr_gcclean  ((struct naStr*)  o); break;
    case T_VEC:   naVec_gcclean  ((struct naVec*)  o); break;
    case T_HASH:  naHash_gcclean ((struct naHash*) o); break;
    case T_CODE:  naCode_gcclean ((struct naCode*) o); break;
    case T_GHOST: naGhost_gcclean((struct naGhost*)o); break;
    }

    // And add it to the free list
    p->free[p->nfree++] = o;
}
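
// Grows the pool by one block of at least MIN_BLOCK_SIZE objects and
// pushes as many of them as will fit onto the free list.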
static void newBlock(struct naPool* p, int need)
{
    int i;
    struct Block* newb;

    if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;

    newb = naAlloc(sizeof(struct Block));
    newb->block = naAlloc(need * p->elemsz);
    newb->size = need;
    newb->next = p->blocks;
    p->blocks = newb;
    naBZero(newb->block, need * p->elemsz);

    if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
    p->nfree = 0;
    p->free = p->free0 + p->freetop;
    for(i=0; i < need; i++) {
        struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
        o->mark = 0;
        p->free[p->nfree++] = o;
    }
    p->freetop += need;
}
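
// One-time setup of an empty pool for objects of the given type.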
void naGC_init(struct naPool* p, int type)
{
    p->type = type;
    p->elemsz = naTypeSize(type);
    p->blocks = 0;

    p->free0 = p->free = 0;
    p->nfree = p->freesz = p->freetop = 0;
    reap(p);
}
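
// Total number of object slots in a pool, live or free.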
static int poolsize(struct naPool* p)
{
    int total = 0;
    struct Block* b = p->blocks;
    while(b) { total += b->size; b = b->next; }
    return total;
}
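
// Hands out up to n objects from the pool's free list (the actual
// count goes back in *nout), forcing a collection first if the global
// allocation budget is spent or the pool is out of room.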
struct naObj** naGC_get(struct naPool* p, int n, int* nout)
{
    struct naObj** result;
    naCheckBottleneck();
    LOCK();
    while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
        globals->needGC = 1;
        bottleneck();
    }
    if(p->nfree == 0)
        newBlock(p, poolsize(p)/8);
    n = p->nfree < n ? p->nfree : n;
    *nout = n;
    p->nfree -= n;
    globals->allocCount -= n;
    result = (struct naObj**)(p->free + p->nfree);
    UNLOCK();
    return result;
}
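
// Marks every element of a vector.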
static void markvec(naRef r)
{
    int i;
    struct VecRec* vr = r.ref.ptr.vec->rec;
    if(!vr) return;
    for(i=0; i<vr->size; i++)
        mark(vr->array[i]);
}
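
// Marks every key and value stored in a hash table.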
static void markhash(naRef r)
{
    int i;
    struct HashRec* hr = r.ref.ptr.hash->rec;
    if(!hr) return;
    for(i=0; i < (1<<hr->lgalloced); i++) {
        struct HashNode* hn = hr->table[i];
        while(hn) {
            mark(hn->key);
            mark(hn->val);
            hn = hn->next;
        }
    }
}

// Sets the reference bit on the object, and recursively on all
// objects reachable from it.  Uses the processor stack for recursion...
static void mark(naRef r)
{
    int i;

    if(IS_NUM(r) || IS_NIL(r))
        return;

    if(r.ref.ptr.obj->mark == 1)
        return;

    r.ref.ptr.obj->mark = 1;
    switch(r.ref.ptr.obj->type) {
    case T_VEC: markvec(r); break;
    case T_HASH: markhash(r); break;
    case T_CODE:
        mark(r.ref.ptr.code->srcFile);
        for(i=0; i<r.ref.ptr.code->nConstants; i++)
            mark(r.ref.ptr.code->constants[i]);
        break;
    case T_FUNC:
        mark(r.ref.ptr.func->code);
        mark(r.ref.ptr.func->namespace);
        mark(r.ref.ptr.func->next);
        break;
    }
}

// Collects all the unreachable objects into a free list, and
// allocates more space if needed.
static void reap(struct naPool* p)
{
    struct Block* b;
    int elem, freesz, total = poolsize(p);
    p->nfree = 0;
    freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
    freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
    if(p->freesz < freesz) {
        naFree(p->free0);
        p->freesz = freesz;
        p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
    }

    for(b = p->blocks; b; b = b->next)
        for(elem=0; elem < b->size; elem++) {
            struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
            if(o->mark == 0)
                freeelem(p, o);
            o->mark = 0;   // clear for the next collection cycle
        }

    // Allow roughly total/2 more allocs of this type until the next
    // collection.
    globals->allocCount += total/2;

    // Allocate more if necessary (try to keep 25-50% of the objects
    // available)
    if(p->nfree < total/4) {
        int used = total - p->nfree;
        int avail = total - used;
        int need = used/2 - avail;
        if(need > 0)
            newBlock(p, need);
    }
    p->freetop = p->nfree;
}

// Does the swap, returning the old value
static void* doswap(void** target, void* val)
{
    void* old = *target;
    *target = val;
    return old;
}

// Atomically replaces target with a new pointer, and adds the old one
// to the list of blocks to free the next time something holds the
// giant lock.
void naGC_swapfree(void** target, void* val)
{
    void* old;
    LOCK();
    old = doswap(target, val);
    while(globals->ndead >= globals->deadsz)
        bottleneck();
    globals->deadBlocks[globals->ndead++] = old;
    UNLOCK();
}