5 #define MIN_BLOCK_SIZE 256
7 static void reap(struct naPool* p);
8 static void mark(naRef r);
16 // Must be called with the giant exclusive lock!
17 static void freeDead()
// Release every pointer queued on the deadBlocks list (populated by
// naGC_swapfree() below), emptying the pending-free backlog.
20 for(i=0; i<globals->ndead; i++)
21 naFree(globals->deadBlocks[i]);
// Treat each temporary object registered on the context as a GC
// root, so values not yet reachable from any stack survive a
// collection.
25 static void marktemps(struct Context* c)
29 for(i=0; i<c->ntemps; i++) {
// Wrap the raw object pointer in a naRef so mark() can consume it.
30 r.ref.ptr.obj = c->temps[i];
35 // Must be called with the big lock!
36 static void garbageCollect()
// Reset the allocation budget; each reap() below adds back half of
// its pool's capacity, throttling allocation between collections.
40 globals->allocCount = 0;
41 c = globals->allContexts;
43 for(i=0; i<NUM_NASAL_TYPES; i++)
// Mark the roots held by each context: the call stack (both the
// functions and their local namespaces)...
45 for(i=0; i < c->fTop; i++) {
46 mark(c->fStack[i].func);
47 mark(c->fStack[i].locals);
// ...and the operand stack.
49 for(i=0; i < c->opTop; i++)
// Global roots: the symbol table and the shared "arg"/"parents"
// reference objects.
57 mark(globals->symbols);
59 mark(globals->argRef);
60 mark(globals->parentsRef);
62 // Finally collect all the freed objects
63 for(i=0; i<NUM_NASAL_TYPES; i++)
64 reap(&(globals->pools[i]));
66 // Make enough space for the dead blocks we need to free during
67 // execution. This works out to 1 spot for every 2 live objects,
68 // which should limit the number of bottleneck operations
69 // without imposing an undue burden of extra "freeable" memory.
70 if(globals->deadsz < globals->allocCount) {
71 globals->deadsz = globals->allocCount;
72 if(globals->deadsz < 256) globals->deadsz = 256;
73 naFree(globals->deadBlocks);
74 globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
94 // Must be called with the main lock. Engages the "bottleneck", where
95 // all threads will block so that one (the last one to call this
96 // function) can run alone. This is done for GC, and also to free the
97 // list of "dead" blocks when it gets full (which is part of GC, if
98 // you think about it).
99 static void bottleneck()
101 struct Globals* g = globals;
// Wait until we are the last runnable thread: drop the lock, sleep
// on the semaphore, and re-take the lock before rechecking.
103 while(g->bottleneck && g->waitCount < g->nThreads - 1) {
105 UNLOCK(); naSemDown(g->sem); LOCK();
// Everyone else is parked: we run the serialized work alone, then
// wake all waiters at once.
108 if(g->waitCount >= g->nThreads - 1) {
110 if(g->needGC) garbageCollect();
111 if(g->waitCount) naSemUpAll(g->sem, g->waitCount);
// Public hook: if some thread has engaged the bottleneck, take the
// global lock and participate in it (blocking until it clears).
116 void naCheckBottleneck()
118 if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
// Free all heap storage owned by a naCode object, nulling each
// pointer so a repeated clean is harmless.
121 static void naCode_gcclean(struct naCode* o)
123 naFree(o->byteCode); o->byteCode = 0;
124 naFree(o->constants); o->constants = 0;
125 naFree(o->argSyms); o->argSyms = 0;
126 naFree(o->optArgSyms); o->optArgSyms = 0;
127 naFree(o->optArgVals); o->optArgVals = 0;
128 naFree(o->lineIps); o->lineIps = 0;
// Run the ghost type's destroy callback on the wrapped pointer, if
// one is set. NOTE(review): assumes gtype->destroy is non-NULL
// whenever ptr is set -- confirm against ghost creation code.
131 static void naGhost_gcclean(struct naGhost* g)
133 if(g->ptr) g->gtype->destroy(g->ptr);
137 static void freeelem(struct naPool* p, struct naObj* o)
139 // Free any intrinsic (i.e. non-garbage collected) storage the
// Per-type cleanup of the object's internal heap storage (the
// type dispatch surrounding these calls is not visible in this
// excerpt):
143 naStr_gcclean((struct naStr*)o);
146 naVec_gcclean((struct naVec*)o);
149 naHash_gcclean((struct naHash*)o);
152 naCode_gcclean((struct naCode*)o);
155 naGhost_gcclean((struct naGhost*)o);
159 // And add it to the free list
160 p->free[p->nfree++] = o;
// Allocate a fresh block of at least `need` objects for the pool and
// push its objects onto the free list.
163 static void newBlock(struct naPool* p, int need)
// Never allocate tiny blocks; batch at least MIN_BLOCK_SIZE objects.
168 if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;
170 newb = naAlloc(sizeof(struct Block));
171 newb->block = naAlloc(need * p->elemsz);
// Link the new block onto the pool's block list.
173 newb->next = p->blocks;
// Fresh objects start zeroed.
175 naBZero(newb->block, need * p->elemsz);
// Clamp to the remaining capacity of the free list...
177 if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
179 p->free = p->free0 + p->freetop;
// ...and seed it with pointers to the new objects.
180 for(i=0; i < need; i++) {
181 struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
183 p->free[p->nfree++] = o;
// Initialize an object pool for the given type: record the per-object
// size and start with an empty free list (blocks are allocated
// lazily, see newBlock()/reap()).
188 void naGC_init(struct naPool* p, int type)
191 p->elemsz = naTypeSize(type);
194 p->free0 = p->free = 0;
195 p->nfree = p->freesz = p->freetop = 0;
// Total object capacity of the pool: the sum of all block sizes.
199 static int poolsize(struct naPool* p)
202 struct Block* b = p->blocks;
203 while(b) { total += b->size; b = b->next; }
// Hand out up to n free objects from the pool; *nout receives the
// count actually delivered.
207 struct naObj** naGC_get(struct naPool* p, int n, int* nout)
209 struct naObj** result;
// Loop until the pool can satisfy us: a negative allocCount means
// the allocation budget is exhausted (forcing a collection first);
// otherwise grow when the free list is empty and full to capacity.
212 while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
// Grow by 1/8th of the current pool size (newBlock() enforces the
// MIN_BLOCK_SIZE floor).
217 newBlock(p, poolsize(p)/8);
// Deliver at most what the free list currently holds.
218 n = p->nfree < n ? p->nfree : n;
// Charge the delivered objects against the global budget.
221 globals->allocCount -= n;
222 result = (struct naObj**)(p->free + p->nfree);
// Mark every element of a vector as reachable.
227 static void markvec(naRef r)
230 struct VecRec* vr = r.ref.ptr.vec->rec;
232 for(i=0; i<vr->size; i++)
// Mark every key/value pair in a hash as reachable.
236 static void markhash(naRef r)
239 struct HashRec* hr = r.ref.ptr.hash->rec;
// Walk all 2^lgalloced buckets and their chained nodes.
241 for(i=0; i < (1<<hr->lgalloced); i++) {
242 struct HashNode* hn = hr->table[i];
251 // Sets the reference bit on the object, and recursively on all
252 // objects reachable from it. Uses the processor stack for recursion...
253 static void mark(naRef r)
// Immediate values (numbers, nil) reference no heap object.
257 if(IS_NUM(r) || IS_NIL(r))
// Already marked: stop here; this also terminates reference cycles.
260 if(r.ref.ptr.obj->mark == 1)
263 r.ref.ptr.obj->mark = 1;
// Recurse into the object's outgoing references, per type.
264 switch(r.ref.ptr.obj->type) {
265 case T_VEC: markvec(r); break;
266 case T_HASH: markhash(r); break;
// Code objects reference their source-file string and constants.
268 mark(r.ref.ptr.code->srcFile);
269 for(i=0; i<r.ref.ptr.code->nConstants; i++)
270 mark(r.ref.ptr.code->constants[i]);
// Functions reference their code, captured namespace, and the next
// link in the closure chain.
273 mark(r.ref.ptr.func->code);
274 mark(r.ref.ptr.func->namespace);
275 mark(r.ref.ptr.func->next);
280 // Collects all the unreachable objects into a free list, and
281 // allocates more space if needed.
282 static void reap(struct naPool* p)
285 int elem, freesz, total = poolsize(p);
// Size the free list generously: 1.5x the pool capacity plus
// per-thread object-cache headroom, floored at MIN_BLOCK_SIZE.
287 freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
288 freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
289 if(p->freesz < freesz) {
292 p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
// Sweep: visit every object slot in every block of the pool.
295 for(b = p->blocks; b; b = b->next)
296 for(elem=0; elem < b->size; elem++) {
297 struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
// Replenish the global allocation budget by half this pool's
// capacity (see garbageCollect(), which zeroes it first).
303 // allocs of this type until the next collection
304 globals->allocCount += total/2;
306 // Allocate more if necessary (try to keep 25-50% of the objects
// Grow the pool when less than a quarter of it is free.
308 if(p->nfree < total/4) {
309 int used = total - p->nfree;
310 int avail = total - used;
311 int need = used/2 - avail;
315 p->freetop = p->nfree;
318 // Does the swap, returning the old value
// NOTE(review): body not visible in this excerpt -- presumably
// exchanges *target with val under some form of synchronization
// (naGC_swapfree below documents the pair as atomic); confirm.
319 static void* doswap(void** target, void* val)
326 // Atomically replaces target with a new pointer, and adds the old one
327 // to the list of blocks to free the next time something holds the
329 void naGC_swapfree(void** target, void* val)
333 old = doswap(target, val);
// If the dead list is full, spin until space opens up (freeDead()
// drains the list) before queueing the old pointer.
334 while(globals->ndead >= globals->deadsz)
336 globals->deadBlocks[globals->ndead++] = old;