#include "nasal.h"
#include "data.h"
#include "code.h"

#define MIN_BLOCK_SIZE 256

// "type" for an object freed by the collector
#define T_GCFREED 123 // DEBUG

static void reap(struct naPool* p);
static void mark(naRef r);

// A contiguous chunk of object slots; each pool keeps a linked list
// of these.
struct Block {
    int   size;
    char* block;
    struct Block* next;
};

// Must be called with the giant exclusive lock!
static void freeDead()
{
    int i;
    for(i=0; i<globals->ndead; i++)
        naFree(globals->deadBlocks[i]);
    globals->ndead = 0;
}

// Must be called with the big lock!
static void garbageCollect()
{
    int i;
    struct Context* c;

    globals->allocCount = 0;
    c = globals->allContexts;
    while(c) {
        // Invalidate each context's per-type object cache
        for(i=0; i<NUM_NASAL_TYPES; i++)
            c->nfree[i] = 0;
        for(i=0; i < c->fTop; i++) {
            mark(c->fStack[i].func);
            mark(c->fStack[i].locals);
        }
        for(i=0; i < c->opTop; i++)
            mark(c->opStack[i]);
        c = c->nextAll;
    }

    // Mark the global references
    mark(globals->symbols);
    mark(globals->argRef);
    mark(globals->parentsRef);

    // Finally collect all the freed objects
    for(i=0; i<NUM_NASAL_TYPES; i++)
        reap(&(globals->pools[i]));

    // Make enough space for the dead blocks we need to free during
    // execution.  This works out to 1 spot for every 2 live objects,
    // which should limit the number of bottleneck operations
    // without imposing an undue burden of extra "freeable" memory.
    if(globals->deadsz < globals->allocCount) {
        globals->deadsz = globals->allocCount;
        if(globals->deadsz < 256) globals->deadsz = 256;
        naFree(globals->deadBlocks);
        globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
    }
    globals->needGC = 0;
}

// Must be called with the main lock.  Engages the "bottleneck", where
// all threads will block so that one (the last one to call this
// function) can run alone.  This is done for GC, and also to free the
// list of "dead" blocks when it gets full (which is part of GC, if
// you think about it).
static void bottleneck()
{
    struct Globals* g = globals;
    g->bottleneck = 1;
    while(g->bottleneck && g->waitCount < g->nThreads - 1) {
        g->waitCount++;
        UNLOCK(); naSemDown(g->sem); LOCK();
        g->waitCount--;
    }
    if(g->waitCount >= g->nThreads - 1) {
        freeDead();
        if(g->needGC) garbageCollect();
        if(g->waitCount) naSemUpAll(g->sem, g->waitCount);
        g->bottleneck = 0;
    }
}

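// Joins a pending bottleneck, if one is in progress.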
void naCheckBottleneck()
{
    if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
}

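// Frees the intrinsic storage owned by a naCode object: bytecode,
// constant pool, and argument symbol tables.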
static void naCode_gcclean(struct naCode* o)
{
    naFree(o->byteCode); o->byteCode = 0;
    naFree(o->constants); o->constants = 0;
    naFree(o->argSyms); o->argSyms = 0;
    naFree(o->optArgSyms); o->optArgSyms = 0;
}

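// Runs the ghost type's destroy callback on the wrapped pointer.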
static void naGhost_gcclean(struct naGhost* g)
{
    if(g->ptr) g->gtype->destroy(g->ptr);
    g->ptr = 0;
}

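// Releases an object's intrinsic storage and returns it to the
// pool's free list.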
static void freeelem(struct naPool* p, struct naObj* o)
{
    // Free any intrinsic (i.e. non-garbage collected) storage the
    // object might have
    switch(p->type) {
    case T_STR:
        naStr_gcclean((struct naStr*)o);
        break;
    case T_VEC:
        naVec_gcclean((struct naVec*)o);
        break;
    case T_HASH:
        naHash_gcclean((struct naHash*)o);
        break;
    case T_CODE:
        naCode_gcclean((struct naCode*)o);
        break;
    case T_GHOST:
        naGhost_gcclean((struct naGhost*)o);
        break;
    }

    // And add it to the free list
    o->type = T_GCFREED; // DEBUG
    p->free[p->nfree++] = o;
}

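// Allocates and zeroes a new block of at least MIN_BLOCK_SIZE
// objects, links it into the pool, and pushes as many of the new
// objects as will fit onto the free list.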
static void newBlock(struct naPool* p, int need)
{
    int i;
    struct Block* newb;

    if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;

    newb = naAlloc(sizeof(struct Block));
    newb->block = naAlloc(need * p->elemsz);
    newb->size = need;
    newb->next = p->blocks;
    p->blocks = newb;
    naBZero(newb->block, need * p->elemsz);

    if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
    p->nfree = 0;
    p->free = p->free0 + p->freetop;
    for(i=0; i < need; i++) {
        struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
        o->mark = 0;
        o->type = T_GCFREED; // DEBUG
        p->free[p->nfree++] = o;
    }
    p->freetop += need;
}

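// Initializes an empty pool for objects of the given type; the
// trailing reap() pre-allocates the initial free list.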
void naGC_init(struct naPool* p, int type)
{
    p->type = type;
    p->elemsz = naTypeSize(type);
    p->blocks = 0;

    p->free0 = p->free = 0;
    p->nfree = p->freesz = p->freetop = 0;
    reap(p);
}

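// Total number of object slots across all of the pool's blocks.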
static int poolsize(struct naPool* p)
{
    int total = 0;
    struct Block* b = p->blocks;
    while(b) { total += b->size; b = b->next; }
    return total;
}

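// Hands out up to n free objects (the actual count is returned via
// nout), triggering the GC bottleneck first if the global allocation
// budget is spent or the pool's free list is exhausted.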
struct naObj** naGC_get(struct naPool* p, int n, int* nout)
{
    struct naObj** result;
    naCheckBottleneck();
    LOCK();
    while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
        globals->needGC = 1;
        bottleneck();
    }
    if(p->nfree == 0)
        newBlock(p, poolsize(p)/8);
    n = p->nfree < n ? p->nfree : n;
    *nout = n;
    p->nfree -= n;
    globals->allocCount -= n;
    result = (struct naObj**)(p->free + p->nfree);
    UNLOCK();
    return result;
}

// Sets the reference bit on the object, and recursively on all
// objects reachable from it. Uses the processor stack for recursion...
static void mark(naRef r)
{
    int i;

    if(IS_NUM(r) || IS_NIL(r))
        return;

    if(r.ref.ptr.obj->mark == 1)
        return;

    // Verify that the object hasn't been freed incorrectly:
    if(r.ref.ptr.obj->type == T_GCFREED) *(int*)0=0; // DEBUG

    r.ref.ptr.obj->mark = 1;
    switch(r.ref.ptr.obj->type) {
    case T_VEC:
        if(r.ref.ptr.vec->rec)
            for(i=0; i<r.ref.ptr.vec->rec->size; i++)
                mark(r.ref.ptr.vec->rec->array[i]);
        break;
    case T_HASH:
        if(r.ref.ptr.hash->rec != 0) {
            struct HashRec* hr = r.ref.ptr.hash->rec;
            for(i=0; i < (1<<hr->lgalloced); i++) {
                struct HashNode* hn = hr->table[i];
                while(hn) {
                    mark(hn->key);
                    mark(hn->val);
                    hn = hn->next;
                }
            }
        }
        break;
    case T_CODE:
        mark(r.ref.ptr.code->srcFile);
        for(i=0; i<r.ref.ptr.code->nConstants; i++)
            mark(r.ref.ptr.code->constants[i]);
        break;
    case T_FUNC:
        mark(r.ref.ptr.func->code);
        mark(r.ref.ptr.func->namespace);
        mark(r.ref.ptr.func->next);
        break;
    }
}

// Collects all the unreachable objects into a free list, and
// allocates more space if needed.
static void reap(struct naPool* p)
{
    struct Block* b;
    int elem, freesz, total = poolsize(p);
    p->nfree = 0;
    freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
    freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
    if(p->freesz < freesz) {
        naFree(p->free0);
        p->freesz = freesz;
        p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
    }

    for(b = p->blocks; b; b = b->next)
        for(elem=0; elem < b->size; elem++) {
            struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
            if(o->mark == 0)
                freeelem(p, o);
            o->mark = 0;
        }

    // Allow up to "half" of the total pool size in new
    // allocs of this type until the next collection
    globals->allocCount += total/2;

    // Allocate more if necessary (try to keep 25-50% of the objects
    // available)
    if(p->nfree < total/4) {
        int used = total - p->nfree;
        int avail = total - used;
        int need = used/2 - avail;
        if(need > 0)
            newBlock(p, need);
    }
    p->freetop = p->nfree;
}

// Atomically replaces target with a new pointer, and adds the old one
// to the list of blocks to free the next time something holds the
// giant exclusive lock.
void naGC_swapfree(void** target, void* val)
{
    LOCK();
    while(globals->ndead >= globals->deadsz)
        bottleneck();
    globals->deadBlocks[globals->ndead++] = *target;
    *target = val;
    UNLOCK();
}