#include "nasal.h"
#include "data.h"
#include "code.h"

#define MIN_BLOCK_SIZE 32

static void reap(struct naPool* p);
static void mark(naRef r);

struct Block {
    int   size;
    char* block;
    struct Block* next;
};

// Must be called with the giant exclusive lock!
static void freeDead()
{
    int i;
    for(i=0; i<globals->ndead; i++)
        naFree(globals->deadBlocks[i]);
    globals->ndead = 0;
}

static void marktemps(struct Context* c)
{
    int i;
    naRef r = naNil();
    for(i=0; i<c->ntemps; i++) {
        SETPTR(r, c->temps[i]);
        mark(r);
    }
}

// Must be called with the big lock!
static void garbageCollect()
{
    int i;
    struct Context* c;
    globals->allocCount = 0;
    c = globals->allContexts;
    while(c) {
        for(i=0; i<NUM_NASAL_TYPES; i++)
            c->nfree[i] = 0;
        for(i=0; i < c->fTop; i++) {
            mark(c->fStack[i].func);
            mark(c->fStack[i].locals);
        }
        for(i=0; i < c->opTop; i++)
            mark(c->opStack[i]);
        marktemps(c);
        c = c->nextAll;
    }

    mark(globals->symbols);
    mark(globals->argRef);
    mark(globals->parentsRef);

    // Finally collect all the freed objects
    for(i=0; i<NUM_NASAL_TYPES; i++)
        reap(&(globals->pools[i]));

    // Make enough space for the dead blocks we need to free during
    // execution. This works out to 1 spot for every 2 live objects,
    // which should limit the number of bottleneck operations
    // without imposing an undue burden of extra "freeable" memory.
    if(globals->deadsz < globals->allocCount) {
        globals->deadsz = globals->allocCount;
        if(globals->deadsz < 256) globals->deadsz = 256;
        naFree(globals->deadBlocks);
        globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
    }
    globals->needGC = 0;
}

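// Sizing note (illustrative numbers, not from the original source):
// each reap() call above adds half of its pool's slot count to
// allocCount, so if the pools together hold ~10000 object slots,
// allocCount ends up near 5000 and deadBlocks grows to at least that
// many entries (never fewer than 256). naGC_swapfree() can then park
// that many stale pointers before it has to force another bottleneck.
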
void naModUnlock()
{
    LOCK();
    globals->nThreads--;
    // We might be the "last" thread needed for collection. Since
    // we're releasing our modlock to do something else for a while,
    // wake someone else up to do it.
    if(globals->waitCount == globals->nThreads)
        naSemUp(globals->sem, 1);
    UNLOCK();
}

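// Usage sketch (illustrative, not from this file): embedders are
// expected to wrap code that may block in naModUnlock()/naModLock()
// so the collector's bottleneck never waits on outside I/O, e.g.:
//
//   naModUnlock();                 /* let other threads and the GC run */
//   nbytes = read(fd, buf, len);   /* some potentially blocking call */
//   naModLock();                   /* reacquire before touching naRefs */
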
// Must be called with the main lock. Engages the "bottleneck", where
// all threads will block so that one (the last one to call this
// function) can run alone. This is done for GC, and also to free the
// list of "dead" blocks when it gets full (which is part of GC, if
// you think about it).
static void bottleneck()
{
    struct Globals* g = globals;
    g->bottleneck = 1;
    while(g->bottleneck && g->waitCount < g->nThreads - 1) {
        g->waitCount++;
        UNLOCK(); naSemDown(g->sem); LOCK();
        g->waitCount--;
    }
    if(g->waitCount >= g->nThreads - 1) {
        freeDead();
        if(g->needGC) garbageCollect();
        if(g->waitCount) naSemUp(g->sem, g->waitCount);
        g->bottleneck = 0;
    }
}

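// Informal walkthrough of the handshake above: with three threads
// holding mod locks, the first two callers each bump waitCount and
// sleep on the semaphore; the third sees waitCount >= nThreads-1, so
// it alone runs freeDead() and (if needed) garbageCollect(), wakes
// the sleepers, and clears the bottleneck flag.
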
void naCheckBottleneck()
{
    if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
}

static void naCode_gcclean(struct naCode* o)
{
    naFree(o->constants);  o->constants = 0;
}

static void naGhost_gcclean(struct naGhost* g)
{
    if(g->ptr && g->gtype->destroy) g->gtype->destroy(g->ptr);
    g->ptr = 0;
}

static void freeelem(struct naPool* p, struct naObj* o)
{
    // Clean up any intrinsic storage the object might have...
    switch(o->type) {
    case T_STR:   naStr_gcclean  ((struct naStr*)  o); break;
    case T_VEC:   naVec_gcclean  ((struct naVec*)  o); break;
    case T_HASH:  naiGCHashClean ((struct naHash*) o); break;
    case T_CODE:  naCode_gcclean ((struct naCode*) o); break;
    case T_GHOST: naGhost_gcclean((struct naGhost*)o); break;
    }
    p->free[p->nfree++] = o; // ...and add it to the free list
}

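// Note: the gcclean() calls above only release storage hanging off
// the object (string bytes, vector guts, ghost payloads); the naObj
// slot itself stays inside its Block and is recycled through
// p->free. Blocks are only ever added (see newBlock() below), never
// handed back to the system allocator.
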
static void newBlock(struct naPool* p, int need)
{
    int i;
    struct Block* newb;

    if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;

    newb = naAlloc(sizeof(struct Block));
    newb->block = naAlloc(need * p->elemsz);
    newb->size = need;
    newb->next = p->blocks;
    p->blocks = newb;
    naBZero(newb->block, need * p->elemsz);

    if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
    p->nfree = 0;
    p->free = p->free0 + p->freetop;
    for(i=0; i < need; i++) {
        struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
        o->mark = 0;
        p->free[p->nfree++] = o;
    }
    p->freetop += need;
}

void naGC_init(struct naPool* p, int type)
{
    p->type = type;
    p->elemsz = naTypeSize(type);
    p->blocks = 0;
    p->free0 = p->free = 0;
    p->nfree = p->freesz = p->freetop = 0;
    reap(p);
}

static int poolsize(struct naPool* p)
{
    int total = 0;
    struct Block* b = p->blocks;
    while(b) { total += b->size; b = b->next; }
    return total;
}

struct naObj** naGC_get(struct naPool* p, int n, int* nout)
{
    struct naObj** result;
    naCheckBottleneck();
    LOCK();
    while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
        globals->needGC = 1;
        bottleneck();
    }
    if(p->nfree == 0)
        newBlock(p, poolsize(p)/8);
    n = p->nfree < n ? p->nfree : n;
    *nout = n;
    p->nfree -= n;
    globals->allocCount -= n;
    result = (struct naObj**)(p->free + p->nfree);
    UNLOCK();
    return result;
}

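// Caller's-eye view (illustrative sketch, not from this file): the
// allocation paths request a small batch of objects and are told how
// many were actually granted, e.g.:
//
//   int got;
//   struct naObj** objs = naGC_get(&globals->pools[T_STR], OBJ_CACHE_SZ, &got);
//   /* got may be smaller than requested; each objs[i] is a reusable
//      slot of p->elemsz bytes whose type/field setup is up to the caller */
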
static void markvec(naRef r)
{
    int i;
    struct VecRec* vr = PTR(r).vec->rec;
    if(!vr) return;
    for(i=0; i<vr->size; i++)
        mark(vr->array[i]);
}

// Sets the reference bit on the object, and recursively on all
// objects reachable from it. Uses the processor stack for recursion...
static void mark(naRef r)
{
    int i;

    if(IS_NUM(r) || IS_NIL(r))
        return;

    if(PTR(r).obj->mark == 1)
        return;

    PTR(r).obj->mark = 1;
    switch(PTR(r).obj->type) {
    case T_VEC: markvec(r); break;
    case T_HASH: naiGCMarkHash(r); break;
    case T_CODE:
        mark(PTR(r).code->srcFile);
        for(i=0; i<PTR(r).code->nConstants; i++)
            mark(PTR(r).code->constants[i]);
        break;
    case T_FUNC:
        mark(PTR(r).func->code);
        mark(PTR(r).func->namespace);
        mark(PTR(r).func->next);
        break;
    }
}

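// Mark/sweep note: mark() only ever sets the bit; reap() below treats
// mark == 0 as garbage and clears the bit on every slot afterwards,
// so each collection starts from an all-clear heap. Deeply nested
// data really does recurse on the C stack, as the comment above warns.
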
void naiGCMark(naRef r)
{
    mark(r);
}

// Collects all the unreachable objects into a free list, and
// allocates more space if needed.
static void reap(struct naPool* p)
{
    struct Block* b;
    int elem, freesz, total = poolsize(p);
    freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
    freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
    if(p->freesz < freesz) {
        naFree(p->free0);
        p->freesz = freesz;
        p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
    }

    p->nfree = 0;
    p->free = p->free0;

    // Sweep: anything still unmarked is unreachable and goes back on
    // the free list; clear the mark bit for the next collection.
    for(b = p->blocks; b; b = b->next)
        for(elem=0; elem < b->size; elem++) {
            struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
            if(o->mark == 0)
                freeelem(p, o);
            o->mark = 0;
        }

    p->freetop = p->nfree;

    // Allow up to half this pool's size in additional
    // allocs of this type until the next collection
    globals->allocCount += total/2;

    // Allocate more if necessary (try to keep 25-50% of the objects
    // available)
    if(p->nfree < total/4) {
        int used = total - p->nfree;
        int avail = total - used;
        int need = used/2 - avail;
        if(need > 0)
            newBlock(p, need);
    }
}

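// Worked example of the resizing above (illustrative numbers): with
// total == 1000 slots and only p->nfree == 100 free after the sweep,
// used == 900, avail == 100, so need == 450 - 100 == 350. Growing the
// pool by 350 slots leaves about 450 of 1350 slots free (~33%),
// inside the 25-50% target band.
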
// Does the swap, returning the old value
static void* doswap(void** target, void* val)
{
    void* old = *target;
    *target = val;
    return old;
}

// Atomically replaces target with a new pointer, and adds the old one
// to the list of blocks to free the next time something holds the
// giant exclusive lock.
void naGC_swapfree(void** target, void* val)
{
    void* old;
    LOCK();
    old = doswap(target, val);
    while(globals->ndead >= globals->deadsz)
        bottleneck();
    globals->deadBlocks[globals->ndead++] = old;
    UNLOCK();
}

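// Typical use (illustrative sketch; the field names are hypothetical):
// a container that grows its backing store can publish the new array
// immediately and let the next bottleneck free the old one, e.g.:
//
//   void* bigger = naAlloc(newsz);
//   memcpy(bigger, v->array, oldsz);
//   naGC_swapfree((void**)&v->array, bigger);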