// NOTE(review): this file is a partial extraction — each line carries its
// fused original line number and many intervening lines are missing.
// Code bytes are left untouched below; comments only annotate what is visible.
// Smallest object count a pool block may be allocated with (see newBlock()).
5 #define MIN_BLOCK_SIZE 32
// Forward declarations: reap() sweeps a pool's unmarked objects onto its
// free list; mark() sets the reachability bit recursively. Both are
// defined later in this file.
7 static void reap(struct naPool* p);
8 static void mark(naRef r);
16 // Must be called with the giant exclusive lock!
// Frees every pointer queued in globals->deadBlocks[0..ndead-1].
// NOTE(review): the function braces, the loop counter declaration, and any
// reset of globals->ndead afterwards are on lines missing from this
// extraction — confirm against the full source.
17 static void freeDead()
20 for(i=0; i<globals->ndead; i++)
21 naFree(globals->deadBlocks[i]);
// Marks each temporary object registered on the context (c->temps) as
// reachable, wrapping the raw pointer into a naRef via SETPTR first.
// NOTE(review): the declaration of r and the mark(r) call inside the loop
// are on lines missing from this extraction.
25 static void marktemps(struct Context* c)
29 for(i=0; i<c->ntemps; i++) {
30 SETPTR(r, c->temps[i]);
35 // Must be called with the big lock!
// One full mark-and-sweep pass: reset the allocation budget, mark all
// roots reachable from each live Context and from the global tables,
// then reap every per-type pool and resize the dead-block queue.
// NOTE(review): several lines are missing from this extraction (the walk
// over the context list, the opStack mark expression, other globals->
// roots, and the closing of the deadsz branch); comments cover only
// what is visible.
36 static void garbageCollect()
// Budget is rebuilt below by reap() (allocCount += total/2 per pool).
40 globals->allocCount = 0;
41 c = globals->allContexts;
// Per-type bookkeeping loop; the statement it controls is missing here.
43 for(i=0; i<NUM_NASAL_TYPES; i++)
// Mark every active call frame: the function object and its locals table.
45 for(i=0; i < c->fTop; i++) {
46 mark(c->fStack[i].func);
47 mark(c->fStack[i].locals);
// Mark the operand stack (the marked expression is on a missing line).
49 for(i=0; i < c->opTop; i++)
// Global GC roots.
57 mark(globals->save_hash);
58 mark(globals->symbols);
60 mark(globals->argRef);
61 mark(globals->parentsRef);
63 // Finally collect all the freed objects
64 for(i=0; i<NUM_NASAL_TYPES; i++)
65 reap(&(globals->pools[i]));
67 // Make enough space for the dead blocks we need to free during
68 // execution. This works out to 1 spot for every 2 live objects,
69 // which should limit the number of bottleneck operations
70 // without imposing an undue burden of extra "freeable" memory.
71 if(globals->deadsz < globals->allocCount) {
72 globals->deadsz = globals->allocCount;
// Floor so small heaps still get a usable dead-block queue.
73 if(globals->deadsz < 256) globals->deadsz = 256;
// Grow by replacement. NOTE(review): assumes the queue was already
// drained (freeDead() runs earlier under the bottleneck) — confirm.
74 naFree(globals->deadBlocks);
75 globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
// NOTE(review): fragment — the enclosing function's header (apparently a
// module-unlock path that decrements nThreads) is on lines missing from
// this extraction.
92 // We might be the "last" thread needed for collection. Since
93 // we're releasing our modlock to do something else for a while,
94 // wake someone else up to do it.
// If every remaining registered thread is already parked in bottleneck(),
// release one so the collection can proceed without us.
95 if(globals->waitCount == globals->nThreads)
96 naSemUp(globals->sem, 1);
100 // Must be called with the main lock. Engages the "bottleneck", where
101 // all threads will block so that one (the last one to call this
102 // function) can run alone. This is done for GC, and also to free the
103 // list of "dead" blocks when it gets full (which is part of GC, if
104 // you think about it).
105 static void bottleneck()
// Local alias for the shared globals record.
107 struct Globals* g = globals;
// Park until all other registered threads have arrived. The main lock is
// dropped around the semaphore wait so the others can reach this point.
// NOTE(review): the waitCount increment/decrement bookkeeping and the
// setting of g->bottleneck are on lines missing from this extraction.
109 while(g->bottleneck && g->waitCount < g->nThreads - 1) {
111 UNLOCK(); naSemDown(g->sem); LOCK();
// Last thread in: perform the serialized work alone, then release all
// waiters at once (naSemUp with the full waiter count).
114 if(g->waitCount >= g->nThreads - 1) {
116 if(g->needGC) garbageCollect();
117 if(g->waitCount) naSemUp(g->sem, g->waitCount);
// Public entry point: if some thread has engaged the bottleneck, take the
// main lock and participate (blocking until the collection completes).
131 void naCheckBottleneck()
133 if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
// GC finalizer for code objects: release the constants table and null the
// pointer to guard against double-free on a later sweep.
136 static void naCode_gcclean(struct naCode* o)
138 naFree(o->constants); o->constants = 0;
// GC finalizer for C-function objects: run the user destroy callback, but
// only when the user-data variant is in use (fptru set) and both the data
// and the callback are non-null.
141 static void naCCode_gcclean(struct naCCode* c)
143 if(c->fptru && c->user_data && c->destroy) c->destroy(c->user_data);
// GC finalizer for ghost (opaque wrapper) objects: invoke the type's
// destroy hook on the wrapped pointer if both are present.
// NOTE(review): g->gtype is dereferenced unconditionally — presumably it
// is never null for a live ghost; confirm against the constructor.
147 static void naGhost_gcclean(struct naGhost* g)
149 if(g->ptr && g->gtype->destroy) g->gtype->destroy(g->ptr);
// Runs the per-type finalizer for a swept object, then pushes it onto the
// pool's free list. NOTE(review): the switch header (on the object's type
// tag) and its closing brace are on lines missing from this extraction.
153 static void freeelem(struct naPool* p, struct naObj* o)
155 // Clean up any intrinsic storage the object might have...
157 case T_STR: naStr_gcclean ((struct naStr*) o); break;
158 case T_VEC: naVec_gcclean ((struct naVec*) o); break;
159 case T_HASH: naiGCHashClean ((struct naHash*) o); break;
160 case T_CODE: naCode_gcclean ((struct naCode*) o); break;
161 case T_CCODE: naCCode_gcclean((struct naCCode*)o); break;
162 case T_GHOST: naGhost_gcclean((struct naGhost*)o); break;
164 p->free[p->nfree++] = o; // ...and add it to the free list
// Allocates a fresh block of `need` objects (at least MIN_BLOCK_SIZE) for
// the pool, links it onto p->blocks, zeroes the storage, and appends the
// new objects to the free list at p->freetop.
// NOTE(review): lines setting newb->size, p->blocks, p->nfree and the
// final p->freetop advance are missing from this extraction.
167 static void newBlock(struct naPool* p, int need)
172 if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;
174 newb = naAlloc(sizeof(struct Block));
175 newb->block = naAlloc(need * p->elemsz);
// Push the new block onto the head of the pool's block list.
177 newb->next = p->blocks;
179 naBZero(newb->block, need * p->elemsz);
// Clamp to the remaining capacity of the free-pointer array.
181 if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
183 p->free = p->free0 + p->freetop;
184 for(i=0; i < need; i++) {
185 struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
187 p->free[p->nfree++] = o;
// Initializes an empty pool for objects of the given type tag: records the
// element size and zeroes the free-list state. NOTE(review): lines between
// these (e.g. storing the type, clearing p->blocks, and any initial reap)
// are missing from this extraction.
192 void naGC_init(struct naPool* p, int type)
195 p->elemsz = naTypeSize(type);
198 p->free0 = p->free = 0;
199 p->nfree = p->freesz = p->freetop = 0;
// Total object capacity of the pool: sum of the sizes of all its blocks.
// NOTE(review): the declaration/initialization of `total` and the return
// statement are on lines missing from this extraction.
203 static int poolsize(struct naPool* p)
206 struct Block* b = p->blocks;
207 while(b) { total += b->size; b = b->next; }
// Hands out up to n free objects of the pool's type, returning a pointer
// into the free array; the actual count granted is reported via *nout
// (the *nout store and the p->nfree decrement are on missing lines).
// NOTE(review): locking calls and the loop body that engages the GC
// bottleneck are also missing from this extraction.
211 struct naObj** naGC_get(struct naPool* p, int n, int* nout)
213 struct naObj** result;
// Spin while the allocation budget is exhausted (allocCount < 0) or the
// free machinery is saturated — presumably triggering a collection each
// iteration via the missing loop body.
216 while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
// Grow the pool by 1/8 of its current capacity when the free list is dry.
221 newBlock(p, poolsize(p)/8);
// Grant no more objects than are actually free.
222 n = p->nfree < n ? p->nfree : n;
// Charge the granted objects against the global allocation budget.
225 globals->allocCount -= n;
// The granted objects sit just past the remaining free entries.
226 result = (struct naObj**)(p->free + p->nfree);
// Marks every element of a vector's backing record as reachable.
// NOTE(review): the null-record guard and the mark(...) call inside the
// loop are on lines missing from this extraction.
231 static void markvec(naRef r)
234 struct VecRec* vr = PTR(r).vec->rec;
236 for(i=0; i<vr->size; i++)
240 // Sets the reference bit on the object, and recursively on all
241 // objects reachable from it. Uses the processor stack for recursion...
242 static void mark(naRef r)
// Immediate values (numbers, nil) carry no heap object — nothing to mark.
// NOTE(review): the `return` statements for these two guards are on lines
// missing from this extraction.
246 if(IS_NUM(r) || IS_NIL(r))
// Already marked: stop here, which also terminates cycles.
249 if(PTR(r).obj->mark == 1)
252 PTR(r).obj->mark = 1;
// Recurse into the object's outgoing references by type.  The T_CODE and
// T_FUNC case labels (and their break statements) are on missing lines.
253 switch(PTR(r).obj->type) {
254 case T_VEC: markvec(r); break;
255 case T_HASH: naiGCMarkHash(r); break;
// Code objects: mark the source-file string and every constant.
257 mark(PTR(r).code->srcFile);
258 for(i=0; i<PTR(r).code->nConstants; i++)
259 mark(PTR(r).code->constants[i]);
// Function objects: mark the code, the namespace, and the next link in
// the closure chain.
262 mark(PTR(r).func->code);
263 mark(PTR(r).func->namespace);
264 mark(PTR(r).func->next);
// Public wrapper exposing the static mark() routine to other modules.
// NOTE(review): the body is on lines missing from this extraction;
// presumably it simply forwards to mark(r) — confirm.
269 void naiGCMark(naRef r)
274 // Collects all the unreachable objects into a free list, and
275 // allocates more space if needed.
276 static void reap(struct naPool* p)
277 // NOTE(review): this extraction is missing the naFree(p->free0) and
// p->freesz assignment in the grow branch, the p->nfree/p->free reset,
// and the per-object mark test + clear inside the sweep loop.
279 int elem, freesz, total = poolsize(p);
// Size the free-pointer array at 1.5x the pool capacity plus per-thread
// object-cache headroom, with a MIN_BLOCK_SIZE floor.
280 freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
281 freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
282 if(p->freesz < freesz) {
285 p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
// Sweep every object in every block; unmarked ones are finalized and
// pushed onto the free list (the test lives on a missing line).
291 for(b = p->blocks; b; b = b->next)
292 for(elem=0; elem < b->size; elem++) {
293 struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
299 p->freetop = p->nfree;
// Rebuild the global budget: each pool contributes half its capacity in
301 // allocs of this type until the next collection
302 globals->allocCount += total/2;
304 // Allocate more if necessary (try to keep 25-50% of the objects
// available).
// Note: avail equals p->nfree by construction (total - (total - nfree)).
306 if(p->nfree < total/4) {
307 int used = total - p->nfree;
308 int avail = total - used;
309 int need = used/2 - avail;
315 // Does the swap, returning the old value
// NOTE(review): the body (*target exchange and return of the previous
// pointer) is on lines missing from this extraction.
316 static void* doswap(void** target, void* val)
323 // Atomically replaces target with a new pointer, and adds the old one
324 // to the list of blocks to free the next time something holds the
// giant lock. NOTE(review): the LOCK()/UNLOCK() calls and the `old`
// declaration are on lines missing from this extraction; "atomically"
// here presumably means "under the main lock" rather than a hardware
// atomic — confirm.
326 void naGC_swapfree(void** target, void* val)
330 old = doswap(target, val);
// If the dead-block queue is full, engage the bottleneck (whose body is
// missing here) until space is drained by freeDead().
331 while(globals->ndead >= globals->deadsz)
333 globals->deadBlocks[globals->ndead++] = old;