#include "nasal.h"
#include "data.h"

#define MIN_BLOCK_SIZE 256

// "type" for an object freed by the collector
#define T_GCFREED 123 // DEBUG

// One contiguous allocation of object slots.  (Layout inferred from
// its uses below: a slot count plus the raw storage it describes.)
struct Block {
    int size;
    char* block;
};
// Decremented every allocation.  When it reaches zero, we do a
// garbage collection.  The value is bumped by 1/2 of the total
// object count at each collection, which is sane: it ensures that no
// more than 50% growth can happen between collections, and that
// garbage collection work stays proportional to allocation work
// (i.e. O(N) work is done only once every N/2 allocations).
static int GlobalAllocCount = 256;
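
// Worked example of the pacing above (illustrative numbers, not from
// the source): if the pools hold 10000 objects after a collection,
// naGC_reap() adds 10000/2 == 5000 to this counter, so the next
// collection runs after 5000 more naGC_get() calls -- at most 50%
// growth, and one O(total) sweep per total/2 allocations.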
static void appendfree(struct naPool*p, struct naObj* o)
{
    // Grow the free list if necessary
    if(p->freesz <= p->nfree) {
        int i, n = 1+((3*p->nfree)>>1);
        void** newf = naAlloc(n * sizeof(void*));
        for(i=0; i<p->nfree; i++)
            newf[i] = p->free[i];
        naFree(p->free);
        p->free = (struct naObj**)newf;
        p->freesz = n;
    }
    p->free[p->nfree++] = o;
}
static void naCode_gcclean(struct naCode* o)
{
    // Nulling the pointers keeps this idempotent: the reaper may
    // clean an already-free slot on a later cycle.
    naFree(o->byteCode);  o->byteCode  = 0;
    naFree(o->constants); o->constants = 0;
}
static void freeelem(struct naPool* p, struct naObj* o)
{
    // Mark the object as "freed" for debugging purposes
    o->type = T_GCFREED; // DEBUG

    // Free any intrinsic (i.e. non-garbage-collected) storage the
    // object might have
    switch(p->type) {
    case T_STR:
        naStr_gcclean((struct naStr*)o);
        break;
    case T_VEC:
        naVec_gcclean((struct naVec*)o);
        break;
    case T_HASH:
        naHash_gcclean((struct naHash*)o);
        break;
    case T_CODE:
        naCode_gcclean((struct naCode*)o);
        break;
    }

    // And add it to the free list
    appendfree(p, o);
}
static void newBlock(struct naPool* p, int need)
{
    int i;
    char* buf;
    struct Block* newblocks;

    if(need < MIN_BLOCK_SIZE)
        need = MIN_BLOCK_SIZE;

    // Grow the block list by one and append the new block
    newblocks = naAlloc((p->nblocks+1) * sizeof(struct Block));
    for(i=0; i<p->nblocks; i++) newblocks[i] = p->blocks[i];
    naFree(p->blocks);
    p->blocks = newblocks;
    buf = naAlloc(need * p->elemsz);
    naBZero(buf, need * p->elemsz);
    p->blocks[p->nblocks].size = need;
    p->blocks[p->nblocks].block = buf;
    p->nblocks++;

    // Initialize the new objects and put them all on the free list
    for(i=0; i<need; i++) {
        struct naObj* o = (struct naObj*)(buf + i*p->elemsz);
        o->mark = 0;
        o->type = p->type;
        appendfree(p, o);
    }
}
void naGC_init(struct naPool* p, int type)
{
    p->type = type;
    p->elemsz = naTypeSize(type);
    p->nblocks = 0; p->blocks = 0;
    p->nfree = 0; p->freesz = 0; p->free = 0;
}
int naGC_size(struct naPool* p)
{
    int i, total = 0;
    for(i=0; i<p->nblocks; i++)
        total += p->blocks[i].size;
    return total;
}
struct naObj* naGC_get(struct naPool* p)
{
    // Collect every GlobalAllocCount allocations.
    // This gets set to ~50% of the total object count each
    // collection (it's incremented in naGC_reap()).
    if(--GlobalAllocCount < 0) {
        GlobalAllocCount = 0;
        naGarbageCollect();
    }

    // If we're out of free objects, allocate an extra 12.5%
    if(p->nfree == 0)
        newBlock(p, naGC_size(p)/8);
    return p->free[--p->nfree];
}
// Sets the reference bit on the object, and recursively on all
// objects reachable from it.  Clumsy: uses C stack recursion, which
// is slower than it need be and may cause problems on some platforms
// due to the very large stack depths that result.
void naGC_mark(naRef r)
{
    int i;

    if(IS_NUM(r) || IS_NIL(r))
        return;
    if(r.ref.ptr.obj->mark == 1)
        return; // Already visited; this is what terminates cycles

    // Verify that the object hasn't been freed incorrectly:
    if(r.ref.ptr.obj->type == T_GCFREED) *(int*)0=0; // DEBUG: crash here

    r.ref.ptr.obj->mark = 1;
    switch(r.ref.ptr.obj->type) {
    case T_VEC:
        for(i=0; i<r.ref.ptr.vec->size; i++)
            naGC_mark(r.ref.ptr.vec->array[i]);
        break;
    case T_HASH:
        if(r.ref.ptr.hash->table == 0)
            break;
        for(i=0; i < (1<<r.ref.ptr.hash->lgalloced); i++) {
            struct HashNode* hn = r.ref.ptr.hash->table[i];
            while(hn) { // Walk the chain, marking keys and values
                naGC_mark(hn->key);
                naGC_mark(hn->val);
                hn = hn->next;
            }
        }
        break;
    case T_CODE:
        naGC_mark(r.ref.ptr.code->srcFile);
        for(i=0; i<r.ref.ptr.code->nConstants; i++)
            naGC_mark(r.ref.ptr.code->constants[i]);
        break;
    case T_CLOSURE:
        naGC_mark(r.ref.ptr.closure->namespace);
        naGC_mark(r.ref.ptr.closure->next);
        break;
    case T_FUNC:
        naGC_mark(r.ref.ptr.func->code);
        naGC_mark(r.ref.ptr.func->closure);
        break;
    }
}
// Collects all the unreachable objects into a free list, and
// allocates more space if needed.
void naGC_reap(struct naPool* p)
{
    int i, elem, total = 0;
    p->nfree = 0;
    for(i=0; i<p->nblocks; i++) {
        struct Block* b = p->blocks + i;
        total += b->size;
        for(elem=0; elem < b->size; elem++) {
            struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
            if(o->mark == 0)  // Unmarked means unreachable: free it
                freeelem(p, o);
            o->mark = 0;      // Clear the mark for the next cycle
        }
    }

    // Add 50% of our total to the global allocation countdown
    GlobalAllocCount += total/2;

    // Allocate more if necessary (try to keep 25-50% of the objects
    // available)
    if(p->nfree < total/4) {
        int used = total - p->nfree;
        int avail = total - used;  // == p->nfree
        int need = used/2 - avail;
        if(need > 0)
            newBlock(p, need);
    }
}
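
// Worked example of the resize rule above (illustrative numbers):
// total == 1000 with only 100 free after the sweep; 100 < 250, so
// used == 900, avail == 100, and need == 900/2 - 100 == 350.  After
// newBlock(p, 350) the pool has 1350 slots, 450 of them free --
// exactly 1/3, inside the 25-50% target.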