X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=simgear%2Fnasal%2Fgc.c;h=e9d9b7b8ba60587f9618eca6e25fab55a3a8a5f5;hb=2a2e2716bdfbecb3494ab935171ed6224a178470;hp=ee2095277b770db3797c7ac34559be9fb3080777;hpb=979d3da69c085733d573af7298a3fd95fb4574a3;p=simgear.git

diff --git a/simgear/nasal/gc.c b/simgear/nasal/gc.c
index ee209527..e9d9b7b8 100644
--- a/simgear/nasal/gc.c
+++ b/simgear/nasal/gc.c
@@ -2,7 +2,7 @@
 #include "data.h"
 #include "code.h"
 
-#define MIN_BLOCK_SIZE 256
+#define MIN_BLOCK_SIZE 32
 
 static void reap(struct naPool* p);
 static void mark(naRef r);
@@ -27,7 +27,7 @@ static void marktemps(struct Context* c)
     int i;
     naRef r = naNil();
     for(i=0; i<c->ntemps; i++) {
-        r.ref.ptr.obj = c->temps[i];
+        SETPTR(r, c->temps[i]);
         mark(r);
     }
 }
@@ -88,6 +88,11 @@ void naModUnlock()
 {
     LOCK();
     globals->nThreads--;
+    // We might be the "last" thread needed for collection.  Since
+    // we're releasing our modlock to do something else for a while,
+    // wake someone else up to do it.
+    if(globals->waitCount == globals->nThreads)
+        naSemUp(globals->sem, 1);
     UNLOCK();
 }
 
@@ -108,7 +113,7 @@ static void bottleneck()
     if(g->waitCount >= g->nThreads - 1) {
         freeDead();
         if(g->needGC) garbageCollect();
-        if(g->waitCount) naSemUpAll(g->sem, g->waitCount);
+        if(g->waitCount) naSemUp(g->sem, g->waitCount);
         g->bottleneck = 0;
     }
 }
@@ -120,42 +125,26 @@ void naCheckBottleneck()
 
 static void naCode_gcclean(struct naCode* o)
 {
-    naFree(o->byteCode);  o->byteCode = 0;
     naFree(o->constants); o->constants = 0;
-    naFree(o->argSyms);   o->argSyms = 0;
-    naFree(o->optArgSyms); o->argSyms = 0;
 }
 
 static void naGhost_gcclean(struct naGhost* g)
 {
-    if(g->ptr) g->gtype->destroy(g->ptr);
+    if(g->ptr && g->gtype->destroy) g->gtype->destroy(g->ptr);
     g->ptr = 0;
 }
 
 static void freeelem(struct naPool* p, struct naObj* o)
 {
-    // Free any intrinsic (i.e. non-garbage collected) storage the
-    // object might have
+    // Clean up any intrinsic storage the object might have...
     switch(p->type) {
-    case T_STR:
-        naStr_gcclean((struct naStr*)o);
-        break;
-    case T_VEC:
-        naVec_gcclean((struct naVec*)o);
-        break;
-    case T_HASH:
-        naHash_gcclean((struct naHash*)o);
-        break;
-    case T_CODE:
-        naCode_gcclean((struct naCode*)o);
-        break;
-    case T_GHOST:
-        naGhost_gcclean((struct naGhost*)o);
-        break;
+    case T_STR:   naStr_gcclean  ((struct naStr*)  o); break;
+    case T_VEC:   naVec_gcclean  ((struct naVec*)  o); break;
+    case T_HASH:  naiGCHashClean ((struct naHash*) o); break;
+    case T_CODE:  naCode_gcclean ((struct naCode*) o); break;
+    case T_GHOST: naGhost_gcclean((struct naGhost*)o); break;
     }
-
-    // And add it to the free list
-    p->free[p->nfree++] = o;
+    p->free[p->nfree++] = o; // ...and add it to the free list
 }
 
 static void newBlock(struct naPool* p, int need)
@@ -225,27 +214,12 @@ struct naObj** naGC_get(struct naPool* p, int n, int* nout)
 static void markvec(naRef r)
 {
     int i;
-    struct VecRec* vr = r.ref.ptr.vec->rec;
+    struct VecRec* vr = PTR(r).vec->rec;
     if(!vr) return;
     for(i=0; i<vr->size; i++)
         mark(vr->array[i]);
 }
 
-static void markhash(naRef r)
-{
-    int i;
-    struct HashRec* hr = r.ref.ptr.hash->rec;
-    if(!hr) return;
-    for(i=0; i < (1<<hr->lgalloced); i++) {
-        struct HashNode* hn = hr->table[i];
-        while(hn) {
-            mark(hn->key);
-            mark(hn->val);
-            hn = hn->next;
-        }
-    }
-}
-
 // Sets the reference bit on the object, and recursively on all
 // objects reachable from it.  Uses the processor stack for recursion...
 static void mark(naRef r)
@@ -255,33 +229,37 @@ static void mark(naRef r)
     if(IS_NUM(r) || IS_NIL(r))
         return;
 
-    if(r.ref.ptr.obj->mark == 1)
+    if(PTR(r).obj->mark == 1)
         return;
 
-    r.ref.ptr.obj->mark = 1;
-    switch(r.ref.ptr.obj->type) {
+    PTR(r).obj->mark = 1;
+    switch(PTR(r).obj->type) {
     case T_VEC: markvec(r); break;
-    case T_HASH: markhash(r); break;
+    case T_HASH: naiGCMarkHash(r); break;
     case T_CODE:
-        mark(r.ref.ptr.code->srcFile);
-        for(i=0; i<r.ref.ptr.code->nConstants; i++)
-            mark(r.ref.ptr.code->constants[i]);
+        mark(PTR(r).code->srcFile);
+        for(i=0; i<PTR(r).code->nConstants; i++)
+            mark(PTR(r).code->constants[i]);
         break;
     case T_FUNC:
-        mark(r.ref.ptr.func->code);
-        mark(r.ref.ptr.func->namespace);
-        mark(r.ref.ptr.func->next);
+        mark(PTR(r).func->code);
+        mark(PTR(r).func->namespace);
+        mark(PTR(r).func->next);
         break;
     }
 }
 
+void naiGCMark(naRef r)
+{
+    mark(r);
+}
+
 // Collects all the unreachable objects into a free list, and
 // allocates more space if needed.
 static void reap(struct naPool* p)
 {
     struct Block* b;
     int elem, freesz, total = poolsize(p);
-    p->nfree = 0;
     freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
     freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
     if(p->freesz < freesz) {
@@ -290,6 +268,9 @@ static void reap(struct naPool* p)
         p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
     }
 
+    p->nfree = 0;
+    p->free = p->free0;
+
     for(b = p->blocks; b; b = b->next)
         for(elem=0; elem < b->size; elem++) {
             struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
@@ -298,6 +279,8 @@ static void reap(struct naPool* p)
             o->mark = 0;
         }
 
+    p->freetop = p->nfree;
+
     // allocs of this type until the next collection
     globals->allocCount += total/2;
 
@@ -310,7 +293,6 @@ static void reap(struct naPool* p)
         if(need > 0) newBlock(p, need);
     }
-    p->freetop = p->nfree;
 }
 
 // Does the swap, returning the old value
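
The MIN_BLOCK_SIZE change in the first hunk interacts with the free-list sizing at the top of reap(): freesz is clamped below by MIN_BLOCK_SIZE, grown to 150%, and padded by a per-thread allocation cache, so lowering the floor from 256 to 32 lets pools with few live objects keep proportionally small free lists. The arithmetic, restated as a standalone helper (a sketch; the OBJ_CACHE_SZ value here is an assumption, the real constant lives elsewhere in the Nasal sources):

/* Restates reap()'s free-list sizing from the diff as a pure function. */
#define MIN_BLOCK_SIZE 32
#define OBJ_CACHE_SZ   32   /* assumption: the real value is defined in the Nasal headers */

static int free_list_size(int total, int nThreads)
{
    /* clamp to the minimum block size... */
    int freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
    /* ...then allow 150% growth plus one object cache per thread */
    return (3 * freesz / 2) + (nThreads * OBJ_CACHE_SZ);
}

/* e.g. a pool with 10 live objects and one thread now needs room for
   3*32/2 + 32 = 80 entries, where the old floor of 256 forced 416. */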
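The naModUnlock() hunk guards against a lost wake-up: bottleneck() only runs the collection once waitCount reaches nThreads - 1, so a thread that deregisters while the others are parked must hand the job to one of them or the rendezvous never completes. A sketch of the same pattern with POSIX semaphores, assuming naSemUp(sem, n) behaves like n sem_post() calls; the struct and function names here are illustrative, not Nasal's:

/* Rendezvous sketch, assuming POSIX threads.  gc.lock and gc.sem must be
   initialized at startup, e.g. with pthread_mutex_init() and sem_init(). */
#include <pthread.h>
#include <semaphore.h>

static struct {
    pthread_mutex_t lock;
    sem_t sem;
    int nThreads;   /* threads currently registered with the modlock */
    int waitCount;  /* threads parked at the collection bottleneck */
} gc;

static void sem_up(int n) { while(n-- > 0) sem_post(&gc.sem); }

void mod_unlock(void)
{
    pthread_mutex_lock(&gc.lock);
    gc.nThreads--;
    /* If everyone still registered is parked waiting, the collection can
       never start unless we wake one of them to go run it. */
    if(gc.waitCount == gc.nThreads)
        sem_up(1);
    pthread_mutex_unlock(&gc.lock);
}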
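The mark()/reap() pair above is a conventional stop-the-world mark-and-sweep collector: mark() sets the reference bit recursively on the C stack, and reap() scans every block, reclaiming objects whose bit is still clear and clearing the bit for the next cycle. A minimal self-contained sketch of the same technique, with toy types rather than Nasal's naRef/naPool structures:

/* Minimal mark-and-sweep sketch in the spirit of mark()/reap() above.
   All names (Obj, heap, obj_mark, sweep) are illustrative, not Nasal's. */
#include <stdlib.h>

struct Obj {
    int mark;             /* reference bit, cleared after each sweep */
    struct Obj* left;     /* toy "reachable from" edges */
    struct Obj* right;
    struct Obj* next;     /* intrusive list of every allocated object */
};

static struct Obj* heap = NULL;

static struct Obj* obj_new(void)
{
    struct Obj* o = calloc(1, sizeof *o);
    o->next = heap; heap = o;   /* track every allocation for the sweep */
    return o;
}

static void obj_mark(struct Obj* o)
{
    if(!o || o->mark) return;   /* cycle-safe: already visited */
    o->mark = 1;                /* set the reference bit... */
    obj_mark(o->left);          /* ...then recurse on the C stack */
    obj_mark(o->right);
}

static void sweep(void)
{
    struct Obj** op = &heap;
    while(*op) {
        struct Obj* o = *op;
        if(!o->mark) { *op = o->next; free(o); }     /* unreachable: reclaim */
        else         { o->mark = 0; op = &o->next; } /* clear for next GC */
    }
}

int main(void)
{
    struct Obj* root = obj_new();
    root->left = obj_new();
    obj_new();        /* garbage: reachable from no root */
    obj_mark(root);   /* mark phase, starting from the root set */
    sweep();          /* sweep phase frees only the third object */
    return 0;
}

The structural difference worth noting is that reap() pushes dead objects onto a per-pool free list (p->free) for reuse by naGC_get(), and tracks the high-water mark in p->freetop, where this sketch simply returns memory to the allocator.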