X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=simgear%2Fnasal%2Fcode.h;h=e7cb3f39edc47f2331b0491a908271699c742443;hb=f41b18f0649a50e179ba41383f4061e1878c4d4c;hp=f8cbe3812c986c2c8f42a88218307720f225934e;hpb=7d2134c488f5ae87b8bed0c65a0219e0abddd819;p=simgear.git

diff --git a/simgear/nasal/code.h b/simgear/nasal/code.h
index f8cbe381..e7cb3f39 100644
--- a/simgear/nasal/code.h
+++ b/simgear/nasal/code.h
@@ -10,19 +10,24 @@
 #define MAX_MARK_DEPTH 128

 // Number of objects (per pool per thread) asked for using naGC_get().
-// Testing with fib.nas shows that this gives the best performance,
-// without too much per-thread overhead.
-#define OBJ_CACHE_SZ 128
+// The idea is that contexts can "cache" allocations to prevent thread
+// contention on the global pools. But in practice this interacts
+// very badly with small subcontext calls, which grab huge numbers of
+// cached objects and don't use them, causing far more collections
+// than necessary. Just leave it at 1 pending a rework of the
+// collector synchronization.
+#define OBJ_CACHE_SZ 1

 enum {
-    OP_AND, OP_OR, OP_NOT, OP_MUL, OP_PLUS, OP_MINUS, OP_DIV, OP_NEG,
-    OP_CAT, OP_LT, OP_LTE, OP_GT, OP_GTE, OP_EQ, OP_NEQ, OP_EACH,
-    OP_JMP, OP_JMPLOOP, OP_JIFNOT, OP_JIFNIL, OP_FCALL, OP_MCALL, OP_RETURN,
-    OP_PUSHCONST, OP_PUSHONE, OP_PUSHZERO, OP_PUSHNIL, OP_POP,
-    OP_DUP, OP_XCHG, OP_INSERT, OP_EXTRACT, OP_MEMBER, OP_SETMEMBER,
-    OP_LOCAL, OP_SETLOCAL, OP_NEWVEC, OP_VAPPEND, OP_NEWHASH, OP_HAPPEND,
-    OP_MARK, OP_UNMARK, OP_BREAK, OP_FTAIL, OP_MTAIL, OP_SETSYM, OP_DUP2,
-    OP_INDEX, OP_BREAK2
+    OP_NOT, OP_MUL, OP_PLUS, OP_MINUS, OP_DIV, OP_NEG, OP_CAT, OP_LT, OP_LTE,
+    OP_GT, OP_GTE, OP_EQ, OP_NEQ, OP_EACH, OP_JMP, OP_JMPLOOP, OP_JIFNOTPOP,
+    OP_JIFEND, OP_FCALL, OP_MCALL, OP_RETURN, OP_PUSHCONST, OP_PUSHONE,
+    OP_PUSHZERO, OP_PUSHNIL, OP_POP, OP_DUP, OP_XCHG, OP_INSERT, OP_EXTRACT,
+    OP_MEMBER, OP_SETMEMBER, OP_LOCAL, OP_SETLOCAL, OP_NEWVEC, OP_VAPPEND,
+    OP_NEWHASH, OP_HAPPEND, OP_MARK, OP_UNMARK, OP_BREAK, OP_SETSYM, OP_DUP2,
+    OP_INDEX, OP_BREAK2, OP_PUSHEND, OP_JIFTRUE, OP_JIFNOT, OP_FCALLH,
+    OP_MCALLH, OP_XCHG2, OP_UNPACK, OP_SLICE, OP_SLICE2, OP_BIT_AND, OP_BIT_OR,
+    OP_BIT_XOR, OP_BIT_NEG
 };

 struct Frame {
@@ -58,7 +63,11 @@ struct Globals {

     // A hash of symbol names
     naRef symbols;
+    // Vector/hash containing objects which should not be freed by the gc
+    // TODO do we need a separate vector and hash?
     naRef save;
+    naRef save_hash;
+    int next_gc_key;

     struct Context* freeContexts;
     struct Context* allContexts;
@@ -69,6 +78,7 @@ struct Context {
     struct Frame fStack[MAX_RECURSION];
     int fTop;
     naRef opStack[MAX_STACK_DEPTH];
+    int opFrame; // like Frame::bp, but for C functions
     int opTop;
     int markStack[MAX_MARK_DEPTH];
     int markTop;
@@ -86,7 +96,7 @@ struct Context {

     // Error handling
     jmp_buf jumpHandle;
-    char* error;
+    char error[128];
     naRef dieArg;

     // Sub-call lists
@@ -96,6 +106,8 @@ struct Context {
     // Linked list pointers in globals
     struct Context* nextFree;
     struct Context* nextAll;
+
+    void* userData;
 };

 #define globals nasal_globals
@@ -103,11 +115,13 @@ extern struct Globals* globals;

 // Threading low-level functions
 void* naNewLock();
+void naFreeLock(void* lock);
 void naLock(void* lock);
 void naUnlock(void* lock);
 void* naNewSem();
+void naFreeSem(void* sem);
 void naSemDown(void* sem);
-void naSemUpAll(void* sem, int count);
+void naSemUp(void* sem, int count);

 void naCheckBottleneck();

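
The last hunk adds explicit destructors (naFreeLock, naFreeSem) and renames naSemUpAll to naSemUp. Below is a minimal sketch of what these wrappers can look like, assuming a POSIX build with pthread mutexes and unnamed semaphores; it only illustrates the semantics implied by the declarations above, not the library's actual platform-specific implementation (which lives in a source file not shown in this diff).

/* Hedged sketch only: frees pair with the matching allocators, and
 * naSemUp() posts 'count' times, presumably so the collector can release
 * every thread parked in naSemDown() after a mark/sweep pass. */
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>

void* naNewLock()
{
    pthread_mutex_t* m = malloc(sizeof(*m));
    pthread_mutex_init(m, 0);
    return m;
}
void naFreeLock(void* lock) { pthread_mutex_destroy(lock); free(lock); }
void naLock(void* lock)     { pthread_mutex_lock(lock); }
void naUnlock(void* lock)   { pthread_mutex_unlock(lock); }

void* naNewSem()
{
    sem_t* s = malloc(sizeof(*s));
    sem_init(s, 0, 0);   /* not shared across processes, initial count 0 */
    return s;
}
void naFreeSem(void* sem) { sem_destroy(sem); free(sem); }
void naSemDown(void* sem) { sem_wait(sem); }

/* Wake up to 'count' threads blocked in naSemDown(). */
void naSemUp(void* sem, int count)
{
    int i;
    for(i = 0; i < count; i++) sem_post(sem);
}
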
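Context::error also changes from a char* to a fixed 128-byte buffer. The hypothetical helper below shows the kind of use this enables: the message is formatted in place and control returns through jumpHandle, so reporting a runtime error needs no allocation and cannot leak a previously stored message. The name setError and its exact behaviour are assumptions for illustration; the real error path in the Nasal sources may differ.

/* Hypothetical helper, assuming the struct Context definition above. */
#include <stdarg.h>
#include <stdio.h>
#include <setjmp.h>

static void setError(struct Context* ctx, const char* fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(ctx->error, sizeof(ctx->error), fmt, ap); /* truncates safely */
    va_end(ap);
    longjmp(ctx->jumpHandle, 1); /* unwind to the interpreter's error handler */
}
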