7 ////////////////////////////////////////////////////////////////////////
8 // Debugging stuff. ////////////////////////////////////////////////////
9 ////////////////////////////////////////////////////////////////////////
10 //#define INTERPRETER_DUMP
// DBG(expr) compiles to nothing unless INTERPRETER_DUMP is defined
// above, in which case the expression runs verbatim (used for the
// trace printf calls scattered through the interpreter loop).
11 #if !defined(INTERPRETER_DUMP)
12 # define DBG(expr) /* noop */
14 # define DBG(expr) expr
// Debug helpers (implementations not visible in this excerpt).
18 char* opStringDEBUG(int op);
19 void printOpDEBUG(int ip, int op);
20 void printStackDEBUG(struct Context* ctx);
21 ////////////////////////////////////////////////////////////////////////
// NOTE(review): presumably guarded by an MSVC-only #ifdef (not visible
// here) -- older MSVC spells vsnprintf with a leading underscore.
24 #define vsnprintf _vsnprintf
// Process-wide interpreter state, allocated lazily (see initGlobals()).
27 struct Globals* globals = 0;
29 static naRef bindFunction(struct Context* ctx, struct Frame* f, naRef code);
// Shorthand for raising a runtime error; naRuntimeError() longjmps and
// never returns.
31 #define ERR(c, msg) naRuntimeError((c),(msg))
// Raise a Nasal runtime error: format the printf-style message into the
// context's error buffer, then unwind non-locally to the setjmp handler
// installed by naCall()/naContinue().  Never returns.
32 void naRuntimeError(struct Context* c, const char* fmt, ...)
// (va_start/va_end lines not visible in this excerpt; "ap" is
// presumably a va_list initialized from fmt -- confirm in full source.)
36 vsnprintf(c->error, sizeof(c->error), fmt, ap);
38 longjmp(c->jumpHandle, 1);
41 void naRethrowError(naContext subc)
43 strncpy(subc->callParent->error, subc->error, sizeof(subc->error));
44 subc->callParent->dieArg = subc->dieArg;
45 longjmp(subc->callParent->jumpHandle, 1);
// Sentinel value used to mark "end of iteration": a reference whose
// object pointer is the distinguished END_PTR address (never a real
// heap object).  See IS_END() uses in boolify() below.
48 #define END_PTR ((void*)1)
49 #define IS_END(r) (IS_REF((r)) && PTR((r)).obj == END_PTR)
// Constructs the sentinel naRef itself (body not visible in this excerpt).
50 static naRef endToken()
// Truth test for a Nasal value: numbers are true iff nonzero; nil and
// the end-of-iteration sentinel are false; strings are false when empty
// and otherwise follow their numeric value when parseable.  Any other
// type raises a runtime error (ERR() longjmps, so it does not return).
57 static int boolify(struct Context* ctx, naRef r)
59 if(IS_NUM(r)) return r.num != 0;
60 if(IS_NIL(r) || IS_END(r)) return 0;
// String cases ("d" is presumably a local double declared on a line not
// visible in this excerpt):
63 if(naStr_len(r) == 0) return 0;
64 if(naStr_tonum(r, &d)) return d != 0;
67 ERR(ctx, "non-scalar used in boolean context");
// Coerce a value to a double: numbers pass through, strings are parsed
// with naStr_tonum(), everything else raises a runtime error (ERR()
// longjmps; the error branches never return a value).
71 static double numify(struct Context* ctx, naRef o)
74 if(IS_NUM(o)) return o.num;
75 else if(IS_NIL(o)) ERR(ctx, "nil used in numeric context");
76 else if(!IS_STR(o)) ERR(ctx, "non-scalar in numeric context");
77 else if(naStr_tonum(o, &n)) return n;
78 else ERR(ctx, "non-numeric string in numeric context");
// Coerce a value to a Nasal string: strings pass through unchanged,
// numbers are formatted into a freshly allocated string, anything else
// raises a runtime error (ERR() longjmps and does not return).
82 static naRef stringify(struct Context* ctx, naRef r)
84 if(IS_STR(r)) return r;
85 if(IS_NUM(r)) return naStr_fromnum(naNewString(ctx), r.num);
86 ERR(ctx, "non-scalar in string context");
// Numify and range-check a vector index.  Negative indices count back
// from the end (Python-style); anything still out of range raises a
// runtime error.  Returns the normalized non-negative index.
90 static int checkVec(struct Context* ctx, naRef vec, naRef idx)
92 int i = (int)numify(ctx, idx);
93 if(i < 0) i += naVec_size(vec);
94 if(i < 0 || i >= naVec_size(vec))
95 naRuntimeError(ctx, "vector index %d out of bounds (size: %d)",
// Numify and range-check a string (byte) index, mirroring checkVec():
// negative indices count from the end, out-of-range raises a runtime
// error.  Returns the normalized non-negative index.
100 static int checkStr(struct Context* ctx, naRef str, naRef idx)
102 int i = (int)numify(ctx, idx);
103 if(i < 0) i += naStr_len(str);
104 if(i < 0 || i >= naStr_len(str))
105 naRuntimeError(ctx, "string index %d out of bounds (size: %d)",
// Generic indexed read used by the extract opcode: hash lookup, vector
// element, or single string byte (returned as a number).  The key must
// be a scalar; indexing anything that is not a container is an error.
110 static naRef containerGet(struct Context* ctx, naRef box, naRef key)
112 naRef result = naNil();
113 if(!IS_SCALAR(key)) ERR(ctx, "container index not scalar");
// (the IS_HASH(box) test on the line preceding this one is not visible
// in this excerpt)
115 naHash_get(box, key, &result);
116 } else if(IS_VEC(box)) {
117 result = naVec_get(box, checkVec(ctx, box, key));
118 } else if(IS_STR(box)) {
// Cast through unsigned char so high-bit bytes yield 128..255, not
// negative values.
119 result = naNum((unsigned char)naStr_data(box)[checkStr(ctx, box, key)]);
121 ERR(ctx, "extract from non-container");
// Generic indexed write, dual of containerGet(): set a hash entry, a
// vector slot, or a single byte of a mutable string.  Scalar key
// required; writing into a non-container is an error.
126 static void containerSet(struct Context* ctx, naRef box, naRef key, naRef val)
128 if(!IS_SCALAR(key)) ERR(ctx, "container index not scalar");
129 else if(IS_HASH(box)) naHash_set(box, key, val);
130 else if(IS_VEC(box)) naVec_set(box, checkVec(ctx, box, key), val);
131 else if(IS_STR(box)) {
// A nonzero cached hashcode marks the string as interned/shared, so
// mutating it in place would corrupt hash tables that reference it.
132 if(PTR(box).str->hashcode)
133 ERR(ctx, "cannot change immutable string");
134 naStr_data(box)[checkStr(ctx, box, key)] = (char)numify(ctx, val);
135 } else ERR(ctx, "insert into non-container");
// Allocate the per-context array of temporary object references used to
// keep freshly allocated objects alive across GC (see naTempSave()
// usage in naCall()).
138 static void initTemps(struct Context* c)
// c->tempsz is presumably initialized on a line not visible in this
// excerpt, before this allocation -- confirm in full source.
141 c->temps = naAlloc(c->tempsz * sizeof(struct naObj*));
// Reset a context to a pristine state: empty frame, operand and mark
// stacks, plus per-type bookkeeping (loop body over NUM_NASAL_TYPES is
// truncated in this excerpt).
145 static void initContext(struct Context* c)
148 c->fTop = c->opTop = c->markTop = 0;
149 for(i=0; i<NUM_NASAL_TYPES; i++)
// One-time lazy initialization of the process-wide interpreter state:
// allocation pools, threading primitives, the context free/all lists,
// the symbol-intern table, the GC-protected "save" vector, and interned
// symbols for the hot identifiers "me", "arg" and "parents".
164 static void initGlobals()
168 globals = (struct Globals*)naAlloc(sizeof(struct Globals));
169 naBZero(globals, sizeof(struct Globals));
// Threading primitives used for the global interpreter lock / GC.
171 globals->sem = naNewSem();
172 globals->lock = naNewLock();
174 globals->allocCount = 256; // reasonable starting value
175 for(i=0; i<NUM_NASAL_TYPES; i++)
176 naGC_init(&(globals->pools[i]), i);
// Initial capacity of the dead-blocks list used by the GC.
177 globals->deadsz = 256;
179 globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
181 // Initialize a single context
182 globals->freeContexts = 0;
183 globals->allContexts = 0;
// ("c" is presumably the bootstrap context created on a line not
// visible in this excerpt.)
186 globals->symbols = naNewHash(c);
187 globals->save = naNewVector(c);
189 // Cache pre-calculated "me", "arg" and "parents" scalars
190 globals->meRef = naInternSymbol(naStr_fromdata(naNewString(c), "me", 2));
191 globals->argRef = naInternSymbol(naStr_fromdata(naNewString(c), "arg", 3));
192 globals->parentsRef = naInternSymbol(naStr_fromdata(naNewString(c), "parents", 7));
// Create (or recycle) an execution context.  A pooled context is popped
// from the global free list when available; otherwise a fresh one is
// allocated and linked onto the all-contexts list so the GC can find it.
197 struct Context* naNewContext()
// Free-list hit: reuse the head entry.
204 c = globals->freeContexts;
206 globals->freeContexts = c->nextFree;
// Free list empty: allocate a brand-new context...
212 c = (struct Context*)naAlloc(sizeof(struct Context));
// ...and prepend it to the global list of all contexts.
216 c->nextAll = globals->allContexts;
218 globals->allContexts = c;
// Create a child context linked under "super" for a nested call (used
// by extension code calling back into Nasal).  Any previous child is
// freed first: a context keeps at most one live child.
224 struct Context* naSubContext(struct Context* super)
226 struct Context* ctx = naNewContext();
227 if(super->callChild) naFreeContext(super->callChild);
228 ctx->callParent = super;
229 super->callChild = ctx;
// Return a context to the global free list for reuse.  Recursively
// frees any child context first and detaches this one from its parent.
// Note: the context is pooled, not deallocated.
233 void naFreeContext(struct Context* c)
236 if(c->callChild) naFreeContext(c->callChild);
237 if(c->callParent) c->callParent->callChild = 0;
238 
239 c->nextFree = globals->freeContexts;
240 globals->freeContexts = c;
244 // Note that opTop is incremented separately, to avoid situations
245 // where the "r" expression also references opTop. The SGI compiler
246 // is known to have issues with such code.
// Push "r" onto the operand stack of the context named "ctx" in the
// enclosing scope; overflow raises a runtime error (longjmp).
247 #define PUSH(r) do { \
248 if(ctx->opTop >= MAX_STACK_DEPTH) ERR(ctx, "stack overflow"); \
249 ctx->opStack[ctx->opTop] = r; \
// Bind call arguments into the new frame's local namespace: positional
// args first, then optional args (falling back to their compiled-in
// default constants), then any remainder into the "rest" args vector.
253 static void setupArgs(naContext ctx, struct Frame* f, naRef* args, int nargs)
256 struct naCode* c = PTR(PTR(f->func).func->code).code;
258 // Set the argument symbols, and put any remaining args in a vector
// (the arity check guarding this error is truncated in this excerpt)
260 naRuntimeError(ctx, "too few function args (have %d need %d)",
262 for(i=0; i<c->nArgs; i++)
263 naHash_newsym(PTR(f->locals).hash,
264 &c->constants[c->argSyms[i]], &args[i]);
// Optional arguments: consume a caller-supplied value when present,
// otherwise use the default from the constant pool.
267 for(i=0; i<c->nOptArgs; i++, nargs--) {
268 naRef val = nargs > 0 ? args[i] : c->constants[c->optArgVals[i]];
// Default values that are code objects must be bound to the lexical
// context of the *caller's* frame (fTop-2: the frame below the new one).
270 val = bindFunction(ctx, &ctx->fStack[ctx->fTop-2], val);
271 naHash_newsym(PTR(f->locals).hash, &c->constants[c->optArgSyms[i]],
// Collect leftover arguments into the rest-args vector when the
// function declares one (or when there are extras to report).
275 if(c->needArgVector || nargs > 0) {
276 naRef argsv = naNewVector(ctx);
277 naVec_setsize(argsv, nargs > 0 ? nargs : 0);
278 for(i=0; i<nargs; i++)
279 PTR(argsv).vec->rec->array[i] = *args++;
280 naHash_newsym(PTR(f->locals).hash, &c->restArgSym, &argsv);
// Prepare a function or method call whose callee and nargs arguments
// sit on top of the operand stack (with the "me" object below the
// callee for method calls).  Native (C) functions are invoked directly
// here; Nasal functions get a fresh interpreter frame, which is
// returned so run() can switch to it.
284 static struct Frame* setupFuncall(struct Context* ctx, int nargs, int mcall)
289 DBG(printf("setupFuncall(nargs:%d, mcall:%d)\n", nargs, mcall);)
// Stack layout: [me?] callee arg0 .. argN-1  (callee at frame[0]).
291 frame = &ctx->opStack[ctx->opTop - nargs - 1];
292 if(!IS_FUNC(frame[0]))
293 ERR(ctx, "function/method call invoked on uncallable object");
// Remember where this call's stack region starts so it can be popped
// wholesale on return (and by naContinue() error recovery).
295 ctx->opFrame = ctx->opTop - (nargs + 1 + mcall);
297 // Just do native calls right here
298 if(PTR(PTR(frame[0]).func->code).obj->type == T_CCODE) {
299 naRef obj = mcall ? frame[-1] : naNil();
300 naCFunction fp = PTR(PTR(frame[0]).func->code).ccode->fptr;
301 naRef result = (*fp)(ctx, obj, nargs, frame + 1);
// Pop callee+args, then (on a truncated line) push the result and
// resume the caller's frame.
302 ctx->opTop = ctx->opFrame;
304 return &(ctx->fStack[ctx->fTop-1]);
307 if(ctx->fTop >= MAX_RECURSION) ERR(ctx, "call stack overflow");
309 // Note: assign nil first, otherwise the naNew() can cause a GC,
310 // which will now (after fTop++) see the *old* reference as a
312 f = &(ctx->fStack[ctx->fTop++]);
313 f->locals = f->func = naNil();
314 f->locals = naNewHash(ctx);
317 f->bp = ctx->opFrame;
// For method calls, bind the receiver as "me" in the new namespace.
320 naHash_set(f->locals, globals->meRef, frame[-1]);
322 setupArgs(ctx, f, frame+1, nargs);
324 ctx->opTop = f->bp; // Pop the stack last, to avoid GC lossage
325 DBG(printf("Entering frame %d with %d args\n", ctx->fTop-1, nargs);)
// Shared implementation of the == and != opcodes: compute naEqual()
// once and invert the result for OP_NEQ.  Returns a Nasal number (1/0).
329 static naRef evalEquality(int op, naRef ra, naRef rb)
331 int result = naEqual(ra, rb);
332 return naNum((op==OP_EQ) ? result : !result);
// The "~" concatenation operator: two vectors produce a new vector with
// the elements of both (inputs unchanged); otherwise both operands are
// stringified and joined into a new string.
335 static naRef evalCat(naContext ctx, naRef l, naRef r)
337 if(IS_VEC(l) && IS_VEC(r)) {
338 int i, ls = naVec_size(l), rs = naVec_size(r);
339 naRef v = naNewVector(ctx);
340 naVec_setsize(v, ls + rs);
341 for(i=0; i<ls; i+=1) naVec_set(v, i, naVec_get(l, i));
342 for(i=0; i<rs; i+=1) naVec_set(v, i+ls, naVec_get(r, i));
// String path (stringify() raises on non-scalars):
345 naRef a = stringify(ctx, l);
346 naRef b = stringify(ctx, r);
347 return naStr_concat(naNewString(ctx), a, b);
351 // When a code object comes out of the constant pool and shows up on
352 // the stack, it needs to be bound with the lexical context.
// Wraps raw code in a function object whose closure chain starts at
// frame f: its namespace is f's locals and its "next" link is f's own
// function (whose namespace is the next enclosing scope, and so on).
353 static naRef bindFunction(struct Context* ctx, struct Frame* f, naRef code)
355 naRef result = naNewFunc(ctx, code);
356 PTR(result).func->namespace = f->locals;
357 PTR(result).func->next = f->func;
// Walk the closure (enclosing-scope) chain of function c looking for
// sym; on a hit stores the value in *result and returns 1.  The loop
// header and terminating return are truncated in this excerpt.
361 static int getClosure(struct naFunc* c, naRef sym, naRef* result)
364 if(naHash_get(c->namespace, sym, result)) return 1;
365 c = PTR(c->next).func;
// Symbol lookup via the generic naHash_get() path: frame locals first,
// then the closure chain; an unresolved symbol is a runtime error.
// Used as the slow-path fallback by getLocal() below.
370 static naRef getLocal2(struct Context* ctx, struct Frame* f, naRef sym)
373 if(!naHash_get(f->locals, sym, &result))
374 if(!getClosure(PTR(f->func).func, sym, &result))
375 naRuntimeError(ctx, "undefined symbol: %s", naStr_data(sym));
// Fast-path symbol lookup for the LOCAL opcode: probe the interned-
// symbol tables (naHash_sym) of the frame locals and then each
// namespace up the closure chain, falling back to getLocal2() for
// entries created through the generic hash syntax.
379 static void getLocal(struct Context* ctx, struct Frame* f,
380 naRef* sym, naRef* out)
383 struct naStr* str = PTR(*sym).str;
// (early-return on a hit is truncated in this excerpt)
384 if(naHash_sym(PTR(f->locals).hash, str, out))
386 func = PTR(f->func).func;
387 while(func && PTR(func->namespace).hash) {
388 if(naHash_sym(PTR(func->namespace).hash, str, out))
390 func = PTR(func->next).func;
392 // Now do it again using the more general naHash_get(). This will
393 // only be necessary if something has created the value in the
394 // namespace using the more generic hash syntax
395 // (e.g. namespace["symbol"] and not namespace.symbol).
396 *out = getLocal2(ctx, f, *sym);
// Try to assign sym in the closure chain of func, innermost scope
// first.  naHash_tryset() only succeeds when the key already exists, so
// this never *creates* a binding.  Returns 1 on success, 0 when the
// chain is exhausted.
399 static int setClosure(naRef func, naRef sym, naRef val)
401 struct naFunc* c = PTR(func).func;
402 if(c == 0) { return 0; }
403 else if(naHash_tryset(c->namespace, sym, val)) { return 1; }
404 else { return setClosure(c->next, sym, val); }
// Assignment semantics for a bare symbol: overwrite an existing binding
// in the locals or any enclosing closure scope; only if no binding
// exists anywhere is a new one created in the frame's locals.
407 static naRef setSymbol(struct Frame* f, naRef sym, naRef val)
409 // Try the locals first, if not already there try the closures in
410 // order. Finally put it in the locals if nothing matched.
411 if(!naHash_tryset(f->locals, sym, val))
412 if(!setClosure(f->func, sym, val))
413 naHash_set(f->locals, sym, val);
417 // Funky API: returns null to indicate no member, an empty string to
418 // indicate success, or a non-empty error message. Works this way so
419 // we can generate smart error messages without throwing them with a
420 // longjmp -- this gets called under naMember_get() from C code.
// Recursive member lookup following the "parents" vector, depth-first;
// "count" bounds total recursion to catch cyclic parent graphs.
421 static const char* getMember_r(naRef obj, naRef field, naRef* out, int count)
426 if(--count < 0) return "too many parents";
427 if(!IS_HASH(obj)) return 0;
428 if(naHash_get(obj, field, out)) return "";
// No direct hit: consult the parents vector, if present and well-formed.
429 if(!naHash_get(obj, globals->parentsRef, &p)) return 0;
430 if(!IS_VEC(p)) return "object \"parents\" field not vector";
431 pv = PTR(p).vec->rec;
432 for(i=0; i<pv->size; i++) {
433 const char* err = getMember_r(pv->array[i], field, out, count);
434 if(err) return err; /* either an error or success */
439 static void getMember(struct Context* ctx, naRef obj, naRef fld,
440 naRef* result, int count)
442 const char* err = getMember_r(obj, fld, result, count);
443 if(!err) naRuntimeError(ctx, "No such member: %s", naStr_data(fld));
444 if(err[0]) naRuntimeError(ctx, err);
// Public C API wrapper over getMember_r() with a fixed parent-depth
// budget of 64.  Returns 1 only on success (non-NULL, empty-string
// result); both "no member" (NULL) and error messages map to 0.
447 int naMember_get(naRef obj, naRef field, naRef* out)
449 const char* err = getMember_r(obj, field, out, 64);
450 return err && !err[0];
453 // OP_EACH works like a vector get, except that it leaves the vector
454 // and index on the stack, increments the index after use, and
455 // pushes a nil if the index is beyond the end.
// useIndex selects forindex-style iteration (push the index itself)
// versus foreach-style (push the element).
456 static void evalEach(struct Context* ctx, int useIndex)
458 int idx = (int)(ctx->opStack[ctx->opTop-1].num);
459 naRef vec = ctx->opStack[ctx->opTop-2];
460 if(!IS_VEC(vec)) ERR(ctx, "foreach enumeration of non-vector");
// Exhausted (or empty/unallocated vector): the end-of-loop push on the
// truncated lines presumably uses the end sentinel -- confirm in full
// source.
461 if(!PTR(vec).vec->rec || idx >= PTR(vec).vec->rec->size) {
465 ctx->opStack[ctx->opTop-1].num = idx+1; // modify in place
466 PUSH(useIndex ? naNum(idx) : naVec_get(vec, idx));
// Helper macros for run(), all operating on the locals f (current
// frame), cd (current naCode) and ctx:
//   ARG()      -- fetch the next bytecode operand, advancing ip
//   CONSTARG() -- same, but dereferenced through the constant pool
//   POP()      -- pop and yield the top of the operand stack
//   STK(n)     -- n-th stack slot from the top (1-based)
//   FIXFRAME() -- re-derive f/cd after a call or return switches frames
469 #define ARG() cd->byteCode[f->ip++]
470 #define CONSTARG() cd->constants[ARG()]
471 #define POP() ctx->opStack[--ctx->opTop]
472 #define STK(n) (ctx->opStack[ctx->opTop-(n)])
473 #define FIXFRAME() f = &(ctx->fStack[ctx->fTop-1]); \
474 cd = PTR(PTR(f->func).func->code).code;
// The main bytecode dispatch loop: fetch an opcode from the current
// frame's code, execute it against the operand stack, repeat until the
// bottom frame returns.  Many case labels and the surrounding switch/
// loop scaffolding are truncated in this excerpt; comments below
// annotate only the visible fragments.
475 static naRef run(struct Context* ctx)
482 ctx->dieArg = naNil();
// Fetch/trace the next opcode.
488 op = cd->byteCode[f->ip++];
489 DBG(printf("Stack Depth: %d\n", ctx->opTop));
490 DBG(printOpDEBUG(f->ip-1, op));
// Stack-shuffling ops: duplicate top, duplicate top pair, exchange.
496 PUSH(ctx->opStack[ctx->opTop-1]);
499 PUSH(ctx->opStack[ctx->opTop-2]);
500 PUSH(ctx->opStack[ctx->opTop-2]);
503 a = STK(1); STK(1) = STK(2); STK(2) = a;
// Numeric binary ops: numify both operands (left at STK(2), right at
// STK(1)), overwrite the left slot with the result, pop once.
506 #define BINOP(expr) do { \
507 double l = IS_NUM(STK(2)) ? STK(2).num : numify(ctx, STK(2)); \
508 double r = IS_NUM(STK(1)) ? STK(1).num : numify(ctx, STK(1)); \
509 SETNUM(STK(2), expr); \
510 ctx->opTop--; } while(0)
512 case OP_PLUS: BINOP(l + r); break;
513 case OP_MINUS: BINOP(l - r); break;
514 case OP_MUL: BINOP(l * r); break;
515 case OP_DIV: BINOP(l / r); break;
516 case OP_LT: BINOP(l < r ? 1 : 0); break;
517 case OP_LTE: BINOP(l <= r ? 1 : 0); break;
518 case OP_GT: BINOP(l > r ? 1 : 0); break;
519 case OP_GTE: BINOP(l >= r ? 1 : 0); break;
// Equality uses naEqual() semantics rather than numeric coercion.
522 case OP_EQ: case OP_NEQ:
523 STK(2) = evalEquality(op, STK(2), STK(1));
// String/vector concatenation ("~").
527 STK(2) = evalCat(ctx, STK(2), STK(1));
// Unary negate and logical not.
531 STK(1) = naNum(-numify(ctx, STK(1)));
534 STK(1) = naNum(boolify(ctx, STK(1)) ? 0 : 1);
// Constants that are code objects get bound to the current lexical
// scope as they come off the constant pool.
538 if(IS_CODE(a)) a = bindFunction(ctx, f, a);
// Literal vector/hash construction and element append/insert.
554 PUSH(naNewVector(ctx));
557 naVec_append(STK(2), STK(1));
561 PUSH(naNewHash(ctx));
564 naHash_set(STK(3), STK(2), STK(1));
// Symbol load and stores (local/closure resolution in getLocal/
// setSymbol).
569 getLocal(ctx, f, &a, &b);
573 STK(2) = setSymbol(f, STK(2), STK(1));
577 naHash_set(f->locals, STK(2), STK(1));
578 STK(2) = STK(1); // FIXME: reverse order of arguments instead!
// Member access: obj.field get and set.
582 getMember(ctx, STK(1), CONSTARG(), &STK(1), 64);
585 if(!IS_HASH(STK(3))) ERR(ctx, "non-objects have no members");
586 naHash_set(STK(3), STK(2), STK(1));
587 STK(3) = STK(1); // FIXME: fix arg order instead
// Generic indexed insert/extract (hash, vector or string byte).
591 containerSet(ctx, STK(3), STK(2), STK(1));
592 STK(3) = STK(1); // FIXME: codegen order again...
596 STK(2) = containerGet(ctx, STK(2), STK(1));
// Jumps: the operand is an absolute instruction index.
600 // Identical to JMP, except for locking
602 f->ip = cd->byteCode[f->ip];
603 DBG(printf(" [Jump to: %d]\n", f->ip);)
606 f->ip = cd->byteCode[f->ip];
607 DBG(printf(" [Jump to: %d]\n", f->ip);)
// Conditional jump fragments (jump-if-nil-pop, jump-if-true/false,
// jump-if-pop-false); non-taken paths are truncated in this excerpt.
612 ctx->opTop--; // Pops **ONLY** if it's nil!
614 DBG(printf(" [Jump to: %d]\n", f->ip);)
619 if(boolify(ctx, STK(1))) {
621 DBG(printf(" [Jump to: %d]\n", f->ip);)
626 if(!boolify(ctx, STK(1))) {
628 DBG(printf(" [Jump to: %d]\n", f->ip);)
633 if(!boolify(ctx, POP())) {
635 DBG(printf(" [Jump to: %d]\n", f->ip);)
// Function call (0) and method call (1): switch frames and re-derive cd.
639 f = setupFuncall(ctx, ARG(), 0);
640 cd = PTR(PTR(f->func).func->code).code;
643 f = setupFuncall(ctx, ARG(), 1);
644 cd = PTR(PTR(f->func).func->code).code;
// Return: clear die state, drop any finished child context, pop the
// frame (returning the value when the bottom frame unwinds), and reset
// the operand stack to the caller's base + the pushed result.
648 ctx->dieArg = naNil();
649 if(ctx->callChild) naFreeContext(ctx->callChild);
650 if(--ctx->fTop <= 0) return a;
651 ctx->opTop = f->bp + 1; // restore the correct opstack frame!
// Mark stack: checkpoints of opTop used by break/continue unwinding.
661 case OP_MARK: // save stack state (e.g. "setjmp")
662 if(ctx->markTop >= MAX_MARK_DEPTH)
663 ERR(ctx, "mark stack overflow");
664 ctx->markStack[ctx->markTop++] = ctx->opTop;
666 case OP_UNMARK: // pop stack state set by mark
669 case OP_BREAK: // restore stack state (FOLLOW WITH JMP!)
670 ctx->opTop = ctx->markStack[ctx->markTop-1];
672 case OP_BREAK2: // same, but also pop the mark stack
673 ctx->opTop = ctx->markStack[--ctx->markTop];
676 ERR(ctx, "BUG: bad opcode");
// Per-instruction housekeeping at the bottom of the loop.
678 ctx->ntemps = 0; // reset GC temp vector
679 DBG(printStackDEBUG(ctx);)
681 return naNil(); // unreachable
// Permanently protect obj from garbage collection by appending it to
// the global "save" vector (there is no corresponding un-save here).
688 void naSave(struct Context* ctx, naRef obj)
690 naVec_append(globals->save, obj);
// Total Nasal frame count for a context chain: this context's frames
// plus, recursively, those of any active child context.  NULL-safe.
693 int naStackDepth(struct Context* ctx)
695 return ctx ? ctx->fTop + naStackDepth(ctx->callChild): 0;
// Map a logical frame number fn (0 = innermost across the whole
// context chain) to a concrete index into some context's fStack,
// recursing into the deepest child first.  *out receives the owning
// context; the assignment to *out is on a line truncated from this
// excerpt.
698 static int findFrame(naContext ctx, naContext* out, int fn)
700 int sd = naStackDepth(ctx->callChild);
701 if(fn < sd) return findFrame(ctx->callChild, out, fn);
703 return ctx->fTop - 1 - (fn - sd);
// Source line number for a given logical stack frame, for error
// reporting.  lineIps is a flat array of (ip, line) pairs sorted by ip;
// scanning backwards from the last pair finds the entry covering f->ip.
706 int naGetLine(struct Context* ctx, int frame)
709 frame = findFrame(ctx, &ctx, frame);
710 f = &ctx->fStack[frame];
711 if(IS_FUNC(f->func) && IS_CODE(PTR(f->func).func->code)) {
712 struct naCode* c = PTR(PTR(f->func).func->code).code;
// Start at the final (ip, line) pair; step back two shorts at a time
// (the loop body and the return of p[1] are truncated in this excerpt).
713 unsigned short* p = c->lineIps + c->nLines - 2;
714 while(p >= c->lineIps && p[0] > f->ip)
// Source file name (as a Nasal string ref) for a given logical stack
// frame: frame -> function -> code object -> srcFile.
721 naRef naGetSourceFile(struct Context* ctx, int frame)
724 frame = findFrame(ctx, &ctx, frame);
725 f = ctx->fStack[frame].func;
726 f = PTR(f).func->code;
727 return PTR(f).code->srcFile;
// Most recent error text for a context: a string die() argument takes
// precedence over the formatted runtime-error buffer.  Returns NULL
// when no error is recorded.
730 char* naGetError(struct Context* ctx)
732 if(IS_STR(ctx->dieArg))
733 return (char*)PTR(ctx->dieArg).str->data;
734 return ctx->error[0] ? ctx->error : 0;
// Public API: wrap a code object in a function bound to an explicit
// closure namespace, with no further enclosing scope (next = nil).
// Compare naBindToContext(), which binds to the current frame instead.
737 naRef naBindFunction(naContext ctx, naRef code, naRef closure)
739 naRef func = naNewFunc(ctx, code);
740 PTR(func).func->namespace = closure;
741 PTR(func).func->next = naNil();
// Public API: wrap a code object in a function lexically bound to the
// context's current (innermost) frame -- same linkage as the internal
// bindFunction() helper.
745 naRef naBindToContext(naContext ctx, naRef code)
747 naRef func = naNewFunc(ctx, code);
748 struct Frame* f = &ctx->fStack[ctx->fTop-1];
749 PTR(func).func->namespace = f->locals;
750 PTR(func).func->next = f->func;
// Public entry point: invoke a Nasal (or native) function from C with
// argc args, an optional "me" object and an optional locals hash.
// Takes the module lock for top-level (parentless) contexts, shields
// the C-side arguments from the GC, installs the setjmp error handler,
// then either calls a native function directly or sets up frame 0 and
// runs the interpreter.
754 naRef naCall(naContext ctx, naRef func, int argc, naRef* args,
755 naRef obj, naRef locals)
759 if(!ctx->callParent) naModLock();
761 // We might have to allocate objects, which can call the GC. But
762 // the call isn't on the Nasal stack yet, so the GC won't find our
763 // C-space arguments.
764 naTempSave(ctx, func);
765 for(i=0; i<argc; i++)
766 naTempSave(ctx, args[i]);
767 naTempSave(ctx, obj);
768 naTempSave(ctx, locals);
770 // naRuntimeError() calls end up here:
771 if(setjmp(ctx->jumpHandle)) {
// NOTE(review): naModUnlock is called with ctx here but with no
// argument on line 779 below -- one of the two spellings is presumably
// wrong; confirm the prototype in the full source.
772 if(!ctx->callParent) naModUnlock(ctx);
// Native functions short-circuit: call the C pointer and return.
776 if(IS_CCODE(PTR(func).func->code)) {
777 naCFunction fp = PTR(PTR(func).func->code).ccode->fptr;
778 result = (*fp)(ctx, obj, argc, args);
779 if(!ctx->callParent) naModUnlock();
// Interpreted path: synthesize a locals hash if none was supplied and
// rebind the function so that hash becomes its namespace.
784 locals = naNewHash(ctx);
786 func = naNewFunc(ctx, func);
787 PTR(func).func->namespace = locals;
// Bind the receiver as "me" when an object was passed.
790 naHash_set(locals, globals->meRef, obj);
// Reset the operand/mark stacks and build frame 0 by hand.
792 ctx->opTop = ctx->markTop = 0;
794 ctx->fStack[0].func = func;
795 ctx->fStack[0].locals = locals;
796 ctx->fStack[0].ip = 0;
797 ctx->fStack[0].bp = ctx->opTop;
799 if(args) setupArgs(ctx, ctx->fStack, args, argc);
// (the run() invocation and result handling are truncated here)
802 if(!ctx->callParent) naModUnlock(ctx);
806 naRef naContinue(naContext ctx)
809 if(!ctx->callParent) naModLock();
811 ctx->dieArg = naNil();
814 if(setjmp(ctx->jumpHandle)) {
815 if(!ctx->callParent) naModUnlock(ctx);
816 else naRethrowError(ctx);
820 // Wipe off the old function arguments, and push the expected
821 // result (either the result of our subcontext, or a synthesized
822 // nil if the thrown error was from an extension function or
823 // in-script die() call) before re-running the code from the
824 // instruction following the error.
825 ctx->opTop = ctx->opFrame;
826 PUSH(ctx->callChild ? naContinue(ctx->callChild) : naNil());
828 // Getting here means the child completed successfully. But
829 // because its original C stack was longjmp'd out of existence,
830 // there is no one left to free the context, so we have to do it.
831 // This is fragile, but unfortunately required.
832 if(ctx->callChild) naFreeContext(ctx->callChild);
835 if(!ctx->callParent) naModUnlock();