Bug 642327 - Add OOM regression checking to the shell via |make check|. (r=nnethercote)

In a known test file, there are 129 OOM errors. This adds a script which checks
the number of unhandled OOMs, and fails if it is not exactly 129. If we
accidentally increase the number of unhandled OOMs, this should catch it. If we
reduce the number of unhandled OOMs (which is the plan), then adjust this
number in Makefile.in.
Paul Biggar 2011-01-17 15:15:00 -08:00
parent 1cc6a2b21f
commit ba53bef661
6 changed files with 745 additions and 0 deletions
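For orientation, the check boils down to the loop sketched below; the full script that follows adds option parsing, blacklisting, logging, and valgrind-based triage. This is only a condensed sketch under the same assumptions the script makes: it runs from a debug objdir with the patched shell at shell/js (the -A and -O flags exist only in DEBUG builds), and the script's -e preamble setting platform and libdir is omitted for brevity.

# Condensed sketch of the regression check (not the actual script).
import re
import subprocess

def run(cmd):
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return out, err, p.returncode

test = "../jit-test/tests/arguments/args-createontrace.js"
base = "shell/js -m -j -p -f ../jit-test/lib/prolog.js -f " + test

# -O reports the total number of allocations the test performs.
out, _, _ = run(base + " -O")
total = int(re.search(r"OOM max count: (\d+)", out).group(1))

# -A i makes every allocation from the i-th onwards fail. Exit code 5 plus an
# "out of memory" message is a handled OOM; anything else is an unhandled one.
failures = 0
for i in range(20, total):  # the first ~20 allocations don't recover well
    _, err, code = run(base + (" -A %d" % i))
    if not (code == 5 and "out of memory" in err):
        failures += 1

print failures  # |make check| expects exactly 129 for this file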

config/find_OOM_errors.py (new executable file, 335 lines)

@@ -0,0 +1,335 @@
#!/usr/bin/env python
usage = """%prog: A test for OOM conditions in the shell.
%prog finds segfaults and other errors caused by incorrect handling of
allocation during OOM (out-of-memory) conditions.
"""
help = """Check for regressions only. This runs a set of files with a known
number of OOM errors (specified by REGRESSION_COUNT), and exits with a non-zero
result if more or fewer errors are found. See js/src/Makefile.in for invocation.
"""
import hashlib
import re
import shlex
import subprocess
import sys
import threading
import time
from optparse import OptionParser
#####################################################################
# Utility functions
#####################################################################
def run(args, stdin=None):
class ThreadWorker(threading.Thread):
def __init__(self, pipe):
super(ThreadWorker, self).__init__()
self.all = ""
self.pipe = pipe
self.setDaemon(True)
def run(self):
while True:
line = self.pipe.readline()
if line == '': break
else:
self.all += line
try:
if type(args) == str:
args = shlex.split(args)
args = [str(a) for a in args] # convert to strs
stdin_pipe = subprocess.PIPE if stdin else None
proc = subprocess.Popen(args, stdin=stdin_pipe, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if stdin_pipe:
proc.stdin.write(stdin)
proc.stdin.close()
stdout_worker = ThreadWorker(proc.stdout)
stderr_worker = ThreadWorker(proc.stderr)
stdout_worker.start()
stderr_worker.start()
proc.wait()
stdout_worker.join()
stderr_worker.join()
except KeyboardInterrupt, e:
sys.exit(-1)
stdout, stderr = stdout_worker.all, stderr_worker.all
result = (stdout, stderr, proc.returncode)
return result
def get_js_files():
(out, err, exit) = run('find ../jit-test/tests -name "*.js"')
if (err, exit) != ("", 0):
sys.exit("Wrong directory, run from an objdir")
return out.split()
#####################################################################
# Blacklisting
#####################################################################
def in_blacklist(sig):
return sig in blacklist
def add_to_blacklist(sig):
blacklist[sig] = blacklist.get(sig, 0)
blacklist[sig] += 1
# How often does a particular line occur?
def count_lines():
"""Keep track of the amount of times individual lines occur, in order to
prioritize the errors which occur most frequently."""
counts = {}
for string,count in blacklist.items():
for line in string.split("\n"):
counts[line] = counts.get(line, 0) + count
lines = []
for k,v in counts.items():
lines.append("%6d: %s" % (v,k))
lines.sort()
countlog = file("../OOM_count_log", "w")
countlog.write("\n".join(lines))
countlog.flush()
countlog.close()
#####################################################################
# Output cleaning
#####################################################################
def clean_voutput(err):
# Skip what we can't reproduce
err = re.sub(r"^--\d+-- run: /usr/bin/dsymutil \"shell/js\"$", "", err, flags=re.MULTILINE)
err = re.sub(r"^==\d+==", "", err, flags=re.MULTILINE)
err = re.sub(r"^\*\*\d+\*\*", "", err, flags=re.MULTILINE)
err = re.sub(r"^\s+by 0x[0-9A-Fa-f]+: ", "by: ", err, flags=re.MULTILINE)
err = re.sub(r"^\s+at 0x[0-9A-Fa-f]+: ", "at: ", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Address 0x)[0-9A-Fa-f]+( is not stack'd)", r"\1\2", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Invalid write of size )\d+", r"\1x", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Invalid read of size )\d+", r"\1x", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Address 0x)[0-9A-Fa-f]+( is )\d+( bytes inside a block of size )[0-9,]+( free'd)", r"\1\2\3\4", err, flags=re.MULTILINE)
# Skip the repeating bit due to the segfault
lines = []
for l in err.split('\n'):
if l == " Process terminating with default action of signal 11 (SIGSEGV)":
break
lines.append(l)
err = '\n'.join(lines)
return err
def remove_failed_allocation_backtraces(err):
lines = []
add = True
for l in err.split('\n'):
# Set start and end conditions for including text
if l == " The site of the failed allocation is:":
add = False
elif l[:4] not in ['by: ', 'at: ']:
add = True
if add:
lines.append(l)
err = '\n'.join(lines)
return err
def clean_output(err):
err = re.sub(r"^js\(\d+,0x[0-9a-f]+\) malloc: \*\*\* error for object 0x[0-9a-f]+: pointer being freed was not allocated\n\*\*\* set a breakppoint in malloc_error_break to debug\n$", "pointer being freed was not allocated", err, flags=re.MULTILINE)
return err
#####################################################################
# Consts, etc
#####################################################################
command_template = 'shell/js' \
+ ' -m -j -p' \
+ ' -e "const platform=\'darwin\'; const libdir=\'../jit-test/lib/\';"' \
+ ' -f ../jit-test/lib/prolog.js' \
+ ' -f %s'
# Blacklists are things we don't want to see in our logs again (though we do
# want to count them when they happen). Whitelists we do want to see in our
# logs again, principally because the information we have isn't enough.
blacklist = {}
add_to_blacklist(r"('', '', 1)") # 1 means OOM if the shell hasn't launched yet.
add_to_blacklist(r"('', 'out of memory\n', 1)")
whitelist = set()
whitelist.add(r"('', 'out of memory\n', -11)") # -11 means OOM
whitelist.add(r"('', 'out of memory\nout of memory\n', -11)")
#####################################################################
# Program
#####################################################################
# Options
parser = OptionParser(usage=usage)
parser.add_option("-r", "--regression", action="store", metavar="REGRESSION_COUNT", help=help,
type="int", dest="regression", default=0) # TODO: support a value of zero, eventually
(OPTIONS, args) = parser.parse_args()
if OPTIONS.regression:
# TODO: This should be expanded as we get a better hang of the OOM problems.
# For now, we'll just check that the number of OOMs in one short file does not
# increase.
files = ["../jit-test/tests/arguments/args-createontrace.js"]
else:
files = get_js_files()
# Use a command-line arg to reduce the set of files
if len (args):
files = [f for f in files if f.find(args[0]) != -1]
if OPTIONS.regression:
# Regression mode is automated for tinderbox, so log to stdout instead of a file.
log = sys.stdout
else:
log = file("../OOM_log", "w")
num_failures = 0
for f in files:
# Run it once to establish boundaries
command = (command_template + ' -O') % (f)
out, err, exit = run(command)
max = re.match(".*OOM max count: (\d+).*", out, flags=re.DOTALL).groups()[0]
max = int(max)
# OOMs don't recover well for the first 20 allocations or so.
# TODO: revisit this.
for i in range(20, max):
if not OPTIONS.regression:
print "Testing allocation %d/%d in %s" % (i,max,f)
command = (command_template + ' -A %d') % (f, i)
out, err, exit = run(command)
# Success (5 is SM's exit code for controlled errors)
if exit == 5 and err.find("out of memory") != -1:
continue
# Failure
else:
if OPTIONS.regression:
# Just count them
num_failures += 1
continue
#########################################################################
# The regression test ends above. The rest of this is for running the
# script manually.
#########################################################################
problem = str((out, err, exit))
if in_blacklist(problem) and problem not in whitelist:
add_to_blacklist(problem)
continue
add_to_blacklist(problem)
# Get valgrind output for a good stack trace
vcommand = "valgrind --dsymutil=yes -q --log-file=OOM_valgrind_log_file " + command
run(vcommand)
vout = file("OOM_valgrind_log_file").read()
vout = clean_voutput(vout)
sans_alloc_sites = remove_failed_allocation_backtraces(vout)
# Don't print duplicate information
if in_blacklist(sans_alloc_sites):
add_to_blacklist(sans_alloc_sites)
continue
add_to_blacklist(sans_alloc_sites)
log.write ("\n")
log.write ("\n")
log.write ("=========================================================================")
log.write ("\n")
log.write ("An allocation failure at\n\tallocation %d/%d in %s\n\tcauses problems (detected using bug 624094)" % (i, max, f))
log.write ("\n")
log.write ("\n")
log.write ("Command (from obj directory, using patch from bug 624094):\n " + command)
log.write ("\n")
log.write ("\n")
log.write ("stdout, stderr, exitcode:\n " + problem)
log.write ("\n")
log.write ("\n")
double_free = err.find("pointer being freed was not allocated") != -1
oom_detected = err.find("out of memory") != -1
multiple_oom_detected = err.find("out of memory\nout of memory") != -1
segfault_detected = exit == -11
log.write ("Diagnosis: ")
log.write ("\n")
if multiple_oom_detected:
log.write (" - Multiple OOMs reported")
log.write ("\n")
if segfault_detected:
log.write (" - segfault")
log.write ("\n")
if not oom_detected:
log.write (" - No OOM checking")
log.write ("\n")
if double_free:
log.write (" - Double free")
log.write ("\n")
log.write ("\n")
log.write ("Valgrind info:\n" + vout)
log.write ("\n")
log.write ("\n")
log.flush()
if not OPTIONS.regression:
count_lines()
# Do the actual regression check
if OPTIONS.regression:
expected_num_failures = OPTIONS.regression
if num_failures != expected_num_failures:
print "TEST-UNEXPECTED-FAIL |",
if num_failures > expected_num_failures:
print "More out-of-memory errors were found (%s) than expected (%d). This probably means an allocation site has been added without a NULL-check. If this is unavoidable, you can account for it by updating Makefile.in." % (num_failures, expected_num_failures),
else:
print "Congratulations, you have removed %d out-of-memory error(s) (%d remain)! Please account for it by updating Makefile.in." % (expected_num_failures - num_failures, num_failures),
sys.exit(-1)
else:
print 'TEST-PASS | find_OOM_errors | Found the expected number of OOM errors (%d)' % (expected_num_failures)

js/src/Makefile.in

@@ -586,6 +586,16 @@ check::
$(srcdir)/config/find_vanilla_new_calls $(LIBRARY)
endif
# Help ensure that the number of OOM errors in SpiderMonkey doesn't increase.
# If the number of OOM errors changes, update the number below. We intend this
# number to go down over time, by fixing OOMs.
ifdef DEBUG
check-ooms:
$(wildcard $(RUN_TEST_PROGRAM)) $(PYTHON) -u $(srcdir)/config/find_OOM_errors.py --regression 129
check:: check-ooms
endif
ifndef WINCE
JITFLAGS = ,m,j,mj,mjp,am,amj,amjp,amd
check::

js/src/config/find_OOM_errors.py (new executable file, 335 lines)

@@ -0,0 +1,335 @@
#!/usr/bin/env python
usage = """%prog: A test for OOM conditions in the shell.
%prog finds segfaults and other errors caused by incorrect handling of
allocation during OOM (out-of-memory) conditions.
"""
help = """Check for regressions only. This runs a set of files with a known
number of OOM errors (specified by REGRESSION_COUNT), and exits with a non-zero
result if more or fewer errors are found. See js/src/Makefile.in for invocation.
"""
import hashlib
import re
import shlex
import subprocess
import sys
import threading
import time
from optparse import OptionParser
#####################################################################
# Utility functions
#####################################################################
def run(args, stdin=None):
class ThreadWorker(threading.Thread):
def __init__(self, pipe):
super(ThreadWorker, self).__init__()
self.all = ""
self.pipe = pipe
self.setDaemon(True)
def run(self):
while True:
line = self.pipe.readline()
if line == '': break
else:
self.all += line
try:
if type(args) == str:
args = shlex.split(args)
args = [str(a) for a in args] # convert to strs
stdin_pipe = subprocess.PIPE if stdin else None
proc = subprocess.Popen(args, stdin=stdin_pipe, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if stdin_pipe:
proc.stdin.write(stdin)
proc.stdin.close()
stdout_worker = ThreadWorker(proc.stdout)
stderr_worker = ThreadWorker(proc.stderr)
stdout_worker.start()
stderr_worker.start()
proc.wait()
stdout_worker.join()
stderr_worker.join()
except KeyboardInterrupt, e:
sys.exit(-1)
stdout, stderr = stdout_worker.all, stderr_worker.all
result = (stdout, stderr, proc.returncode)
return result
def get_js_files():
(out, err, exit) = run('find ../jit-test/tests -name "*.js"')
if (err, exit) != ("", 0):
sys.exit("Wrong directory, run from an objdir")
return out.split()
#####################################################################
# Blacklisting
#####################################################################
def in_blacklist(sig):
return sig in blacklist
def add_to_blacklist(sig):
blacklist[sig] = blacklist.get(sig, 0)
blacklist[sig] += 1
# How often does a particular line occur?
def count_lines():
"""Keep track of the amount of times individual lines occur, in order to
prioritize the errors which occur most frequently."""
counts = {}
for string,count in blacklist.items():
for line in string.split("\n"):
counts[line] = counts.get(line, 0) + count
lines = []
for k,v in counts.items():
lines.append("%6d: %s" % (v,k))
lines.sort()
countlog = file("../OOM_count_log", "w")
countlog.write("\n".join(lines))
countlog.flush()
countlog.close()
#####################################################################
# Output cleaning
#####################################################################
def clean_voutput(err):
# Skip what we can't reproduce
err = re.sub(r"^--\d+-- run: /usr/bin/dsymutil \"shell/js\"$", "", err, flags=re.MULTILINE)
err = re.sub(r"^==\d+==", "", err, flags=re.MULTILINE)
err = re.sub(r"^\*\*\d+\*\*", "", err, flags=re.MULTILINE)
err = re.sub(r"^\s+by 0x[0-9A-Fa-f]+: ", "by: ", err, flags=re.MULTILINE)
err = re.sub(r"^\s+at 0x[0-9A-Fa-f]+: ", "at: ", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Address 0x)[0-9A-Fa-f]+( is not stack'd)", r"\1\2", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Invalid write of size )\d+", r"\1x", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Invalid read of size )\d+", r"\1x", err, flags=re.MULTILINE)
err = re.sub(r"(^\s+Address 0x)[0-9A-Fa-f]+( is )\d+( bytes inside a block of size )[0-9,]+( free'd)", r"\1\2\3\4", err, flags=re.MULTILINE)
# Skip the repeating bit due to the segfault
lines = []
for l in err.split('\n'):
if l == " Process terminating with default action of signal 11 (SIGSEGV)":
break
lines.append(l)
err = '\n'.join(lines)
return err
def remove_failed_allocation_backtraces(err):
lines = []
add = True
for l in err.split('\n'):
# Set start and end conditions for including text
if l == " The site of the failed allocation is:":
add = False
elif l[:4] not in ['by: ', 'at: ']:
add = True
if add:
lines.append(l)
err = '\n'.join(lines)
return err
def clean_output(err):
err = re.sub(r"^js\(\d+,0x[0-9a-f]+\) malloc: \*\*\* error for object 0x[0-9a-f]+: pointer being freed was not allocated\n\*\*\* set a breakppoint in malloc_error_break to debug\n$", "pointer being freed was not allocated", err, flags=re.MULTILINE)
return err
#####################################################################
# Consts, etc
#####################################################################
command_template = 'shell/js' \
+ ' -m -j -p' \
+ ' -e "const platform=\'darwin\'; const libdir=\'../jit-test/lib/\';"' \
+ ' -f ../jit-test/lib/prolog.js' \
+ ' -f %s'
# Blacklists are things we don't want to see in our logs again (though we do
# want to count them when they happen). Whitelists we do want to see in our
# logs again, principally because the information we have isn't enough.
blacklist = {}
add_to_blacklist(r"('', '', 1)") # 1 means OOM if the shell hasn't launched yet.
add_to_blacklist(r"('', 'out of memory\n', 1)")
whitelist = set()
whitelist.add(r"('', 'out of memory\n', -11)") # -11 means OOM
whitelist.add(r"('', 'out of memory\nout of memory\n', -11)")
#####################################################################
# Program
#####################################################################
# Options
parser = OptionParser(usage=usage)
parser.add_option("-r", "--regression", action="store", metavar="REGRESSION_COUNT", help=help,
type="int", dest="regression", default=0) # TODO: support a value of zero, eventually
(OPTIONS, args) = parser.parse_args()
if OPTIONS.regression:
# TODO: This should be expanded as we get a better hang of the OOM problems.
# For now, we'll just check that the number of OOMs in one short file does not
# increase.
files = ["../jit-test/tests/arguments/args-createontrace.js"]
else:
files = get_js_files()
# Use a command-line arg to reduce the set of files
if len (args):
files = [f for f in files if f.find(args[0]) != -1]
if OPTIONS.regression:
# Regression mode is automated for tinderbox, so log to stdout instead of a file.
log = sys.stdout
else:
log = file("../OOM_log", "w")
num_failures = 0
for f in files:
# Run it once to establish boundaries
command = (command_template + ' -O') % (f)
out, err, exit = run(command)
max = re.match(".*OOM max count: (\d+).*", out, flags=re.DOTALL).groups()[0]
max = int(max)
# OOMs don't recover well for the first 20 allocations or so.
# TODO: revisit this.
for i in range(20, max):
if not OPTIONS.regression:
print "Testing allocation %d/%d in %s" % (i,max,f)
command = (command_template + ' -A %d') % (f, i)
out, err, exit = run(command)
# Success (5 is SM's exit code for controlled errors)
if exit == 5 and err.find("out of memory") != -1:
continue
# Failure
else:
if OPTIONS.regression:
# Just count them
num_failures += 1
continue
#########################################################################
# The regression test ends above. The rest of this is for running the
# script manually.
#########################################################################
problem = str((out, err, exit))
if in_blacklist(problem) and problem not in whitelist:
add_to_blacklist(problem)
continue
add_to_blacklist(problem)
# Get valgrind output for a good stack trace
vcommand = "valgrind --dsymutil=yes -q --log-file=OOM_valgrind_log_file " + command
run(vcommand)
vout = file("OOM_valgrind_log_file").read()
vout = clean_voutput(vout)
sans_alloc_sites = remove_failed_allocation_backtraces(vout)
# Don't print duplicate information
if in_blacklist(sans_alloc_sites):
add_to_blacklist(sans_alloc_sites)
continue
add_to_blacklist(sans_alloc_sites)
log.write ("\n")
log.write ("\n")
log.write ("=========================================================================")
log.write ("\n")
log.write ("An allocation failure at\n\tallocation %d/%d in %s\n\tcauses problems (detected using bug 624094)" % (i, max, f))
log.write ("\n")
log.write ("\n")
log.write ("Command (from obj directory, using patch from bug 624094):\n " + command)
log.write ("\n")
log.write ("\n")
log.write ("stdout, stderr, exitcode:\n " + problem)
log.write ("\n")
log.write ("\n")
double_free = err.find("pointer being freed was not allocated") != -1
oom_detected = err.find("out of memory") != -1
multiple_oom_detected = err.find("out of memory\nout of memory") != -1
segfault_detected = exit == -11
log.write ("Diagnosis: ")
log.write ("\n")
if multiple_oom_detected:
log.write (" - Multiple OOMs reported")
log.write ("\n")
if segfault_detected:
log.write (" - segfault")
log.write ("\n")
if not oom_detected:
log.write (" - No OOM checking")
log.write ("\n")
if double_free:
log.write (" - Double free")
log.write ("\n")
log.write ("\n")
log.write ("Valgrind info:\n" + vout)
log.write ("\n")
log.write ("\n")
log.flush()
if not OPTIONS.regression:
count_lines()
# Do the actual regression check
if OPTIONS.regression:
expected_num_failures = OPTIONS.regression
if num_failures != expected_num_failures:
print "TEST-UNEXPECTED-FAIL |",
if num_failures > expected_num_failures:
print "More out-of-memory errors were found (%s) than expected (%d). This probably means an allocation site has been added without a NULL-check. If this is unavoidable, you can account for it by updating Makefile.in." % (num_failures, expected_num_failures),
else:
print "Congratulations, you have removed %d out-of-memory error(s) (%d remain)! Please account for it by updating Makefile.in." % (expected_num_failures - num_failures, num_failures),
sys.exit(-1)
else:
print 'TEST-PASS | find_OOM_errors | Found the expected number of OOM errors (%d)' % (expected_num_failures)

js/src/jsutil.cpp

@@ -55,6 +55,12 @@
using namespace js;
#ifdef DEBUG
/* For JS_OOM_POSSIBLY_FAIL in jsutil.h. */
JS_PUBLIC_DATA(JSUint32) OOM_maxAllocations = UINT32_MAX;
JS_PUBLIC_DATA(JSUint32) OOM_counter = 0;
#endif
/*
* Checks the assumption that JS_FUNC_TO_DATA_PTR and JS_DATA_TO_FUNC_PTR
* macros use to implement casts between function and data pointers.

js/src/jsutil.h

@@ -209,15 +209,38 @@ JS_DumpBacktrace(JSCallsite *trace);
#else
#ifdef DEBUG
/*
* In order to test OOM conditions, when the shell command-line option
* |-A NUM| is passed, we fail continuously after the NUM'th allocation.
*/
extern JS_PUBLIC_DATA(JSUint32) OOM_maxAllocations; /* set from shell/js.cpp */
extern JS_PUBLIC_DATA(JSUint32) OOM_counter; /* data race, who cares. */
#define JS_OOM_POSSIBLY_FAIL() \
do \
{ \
if (OOM_counter++ >= OOM_maxAllocations) { \
return NULL; \
} \
} while (0)
#else
#define JS_OOM_POSSIBLY_FAIL() do {} while(0)
#endif
static JS_INLINE void* js_malloc(size_t bytes) {
JS_OOM_POSSIBLY_FAIL();
return malloc(bytes);
}
static JS_INLINE void* js_calloc(size_t bytes) {
JS_OOM_POSSIBLY_FAIL();
return calloc(bytes, 1);
}
static JS_INLINE void* js_realloc(void* p, size_t bytes) {
JS_OOM_POSSIBLY_FAIL();
return realloc(p, bytes);
}

js/src/shell/js.cpp

@@ -201,6 +201,8 @@ js::workers::ThreadPool *gWorkerThreadPool = NULL;
static JSBool reportWarnings = JS_TRUE;
static JSBool compileOnly = JS_FALSE;
static JSBool OOM_printAllocationCount = JS_FALSE;
typedef enum JSShellErrNum {
#define MSG_DEF(name, number, count, exception, format) \
name = number,
@@ -613,6 +615,11 @@ usage(void)
" Note: this option switches to non-interactive mode.\n"
" -S <size> Set the maximum size of the stack to <size> bytes\n"
" Default is %u.\n", DEFAULT_MAX_STACK_SIZE);
#ifdef DEBUG
fprintf(gErrFile, " -A <max> After <max> memory allocations, act like we're OOM.\n");
fprintf(gErrFile, " -O <max> At exit, print the number of memory allocations in \n"
" the program.\n");
#endif
#ifdef JS_THREADSAFE
fprintf(gErrFile, " -g <n> Sleep for <n> seconds before starting (default: 0)\n");
#endif
@@ -718,6 +725,7 @@ ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
case 'T':
#endif
case 'g':
case 'A':
++i;
break;
default:;
@@ -769,7 +777,16 @@ ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
JS_SetGCZeal(cx, !!(atoi(argv[i])));
break;
#endif
#ifdef DEBUG
case 'A':
/* Handled at the very start of main(). */
++i; /* skip the argument */
break;
case 'O':
OOM_printAllocationCount = JS_TRUE;
break;
#endif
case 'w':
reportWarnings = JS_TRUE;
break;
@@ -5735,6 +5752,20 @@ MaybeOverrideOutFileFromEnv(const char* const envVar,
int
main(int argc, char **argv, char **envp)
{
#ifdef DEBUG
/* Check the allocation count first, or else we'll miss allocations. */
for (int i = 0; i < argc; i++)
{
if (strlen(argv[i]) == 2 && argv[i][0] == '-' && argv[i][1] == 'A')
{
if (++i == argc)
return usage();
OOM_maxAllocations = atoi(argv[i]);
break;
}
}
#endif
int stackDummy;
JSRuntime *rt;
JSContext *cx;
@@ -5809,6 +5840,11 @@ main(int argc, char **argv, char **envp)
result = Shell(cx, argc, argv, envp);
#ifdef DEBUG
if (OOM_printAllocationCount)
printf("OOM max count: %u\n", OOM_counter);
#endif
DestroyContext(cx, true);
KillWatchdog();
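
The -A and -O flags added above are compiled only into DEBUG builds and are what find_OOM_errors.py drives. A minimal manual probe of them, under the same objdir assumptions as the script, might look like the sketch below; the -e test program is an arbitrary placeholder.

# Manual probe of the new shell flags (DEBUG builds only, run from an objdir).
import re
import subprocess

def run(args):
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return out, err, p.returncode

program = "var a = [1, 2, 3].map(function (x) { return x * 2; });"

# -O: at exit, print how many allocations the run performed in total.
out, _, _ = run(["shell/js", "-O", "-e", program])
total = int(re.search(r"OOM max count: (\d+)", out).group(1))
print "allocations:", total

# -A n: the n-th and every later allocation returns NULL, simulating OOM.
_, err, code = run(["shell/js", "-A", str(total / 2), "-e", program])
print "exit code:", code                        # 5 is the controlled-error exit
print "reported OOM:", "out of memory" in err   # unhandled sites crash instead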