diff --git a/docs/CommandGuide/lit.pod b/docs/CommandGuide/lit.pod
new file mode 100644
index 00000000000..a818302c242
--- /dev/null
+++ b/docs/CommandGuide/lit.pod
@@ -0,0 +1,222 @@
+=pod
+
+=head1 NAME
+
+lit - LLVM Integrated Tester
+
+=head1 SYNOPSIS
+
+B<lit> [I<options>] [I<tests>]
+
+=head1 DESCRIPTION
+
+B<lit> is a portable tool for executing LLVM and Clang style test suites,
+summarizing their results, and providing indication of failures. B<lit> is
+designed to be a lightweight testing tool with as simple a user interface as
+possible.
+
+B<lit> should be run with one or more I<tests> specified on the command
+line. Tests can be either individual test files or directories to search for
+tests (see L<"TEST DISCOVERY">).
+
+Each specified test will be executed (potentially in parallel) and once all
+tests have been run B<lit> will print summary information on the number of
+tests which passed or failed (see L<"TEST STATUS RESULTS">). The B<lit>
+program will exit with a non-zero exit code if any tests fail.
+
+By default B<lit> will use a succinct progress display and will only print
+summary information for test failures. See L<"OUTPUT OPTIONS"> for options
+controlling the B<lit> progress display and output.
+
+B<lit> also includes a number of options for controlling how tests are
+executed (specific features may depend on the particular test format). See
+L<"EXECUTION OPTIONS"> for more information.
+
+Finally, B<lit> also supports additional options for only running a subset of
+the tests specified on the command line; see L<"SELECTION OPTIONS"> for more
+information.
+
+=head1 GENERAL OPTIONS
+
+=over
+
+=item B<-h>, B<--help>
+
+Show the B<lit> help message.
+
+=item B<-j> I<N>, B<--threads>=I<N>
+
+Run I<N> tests in parallel. By default, this is automatically chosen to match
+the number of detected available CPUs.
+
+=back
+
+=head1 OUTPUT OPTIONS
+
+=over
+
+=item B<-q>, B<--quiet>
+
+Suppress any output except for test failures.
+
+=item B<-s>, B<--succinct>
+
+Show less output; for example, don't show information on tests that pass.
+
+=item B<-v>, B<--verbose>
+
+Show more information on test failures, for example the entire test output
+instead of just the test result.
+
+=item B<--no-progress-bar>
+
+Do not use the curses-based progress bar.
+
+=back
+
+=head1 EXECUTION OPTIONS
+
+=over
+
+=item B<--path>=I<PATH>
+
+Specify an additional I<PATH> to use when searching for executables in tests.
+
+=item B<--vg>
+
+Run individual tests under valgrind (using the memcheck tool). The
+I<--error-exitcode> argument for valgrind is used so that valgrind failures
+will cause the program to exit with a non-zero status.
+
+=item B<--vg-arg>=I<ARG>
+
+When I<--vg> is used, specify an additional argument to pass to valgrind
+itself.
+
+=item B<--time-tests>
+
+Track the wall time individual tests take to execute and include the results
+in the summary output. This is useful for determining which tests in a test
+suite take the most time to execute. Note that this option is most useful
+with I<-j 1>.
+
+=back
+
+=head1 SELECTION OPTIONS
+
+=over
+
+=item B<--max-tests>=I<N>
+
+Run at most I<N> tests and then terminate.
+
+=item B<--max-time>=I<N>
+
+Spend at most I<N> seconds (approximately) running tests and then terminate.
+
+=item B<--shuffle>
+
+Run the tests in a random order.
+
+=back
+
+=head1 ADDITIONAL OPTIONS
+
+=over
+
+=item B<--debug>
+
+Run B<lit> in debug mode, for debugging configuration issues and B<lit>
+itself.
+
+=item B<--show-suites>
+
+List the discovered test suites as part of the standard output.
+
+=item B<--no-tcl-as-sh>
+
+Run Tcl scripts internally (instead of converting to shell scripts).
+
+=back
+
+=head1 EXIT STATUS
+
+B<lit> will exit with an exit code of 1 if there are any FAIL or XPASS
+results. Otherwise, it will exit with status 0. Other exit codes are used for
+non-test-related failures (for example, a user error or an internal program
+error).
+
+=head1 TEST DISCOVERY
+
+The inputs passed to B<lit> can be either individual tests, or entire
+directories or hierarchies of tests to run. When B<lit> starts up, the first
+thing it does is convert the inputs into a complete list of tests to run as
+part of I<test discovery>.
+
+In the B<lit> model, every test must exist inside some I<test suite>. B<lit>
+resolves the inputs specified on the command line to test suites by searching
+upwards from the input path until it finds a I<lit.cfg> or I<lit.site.cfg>
+file. These files serve as both a marker of test suites and as configuration
+files which B<lit> loads in order to understand how to find and run the tests
+inside the test suite.
+
+Once B<lit> has mapped the inputs into test suites it traverses the list of
+inputs, adding tests for individual files and recursively searching for tests
+in directories.
+
+This behavior makes it easy to specify a subset of tests to run, while still
+allowing the test suite configuration to control exactly how tests are
+interpreted. In addition, B<lit> always identifies tests by the test suite
+they are in, and their relative path inside the test suite. For appropriately
+configured projects, this allows B<lit> to provide convenient and flexible
+support for out-of-tree builds.
+
+=head1 TEST STATUS RESULTS
+
+Each test ultimately produces one of the following six results:
+
+=over
+
+=item B<PASS>
+
+The test succeeded.
+
+=item B<XFAIL>
+
+The test failed, but that is expected. This is used for test formats which
+allow specifying that a test does not currently work, but wish to leave it in
+the test suite.
+
+=item B<XPASS>
+
+The test succeeded, but it was expected to fail. This is used for tests which
+were specified as expected to fail, but are now succeeding (generally because
+the feature they test was broken and has been fixed).
+
+=item B<FAIL>
+
+The test failed.
+
+=item B<UNRESOLVED>
+
+The test result could not be determined. For example, this occurs when the
+test could not be run, the test itself is invalid, or the test was
+interrupted.
+
+=item B<UNSUPPORTED>
+
+The test is not supported in this environment. This is used by test formats
+which can report unsupported tests.
+
+=back
+
+Depending on the test format, tests may produce additional information about
+their status (generally only for failures). See the L<"OUTPUT OPTIONS">
+section for more information.
+
+=head1 SEE ALSO
+
+L
+
+=head1 AUTHOR
+
+Written by Daniel Dunbar and maintained by the LLVM Team (L<http://llvm.org/>).
+
+=cut
diff --git a/utils/lit/LitConfig.py b/utils/lit/LitConfig.py
new file mode 100644
index 00000000000..4fb0ccc0935
--- /dev/null
+++ b/utils/lit/LitConfig.py
@@ -0,0 +1,71 @@
+class LitConfig:
+    """LitConfig - Configuration data for a 'lit' test runner instance, shared
+    across all tests.
+
+    The LitConfig object is also used to communicate with client configuration
+    files; it is always passed in as the global variable 'lit' so that
+    configuration files can access common functionality and internal components
+    easily.
+    """
+
+    # Provide access to built-in formats.
+    import LitFormats as formats
+
+    # Provide access to built-in utility functions.
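[Editorial note, not part of the patch.] The lit.cfg and lit.site.cfg files described under TEST DISCOVERY above are plain Python fragments: TestingConfig.frompath (later in this patch) exec's them with the globals 'config' (the TestingConfig being built) and 'lit' (the LitConfig class shown here, which exposes the built-in formats as 'lit.formats'). As a rough sketch only (the suite name, suffixes, environment variable, and 'Output' directory below are invented for illustration), such a configuration file might look like:

    # Hypothetical lit.cfg, executed by lit with 'config' and 'lit' predefined.
    import os

    config.name = 'Example'                  # tests report as 'Example::<relative path>'
    config.suffixes = ['.ll', '.c']          # which source files count as tests
    config.test_format = lit.formats.ShTest(execute_external=True)
    config.test_source_root = os.path.dirname(__file__)
    config.test_exec_root = os.path.join(config.test_source_root, 'Output')
    config.environment['EXAMPLE_VAR'] = '1'  # extra environment for test commands

The only formats exposed through 'lit.formats' in this patch are the ShTest and TclTest classes added below.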
+ import Util as util + + def __init__(self, progname, path, quiet, + useValgrind, valgrindArgs, + useTclAsSh, + noExecute, debug, isWindows): + # The name of the test runner. + self.progname = progname + # The items to add to the PATH environment variable. + self.path = list(map(str, path)) + self.quiet = bool(quiet) + self.useValgrind = bool(useValgrind) + self.valgrindArgs = list(valgrindArgs) + self.useTclAsSh = bool(useTclAsSh) + self.noExecute = noExecute + self.debug = debug + self.isWindows = bool(isWindows) + + self.numErrors = 0 + self.numWarnings = 0 + + def load_config(self, config, path): + """load_config(config, path) - Load a config object from an alternate + path.""" + from TestingConfig import TestingConfig + return TestingConfig.frompath(path, config.parent, self, + mustExist = True, + config = config) + + def _write_message(self, kind, message): + import inspect, os, sys + + # Get the file/line where this message was generated. + f = inspect.currentframe() + # Step out of _write_message, and then out of wrapper. + f = f.f_back.f_back + file,line,_,_,_ = inspect.getframeinfo(f) + location = '%s:%d' % (os.path.basename(file), line) + + print >>sys.stderr, '%s: %s: %s: %s' % (self.progname, location, + kind, message) + + def note(self, message): + self._write_message('note', message) + + def warning(self, message): + self._write_message('warning', message) + self.numWarnings += 1 + + def error(self, message): + self._write_message('error', message) + self.numErrors += 1 + + def fatal(self, message): + import sys + self._write_message('fatal', message) + sys.exit(2) diff --git a/utils/lit/LitFormats.py b/utils/lit/LitFormats.py new file mode 100644 index 00000000000..cc00ddc7e7e --- /dev/null +++ b/utils/lit/LitFormats.py @@ -0,0 +1,2 @@ +from ShTest import ShTest +from TclTest import TclTest diff --git a/utils/lit/ProgressBar.py b/utils/lit/ProgressBar.py new file mode 100644 index 00000000000..85c95f57f7a --- /dev/null +++ b/utils/lit/ProgressBar.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python + +# Source: http://code.activestate.com/recipes/475116/, with +# modifications by Daniel Dunbar. + +import sys, re, time + +class TerminalController: + """ + A class that can be used to portably generate formatted output to + a terminal. + + `TerminalController` defines a set of instance variables whose + values are initialized to the control sequence necessary to + perform a given action. These can be simply included in normal + output to the terminal: + + >>> term = TerminalController() + >>> print 'This is '+term.GREEN+'green'+term.NORMAL + + Alternatively, the `render()` method can used, which replaces + '${action}' with the string required to perform 'action': + + >>> term = TerminalController() + >>> print term.render('This is ${GREEN}green${NORMAL}') + + If the terminal doesn't support a given action, then the value of + the corresponding instance variable will be set to ''. As a + result, the above code will still work on terminals that do not + support color, except that their output will not be colored. + Also, this means that you can test whether the terminal supports a + given action by simply testing the truth value of the + corresponding instance variable: + + >>> term = TerminalController() + >>> if term.CLEAR_SCREEN: + ... print 'This terminal supports clearning the screen.' + + Finally, if the width and height of the terminal are known, then + they will be stored in the `COLS` and `LINES` attributes. 
+ """ + # Cursor movement: + BOL = '' #: Move the cursor to the beginning of the line + UP = '' #: Move the cursor up one line + DOWN = '' #: Move the cursor down one line + LEFT = '' #: Move the cursor left one char + RIGHT = '' #: Move the cursor right one char + + # Deletion: + CLEAR_SCREEN = '' #: Clear the screen and move to home position + CLEAR_EOL = '' #: Clear to the end of the line. + CLEAR_BOL = '' #: Clear to the beginning of the line. + CLEAR_EOS = '' #: Clear to the end of the screen + + # Output modes: + BOLD = '' #: Turn on bold mode + BLINK = '' #: Turn on blink mode + DIM = '' #: Turn on half-bright mode + REVERSE = '' #: Turn on reverse-video mode + NORMAL = '' #: Turn off all modes + + # Cursor display: + HIDE_CURSOR = '' #: Make the cursor invisible + SHOW_CURSOR = '' #: Make the cursor visible + + # Terminal size: + COLS = None #: Width of the terminal (None for unknown) + LINES = None #: Height of the terminal (None for unknown) + + # Foreground colors: + BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = '' + + # Background colors: + BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = '' + BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = '' + + _STRING_CAPABILITIES = """ + BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1 + CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold + BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0 + HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split() + _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split() + _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split() + + def __init__(self, term_stream=sys.stdout): + """ + Create a `TerminalController` and initialize its attributes + with appropriate values for the current terminal. + `term_stream` is the stream that will be used for terminal + output; if this stream is not a tty, then the terminal is + assumed to be a dumb terminal (i.e., have no capabilities). + """ + # Curses isn't available on all platforms + try: import curses + except: return + + # If the stream isn't a tty, then assume it has no capabilities. + if not term_stream.isatty(): return + + # Check the terminal type. If we fail, then assume that the + # terminal has no capabilities. + try: curses.setupterm() + except: return + + # Look up numeric capabilities. + self.COLS = curses.tigetnum('cols') + self.LINES = curses.tigetnum('lines') + + # Look up string capabilities. + for capability in self._STRING_CAPABILITIES: + (attrib, cap_name) = capability.split('=') + setattr(self, attrib, self._tigetstr(cap_name) or '') + + # Colors + set_fg = self._tigetstr('setf') + if set_fg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, color, curses.tparm(set_fg, i) or '') + set_fg_ansi = self._tigetstr('setaf') + if set_fg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, color, curses.tparm(set_fg_ansi, i) or '') + set_bg = self._tigetstr('setb') + if set_bg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '') + set_bg_ansi = self._tigetstr('setab') + if set_bg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '') + + def _tigetstr(self, cap_name): + # String capabilities can include "delays" of the form "$<2>". + # For any modern terminal, we should be able to just ignore + # these, so strip them out. 
+ import curses + cap = curses.tigetstr(cap_name) or '' + return re.sub(r'\$<\d+>[/*]?', '', cap) + + def render(self, template): + """ + Replace each $-substitutions in the given template string with + the corresponding terminal control string (if it's defined) or + '' (if it's not). + """ + return re.sub(r'\$\$|\${\w+}', self._render_sub, template) + + def _render_sub(self, match): + s = match.group() + if s == '$$': return s + else: return getattr(self, s[2:-1]) + +####################################################################### +# Example use case: progress bar +####################################################################### + +class SimpleProgressBar: + """ + A simple progress bar which doesn't need any terminal support. + + This prints out a progress bar like: + 'Header: 0 .. 10.. 20.. ...' + """ + + def __init__(self, header): + self.header = header + self.atIndex = None + + def update(self, percent, message): + if self.atIndex is None: + sys.stdout.write(self.header) + self.atIndex = 0 + + next = int(percent*50) + if next == self.atIndex: + return + + for i in range(self.atIndex, next): + idx = i % 5 + if idx == 0: + sys.stdout.write('%-2d' % (i*2)) + elif idx == 1: + pass # Skip second char + elif idx < 4: + sys.stdout.write('.') + else: + sys.stdout.write(' ') + sys.stdout.flush() + self.atIndex = next + + def clear(self): + if self.atIndex is not None: + sys.stdout.write('\n') + sys.stdout.flush() + self.atIndex = None + +class ProgressBar: + """ + A 3-line progress bar, which looks like:: + + Header + 20% [===========----------------------------------] + progress message + + The progress bar is colored, if the terminal supports color + output; and adjusts to the width of the terminal. + """ + BAR = '%s${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}%s\n' + HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n' + + def __init__(self, term, header, useETA=True): + self.term = term + if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL): + raise ValueError("Terminal isn't capable enough -- you " + "should use a simpler progress dispaly.") + self.width = self.term.COLS or 75 + self.bar = term.render(self.BAR) + self.header = self.term.render(self.HEADER % header.center(self.width)) + self.cleared = 1 #: true if we haven't drawn the bar yet. + self.useETA = useETA + if self.useETA: + self.startTime = time.time() + self.update(0, '') + + def update(self, percent, message): + if self.cleared: + sys.stdout.write(self.header) + self.cleared = 0 + prefix = '%3d%% ' % (percent*100,) + suffix = '' + if self.useETA: + elapsed = time.time() - self.startTime + if percent > .0001 and elapsed > 1: + total = elapsed / percent + eta = int(total - elapsed) + h = eta//3600. + m = (eta//60) % 60 + s = eta % 60 + suffix = ' ETA: %02d:%02d:%02d'%(h,m,s) + barWidth = self.width - len(prefix) - len(suffix) - 2 + n = int(barWidth*percent) + if len(message) < self.width: + message = message + ' '*(self.width - len(message)) + else: + message = '... 
' + message[-(self.width-4):] + sys.stdout.write( + self.term.BOL + self.term.UP + self.term.CLEAR_EOL + + (self.bar % (prefix, '='*n, '-'*(barWidth-n), suffix)) + + self.term.CLEAR_EOL + message) + + def clear(self): + if not self.cleared: + sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL + + self.term.UP + self.term.CLEAR_EOL + + self.term.UP + self.term.CLEAR_EOL) + self.cleared = 1 + +def test(): + import time + tc = TerminalController() + p = ProgressBar(tc, 'Tests') + for i in range(101): + p.update(i/100., str(i)) + time.sleep(.3) + +if __name__=='__main__': + test() diff --git a/utils/lit/ShCommands.py b/utils/lit/ShCommands.py new file mode 100644 index 00000000000..be3e680e9e0 --- /dev/null +++ b/utils/lit/ShCommands.py @@ -0,0 +1,86 @@ +import ShUtil + +class Command: + def __init__(self, args, redirects): + self.args = list(args) + self.redirects = list(redirects) + + def __repr__(self): + return 'Command(%r, %r)' % (self.args, self.redirects) + + def __cmp__(self, other): + if not isinstance(other, Command): + return -1 + + return cmp((self.args, self.redirects), + (other.args, other.redirects)) + + def toShell(self, file): + for arg in self.args: + if "'" not in arg: + quoted = "'%s'" % arg + elif '"' not in arg and '$' not in arg: + quoted = '"%s"' % arg + else: + raise NotImplementedError,'Unable to quote %r' % arg + print >>file, quoted, + + # For debugging / validation. + dequoted = list(ShUtil.ShLexer(quoted).lex()) + if dequoted != [arg]: + raise NotImplementedError,'Unable to quote %r' % arg + + for r in self.redirects: + if len(r[0]) == 1: + print >>file, "%s '%s'" % (r[0][0], r[1]), + else: + print >>file, "%s%s '%s'" % (r[0][1], r[0][0], r[1]), + +class Pipeline: + def __init__(self, commands, negate=False, pipe_err=False): + self.commands = commands + self.negate = negate + self.pipe_err = pipe_err + + def __repr__(self): + return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate, + self.pipe_err) + + def __cmp__(self, other): + if not isinstance(other, Pipeline): + return -1 + + return cmp((self.commands, self.negate, self.pipe_err), + (other.commands, other.negate, self.pipe_err)) + + def toShell(self, file, pipefail=False): + if pipefail != self.pipe_err: + raise ValueError,'Inconsistent "pipefail" attribute!' 
+ if self.negate: + print >>file, '!', + for cmd in self.commands: + cmd.toShell(file) + if cmd is not self.commands[-1]: + print >>file, '|\n ', + +class Seq: + def __init__(self, lhs, op, rhs): + assert op in (';', '&', '||', '&&') + self.op = op + self.lhs = lhs + self.rhs = rhs + + def __repr__(self): + return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs) + + def __cmp__(self, other): + if not isinstance(other, Seq): + return -1 + + return cmp((self.lhs, self.op, self.rhs), + (other.lhs, other.op, other.rhs)) + + def toShell(self, file, pipefail=False): + self.lhs.toShell(file, pipefail) + print >>file, ' %s\n' % self.op + self.rhs.toShell(file, pipefail) diff --git a/utils/lit/ShTest.py b/utils/lit/ShTest.py new file mode 100644 index 00000000000..fefdf7602b2 --- /dev/null +++ b/utils/lit/ShTest.py @@ -0,0 +1,12 @@ +import TestRunner + +class ShTest: + def __init__(self, execute_external = False, require_and_and = False): + self.execute_external = execute_external + self.require_and_and = require_and_and + + def execute(self, test, litConfig): + return TestRunner.executeShTest(test, litConfig, + self.execute_external, + self.require_and_and) + diff --git a/utils/lit/ShUtil.py b/utils/lit/ShUtil.py new file mode 100644 index 00000000000..c4bbb3d3731 --- /dev/null +++ b/utils/lit/ShUtil.py @@ -0,0 +1,346 @@ +import itertools + +import Util +from ShCommands import Command, Pipeline, Seq + +class ShLexer: + def __init__(self, data, win32Escapes = False): + self.data = data + self.pos = 0 + self.end = len(data) + self.win32Escapes = win32Escapes + + def eat(self): + c = self.data[self.pos] + self.pos += 1 + return c + + def look(self): + return self.data[self.pos] + + def maybe_eat(self, c): + """ + maybe_eat(c) - Consume the character c if it is the next character, + returning True if a character was consumed. """ + if self.data[self.pos] == c: + self.pos += 1 + return True + return False + + def lex_arg_fast(self, c): + # Get the leading whitespace free section. + chunk = self.data[self.pos - 1:].split(None, 1)[0] + + # If it has special characters, the fast path failed. + if ('|' in chunk or '&' in chunk or + '<' in chunk or '>' in chunk or + "'" in chunk or '"' in chunk or + '\\' in chunk): + return None + + self.pos = self.pos - 1 + len(chunk) + return chunk + + def lex_arg_slow(self, c): + if c in "'\"": + str = self.lex_arg_quoted(c) + else: + str = c + while self.pos != self.end: + c = self.look() + if c.isspace() or c in "|&": + break + elif c in '><': + # This is an annoying case; we treat '2>' as a single token so + # we don't have to track whitespace tokens. + + # If the parse string isn't an integer, do the usual thing. + if not str.isdigit(): + break + + # Otherwise, lex the operator and convert to a redirection + # token. + num = int(str) + tok = self.lex_one_token() + assert isinstance(tok, tuple) and len(tok) == 1 + return (tok[0], num) + elif c == '"': + self.eat() + str += self.lex_arg_quoted('"') + elif not self.win32Escapes and c == '\\': + # Outside of a string, '\\' escapes everything. + self.eat() + if self.pos == self.end: + Util.warning("escape at end of quoted argument in: %r" % + self.data) + return str + str += self.eat() + else: + str += self.eat() + return str + + def lex_arg_quoted(self, delim): + str = '' + while self.pos != self.end: + c = self.eat() + if c == delim: + return str + elif c == '\\' and delim == '"': + # Inside a '"' quoted string, '\\' only escapes the quote + # character and backslash, otherwise it is preserved. 
+ if self.pos == self.end: + Util.warning("escape at end of quoted argument in: %r" % + self.data) + return str + c = self.eat() + if c == '"': # + str += '"' + elif c == '\\': + str += '\\' + else: + str += '\\' + c + else: + str += c + Util.warning("missing quote character in %r" % self.data) + return str + + def lex_arg_checked(self, c): + pos = self.pos + res = self.lex_arg_fast(c) + end = self.pos + + self.pos = pos + reference = self.lex_arg_slow(c) + if res is not None: + if res != reference: + raise ValueError,"Fast path failure: %r != %r" % (res, reference) + if self.pos != end: + raise ValueError,"Fast path failure: %r != %r" % (self.pos, end) + return reference + + def lex_arg(self, c): + return self.lex_arg_fast(c) or self.lex_arg_slow(c) + + def lex_one_token(self): + """ + lex_one_token - Lex a single 'sh' token. """ + + c = self.eat() + if c in ';!': + return (c,) + if c == '|': + if self.maybe_eat('|'): + return ('||',) + return (c,) + if c == '&': + if self.maybe_eat('&'): + return ('&&',) + if self.maybe_eat('>'): + return ('&>',) + return (c,) + if c == '>': + if self.maybe_eat('&'): + return ('>&',) + if self.maybe_eat('>'): + return ('>>',) + return (c,) + if c == '<': + if self.maybe_eat('&'): + return ('<&',) + if self.maybe_eat('>'): + return ('<<',) + return (c,) + + return self.lex_arg(c) + + def lex(self): + while self.pos != self.end: + if self.look().isspace(): + self.eat() + else: + yield self.lex_one_token() + +### + +class ShParser: + def __init__(self, data, win32Escapes = False): + self.data = data + self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex() + + def lex(self): + try: + return self.tokens.next() + except StopIteration: + return None + + def look(self): + next = self.lex() + if next is not None: + self.tokens = itertools.chain([next], self.tokens) + return next + + def parse_command(self): + tok = self.lex() + if not tok: + raise ValueError,"empty command!" + if isinstance(tok, tuple): + raise ValueError,"syntax error near unexpected token %r" % tok[0] + + args = [tok] + redirects = [] + while 1: + tok = self.look() + + # EOF? + if tok is None: + break + + # If this is an argument, just add it to the current command. + if isinstance(tok, str): + args.append(self.lex()) + continue + + # Otherwise see if it is a terminator. + assert isinstance(tok, tuple) + if tok[0] in ('|',';','&','||','&&'): + break + + # Otherwise it must be a redirection. + op = self.lex() + arg = self.lex() + if not arg: + raise ValueError,"syntax error near token %r" % op[0] + redirects.append((op, arg)) + + return Command(args, redirects) + + def parse_pipeline(self): + negate = False + if self.look() == ('!',): + self.lex() + negate = True + + commands = [self.parse_command()] + while self.look() == ('|',): + self.lex() + commands.append(self.parse_command()) + return Pipeline(commands, negate) + + def parse(self): + lhs = self.parse_pipeline() + + while self.look(): + operator = self.lex() + assert isinstance(operator, tuple) and len(operator) == 1 + + if not self.look(): + raise ValueError, "missing argument to operator %r" % operator[0] + + # FIXME: Operator precedence!! 
+ lhs = Seq(lhs, operator[0], self.parse_pipeline()) + + return lhs + +### + +import unittest + +class TestShLexer(unittest.TestCase): + def lex(self, str, *args, **kwargs): + return list(ShLexer(str, *args, **kwargs).lex()) + + def test_basic(self): + self.assertEqual(self.lex('a|b>c&d',), 'c', ('&',), 'd', + ('<',), 'e']) + + def test_redirection_tokens(self): + self.assertEqual(self.lex('a2>c'), + ['a2', ('>',), 'c']) + self.assertEqual(self.lex('a 2>c'), + ['a', ('>',2), 'c']) + + def test_quoting(self): + self.assertEqual(self.lex(""" 'a' """), + ['a']) + self.assertEqual(self.lex(""" "hello\\"world" """), + ['hello"world']) + self.assertEqual(self.lex(""" "hello\\'world" """), + ["hello\\'world"]) + self.assertEqual(self.lex(""" "hello\\\\world" """), + ["hello\\world"]) + self.assertEqual(self.lex(""" he"llo wo"rld """), + ["hello world"]) + self.assertEqual(self.lex(""" a\\ b a\\\\b """), + ["a b", "a\\b"]) + self.assertEqual(self.lex(""" "" "" """), + ["", ""]) + self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True), + ['a\\', 'b']) + +class TestShParse(unittest.TestCase): + def parse(self, str): + return ShParser(str).parse() + + def test_basic(self): + self.assertEqual(self.parse('echo hello'), + Pipeline([Command(['echo', 'hello'], [])], False)) + self.assertEqual(self.parse('echo ""'), + Pipeline([Command(['echo', ''], [])], False)) + + def test_redirection(self): + self.assertEqual(self.parse('echo hello > c'), + Pipeline([Command(['echo', 'hello'], + [((('>'),), 'c')])], False)) + self.assertEqual(self.parse('echo hello > c >> d'), + Pipeline([Command(['echo', 'hello'], [(('>',), 'c'), + (('>>',), 'd')])], False)) + self.assertEqual(self.parse('a 2>&1'), + Pipeline([Command(['a'], [(('>&',2), '1')])], False)) + + def test_pipeline(self): + self.assertEqual(self.parse('a | b'), + Pipeline([Command(['a'], []), + Command(['b'], [])], + False)) + + self.assertEqual(self.parse('a | b | c'), + Pipeline([Command(['a'], []), + Command(['b'], []), + Command(['c'], [])], + False)) + + self.assertEqual(self.parse('! a'), + Pipeline([Command(['a'], [])], + True)) + + def test_list(self): + self.assertEqual(self.parse('a ; b'), + Seq(Pipeline([Command(['a'], [])], False), + ';', + Pipeline([Command(['b'], [])], False))) + + self.assertEqual(self.parse('a & b'), + Seq(Pipeline([Command(['a'], [])], False), + '&', + Pipeline([Command(['b'], [])], False))) + + self.assertEqual(self.parse('a && b'), + Seq(Pipeline([Command(['a'], [])], False), + '&&', + Pipeline([Command(['b'], [])], False))) + + self.assertEqual(self.parse('a || b'), + Seq(Pipeline([Command(['a'], [])], False), + '||', + Pipeline([Command(['b'], [])], False))) + + self.assertEqual(self.parse('a && b || c'), + Seq(Seq(Pipeline([Command(['a'], [])], False), + '&&', + Pipeline([Command(['b'], [])], False)), + '||', + Pipeline([Command(['c'], [])], False))) + +if __name__ == '__main__': + unittest.main() diff --git a/utils/lit/TODO b/utils/lit/TODO new file mode 100644 index 00000000000..4d00d2c1cfc --- /dev/null +++ b/utils/lit/TODO @@ -0,0 +1,19 @@ + - Move temp directory name into local test config. + + - Add --show-unsupported, don't show by default? + + - Finish documentation. + + - Optionally use multiprocessing. + + - Support llvmc and ocaml tests. + + - Support valgrind in all configs, and LLVM style valgrind. + + - Provide test suite config for running unit tests. + + - Support a timeout / ulimit. + + - Support "disabling" tests? 
The advantage of making this distinct from XFAIL + is it makes it more obvious that it is a temporary measure (and lit can put + in a separate category). diff --git a/utils/lit/TclTest.py b/utils/lit/TclTest.py new file mode 100644 index 00000000000..e79f1797506 --- /dev/null +++ b/utils/lit/TclTest.py @@ -0,0 +1,7 @@ +import TestRunner + +class TclTest: + def execute(self, test, litConfig): + return TestRunner.executeTclTest(test, litConfig) + + diff --git a/utils/lit/TclUtil.py b/utils/lit/TclUtil.py new file mode 100644 index 00000000000..da7659de857 --- /dev/null +++ b/utils/lit/TclUtil.py @@ -0,0 +1,322 @@ +import itertools + +from ShCommands import Command, Pipeline + +def tcl_preprocess(data): + # Tcl has a preprocessing step to replace escaped newlines. + i = data.find('\\\n') + if i == -1: + return data + + # Replace '\\\n' and subsequent whitespace by a single space. + n = len(data) + str = data[:i] + i += 2 + while i < n and data[i] in ' \t': + i += 1 + return str + ' ' + data[i:] + +class TclLexer: + """TclLexer - Lex a string into "words", following the Tcl syntax.""" + + def __init__(self, data): + self.data = tcl_preprocess(data) + self.pos = 0 + self.end = len(self.data) + + def at_end(self): + return self.pos == self.end + + def eat(self): + c = self.data[self.pos] + self.pos += 1 + return c + + def look(self): + return self.data[self.pos] + + def maybe_eat(self, c): + """ + maybe_eat(c) - Consume the character c if it is the next character, + returning True if a character was consumed. """ + if self.data[self.pos] == c: + self.pos += 1 + return True + return False + + def escape(self, c): + if c == 'a': + return '\x07' + elif c == 'b': + return '\x08' + elif c == 'f': + return '\x0c' + elif c == 'n': + return '\n' + elif c == 'r': + return '\r' + elif c == 't': + return '\t' + elif c == 'v': + return '\x0b' + elif c in 'uxo': + raise ValueError,'Invalid quoted character %r' % c + else: + return c + + def lex_braced(self): + # Lex until whitespace or end of string, the opening brace has already + # been consumed. + + str = '' + while 1: + if self.at_end(): + raise ValueError,"Unterminated '{' quoted word" + + c = self.eat() + if c == '}': + break + elif c == '{': + str += '{' + self.lex_braced() + '}' + elif c == '\\' and self.look() in '{}': + str += self.eat() + else: + str += c + + return str + + def lex_quoted(self): + str = '' + + while 1: + if self.at_end(): + raise ValueError,"Unterminated '\"' quoted word" + + c = self.eat() + if c == '"': + break + elif c == '\\': + if self.at_end(): + raise ValueError,'Missing quoted character' + + str += self.escape(self.eat()) + else: + str += c + + return str + + def lex_unquoted(self, process_all=False): + # Lex until whitespace or end of string. + str = '' + while not self.at_end(): + if not process_all: + if self.look().isspace() or self.look() == ';': + break + + c = self.eat() + if c == '\\': + if self.at_end(): + raise ValueError,'Missing quoted character' + + str += self.escape(self.eat()) + elif c == '[': + raise NotImplementedError, ('Command substitution is ' + 'not supported') + elif c == '$' and not self.at_end() and (self.look().isalpha() or + self.look() == '{'): + raise NotImplementedError, ('Variable substitution is ' + 'not supported') + else: + str += c + + return str + + def lex_one_token(self): + if self.maybe_eat('"'): + return self.lex_quoted() + elif self.maybe_eat('{'): + # Check for argument substitution. 
+ if not self.maybe_eat('*'): + return self.lex_braced() + + if not self.maybe_eat('}'): + return '*' + self.lex_braced() + + if self.at_end() or self.look().isspace(): + return '*' + + raise NotImplementedError, "Argument substitution is unsupported" + else: + return self.lex_unquoted() + + def lex(self): + while not self.at_end(): + c = self.look() + if c in ' \t': + self.eat() + elif c in ';\n': + self.eat() + yield (';',) + else: + yield self.lex_one_token() + +class TclExecCommand: + kRedirectPrefixes1 = ('<', '>') + kRedirectPrefixes2 = ('<@', '<<', '2>', '>&', '>>', '>@') + kRedirectPrefixes3 = ('2>@', '2>>', '>>&', '>&@') + kRedirectPrefixes4 = ('2>@1',) + + def __init__(self, args): + self.args = iter(args) + + def lex(self): + try: + return self.args.next() + except StopIteration: + return None + + def look(self): + next = self.lex() + if next is not None: + self.args = itertools.chain([next], self.args) + return next + + def parse_redirect(self, tok, length): + if len(tok) == length: + arg = self.lex() + if next is None: + raise ValueError,'Missing argument to %r redirection' % tok + else: + tok,arg = tok[:length],tok[length:] + + if tok[0] == '2': + op = (tok[1:],2) + else: + op = (tok,) + return (op, arg) + + def parse_pipeline(self): + if self.look() is None: + raise ValueError,"Expected at least one argument to exec" + + commands = [Command([],[])] + while 1: + arg = self.lex() + if arg is None: + break + elif arg == '|': + commands.append(Command([],[])) + elif arg == '|&': + # Write this as a redirect of stderr; it must come first because + # stdout may have already been redirected. + commands[-1].redirects.insert(0, (('>&',2),'1')) + commands.append(Command([],[])) + elif arg[:4] in TclExecCommand.kRedirectPrefixes4: + commands[-1].redirects.append(self.parse_redirect(arg, 4)) + elif arg[:3] in TclExecCommand.kRedirectPrefixes3: + commands[-1].redirects.append(self.parse_redirect(arg, 3)) + elif arg[:2] in TclExecCommand.kRedirectPrefixes2: + commands[-1].redirects.append(self.parse_redirect(arg, 2)) + elif arg[:1] in TclExecCommand.kRedirectPrefixes1: + commands[-1].redirects.append(self.parse_redirect(arg, 1)) + else: + commands[-1].args.append(arg) + + return Pipeline(commands, False, pipe_err=True) + + def parse(self): + ignoreStderr = False + keepNewline = False + + # Parse arguments. 
+ while 1: + next = self.look() + if not isinstance(next, str) or next[0] != '-': + break + + if next == '--': + self.lex() + break + elif next == '-ignorestderr': + ignoreStderr = True + elif next == '-keepnewline': + keepNewline = True + else: + raise ValueError,"Invalid exec argument %r" % next + + return (ignoreStderr, keepNewline, self.parse_pipeline()) + +### + +import unittest + +class TestTclLexer(unittest.TestCase): + def lex(self, str, *args, **kwargs): + return list(TclLexer(str, *args, **kwargs).lex()) + + def test_preprocess(self): + self.assertEqual(tcl_preprocess('a b'), 'a b') + self.assertEqual(tcl_preprocess('a\\\nb c'), 'a b c') + + def test_unquoted(self): + self.assertEqual(self.lex('a b c'), + ['a', 'b', 'c']) + self.assertEqual(self.lex(r'a\nb\tc\ '), + ['a\nb\tc ']) + self.assertEqual(self.lex(r'a \\\$b c $\\'), + ['a', r'\$b', 'c', '$\\']) + + def test_braced(self): + self.assertEqual(self.lex('a {b c} {}'), + ['a', 'b c', '']) + self.assertEqual(self.lex(r'a {b {c\n}}'), + ['a', 'b {c\\n}']) + self.assertEqual(self.lex(r'a {b\{}'), + ['a', 'b{']) + self.assertEqual(self.lex(r'{*}'), ['*']) + self.assertEqual(self.lex(r'{*} a'), ['*', 'a']) + self.assertEqual(self.lex(r'{*} a'), ['*', 'a']) + self.assertEqual(self.lex('{a\\\n b}'), + ['a b']) + + def test_quoted(self): + self.assertEqual(self.lex('a "b c"'), + ['a', 'b c']) + + def test_terminators(self): + self.assertEqual(self.lex('a\nb'), + ['a', (';',), 'b']) + self.assertEqual(self.lex('a;b'), + ['a', (';',), 'b']) + self.assertEqual(self.lex('a ; b'), + ['a', (';',), 'b']) + +class TestTclExecCommand(unittest.TestCase): + def parse(self, str): + return TclExecCommand(list(TclLexer(str).lex())).parse() + + def test_basic(self): + self.assertEqual(self.parse('echo hello'), + (False, False, + Pipeline([Command(['echo', 'hello'], [])], + False, True))) + self.assertEqual(self.parse('echo hello | grep hello'), + (False, False, + Pipeline([Command(['echo', 'hello'], []), + Command(['grep', 'hello'], [])], + False, True))) + + def test_redirect(self): + self.assertEqual(self.parse('echo hello > a >b >>c 2> d |& e'), + (False, False, + Pipeline([Command(['echo', 'hello'], + [(('>',),'a'), + (('>',),'b'), + (('>>',),'c'), + (('>',2),'d'), + (('>&',2),'1')]), + Command(['e'], [])], + False, True))) + +if __name__ == '__main__': + unittest.main() diff --git a/utils/lit/Test.py b/utils/lit/Test.py new file mode 100644 index 00000000000..d3f627456f8 --- /dev/null +++ b/utils/lit/Test.py @@ -0,0 +1,71 @@ +import os + +# Test results. + +class TestResult: + def __init__(self, name, isFailure): + self.name = name + self.isFailure = isFailure + +PASS = TestResult('PASS', False) +XFAIL = TestResult('XFAIL', False) +FAIL = TestResult('FAIL', True) +XPASS = TestResult('XPASS', True) +UNRESOLVED = TestResult('UNRESOLVED', True) +UNSUPPORTED = TestResult('UNSUPPORTED', False) + +# Test classes. + +class TestFormat: + """TestFormat - Test information provider.""" + + def __init__(self, name): + self.name = name + +class TestSuite: + """TestSuite - Information on a group of tests. + + A test suite groups together a set of logically related tests. + """ + + def __init__(self, name, source_root, exec_root, config): + self.name = name + self.source_root = source_root + self.exec_root = exec_root + # The test suite configuration. 
+ self.config = config + + def getSourcePath(self, components): + return os.path.join(self.source_root, *components) + + def getExecPath(self, components): + return os.path.join(self.exec_root, *components) + +class Test: + """Test - Information on a single test instance.""" + + def __init__(self, suite, path_in_suite, config): + self.suite = suite + self.path_in_suite = path_in_suite + self.config = config + # The test result code, once complete. + self.result = None + # Any additional output from the test, once complete. + self.output = None + # The wall time to execute this test, if timing and once complete. + self.elapsed = None + + def setResult(self, result, output, elapsed): + assert self.result is None, "Test result already set!" + self.result = result + self.output = output + self.elapsed = elapsed + + def getFullName(self): + return self.suite.config.name + '::' + '/'.join(self.path_in_suite) + + def getSourcePath(self): + return self.suite.getSourcePath(self.path_in_suite) + + def getExecPath(self): + return self.suite.getExecPath(self.path_in_suite) diff --git a/utils/lit/TestRunner.py b/utils/lit/TestRunner.py new file mode 100644 index 00000000000..954013adedc --- /dev/null +++ b/utils/lit/TestRunner.py @@ -0,0 +1,460 @@ +import os, signal, subprocess, sys +import StringIO + +import ShUtil +import Test +import Util + +def executeCommand(command, cwd=None, env=None): + p = subprocess.Popen(command, cwd=cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + out,err = p.communicate() + exitCode = p.wait() + + # Detect Ctrl-C in subprocess. + if exitCode == -signal.SIGINT: + raise KeyboardInterrupt + + return out, err, exitCode + +def executeShCmd(cmd, cfg, cwd, results): + if isinstance(cmd, ShUtil.Seq): + if cmd.op == ';': + res = executeShCmd(cmd.lhs, cfg, cwd, results) + return executeShCmd(cmd.rhs, cfg, cwd, results) + + if cmd.op == '&': + raise NotImplementedError,"unsupported test command: '&'" + + if cmd.op == '||': + res = executeShCmd(cmd.lhs, cfg, cwd, results) + if res != 0: + res = executeShCmd(cmd.rhs, cfg, cwd, results) + return res + if cmd.op == '&&': + res = executeShCmd(cmd.lhs, cfg, cwd, results) + if res is None: + return res + + if res == 0: + res = executeShCmd(cmd.rhs, cfg, cwd, results) + return res + + raise ValueError,'Unknown shell command: %r' % cmd.op + + assert isinstance(cmd, ShUtil.Pipeline) + procs = [] + input = subprocess.PIPE + for j in cmd.commands: + redirects = [(0,), (1,), (2,)] + for r in j.redirects: + if r[0] == ('>',2): + redirects[2] = [r[1], 'w', None] + elif r[0] == ('>&',2) and r[1] in '012': + redirects[2] = redirects[int(r[1])] + elif r[0] == ('>&',) or r[0] == ('&>',): + redirects[1] = redirects[2] = [r[1], 'w', None] + elif r[0] == ('>',): + redirects[1] = [r[1], 'w', None] + elif r[0] == ('<',): + redirects[0] = [r[1], 'r', None] + else: + raise NotImplementedError,"Unsupported redirect: %r" % (r,) + + final_redirects = [] + for index,r in enumerate(redirects): + if r == (0,): + result = input + elif r == (1,): + if index == 0: + raise NotImplementedError,"Unsupported redirect for stdin" + elif index == 1: + result = subprocess.PIPE + else: + result = subprocess.STDOUT + elif r == (2,): + if index != 2: + raise NotImplementedError,"Unsupported redirect on stdout" + result = subprocess.PIPE + else: + if r[2] is None: + r[2] = open(r[0], r[1]) + result = r[2] + final_redirects.append(result) + + stdin, stdout, stderr = final_redirects + + # If stderr wants to come from stdout, but stdout 
isn't a pipe, then put + # stderr on a pipe and treat it as stdout. + if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE): + stderr = subprocess.PIPE + stderrIsStdout = True + else: + stderrIsStdout = False + procs.append(subprocess.Popen(j.args, cwd=cwd, + stdin = stdin, + stdout = stdout, + stderr = stderr, + env = cfg.environment, + close_fds = True)) + + # Immediately close stdin for any process taking stdin from us. + if stdin == subprocess.PIPE: + procs[-1].stdin.close() + procs[-1].stdin = None + + # Update the current stdin source. + if stdout == subprocess.PIPE: + input = procs[-1].stdout + elif stderrIsStdout: + input = procs[-1].stderr + else: + input = subprocess.PIPE + + # FIXME: There is a potential for deadlock here, when we have a pipe and + # some process other than the last one ends up blocked on stderr. + procData = [None] * len(procs) + procData[-1] = procs[-1].communicate() + for i in range(len(procs) - 1): + if procs[i].stdout is not None: + out = procs[i].stdout.read() + else: + out = '' + if procs[i].stderr is not None: + err = procs[i].stderr.read() + else: + err = '' + procData[i] = (out,err) + + exitCode = None + for i,(out,err) in enumerate(procData): + res = procs[i].wait() + # Detect Ctrl-C in subprocess. + if res == -signal.SIGINT: + raise KeyboardInterrupt + + results.append((cmd.commands[i], out, err, res)) + if cmd.pipe_err: + # Python treats the exit code as a signed char. + if res < 0: + exitCode = min(exitCode, res) + else: + exitCode = max(exitCode, res) + else: + exitCode = res + + if cmd.negate: + exitCode = not exitCode + + return exitCode + +def executeScriptInternal(test, litConfig, tmpBase, commands, cwd): + ln = ' &&\n'.join(commands) + try: + cmd = ShUtil.ShParser(ln, litConfig.isWindows).parse() + except: + return (Test.FAIL, "shell parser error on: %r" % ln) + + results = [] + exitCode = executeShCmd(cmd, test.config, cwd, results) + + out = err = '' + for i,(cmd, cmd_out,cmd_err,res) in enumerate(results): + out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args)) + out += 'Command %d Result: %r\n' % (i, res) + out += 'Command %d Output:\n%s\n\n' % (i, cmd_out) + out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err) + + return out, err, exitCode + +def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd): + import TclUtil + cmds = [] + for ln in commands: + # Given the unfortunate way LLVM's test are written, the line gets + # backslash substitution done twice. + ln = TclUtil.TclLexer(ln).lex_unquoted(process_all = True) + + try: + tokens = list(TclUtil.TclLexer(ln).lex()) + except: + return (Test.FAIL, "Tcl lexer error on: %r" % ln) + + # Validate there are no control tokens. + for t in tokens: + if not isinstance(t, str): + return (Test.FAIL, + "Invalid test line: %r containing %r" % (ln, t)) + + try: + cmds.append(TclUtil.TclExecCommand(tokens).parse_pipeline()) + except: + return (TestStatus.Fail, "Tcl 'exec' parse error on: %r" % ln) + + cmd = cmds[0] + for c in cmds[1:]: + cmd = ShUtil.Seq(cmd, '&&', c) + + if litConfig.useTclAsSh: + script = tmpBase + '.script' + + # Write script file + f = open(script,'w') + print >>f, 'set -o pipefail' + cmd.toShell(f, pipefail = True) + f.close() + + if 0: + print >>sys.stdout, cmd + print >>sys.stdout, open(script).read() + print >>sys.stdout + return '', '', 0 + + command = ['/bin/sh', script] + out,err,exitCode = executeCommand(command, cwd=cwd, + env=test.config.environment) + + # Tcl commands fail on standard error output. 
+ if err: + exitCode = 1 + out = 'Command has output on stderr!\n\n' + out + + return out,err,exitCode + else: + results = [] + exitCode = executeShCmd(cmd, test.config, cwd, results) + + out = err = '' + + # Tcl commands fail on standard error output. + if [True for _,_,err,res in results if err]: + exitCode = 1 + out += 'Command has output on stderr!\n\n' + + for i,(cmd, cmd_out, cmd_err, res) in enumerate(results): + out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args)) + out += 'Command %d Result: %r\n' % (i, res) + out += 'Command %d Output:\n%s\n\n' % (i, cmd_out) + out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err) + + return out, err, exitCode + +def executeScript(test, litConfig, tmpBase, commands, cwd): + script = tmpBase + '.script' + if litConfig.isWindows: + script += '.bat' + + # Write script file + f = open(script,'w') + if litConfig.isWindows: + f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands)) + else: + f.write(' &&\n'.join(commands)) + f.write('\n') + f.close() + + if litConfig.isWindows: + command = ['cmd','/c', script] + else: + command = ['/bin/sh', script] + if litConfig.useValgrind: + # FIXME: Running valgrind on sh is overkill. We probably could just + # run on clang with no real loss. + valgrindArgs = ['valgrind', '-q', + '--tool=memcheck', '--trace-children=yes', + '--error-exitcode=123'] + valgrindArgs.extend(litConfig.valgrindArgs) + + command = valgrindArgs + command + + return executeCommand(command, cwd=cwd, env=test.config.environment) + +def parseIntegratedTestScript(test, xfailHasColon, requireAndAnd): + """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test + script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET' + information. The RUN lines also will have variable substitution performed. + """ + + # Get the temporary location, this is always relative to the test suite + # root, not test source root. + # + # FIXME: This should not be here? + sourcepath = test.getSourcePath() + execpath = test.getExecPath() + execdir,execbase = os.path.split(execpath) + tmpBase = os.path.join(execdir, 'Output', execbase) + + # We use #_MARKER_# to hide %% while we do the other substitutions. + substitutions = [('%%', '#_MARKER_#')] + substitutions.extend(test.config.substitutions) + substitutions.extend([('%s', sourcepath), + ('%S', os.path.dirname(sourcepath)), + ('%p', os.path.dirname(sourcepath)), + ('%t', tmpBase + '.tmp'), + ('#_MARKER_#', '%')]) + + # Collect the test lines from the script. + script = [] + xfails = [] + xtargets = [] + for ln in open(sourcepath): + if 'RUN:' in ln: + # Isolate the command to run. + index = ln.index('RUN:') + ln = ln[index+4:] + + # Trim trailing whitespace. + ln = ln.rstrip() + + # Collapse lines with trailing '\\'. + if script and script[-1][-1] == '\\': + script[-1] = script[-1][:-1] + ln + else: + script.append(ln) + elif xfailHasColon and 'XFAIL:' in ln: + items = ln[ln.index('XFAIL:') + 6:].split(',') + xfails.extend([s.strip() for s in items]) + elif not xfailHasColon and 'XFAIL' in ln: + items = ln[ln.index('XFAIL') + 5:].split(',') + xfails.extend([s.strip() for s in items]) + elif 'XTARGET:' in ln: + items = ln[ln.index('XTARGET:') + 8:].split(',') + xtargets.extend([s.strip() for s in items]) + elif 'END.' in ln: + # Check for END. lines. + if ln[ln.index('END.'):].strip() == 'END.': + break + + # Apply substitutions to the script. 
+ def processLine(ln): + # Apply substitutions + for a,b in substitutions: + ln = ln.replace(a,b) + + # Strip the trailing newline and any extra whitespace. + return ln.strip() + script = map(processLine, script) + + # Verify the script contains a run line. + if not script: + return (Test.UNRESOLVED, "Test has no run line!") + + if script[-1][-1] == '\\': + return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')") + + # Validate interior lines for '&&', a lovely historical artifact. + if requireAndAnd: + for i in range(len(script) - 1): + ln = script[i] + + if not ln.endswith('&&'): + return (Test.FAIL, + ("MISSING \'&&\': %s\n" + + "FOLLOWED BY : %s\n") % (ln, script[i + 1])) + + # Strip off '&&' + script[i] = ln[:-2] + + return script,xfails,xtargets,tmpBase,execdir + +def formatTestOutput(status, out, err, exitCode, script): + output = StringIO.StringIO() + print >>output, "Script:" + print >>output, "--" + print >>output, '\n'.join(script) + print >>output, "--" + print >>output, "Exit Code: %r" % exitCode + print >>output, "Command Output (stdout):" + print >>output, "--" + output.write(out) + print >>output, "--" + print >>output, "Command Output (stderr):" + print >>output, "--" + output.write(err) + print >>output, "--" + return (status, output.getvalue()) + +def executeTclTest(test, litConfig): + if test.config.unsupported: + return (Test.UNSUPPORTED, 'Test is unsupported') + + res = parseIntegratedTestScript(test, True, False) + if len(res) == 2: + return res + + script, xfails, xtargets, tmpBase, execdir = res + + if litConfig.noExecute: + return (Test.PASS, '') + + # Create the output directory if it does not already exist. + Util.mkdir_p(os.path.dirname(tmpBase)) + + res = executeTclScriptInternal(test, litConfig, tmpBase, script, execdir) + if len(res) == 2: + return res + + isXFail = False + for item in xfails: + if item == '*' or item in test.suite.config.target_triple: + isXFail = True + break + + # If this is XFAIL, see if it is expected to pass on this target. + if isXFail: + for item in xtargets: + if item == '*' or item in test.suite.config.target_triple: + isXFail = False + break + + out,err,exitCode = res + if isXFail: + ok = exitCode != 0 + status = (Test.XPASS, Test.XFAIL)[ok] + else: + ok = exitCode == 0 + status = (Test.FAIL, Test.PASS)[ok] + + if ok: + return (status,'') + + return formatTestOutput(status, out, err, exitCode, script) + +def executeShTest(test, litConfig, useExternalSh, requireAndAnd): + if test.config.unsupported: + return (Test.UNSUPPORTED, 'Test is unsupported') + + res = parseIntegratedTestScript(test, False, requireAndAnd) + if len(res) == 2: + return res + + script, xfails, xtargets, tmpBase, execdir = res + + if litConfig.noExecute: + return (Test.PASS, '') + + # Create the output directory if it does not already exist. 
+ Util.mkdir_p(os.path.dirname(tmpBase)) + + if useExternalSh: + res = executeScript(test, litConfig, tmpBase, script, execdir) + else: + res = executeScriptInternal(test, litConfig, tmpBase, script, execdir) + if len(res) == 2: + return res + + out,err,exitCode = res + if xfails: + ok = exitCode != 0 + status = (Test.XPASS, Test.XFAIL)[ok] + else: + ok = exitCode == 0 + status = (Test.FAIL, Test.PASS)[ok] + + if ok: + return (status,'') + + return formatTestOutput(status, out, err, exitCode, script) diff --git a/utils/lit/TestingConfig.py b/utils/lit/TestingConfig.py new file mode 100644 index 00000000000..bfe23b282cd --- /dev/null +++ b/utils/lit/TestingConfig.py @@ -0,0 +1,95 @@ +import os + +class TestingConfig: + """" + TestingConfig - Information on the tests inside a suite. + """ + + @staticmethod + def frompath(path, parent, litConfig, mustExist, config = None): + if config is None: + # Set the environment based on the command line arguments. + environment = { + 'PATH' : os.pathsep.join(litConfig.path + + [os.environ.get('PATH','')]), + 'SYSTEMROOT' : os.environ.get('SYSTEMROOT',''), + } + + config = TestingConfig(parent, + name = '', + suffixes = set(), + test_format = None, + environment = environment, + substitutions = [], + unsupported = False, + on_clone = None, + test_exec_root = None, + test_source_root = None, + excludes = []) + + if os.path.exists(path): + # FIXME: Improve detection and error reporting of errors in the + # config file. + f = open(path) + cfg_globals = dict(globals()) + cfg_globals['config'] = config + cfg_globals['lit'] = litConfig + cfg_globals['__file__'] = path + try: + exec f in cfg_globals + except SystemExit,status: + # We allow normal system exit inside a config file to just + # return control without error. + if status.args: + raise + f.close() + elif mustExist: + litConfig.fatal('unable to load config from %r ' % path) + + config.finish(litConfig) + return config + + def __init__(self, parent, name, suffixes, test_format, + environment, substitutions, unsupported, on_clone, + test_exec_root, test_source_root, excludes): + self.parent = parent + self.name = str(name) + self.suffixes = set(suffixes) + self.test_format = test_format + self.environment = dict(environment) + self.substitutions = list(substitutions) + self.unsupported = unsupported + self.on_clone = on_clone + self.test_exec_root = test_exec_root + self.test_source_root = test_source_root + self.excludes = set(excludes) + + def clone(self, path): + # FIXME: Chain implementations? + # + # FIXME: Allow extra parameters? + cfg = TestingConfig(self, self.name, self.suffixes, self.test_format, + self.environment, self.substitutions, + self.unsupported, self.on_clone, + self.test_exec_root, self.test_source_root, + self.excludes) + if cfg.on_clone: + cfg.on_clone(self, cfg, path) + return cfg + + def finish(self, litConfig): + """finish() - Finish this config object, after loading is complete.""" + + self.name = str(self.name) + self.suffixes = set(self.suffixes) + self.environment = dict(self.environment) + self.substitutions = list(self.substitutions) + if self.test_exec_root is not None: + # FIXME: This should really only be suite in test suite config + # files. Should we distinguish them? + self.test_exec_root = str(self.test_exec_root) + if self.test_source_root is not None: + # FIXME: This should really only be suite in test suite config + # files. Should we distinguish them? 
+ self.test_source_root = str(self.test_source_root) + self.excludes = set(self.excludes) diff --git a/utils/lit/Util.py b/utils/lit/Util.py new file mode 100644 index 00000000000..e62a8ed81dc --- /dev/null +++ b/utils/lit/Util.py @@ -0,0 +1,124 @@ +import os, sys + +def detectCPUs(): + """ + Detects the number of CPUs on a system. Cribbed from pp. + """ + # Linux, Unix and MacOS: + if hasattr(os, "sysconf"): + if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"): + # Linux & Unix: + ncpus = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + return int(os.popen2("sysctl -n hw.ncpu")[1].read()) + # Windows: + if os.environ.has_key("NUMBER_OF_PROCESSORS"): + ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]); + if ncpus > 0: + return ncpus + return 1 # Default + +def mkdir_p(path): + """mkdir_p(path) - Make the "path" directory, if it does not exist; this + will also make directories for any missing parent directories.""" + import errno + + if not path or os.path.exists(path): + return + + parent = os.path.dirname(path) + if parent != path: + mkdir_p(parent) + + try: + os.mkdir(path) + except OSError,e: + # Ignore EEXIST, which may occur during a race condition. + if e.errno != errno.EEXIST: + raise + +def capture(args): + import subprocess + """capture(command) - Run the given command (or argv list) in a shell and + return the standard output.""" + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out,_ = p.communicate() + return out + +def which(command, paths = None): + """which(command, [paths]) - Look up the given command in the paths string + (or the PATH environment variable, if unspecified).""" + + if paths is None: + paths = os.environ.get('PATH','') + + # Check for absolute match first. + if os.path.exists(command): + return command + + # Would be nice if Python had a lib function for this. + if not paths: + paths = os.defpath + + # Get suffixes to search. + pathext = os.environ.get('PATHEXT', '').split(os.pathsep) + + # Search the paths... + for path in paths.split(os.pathsep): + for ext in pathext: + p = os.path.join(path, command + ext) + if os.path.exists(p): + return p + + return None + +def printHistogram(items, title = 'Items'): + import itertools, math + + items.sort(key = lambda (_,v): v) + + maxValue = max([v for _,v in items]) + + # Select first "nice" bar height that produces more than 10 bars. 
diff --git a/utils/lit/lit.py b/utils/lit/lit.py
new file mode 100755
index 00000000000..62ebf51a71d
--- /dev/null
+++ b/utils/lit/lit.py
@@ -0,0 +1,519 @@
+#!/usr/bin/env python
+
+"""
+lit - LLVM Integrated Tester.
+
+See lit.pod for more information.
+"""
+
+import math, os, platform, random, re, sys, time, threading, traceback
+
+import ProgressBar
+import TestRunner
+import Util
+
+from TestingConfig import TestingConfig
+import LitConfig
+import Test
+
+# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
+kConfigName = 'lit.cfg'
+kSiteConfigName = 'lit.site.cfg'
+kLocalConfigName = 'lit.local.cfg'
+
+class TestingProgressDisplay:
+    def __init__(self, opts, numTests, progressBar=None):
+        self.opts = opts
+        self.numTests = numTests
+        self.current = None
+        self.lock = threading.Lock()
+        self.progressBar = progressBar
+        self.completed = 0
+
+    def update(self, test):
+        # Avoid locking overhead in quiet mode
+        if self.opts.quiet and not test.result.isFailure:
+            return
+
+        # Output lock.
+        self.lock.acquire()
+        try:
+            self.handleUpdate(test)
+        finally:
+            self.lock.release()
+
+    def finish(self):
+        if self.progressBar:
+            self.progressBar.clear()
+        elif self.opts.quiet:
+            pass
+        elif self.opts.succinct:
+            sys.stdout.write('\n')
+
+    def handleUpdate(self, test):
+        self.completed += 1
+        if self.progressBar:
+            self.progressBar.update(float(self.completed)/self.numTests,
+                                    test.getFullName())
+
+        if self.opts.succinct and not test.result.isFailure:
+            return
+
+        if self.progressBar:
+            self.progressBar.clear()
+
+        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
+                                     self.completed, self.numTests)
+
+        if test.result.isFailure and self.opts.showOutput:
+            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+                                              '*'*20)
+            print test.output
+            print "*" * 20
+
+        sys.stdout.flush()
+
+class TestProvider:
+    def __init__(self, tests, maxTime):
+        self.maxTime = maxTime
+        self.iter = iter(tests)
+        self.lock = threading.Lock()
+        self.startTime = time.time()
+
+    def get(self):
+        # Check if we have run out of time.
+        if self.maxTime is not None:
+            if time.time() - self.startTime > self.maxTime:
+                return None
+
+        # Otherwise take the next test.
+        self.lock.acquire()
+        try:
+            item = self.iter.next()
+        except StopIteration:
+            item = None
+        self.lock.release()
+        return item
+
+class Tester(threading.Thread):
+    def __init__(self, litConfig, provider, display):
+        threading.Thread.__init__(self)
+        self.litConfig = litConfig
+        self.provider = provider
+        self.display = display
+
+    def run(self):
+        while 1:
+            item = self.provider.get()
+            if item is None:
+                break
+            self.runTest(item)
+
+    def runTest(self, test):
+        result = None
+        startTime = time.time()
+        try:
+            result, output = test.config.test_format.execute(test,
+                                                             self.litConfig)
+        except KeyboardInterrupt:
+            # This is a sad hack. Unfortunately subprocess goes
+            # bonkers with ctrl-c and we start forking merrily.
+            print '\nCtrl-C detected, goodbye.'
+            os.kill(0,9)
+        except:
+            if self.litConfig.debug:
+                raise
+            result = Test.UNRESOLVED
+            output = 'Exception during script execution:\n'
+            output += traceback.format_exc()
+            output += '\n'
+        elapsed = time.time() - startTime
+
+        test.setResult(result, output, elapsed)
+        self.display.update(test)
+
+def dirContainsTestSuite(path):
+    cfgpath = os.path.join(path, kSiteConfigName)
+    if os.path.exists(cfgpath):
+        return cfgpath
+    cfgpath = os.path.join(path, kConfigName)
+    if os.path.exists(cfgpath):
+        return cfgpath
+
+def getTestSuite(item, litConfig, cache):
+    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)
+
+    Find the test suite containing @arg item.
+
+    @retval (None, ...) - Indicates no test suite contains @arg item.
+    @retval (suite, relative_path) - The suite that @arg item is in, and its
+    relative path inside that suite.
+    """
+    def search1(path):
+        # Check for a site config or a lit config.
+        cfgpath = dirContainsTestSuite(path)
+
+        # If we didn't find a config file, keep looking.
+        if not cfgpath:
+            parent,base = os.path.split(path)
+            if parent == path:
+                return (None, ())
+
+            ts, relative = search(parent)
+            return (ts, relative + (base,))
+
+        # We found a config file, load it.
+        if litConfig.debug:
+            litConfig.note('loading suite config %r' % cfgpath)
+
+        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
+        source_root = os.path.realpath(cfg.test_source_root or path)
+        exec_root = os.path.realpath(cfg.test_exec_root or path)
+        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
+
+    def search(path):
+        # Check for an already instantiated test suite.
+        res = cache.get(path)
+        if res is None:
+            cache[path] = res = search1(path)
+        return res
+
+    # Canonicalize the path.
+    item = os.path.realpath(item)
+
+    # Skip files and virtual components.
+    components = []
+    while not os.path.isdir(item):
+        parent,base = os.path.split(item)
+        if parent == item:
+            return (None, ())
+        components.append(base)
+        item = parent
+    components.reverse()
+
+    ts, relative = search(item)
+    return ts, tuple(relative + tuple(components))
+
+def getLocalConfig(ts, path_in_suite, litConfig, cache):
+    def search1(path_in_suite):
+        # Get the parent config.
+        if not path_in_suite:
+            parent = ts.config
+        else:
+            parent = search(path_in_suite[:-1])
+
+        # Load the local configuration.
+        source_path = ts.getSourcePath(path_in_suite)
+        cfgpath = os.path.join(source_path, kLocalConfigName)
+        if litConfig.debug:
+            litConfig.note('loading local config %r' % cfgpath)
+        return TestingConfig.frompath(cfgpath, parent, litConfig,
+                                      mustExist = False,
+                                      config = parent.clone(cfgpath))
+
+    def search(path_in_suite):
+        key = (ts, path_in_suite)
+        res = cache.get(key)
+        if res is None:
+            cache[key] = res = search1(path_in_suite)
+        return res
+
+    return search(path_in_suite)
+
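To make the discovery logic above concrete, here is a hedged walk-through over a hypothetical layout; the paths are invented for illustration and do not come from this patch.

# Hypothetical tree:
#   /src/tests/lit.cfg               <- marks the test suite root
#   /src/tests/CodeGen/lit.local.cfg
#   /src/tests/CodeGen/add.ll
#
# getTestSuite('/src/tests/CodeGen/add.ll', litConfig, {}) splits off the
# non-directory components, then walks upward until dirContainsTestSuite
# finds lit.site.cfg or lit.cfg, returning roughly:
#   (TestSuite(cfg.name, '/src/tests', exec_root, cfg), ('CodeGen', 'add.ll'))
#
# getLocalConfig(ts, ('CodeGen',), litConfig, {}) then clones the suite
# config and runs /src/tests/CodeGen/lit.local.cfg on top of the clone.
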
+def getTests(path, litConfig, testSuiteCache, localConfigCache):
+    # Find the test suite for this input and its relative path.
+    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
+    if ts is None:
+        litConfig.warning('unable to find test suite for %r' % path)
+        return ()
+
+    if litConfig.debug:
+        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
+                                                        path_in_suite))
+
+    return getTestsInSuite(ts, path_in_suite, litConfig,
+                           testSuiteCache, localConfigCache)
+
+def getTestsInSuite(ts, path_in_suite, litConfig,
+                    testSuiteCache, localConfigCache):
+    # Check that the source path exists (errors here are reported by the
+    # caller).
+    source_path = ts.getSourcePath(path_in_suite)
+    if not os.path.exists(source_path):
+        return
+
+    # Check if the user named a test directly.
+    if not os.path.isdir(source_path):
+        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
+        yield Test.Test(ts, path_in_suite, lc)
+        return
+
+    # Otherwise we have a directory to search for tests; start by getting the
+    # local configuration.
+    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
+    for filename in os.listdir(source_path):
+        # FIXME: This doesn't belong here?
+        if filename == 'Output' or filename in lc.excludes:
+            continue
+
+        filepath = os.path.join(source_path, filename)
+        if os.path.isdir(filepath):
+            # If this directory contains a test suite, reload it.
+            if dirContainsTestSuite(filepath):
+                for res in getTests(filepath, litConfig,
+                                    testSuiteCache, localConfigCache):
+                    yield res
+            else:
+                # Otherwise, continue loading from inside this test suite.
+                for res in getTestsInSuite(ts, path_in_suite + (filename,),
+                                           litConfig, testSuiteCache,
+                                           localConfigCache):
+                    yield res
+        else:
+            # Otherwise add tests for matching suffixes.
+            base,ext = os.path.splitext(filename)
+            if ext in lc.suffixes:
+                yield Test.Test(ts, path_in_suite + (filename,), lc)
+
+def runTests(numThreads, litConfig, provider, display):
+    # If only using one testing thread, don't use threads at all; this lets us
+    # profile, among other things.
+    if numThreads == 1:
+        t = Tester(litConfig, provider, display)
+        t.run()
+        return
+
+    # Otherwise spin up the testing threads and wait for them to finish.
+    testers = [Tester(litConfig, provider, display)
+               for i in range(numThreads)]
+    for t in testers:
+        t.start()
+    try:
+        for t in testers:
+            t.join()
+    except KeyboardInterrupt:
+        sys.exit(2)
+
+def main():
+    global options
+    from optparse import OptionParser, OptionGroup
+    parser = OptionParser("usage: %prog [options] {file-or-path}")
+
+    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
+                      help="Number of testing threads",
+                      type=int, action="store", default=None)
+
+    group = OptionGroup(parser, "Output Format")
+    # FIXME: I find these names very confusing, although I like the
+    # functionality.
+ group.add_option("-q", "--quiet", dest="quiet", + help="Suppress no error output", + action="store_true", default=False) + group.add_option("-s", "--succinct", dest="succinct", + help="Reduce amount of output", + action="store_true", default=False) + group.add_option("-v", "--verbose", dest="showOutput", + help="Show all test output", + action="store_true", default=False) + group.add_option("", "--no-progress-bar", dest="useProgressBar", + help="Do not use curses based progress bar", + action="store_false", default=True) + parser.add_option_group(group) + + group = OptionGroup(parser, "Test Execution") + group.add_option("", "--path", dest="path", + help="Additional paths to add to testing environment", + action="append", type=str, default=[]) + group.add_option("", "--vg", dest="useValgrind", + help="Run tests under valgrind", + action="store_true", default=False) + group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG", + help="Specify an extra argument for valgrind", + type=str, action="append", default=[]) + group.add_option("", "--time-tests", dest="timeTests", + help="Track elapsed wall time for each test", + action="store_true", default=False) + group.add_option("", "--no-execute", dest="noExecute", + help="Don't execute any tests (assume PASS)", + action="store_true", default=False) + parser.add_option_group(group) + + group = OptionGroup(parser, "Test Selection") + group.add_option("", "--max-tests", dest="maxTests", metavar="N", + help="Maximum number of tests to run", + action="store", type=int, default=None) + group.add_option("", "--max-time", dest="maxTime", metavar="N", + help="Maximum time to spend testing (in seconds)", + action="store", type=float, default=None) + group.add_option("", "--shuffle", dest="shuffle", + help="Run tests in random order", + action="store_true", default=False) + parser.add_option_group(group) + + group = OptionGroup(parser, "Debug and Experimental Options") + group.add_option("", "--debug", dest="debug", + help="Enable debugging (for 'lit' development)", + action="store_true", default=False) + group.add_option("", "--show-suites", dest="showSuites", + help="Show discovered test suites", + action="store_true", default=False) + group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh", + help="Don't run Tcl scripts using 'sh'", + action="store_false", default=True) + parser.add_option_group(group) + + (opts, args) = parser.parse_args() + + if not args: + parser.error('No inputs specified') + + if opts.numThreads is None: + opts.numThreads = Util.detectCPUs() + + inputs = args + + # Create the global config object. + litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]), + path = opts.path, + quiet = opts.quiet, + useValgrind = opts.useValgrind, + valgrindArgs = opts.valgrindArgs, + useTclAsSh = opts.useTclAsSh, + noExecute = opts.noExecute, + debug = opts.debug, + isWindows = (platform.system()=='Windows')) + + # Load the tests from the inputs. + tests = [] + testSuiteCache = {} + localConfigCache = {} + for input in inputs: + prev = len(tests) + tests.extend(getTests(input, litConfig, + testSuiteCache, localConfigCache)) + if prev == len(tests): + litConfig.warning('input %r contained no tests' % input) + + # If there were any errors during test discovery, exit now. + if litConfig.numErrors: + print >>sys.stderr, '%d errors, exiting.' 
+        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
+        sys.exit(2)
+
+    if opts.showSuites:
+        suitesAndTests = dict([(ts,[])
+                               for ts,_ in testSuiteCache.values()])
+        for t in tests:
+            suitesAndTests[t.suite].append(t)
+
+        print '-- Test Suites --'
+        suitesAndTests = suitesAndTests.items()
+        suitesAndTests.sort(key = lambda (ts,_): ts.name)
+        for ts,ts_tests in suitesAndTests:
+            print '  %s - %d tests' % (ts.name, len(ts_tests))
+            print '    Source Root: %s' % ts.source_root
+            print '    Exec Root  : %s' % ts.exec_root
+
+    # Select and order the tests.
+    numTotalTests = len(tests)
+    if opts.shuffle:
+        random.shuffle(tests)
+    else:
+        tests.sort(key = lambda t: t.getFullName())
+    if opts.maxTests is not None:
+        tests = tests[:opts.maxTests]
+
+    extra = ''
+    if len(tests) != numTotalTests:
+        extra = ' of %d' % numTotalTests
+    header = '-- Testing: %d%s tests, %d threads --' % (len(tests), extra,
+                                                        opts.numThreads)
+
+    progressBar = None
+    if not opts.quiet:
+        if opts.succinct and opts.useProgressBar:
+            try:
+                tc = ProgressBar.TerminalController()
+                progressBar = ProgressBar.ProgressBar(tc, header)
+            except ValueError:
+                print header
+                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
+        else:
+            print header
+
+    # Don't create more threads than tests.
+    opts.numThreads = min(len(tests), opts.numThreads)
+
+    startTime = time.time()
+    display = TestingProgressDisplay(opts, len(tests), progressBar)
+    provider = TestProvider(tests, opts.maxTime)
+    runTests(opts.numThreads, litConfig, provider, display)
+    display.finish()
+
+    if not opts.quiet:
+        print 'Testing Time: %.2fs' % (time.time() - startTime)
+
+    # Update results for any tests which weren't run.
+    for t in tests:
+        if t.result is None:
+            t.setResult(Test.UNRESOLVED, '', 0.0)
+
+    # List test results organized by kind.
+    hasFailures = False
+    byCode = {}
+    for t in tests:
+        if t.result not in byCode:
+            byCode[t.result] = []
+        byCode[t.result].append(t)
+        if t.result.isFailure:
+            hasFailures = True
+
+    # FIXME: Show unresolved and (optionally) unsupported tests.
+    for title,code in (('Unexpected Passing Tests', Test.XPASS),
+                       ('Failing Tests', Test.FAIL)):
+        elts = byCode.get(code)
+        if not elts:
+            continue
+        print '*'*20
+        print '%s (%d):' % (title, len(elts))
+        for t in elts:
+            print '    %s' % t.getFullName()
+        print
+
+    if opts.timeTests:
+        byTime = list(tests)
+        byTime.sort(key = lambda t: t.elapsed)
+        if byTime:
+            Util.printHistogram([(t.getFullName(), t.elapsed) for t in byTime],
+                                title='Tests')
+
+    for name,code in (('Expected Passes    ', Test.PASS),
+                      ('Expected Failures  ', Test.XFAIL),
+                      ('Unsupported Tests  ', Test.UNSUPPORTED),
+                      ('Unresolved Tests   ', Test.UNRESOLVED),
+                      ('Unexpected Passes  ', Test.XPASS),
+                      ('Unexpected Failures', Test.FAIL),):
+        if opts.quiet and not code.isFailure:
+            continue
+        N = len(byCode.get(code,[]))
+        if N:
+            print '  %s: %d' % (name,N)
+
+    # If we encountered any additional errors, exit abnormally.
+    if litConfig.numErrors:
+        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
+        sys.exit(2)
+
+    # Warn about warnings.
+    if litConfig.numWarnings:
+        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings
+
+    if hasFailures:
+        sys.exit(1)
+    sys.exit(0)
+
+if __name__=='__main__':
+    # Bump the GIL check interval; it's more important to get any one thread
+    # to a blocking operation (hopefully exec) than to try and unblock other
+    # threads.
+    import sys
+    sys.setcheckinterval(1000)
+    main()
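
To close, a minimal programmatic sketch of how these pieces compose, mirroring the calls main() above makes; it is illustrative only, assumes it runs from utils/lit so the modules import, and uses a made-up input path.

# Illustrative driver sketch (not part of the patch); mirrors main() above.
import platform

import LitConfig
import lit

litConfig = LitConfig.LitConfig(progname = 'lit',
                                path = [],
                                quiet = False,
                                useValgrind = False,
                                valgrindArgs = [],
                                useTclAsSh = True,
                                noExecute = False,
                                debug = False,
                                isWindows = (platform.system() == 'Windows'))

# '/src/tests/CodeGen' is a made-up input; getTests yields Test objects.
tests = list(lit.getTests('/src/tests/CodeGen', litConfig, {}, {}))
print '%d tests discovered' % len(tests)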