mirror of
https://github.com/torproject/stem.git
synced 2024-12-05 00:46:41 +00:00
fbfa73a099
Several improvements for the integration tests, most notably including... - Test configurability via a 'test/settings.cfg' file - Thread safety for runner usage - Vastly better startup time for how integration tests run by default... - Reusing data directory so we don't need to request as much from authorities when starting (faster startup and less burden on them). Users can opt for a fresh temporary directory instead by setting 'test.integ.test_directory' to a blank value. - Starting tests when bootstrapping reaches 5%. This is enough for tests that don't require network activity to run, and we can explicitly run those tests by setting the 'test.integ.run.online' option. This change also means that we can now run integration tests while offline.
165 lines
5.2 KiB
Python
Executable File
165 lines
5.2 KiB
Python
Executable File
#!/usr/bin/env python
|
|
|
|
"""
|
|
Runs unit and integration tests.
|
|
"""
|
|
|
|
import os
|
|
import sys
|
|
import time
|
|
import getopt
|
|
import unittest
|
|
import test.runner
|
|
import test.unit.message
|
|
import test.unit.version
|
|
import test.integ.message
|
|
import test.integ.system
|
|
|
|
from stem.util import conf, enum, term
|
|
|
|
# getopt option strings: short options (':' marks an argument) and their
# long-form equivalents ('=' marks an argument)
OPT = "uit:h"
OPT_EXPANDED = ["unit", "integ", "targets=", "help"]
DIVIDER = "=" * 70

# (name, class) tuples for all of our unit and integration tests
# NOTE(review): "TestVerionFunctions" looks like a typo for
# "TestVersionFunctions" but must match the class name actually declared in
# test/unit/version.py — confirm there before renaming.
UNIT_TESTS = (("stem.types.ControlMessage", test.unit.message.TestMessageFunctions),
              ("stem.types.Version", test.unit.version.TestVerionFunctions),
             )

INTEG_TESTS = (("stem.types.ControlMessage", test.integ.message.TestMessageFunctions),
               ("stem.util.system", test.integ.system.TestSystemFunctions),
              )

# TODO: drop targets?
# Configurations that the integration tests can be ran with. Attributes are
# tuples of the test runner and description.
TARGETS = enum.Enum(*[(v, v) for v in ("NONE", "NO_CONTROL", "NO_AUTH", "COOKIE", "PASSWORD", "SOCKET")])

TARGET_ATTR = {
  TARGETS.NONE: (None, "No running tor instance."),
  TARGETS.NO_CONTROL: (None, "Basic client, no control port or socket."),
  TARGETS.NO_AUTH: (None, "Basic client, control port with no authentication."),
  TARGETS.COOKIE: (None, "Basic client, control port with cookie authentication."),
  TARGETS.PASSWORD: (None, "Basic client, control port with password authentication."),
  TARGETS.SOCKET: (None, "Basic client, control socket."),
}

# usage text; the '%s' is filled with one description line per target
HELP_MSG = """Usage runTests.py [OPTION]
Runs tests for the stem library.

  -u, --unit      runs unit tests
  -i, --integ     runs integration tests
  -t, --target    comma separated list of tor configurations to use for the
                    integration tests (all are used by default)
  -h, --help      presents this help

Integration targets:
  %s
"""
|
|
|
|
if __name__ == '__main__':
|
|
start_time = time.time()
|
|
run_unit_tests = False
|
|
run_integ_tests = False
|
|
integ_targets = TARGETS.values()
|
|
|
|
# parses user input, noting any issues
|
|
try:
|
|
opts, args = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)
|
|
except getopt.GetoptError, exc:
|
|
print str(exc) + " (for usage provide --help)"
|
|
sys.exit(1)
|
|
|
|
for opt, arg in opts:
|
|
if opt in ("-u", "--unit"): run_unit_tests = True
|
|
elif opt in ("-i", "--integ"): run_integ_tests = True
|
|
elif opt in ("-t", "--targets"):
|
|
integ_targets = arg.split(",")
|
|
|
|
# validates the targets
|
|
if not integ_targets:
|
|
print "No targets provided"
|
|
sys.exit(1)
|
|
|
|
for target in integ_targets:
|
|
if not target in TARGETS.values():
|
|
print "Invalid integration target: %s" % target
|
|
sys.exit(1)
|
|
elif opt in ("-h", "--help"):
|
|
# Prints usage information and quits. This includes a listing of the
|
|
# valid integration targets.
|
|
|
|
# gets the longest target length so we can show the entries in columns
|
|
target_name_length = max([len(name) for name in TARGETS.values()])
|
|
description_format = "%%-%is - %%s" % target_name_length
|
|
|
|
target_lines = []
|
|
for target in TARGETS.values():
|
|
target_lines.append(description_format % (target, TARGET_ATTR[target][1]))
|
|
|
|
print HELP_MSG % "\n ".join(target_lines)
|
|
sys.exit()
|
|
|
|
test_config = conf.get_config("test")
|
|
|
|
try:
|
|
config_path = os.path.dirname(__file__) + "/test/settings.cfg"
|
|
test_config.load(config_path)
|
|
except IOError, exc:
|
|
print term.format("Unable to load testing configuration: %s" % exc, term.Color.RED, term.Attr.BOLD)
|
|
sys.exit(1)
|
|
|
|
if not run_unit_tests and not run_integ_tests:
|
|
print "Nothing to run (for usage provide --help)\n"
|
|
sys.exit()
|
|
|
|
if run_unit_tests:
|
|
print "%s\n%s\n%s\n" % (DIVIDER, "UNIT TESTS".center(70), DIVIDER)
|
|
|
|
for name, test_class in UNIT_TESTS:
|
|
print "%s\n%s\n%s\n" % (DIVIDER, name, DIVIDER)
|
|
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
|
|
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
print
|
|
|
|
print
|
|
|
|
if run_integ_tests:
|
|
print "%s\n%s\n%s\n" % (DIVIDER, "INTEGRATION TESTS".center(70), DIVIDER)
|
|
|
|
integ_runner = test.runner.get_runner()
|
|
|
|
try:
|
|
integ_runner.start(user_config = test_config)
|
|
|
|
print term.format("Running tests...", term.Color.BLUE, term.Attr.BOLD)
|
|
print
|
|
|
|
for name, test_class in INTEG_TESTS:
|
|
print "%s\n%s\n%s\n" % (DIVIDER, name, DIVIDER)
|
|
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
|
|
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
print
|
|
except OSError:
|
|
pass
|
|
finally:
|
|
integ_runner.stop()
|
|
|
|
# TODO: we might do target selection later but for now we should simply
|
|
# work with a single simple tor instance and see how it works out
|
|
#
|
|
#for target in integ_targets:
|
|
# runner, description = TARGET_ATTR[target]
|
|
#
|
|
# print "Configuration: %s - %s" % (target, description)
|
|
#
|
|
# if runner:
|
|
# pass # TODO: implement
|
|
# else:
|
|
# print " %s" % term.format("Unimplemented", term.Color.RED, term.Attr.BOLD)
|
|
#
|
|
# print ""
|
|
|
|
print term.format("Testing Completed (%i seconds)" % (time.time() - start_time), term.Color.GREEN, term.Attr.BOLD)
|
|
print
|
|
|