#!/usr/bin/env python

"""
A simple bench runner which delegates to the ./dotest.py test driver to run the
benchmarks defined in the list named 'benches'.

You need to hand-edit 'benches' to modify or change the command lines passed to
the test driver.

Use the following to get only the benchmark results in your terminal output:

./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'

See also bench-history.
"""

import os
from optparse import OptionParser

# A dotest.py invocation with no '-e exe-path' option uses lldb itself as the
# inferior program, unless the test specifies its own custom executable.
# The %E and %X placeholders are filled in from the command line options;
# see the illustrative expansion after this list.
benches = [
    # Measure startup delays creating a target, setting a breakpoint, and run to breakpoint stop.
    './dotest.py -v +b %E %X -n -p TestStartupDelays.py',

    # Measure 'frame variable' response after stopping at a breakpoint.
    './dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',

    # Measure stepping speed after stopping at a breakpoint.
    './dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',

    # Measure expression cmd response with a simple custom executable program.
    './dotest.py +b -n -p TestExpressionCmd.py',

    # Attach to a spawned process then run disassembly benchmarks.
    './dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
]

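# As an illustration only (the lldb path below is hypothetical), invoking
#
#     ./bench.py -e /path/to/lldb -x '-F Driver::MainLoop()'
#
# expands the first entry above to:
#
#     ./dotest.py -v +b -e "/path/to/lldb" -x "-F Driver::MainLoop()" -n -p TestStartupDelays.py
#
# With no options given, %E and %X expand to empty strings and dotest.py falls
# back to its defaults.
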
def main():
    """Read the items from 'benches' and run the command lines one by one."""
    parser = OptionParser(usage="""\
%prog [options]
Run the standard benchmarks defined in the list named 'benches'.\
""")
    parser.add_option('-e', '--executable',
                      type='string', action='store',
                      dest='exe',
                      help='The target program launched by lldb.')
    parser.add_option('-x', '--breakpoint-spec',
                      type='string', action='store',
                      dest='break_spec',
                      help='The lldb breakpoint spec for the target program.')

    # Parse the options, if any.
    opts, args = parser.parse_args()

print "Starting bench runner...."
|
|
|
|
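    # Fill in the '-e' and '-x' options, if given, by expanding the %E and %X
    # placeholders; absent options expand to empty strings, leaving dotest.py
    # to use its defaults.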
    for item in benches:
        command = item.replace('%E',
                               '-e "%s"' % opts.exe if opts.exe else '')
        command = command.replace('%X',
                                  '-x "%s"' % opts.break_spec if opts.break_spec else '')
        print("Running %s" % command)
        os.system(command)

print "Bench runner done."
|
|
|
|
if __name__ == '__main__':
|
|
main()
|