From aaa82ff9ad1d66e36ece8fb8f0440a71dc22262a Mon Sep 17 00:00:00 2001
From: Johnny Chen
Date: Tue, 2 Aug 2011 22:54:37 +0000
Subject: [PATCH] Add the real benchmarks comparing lldb against gdb for
 repeated expression evaluations.

Modify lldbbench.py so that the lldbtest.line_number() utility function is
available to BenchBase clients as just line_number(), and modify lldbtest.py
so that self.lldbExec (the full path to the 'lldb' executable) is available
to BenchBase clients as well.

An example run of the test case on my MacBook Pro running Lion:

1: test_compare_lldb_to_gdb (TestRepeatedExprs.RepeatedExprsCase)
   Test repeated expressions with lldb vs. gdb. ...
lldb_avg: 0.204339
gdb_avg: 0.205721
lldb_avg/gdb_avg: 0.993284
ok

llvm-svn: 136740
---
 .../benchmarks/example/TestRepeatedExprs.py | 124 +++++++++++++++---
 lldb/test/benchmarks/example/main.cpp       |   8 +-
 lldb/test/lldbbench.py                      |   5 +-
 lldb/test/lldbtest.py                       |   6 +-
 4 files changed, 117 insertions(+), 26 deletions(-)

diff --git a/lldb/test/benchmarks/example/TestRepeatedExprs.py b/lldb/test/benchmarks/example/TestRepeatedExprs.py
index 7e0c3d352b34..4e6b71edf5d3 100644
--- a/lldb/test/benchmarks/example/TestRepeatedExprs.py
+++ b/lldb/test/benchmarks/example/TestRepeatedExprs.py
@@ -1,6 +1,6 @@
 """Test evaluating expressions repeatedly comparing lldb against gdb."""
 
-import os
+import os, sys
 import unittest2
 import lldb
 import pexpect
@@ -10,28 +10,118 @@ class RepeatedExprsCase(BenchBase):
 
     mydir = os.path.join("benchmarks", "example")
 
-    @benchmarks_test
-    def test_with_lldb(self):
-        """Test repeated expressions with lldb."""
-        self.buildDefault()
-        self.run_lldb_repeated_exprs()
+    def setUp(self):
+        BenchBase.setUp(self)
+        self.source = 'main.cpp'
+        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.lldb_avg = None
+        self.gdb_avg = None
 
     @benchmarks_test
-    def test_with_gdb(self):
-        """Test repeated expressions with gdb."""
+    def test_compare_lldb_to_gdb(self):
+        """Test repeated expressions with lldb vs. gdb."""
         self.buildDefault()
-        self.run_gdb_repeated_exprs()
+        self.exe_name = 'a.out'
 
-    def run_lldb_repeated_exprs(self):
-        for i in range(1000):
+        print
+        self.run_lldb_repeated_exprs(self.exe_name, 100)
+        self.run_gdb_repeated_exprs(self.exe_name, 100)
+        print "lldb_avg: %f" % self.lldb_avg
+        print "gdb_avg: %f" % self.gdb_avg
+        print "lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg)
+
+    def run_lldb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+
+        # Set self.child_prompt, which is "(lldb) ".
+        self.child_prompt = '(lldb) '
+        prompt = self.child_prompt
+
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('%s %s' % (self.lldbExec, exe))
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'expr ptr[j]->point.x'
+        expr_cmd2 = 'expr ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
             with self.stopwatch:
-                print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
-        print "stopwatch:", str(self.stopwatch)
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+                child.sendline('process continue')
+                child.expect_exact(prompt)
+
+        child.sendline('quit')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.lldb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "lldb expression benchmark:", str(self.stopwatch)
+        self.child = None
+
+    def run_gdb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+
+        # Set self.child_prompt, which is "(gdb) ".
+        self.child_prompt = '(gdb) '
+        prompt = self.child_prompt
+
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('gdb %s' % exe)
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('break %s:%d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'print ptr[j]->point.x'
+        expr_cmd2 = 'print ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
+            with self.stopwatch:
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+                child.sendline('continue')
+                child.expect_exact(prompt)
+
+        child.sendline('quit')
+        child.expect_exact('The program is running. Exit anyway?')
+        child.sendline('y')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.gdb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "gdb expression benchmark:", str(self.stopwatch)
+        self.child = None
 
-    def run_gdb_repeated_exprs(self):
-        print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
 
 if __name__ == '__main__':
     import atexit
diff --git a/lldb/test/benchmarks/example/main.cpp b/lldb/test/benchmarks/example/main.cpp
index 730a704a7f81..1a095d350227 100644
--- a/lldb/test/benchmarks/example/main.cpp
+++ b/lldb/test/benchmarks/example/main.cpp
@@ -30,13 +30,13 @@ int main(int argc, char const *argv[]) {
     }
     printf("Finished populating data.\n");
-    for (int i = 0; i < 1000; ++i) {
+    for (int j = 0; j < 1000; ++j) {
         bool dump = argc > 1; // Set breakpoint here.
                               // Evaluate a couple of expressions (2*1000 = 2000 exprs):
-                              // expr ptr[i]->point.x
-                              // expr ptr[i]->point.y
+                              // expr ptr[j]->point.x
+                              // expr ptr[j]->point.y
         if (dump) {
-            printf("data[%d] = %d (%d, %d)\n", i, ptr[i]->id, ptr[i]->point.x, ptr[i]->point.y);
+            printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
         }
     }
     return 0;
diff --git a/lldb/test/lldbbench.py b/lldb/test/lldbbench.py
index 4bc220b19de6..e0c7814f646b 100644
--- a/lldb/test/lldbbench.py
+++ b/lldb/test/lldbbench.py
@@ -1,6 +1,7 @@
 import time
-from lldbtest import benchmarks_test
 from lldbtest import Base
+from lldbtest import benchmarks_test
+from lldbtest import line_number
 
 class Stopwatch(object):
     """Stopwatch provides a simple utility to start/stop your stopwatch multiple
@@ -80,7 +81,7 @@ class Stopwatch(object):
         return self.__total_elapsed__ / self.__laps__
 
     def __str__(self):
-        return "Avg: %f (Laps: %d, Total Elapsed Time: %d)" % (self.avg(),
+        return "Avg: %f (Laps: %d, Total Elapsed Time: %f)" % (self.avg(),
                                                                self.__laps__,
                                                                self.__total_elapsed__)
 
diff --git a/lldb/test/lldbtest.py b/lldb/test/lldbtest.py
index dea1a235cc05..3932dad52e23 100644
--- a/lldb/test/lldbtest.py
+++ b/lldb/test/lldbtest.py
@@ -451,6 +451,9 @@ class Base(unittest2.TestCase):
         #import traceback
         #traceback.print_stack()
 
+        if "LLDB_EXEC" in os.environ:
+            self.lldbExec = os.environ["LLDB_EXEC"]
+
         # Assign the test method name to self.testMethodName.
         #
         # For an example of the use of this attribute, look at test/types dir.
@@ -837,9 +840,6 @@ class TestBase(Base):
         # Works with the test driver to conditionally skip tests via decorators.
         Base.setUp(self)
 
-        if "LLDB_EXEC" in os.environ:
-            self.lldbExec = os.environ["LLDB_EXEC"]
-
         try:
             if lldb.blacklist:
                 className = self.__class__.__name__
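
The lldb_avg and gdb_avg figures above come from the Stopwatch context manager
in lldb/test/lldbbench.py: each "with self.stopwatch:" block counts as one lap
(two expression evaluations plus a continue), and avg() divides the total
elapsed time by the number of laps. What follows is a minimal, self-contained
sketch of that lap-timing pattern; only reset(), avg(), __str__(), __laps__,
and __total_elapsed__ are visible in the diff, so the __start__ attribute and
the use of time.time() here are assumptions for illustration, not the actual
lldbbench implementation.

import time

class Stopwatch(object):
    """Sketch of the lap-timing pattern used by lldbbench.Stopwatch."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Discard any previously accumulated laps, as the test does
        # before each measurement loop.
        self.__total_elapsed__ = 0.0
        self.__laps__ = 0

    def __enter__(self):
        # Entering the 'with' block starts one lap (assumed: wall clock).
        self.__start__ = time.time()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Leaving the block stops the lap and accumulates its elapsed time.
        self.__total_elapsed__ += time.time() - self.__start__
        self.__laps__ += 1
        return False

    def avg(self):
        # Average seconds per lap; this is what lldb_avg and gdb_avg report.
        return self.__total_elapsed__ / self.__laps__

    def __str__(self):
        return "Avg: %f (Laps: %d, Total Elapsed Time: %f)" % (
            self.avg(), self.__laps__, self.__total_elapsed__)

# One lap per round trip, mirroring the measurement loops in the test.
sw = Stopwatch()
for i in range(100):
    with sw:
        pass  # the child.sendline()/expect_exact() traffic being timed
print(str(sw))

Note also that the lldb child is spawned from self.lldbExec, which Base.setUp()
now reads from the LLDB_EXEC environment variable, so the comparison pits
whatever binary LLDB_EXEC points at against the gdb found on PATH.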