llvm-project/lldb/packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py

"""Benchmark the turnaround time starting a debugger and run to the breakpont with lldb vs. gdb."""
from __future__ import print_function
import use_lldb_suite
import os, sys
import lldb
from lldbbench import *
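
# Each benchmark below spawns a debugger under pexpect, sets a breakpoint on
# Driver::MainLoop() in the lldb binary itself, runs to it, and repeats this
# count + 1 times: the first round is an untimed warm-up, and the remaining
# rounds are averaged with the lldbbench stopwatch so the lldb and gdb
# figures can be compared directly.
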
class CompileRunToBreakpointBench(BenchBase):

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        BenchBase.setUp(self)
        self.exe = lldbtest_config.lldbExec
        self.function = 'Driver::MainLoop()'
        self.count = lldb.bmIterationCount
        if self.count <= 0:
            self.count = 3

        self.lldb_avg = None
        self.gdb_avg = None
    @benchmarks_test
    @no_debug_info_test
    @expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
    def test_run_lldb_then_gdb(self):
        """Benchmark turnaround time with lldb vs. gdb."""
        print()
        self.run_lldb_turnaround(self.exe, self.function, self.count)
        print("lldb turnaround benchmark:", self.stopwatch)
        self.run_gdb_turnaround(self.exe, self.function, self.count)
        print("gdb turnaround benchmark:", self.stopwatch)
        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
    def run_lldb_turnaround(self, exe, function, count):
        import pexpect

        def run_one_round():
            prompt = self.child_prompt

            # Spawn through self.child so that the child gets torn down after
            # the test.
            self.child = pexpect.spawn(
                '%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
            child = self.child

            # Turn on logging for what the child sends back.
            if self.TraceOn():
                child.logfile_read = sys.stdout

            child.expect_exact(prompt)
            child.sendline('breakpoint set -F %s' % function)
            child.expect_exact(prompt)
            child.sendline('run')
            child.expect_exact(prompt)

        # Set self.child_prompt, which is "(lldb) ".
        self.child_prompt = '(lldb) '
        # Reset the stopwatch now.
        self.stopwatch.reset()

        for i in range(count + 1):
            # The first launch-and-run-to-breakpoint round is an untimed
            # warm-up; only the remaining rounds feed the average.
            if i == 0:
                run_one_round()
            else:
                with self.stopwatch:
                    run_one_round()

        self.child.sendline('quit')
        try:
            self.child.expect(pexpect.EOF)
        except Exception:
            pass

        self.lldb_avg = self.stopwatch.avg()
        self.child = None
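
    # The gdb variant mirrors run_lldb_turnaround: the same warm-up round and
    # stopwatch, with gdb's 'break' command and '(gdb) ' prompt substituted.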
    def run_gdb_turnaround(self, exe, function, count):
        import pexpect

        def run_one_round():
            prompt = self.child_prompt

            # Spawn through self.child so that the child gets torn down after
            # the test.
            self.child = pexpect.spawn('gdb --nx %s' % exe)
            child = self.child

            # Turn on logging for what the child sends back.
            if self.TraceOn():
                child.logfile_read = sys.stdout

            child.expect_exact(prompt)
            child.sendline('break %s' % function)
            child.expect_exact(prompt)
            child.sendline('run')
            child.expect_exact(prompt)

        # Set self.child_prompt, which is "(gdb) ".
        self.child_prompt = '(gdb) '
        # Reset the stopwatch now.
        self.stopwatch.reset()

        for i in range(count + 1):
            # The first launch-and-run-to-breakpoint round is an untimed
            # warm-up; only the remaining rounds feed the average.
            if i == 0:
                run_one_round()
            else:
                with self.stopwatch:
                    run_one_round()

        self.child.sendline('quit')
        self.child.expect_exact('The program is running. Exit anyway?')
        self.child.sendline('y')
        try:
            self.child.expect(pexpect.EOF)
        except Exception:
            pass

        self.gdb_avg = self.stopwatch.avg()
        self.child = None
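

# For readers without the lldb test suite at hand: the 'with self.stopwatch:'
# timing above relies on lldbbench.Stopwatch, whose context manager
# accumulates the elapsed time of each block and whose avg() returns the mean
# over the timed rounds. The class below is a minimal stand-in sketching that
# pattern; the name and internals are illustrative, not the actual lldbbench
# implementation.

import time


class _StopwatchSketch(object):
    """Accumulate wall-clock time over repeated 'with' blocks."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Drop any previously recorded laps.
        self._total = 0.0
        self._laps = 0

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self._total += time.time() - self._start
        self._laps += 1

    def avg(self):
        # Mean elapsed time per timed round; 0.0 before any round completes.
        return self._total / self._laps if self._laps else 0.0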