Mirror of https://github.com/llvm/llvm-project.git, synced 2025-05-13 01:36:05 +00:00

For convenience, we had added the folder containing dotest.py to sys.path, so that we could easily write things like `import lldbutil` from anywhere, including any test. This introduces a subtle problem with Python's package system: when unittest2 imports a particular test suite, the test suite is detached from the package. Thus, writing `import lldbutil` from dotest imports it as part of the package, while writing the same line from a test does a fresh import, since the importing module was not part of the same package. The real fix is to use absolute imports everywhere. Instead of writing `import lldbutil`, we need to write `import lldbsuite.test.lldbutil`. This patch fixes up that and all other similar cases, and additionally removes the script directory from sys.path to ensure that this can't happen again. llvm-svn: 251886
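As a rough illustration of the change the commit message describes (a hypothetical snippet, not code taken from the patch itself), a test switches from the bare module import, which only resolved because the test's directory was on sys.path, to an absolute import through the package:

    # Old style: resolved only because the test's directory was on sys.path;
    # a test importing it this way could get a fresh, detached copy of the module.
    import lldbutil

    # New style: absolute import through the package, so dotest.py and every
    # test share the single lldbsuite.test.lldbutil module object.
    from lldbsuite.test import lldbutil

Either this form or `import lldbsuite.test.lldbutil` has the same effect: the module is resolved once, under its package-qualified name.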
132 lines · 4.2 KiB · Python
"""Test evaluating expressions repeatedly comparing lldb against gdb."""
|
|
|
|
from __future__ import print_function
|
|
|
|
import use_lldb_suite
|
|
|
|
import os, sys
|
|
import lldb
|
|
from lldbsuite.test.lldbbench import *
|
|
|
|
class RepeatedExprsCase(BenchBase):
|
|
|
|
mydir = TestBase.compute_mydir(__file__)
|
|
|
|
def setUp(self):
|
|
BenchBase.setUp(self)
|
|
self.source = 'main.cpp'
|
|
self.line_to_break = line_number(self.source, '// Set breakpoint here.')
|
|
self.lldb_avg = None
|
|
self.gdb_avg = None
|
|
self.count = lldb.bmIterationCount
|
|
if self.count <= 0:
|
|
self.count = 100
|
|
|
|
@benchmarks_test
|
|
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
|
|
def test_compare_lldb_to_gdb(self):
|
|
"""Test repeated expressions with lldb vs. gdb."""
|
|
self.build()
|
|
self.exe_name = 'a.out'
|
|
|
|
print()
|
|
self.run_lldb_repeated_exprs(self.exe_name, self.count)
|
|
print("lldb benchmark:", self.stopwatch)
|
|
self.run_gdb_repeated_exprs(self.exe_name, self.count)
|
|
print("gdb benchmark:", self.stopwatch)
|
|
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg))
|
|
|
|
def run_lldb_repeated_exprs(self, exe_name, count):
|
|
import pexpect
|
|
exe = os.path.join(os.getcwd(), exe_name)
|
|
|
|
# Set self.child_prompt, which is "(lldb) ".
|
|
self.child_prompt = '(lldb) '
|
|
prompt = self.child_prompt
|
|
|
|
# So that the child gets torn down after the test.
|
|
self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
|
|
child = self.child
|
|
|
|
# Turn on logging for what the child sends back.
|
|
if self.TraceOn():
|
|
child.logfile_read = sys.stdout
|
|
|
|
child.expect_exact(prompt)
|
|
child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
|
|
child.expect_exact(prompt)
|
|
child.sendline('run')
|
|
child.expect_exact(prompt)
|
|
expr_cmd1 = 'expr ptr[j]->point.x'
|
|
expr_cmd2 = 'expr ptr[j]->point.y'
|
|
|
|
# Reset the stopwatch now.
|
|
self.stopwatch.reset()
|
|
for i in range(count):
|
|
with self.stopwatch:
|
|
child.sendline(expr_cmd1)
|
|
child.expect_exact(prompt)
|
|
child.sendline(expr_cmd2)
|
|
child.expect_exact(prompt)
|
|
child.sendline('process continue')
|
|
child.expect_exact(prompt)
|
|
|
|
child.sendline('quit')
|
|
try:
|
|
self.child.expect(pexpect.EOF)
|
|
except:
|
|
pass
|
|
|
|
self.lldb_avg = self.stopwatch.avg()
|
|
if self.TraceOn():
|
|
print("lldb expression benchmark:", str(self.stopwatch))
|
|
self.child = None
|
|
|
|
def run_gdb_repeated_exprs(self, exe_name, count):
|
|
import pexpect
|
|
exe = os.path.join(os.getcwd(), exe_name)
|
|
|
|
# Set self.child_prompt, which is "(gdb) ".
|
|
self.child_prompt = '(gdb) '
|
|
prompt = self.child_prompt
|
|
|
|
# So that the child gets torn down after the test.
|
|
self.child = pexpect.spawn('gdb --nx %s' % exe)
|
|
child = self.child
|
|
|
|
# Turn on logging for what the child sends back.
|
|
if self.TraceOn():
|
|
child.logfile_read = sys.stdout
|
|
|
|
child.expect_exact(prompt)
|
|
child.sendline('break %s:%d' % (self.source, self.line_to_break))
|
|
child.expect_exact(prompt)
|
|
child.sendline('run')
|
|
child.expect_exact(prompt)
|
|
expr_cmd1 = 'print ptr[j]->point.x'
|
|
expr_cmd2 = 'print ptr[j]->point.y'
|
|
|
|
# Reset the stopwatch now.
|
|
self.stopwatch.reset()
|
|
for i in range(count):
|
|
with self.stopwatch:
|
|
child.sendline(expr_cmd1)
|
|
child.expect_exact(prompt)
|
|
child.sendline(expr_cmd2)
|
|
child.expect_exact(prompt)
|
|
child.sendline('continue')
|
|
child.expect_exact(prompt)
|
|
|
|
child.sendline('quit')
|
|
child.expect_exact('The program is running. Exit anyway?')
|
|
child.sendline('y')
|
|
try:
|
|
self.child.expect(pexpect.EOF)
|
|
except:
|
|
pass
|
|
|
|
self.gdb_avg = self.stopwatch.avg()
|
|
if self.TraceOn():
|
|
print("gdb expression benchmark:", str(self.stopwatch))
|
|
self.child = None
|
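The timing in both runners relies on the stopwatch object that BenchBase provides: each `with self.stopwatch:` block records one lap around the two expression evaluations, and avg() reports the mean over all laps. The following is a minimal stand-in written here only to illustrate that pattern; the real class lives in lldbsuite.test.lldbbench and its details may differ:

    import time

    class IllustrativeStopwatch(object):
        """Accumulates one lap per `with` block; not the lldbbench implementation."""

        def __init__(self):
            self.reset()

        def reset(self):
            # Discard all previously recorded laps.
            self.laps = []

        def __enter__(self):
            self._start = time.time()
            return self

        def __exit__(self, exc_type, exc_value, tb):
            # Record the elapsed time for this `with` block as one lap.
            self.laps.append(time.time() - self._start)
            return False

        def avg(self):
            return sum(self.laps) / len(self.laps) if self.laps else 0.0

        def __str__(self):
            return "avg %.6f s over %d laps" % (self.avg(), len(self.laps))

Dividing lldb_avg by gdb_avg at the end of test_compare_lldb_to_gdb then reduces the two runs to a single ratio that is easy to compare across revisions.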