
This moves all the global variables into a separate module called `configuration`. This has a number of advantages:

1. Configuration data is centrally maintained, so it's easy to get a high-level overview of what configuration data the test suite makes use of.
2. The method of sharing configuration data among different parts of the test suite becomes standardized. Previously we would put some things into the `lldb` module, some things into the `lldbtest_config` module, and some things would not get shared. Now everything is shared through one module and is available to the entire test suite.
3. It opens the door to moving some of the initialization code into the `configuration` module, simplifying the implementation of `dotest.py`.

There are a few stragglers that didn't get converted over to using the `configuration` module in this patch, because it would have grown the size of the patch unnecessarily. This includes everything currently in the `lldbtest_config` module, as well as the `lldb.remote_platform` variable. We can address these in the future.

llvm-svn: 254982
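To make the new pattern concrete, here is a minimal sketch (not taken from the patch itself; the default value and comments are illustrative assumptions) of how a test now reads a shared setting from the single `configuration` module instead of from a global stashed in `lldb` or `lldbtest_config`:

    # lldbsuite/test/configuration.py (sketch): one central module owns
    # suite-wide settings such as the benchmark iteration count.
    bmIterationCount = -1  # assumed placeholder; dotest.py overwrites it from the command line

    # In any test file:
    from lldbsuite.test import configuration

    count = configuration.bmIterationCount
    if count <= 0:
        count = 25  # fall back to a local default, as the benchmark below does

The benchmark test below is one of the files updated to read its iteration count this way.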
"""Test lldb's expression evaluations and collect statistics."""
|
|
|
|
from __future__ import print_function
|
|
|
|
|
|
|
|
import os, sys
|
|
import lldb
|
|
from lldbsuite.test import configuration
|
|
from lldbsuite.test.lldbbench import *
|
|
|
|
class ExpressionEvaluationCase(BenchBase):

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        BenchBase.setUp(self)
        self.source = 'main.cpp'
        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
        self.count = configuration.bmIterationCount
        if self.count <= 0:
            self.count = 25

    @benchmarks_test
    @expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
    def test_expr_cmd(self):
        """Test lldb's expression commands and collect statistics."""
        self.build()
        self.exe_name = 'a.out'

        print()
        self.run_lldb_repeated_exprs(self.exe_name, self.count)
        print("lldb expr cmd benchmark:", self.stopwatch)

    def run_lldb_repeated_exprs(self, exe_name, count):
        import pexpect
        exe = os.path.join(os.getcwd(), exe_name)

        # Set self.child_prompt, which is "(lldb) ".
        self.child_prompt = '(lldb) '
        prompt = self.child_prompt

        # Reset the stopwatch now.
        self.stopwatch.reset()
        for i in range(count):
            # So that the child gets torn down after the test.
            self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
            child = self.child

            # Turn on logging for what the child sends back.
            if self.TraceOn():
                child.logfile_read = sys.stdout

            child.expect_exact(prompt)
            child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
            child.expect_exact(prompt)
            child.sendline('run')
            child.expect_exact(prompt)
            expr_cmd1 = 'expr ptr[j]->point.x'
            expr_cmd2 = 'expr ptr[j]->point.y'

            with self.stopwatch:
                child.sendline(expr_cmd1)
                child.expect_exact(prompt)
                child.sendline(expr_cmd2)
                child.expect_exact(prompt)

            child.sendline('quit')
            try:
                self.child.expect(pexpect.EOF)
            except:
                pass

            self.child = None