[lldb][test] Remove benchmark API tests (#108629)

These benchmarks don't get run as part of the regular API test-suite,
and I'm not aware of any CI running them. I also haven't quite managed
to actually run them locally using the `bench.py` script. It looks like
they are obsolete, so I'm proposing to remove the infrastructure around
them entirely.

If anyone does know of a use for these, do let me know.
Michael Buch 2024-09-16 10:15:52 +01:00 committed by GitHub
parent 9e9b1178ca
commit 7e5fe3ec5a
19 changed files with 0 additions and 925 deletions


@ -1,77 +0,0 @@
#!/usr/bin/env python
"""
A simple bench runner that delegates to the ./dotest.py test driver to run the
benchmarks defined in the list named 'benches'.
You need to hand-edit 'benches' to change the command lines passed to the
test driver.
Use the following to get only the benchmark results in your terminal output:
./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
"""
import os
from optparse import OptionParser
# A dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
# unless the benchmark supplies its own custom executable.
benches = [
# Measure startup delays when creating a target, setting a breakpoint, and
# running to the breakpoint stop.
"./dotest.py -v +b %E %X -n -p TestStartupDelays.py",
# Measure 'frame variable' response after stopping at a breakpoint.
"./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py",
# Measure stepping speed after stopping at a breakpoint.
"./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py",
# Measure expression cmd response with a simple custom executable program.
"./dotest.py +b -n -p TestExpressionCmd.py",
# Attach to a spawned process then run disassembly benchmarks.
"./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py",
]
def main():
"""Read the items from 'benches' and run the command line one by one."""
parser = OptionParser(
usage="""\
%prog [options]
Run the standard benchmarks defined in the list named 'benches'.\
"""
)
parser.add_option(
"-e",
"--executable",
type="string",
action="store",
dest="exe",
help="The target program launched by lldb.",
)
parser.add_option(
"-x",
"--breakpoint-spec",
type="string",
action="store",
dest="break_spec",
help="The lldb breakpoint spec for the target program.",
)
# Parses the options, if any.
opts, args = parser.parse_args()
print("Starting bench runner....")
for item in benches:
command = item.replace("%E", '-e "%s"' % opts.exe if opts.exe else "")
command = command.replace(
"%X", '-x "%s"' % opts.break_spec if opts.break_spec else ""
)
print("Running %s" % (command))
os.system(command)
print("Bench runner done.")
if __name__ == "__main__":
main()
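
For reference, the %E and %X placeholders in the 'benches' list above expanded into dotest.py's -e and -x flags. Below is a standalone sketch of that substitution (not part of the diff; the executable path and breakpoint spec are made up):

# Sketch of the placeholder substitution performed by the removed bench.py.
# The executable path and breakpoint spec below are hypothetical.
def expand(template, exe=None, break_spec=None):
    command = template.replace("%E", '-e "%s"' % exe if exe else "")
    return command.replace("%X", '-x "%s"' % break_spec if break_spec else "")

print(expand("./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py",
             exe="/path/to/lldb", break_spec="-F Driver::MainLoop()"))
# prints: ./dotest.py -v +b -e "/path/to/lldb" -x "-F Driver::MainLoop()" -n -p TestSteppingSpeed.py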


@ -426,18 +426,6 @@ def add_test_categories(cat):
return impl
def benchmarks_test(func):
"""Decorate the item as a benchmarks test."""
def should_skip_benchmarks_test():
return "benchmarks test"
# Mark this function as such to separate them from the regular tests.
result = skipTestIfFn(should_skip_benchmarks_test)(func)
result.__benchmarks_test__ = True
return result
def no_debug_info_test(func):
"""Decorate the item as a test what don't use any debug info. If this annotation is specified
then the test runner won't generate a separate test for each debug info format."""
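
Note that the removed benchmarks_test decorator is also why these benchmarks never ran as part of the regular suite: every decorated method was unconditionally skipped. A self-contained illustration of that pattern, using plain unittest.skip in place of lldbsuite's skipTestIfFn:

import unittest

def benchmarks_test(func):
    """Mark func as a benchmark and skip it during regular test runs."""
    wrapped = unittest.skip("benchmarks test")(func)
    wrapped.__benchmarks_test__ = True  # tag it so a dedicated bench runner could find it
    return wrapped

class Example(unittest.TestCase):
    @benchmarks_test
    def test_something_expensive(self):
        pass  # only meaningful if a bench runner bypasses the skip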


@ -1,3 +0,0 @@
CXX_SOURCES := main.cpp
include Makefile.rules


@ -1,65 +0,0 @@
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkContinue(BenchBase):
@benchmarks_test
def test_run_command(self):
"""Benchmark different ways to continue a process"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark different ways to continue a process"""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(self, "// break here")
)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd("type format clear", check=False)
self.runCmd("type summary clear", check=False)
self.runCmd("type filter clear", check=False)
self.runCmd("type synth clear", check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
runCmd_sw = Stopwatch()
lldbutil_sw = Stopwatch()
for i in range(0, 15):
runCmd_sw.start()
self.runCmd("continue")
runCmd_sw.stop()
for i in range(0, 15):
lldbutil_sw.start()
lldbutil.continue_to_breakpoint(self.process(), bkpt)
lldbutil_sw.stop()
print("runCmd: %s\nlldbutil: %s" % (runCmd_sw, lldbutil_sw))


@ -1,36 +0,0 @@
#include <map>
#define intint_map std::map<int, int>
int g_the_foo = 0;
int thefoo_rw(int arg = 1)
{
if (arg < 0)
arg = 0;
if (!arg)
arg = 1;
g_the_foo += arg;
return g_the_foo;
}
int main()
{
intint_map ii;
for (int i = 0; i < 15; i++)
{
ii[i] = i + 1;
thefoo_rw(i); // break here
}
ii.clear();
for (int j = 0; j < 15; j++)
{
ii[j] = j + 1;
thefoo_rw(j); // break here
}
return 0;
}


@ -1,3 +0,0 @@
CXX_SOURCES := main.cpp
include Makefile.rules


@ -1,74 +0,0 @@
"""Test lldb's expression evaluations and collect statistics."""
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class ExpressionEvaluationCase(BenchBase):
def setUp(self):
BenchBase.setUp(self)
self.source = "main.cpp"
self.line_to_break = line_number(self.source, "// Set breakpoint here.")
self.count = 25
@benchmarks_test
@add_test_categories(["pexpect"])
def test_expr_cmd(self):
"""Test lldb's expression commands and collect statistics."""
self.build()
self.exe_name = "a.out"
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb expr cmd benchmark:", self.stopwatch)
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = self.getBuildArtifact(exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
prompt = self.child_prompt
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline(
"breakpoint set -f %s -l %d" % (self.source, self.line_to_break)
)
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
expr_cmd1 = "expr ptr[j]->point.x"
expr_cmd2 = "expr ptr[j]->point.y"
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None


@ -1,131 +0,0 @@
"""Test evaluating expressions repeatedly comparing lldb against gdb."""
import sys
import lldb
from lldbsuite.test.lldbbench import BenchBase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class RepeatedExprsCase(BenchBase):
def setUp(self):
BenchBase.setUp(self)
self.source = "main.cpp"
self.line_to_break = line_number(self.source, "// Set breakpoint here.")
self.lldb_avg = None
self.gdb_avg = None
self.count = 100
@benchmarks_test
@add_test_categories(["pexpect"])
def test_compare_lldb_to_gdb(self):
"""Test repeated expressions with lldb vs. gdb."""
self.build()
self.exe_name = "a.out"
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb benchmark:", self.stopwatch)
self.run_gdb_repeated_exprs(self.exe_name, self.count)
print("gdb benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = self.getBuildArtifact(exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline("breakpoint set -f %s -l %d" % (self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
expr_cmd1 = "expr ptr[j]->point.x"
expr_cmd2 = "expr ptr[j]->point.y"
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline("process continue")
child.expect_exact(prompt)
child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.lldb_avg = self.stopwatch.avg()
if self.TraceOn():
print("lldb expression benchmark:", str(self.stopwatch))
self.child = None
def run_gdb_repeated_exprs(self, exe_name, count):
import pexpect
exe = self.getBuildArtifact(exe_name)
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = "(gdb) "
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn("gdb --nx %s" % exe)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline("break %s:%d" % (self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
expr_cmd1 = "print ptr[j]->point.x"
expr_cmd2 = "print ptr[j]->point.y"
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline("continue")
child.expect_exact(prompt)
child.sendline("quit")
child.expect_exact("The program is running. Exit anyway?")
child.sendline("y")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.gdb_avg = self.stopwatch.avg()
if self.TraceOn():
print("gdb expression benchmark:", str(self.stopwatch))
self.child = None


@ -1,43 +0,0 @@
#include <stdio.h>
class Point {
public:
int x;
int y;
Point(int a, int b):
x(a),
y(b)
{}
};
class Data {
public:
int id;
Point point;
Data(int i):
id(i),
point(0, 0)
{}
};
int main(int argc, char const *argv[]) {
Data *data[1000];
Data **ptr = data;
for (int i = 0; i < 1000; ++i) {
ptr[i] = new Data(i);
ptr[i]->point.x = i;
ptr[i]->point.y = i+1;
}
printf("Finished populating data.\n");
for (int j = 0; j < 1000; ++j) {
bool dump = argc > 1; // Set breakpoint here.
// Evaluate a couple of expressions (2*1000 = 2000 exprs):
// expr ptr[j]->point.x
// expr ptr[j]->point.y
if (dump) {
printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
}
}
return 0;
}


@ -1,68 +0,0 @@
"""Test lldb's response time for 'frame variable' command."""
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
class FrameVariableResponseBench(BenchBase):
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.break_spec = "-n main"
self.count = 20
@benchmarks_test
@no_debug_info_test
@add_test_categories(["pexpect"])
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print()
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print("lldb frame variable benchmark:", self.stopwatch)
def run_frame_variable_bench(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
prompt = self.child_prompt
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline("breakpoint set %s" % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline("run") # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline("frame variable")
child.expect_exact(prompt)
child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we get here, the child process has
# been terminated. Mark it so.
self.child = None
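
Distilled from the pexpect pattern in this test, a minimal standalone script that times a single 'frame variable' response could look like the sketch below (the lldb invocation and target path are hypothetical; it requires pexpect and an lldb on PATH):

import time
import pexpect

PROMPT = "(lldb) "
# Hypothetical debug-built target; any executable with a main() will do.
child = pexpect.spawn("lldb /path/to/a.out")
child.expect_exact(PROMPT)
child.sendline("breakpoint set -n main")
child.expect_exact(PROMPT)
child.sendline("run")
child.expect_exact(PROMPT)

start = time.time()
child.sendline("frame variable")  # the command being measured
child.expect_exact(PROMPT)
print("frame variable response: %f seconds" % (time.time() - start))

child.sendline("process kill")
child.expect_exact(PROMPT)
child.sendline("quit")
child.expect(pexpect.EOF)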


@ -1,3 +0,0 @@
CXX_SOURCES := main.cpp
include Makefile.rules


@ -1,58 +0,0 @@
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkLibcxxList(BenchBase):
@benchmarks_test
def test_run_command(self):
"""Benchmark the std::list data formatter (libc++)"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the std::list data formatter (libc++)"""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(self, "break here")
)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd("type format clear", check=False)
self.runCmd("type summary clear", check=False)
self.runCmd("type filter clear", check=False)
self.runCmd("type synth clear", check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect("frame variable -A list", substrs=["[300]", "300"])
sw.stop()
print("time to print: %s" % (sw))


@ -1,11 +0,0 @@
#include <list>
int main()
{
std::list<int> list;
for (int i = 0;
i < 1500;
i++)
list.push_back(i);
return list.size(); // break here
}


@ -1,3 +0,0 @@
CXX_SOURCES := main.cpp
include Makefile.rules


@ -1,58 +0,0 @@
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkLibcxxMap(BenchBase):
@benchmarks_test
def test_run_command(self):
"""Benchmark the std::map data formatter (libc++)"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the std::map data formatter (libc++)"""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(self, "break here")
)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd("type format clear", check=False)
self.runCmd("type summary clear", check=False)
self.runCmd("type filter clear", check=False)
self.runCmd("type synth clear", check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect("frame variable -A map", substrs=["[300]", "300"])
sw.stop()
print("time to print: %s" % (sw))


@ -1,11 +0,0 @@
#include <map>
int main()
{
std::map<int, int> map;
for (int i = 0;
i < 1500;
i++)
map[i] = i;
return map.size(); // break here
}


@ -1,78 +0,0 @@
"""Test lldb's startup delays creating a target, setting a breakpoint, and run to breakpoint stop."""
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
class StartupDelaysBench(BenchBase):
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
# Create self.stopwatch3 for measuring "run to breakpoint".
self.stopwatch3 = Stopwatch()
self.exe = lldbtest_config.lldbExec
self.break_spec = "-n main"
self.count = 30
@benchmarks_test
@no_debug_info_test
@add_test_categories(["pexpect"])
def test_startup_delay(self):
"""Test start up delays creating a target, setting a breakpoint, and run to breakpoint stop."""
print()
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print("lldb startup delay (create fresh target) benchmark:", self.stopwatch)
print("lldb startup delay (set first breakpoint) benchmark:", self.stopwatch2)
print("lldb startup delay (run to breakpoint) benchmark:", self.stopwatch3)
def run_startup_delays_bench(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
prompt = self.child_prompt
# Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s" % (lldbtest_config.lldbExec, self.lldbOption)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline("file %s" % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline("breakpoint set %s" % break_spec)
child.expect_exact(prompt)
with self.stopwatch3:
# Run to the breakpoint just set.
child.sendline("run")
child.expect_exact(prompt)
child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we get here, the child process has
# been terminated. Mark it so.
self.child = None


@ -1,69 +0,0 @@
"""Test lldb's stepping speed."""
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SteppingSpeedBench(BenchBase):
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.break_spec = "-n main"
self.count = 50
self.trace("self.exe=%s" % self.exe)
self.trace("self.break_spec=%s" % self.break_spec)
@benchmarks_test
@no_debug_info_test
@add_test_categories(["pexpect"])
def test_run_lldb_steppings(self):
"""Test lldb steppings on a large executable."""
print()
self.run_lldb_steppings(self.exe, self.break_spec, self.count)
print("lldb stepping benchmark:", self.stopwatch)
def run_lldb_steppings(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline("breakpoint set %s" % break_spec)
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Step over one source line.
child.sendline("next") # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None


@ -1,122 +0,0 @@
"""Benchmark the turnaround time starting a debugger and run to the breakpoint with lldb vs. gdb."""
import sys
import lldb
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class CompileRunToBreakpointBench(BenchBase):
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.function = "Driver::MainLoop()"
self.count = 3
self.lldb_avg = None
self.gdb_avg = None
@benchmarks_test
@no_debug_info_test
@add_test_categories(["pexpect"])
def test_run_lldb_then_gdb(self):
"""Benchmark turnaround time with lldb vs. gdb."""
print()
self.run_lldb_turnaround(self.exe, self.function, self.count)
print("lldb turnaround benchmark:", self.stopwatch)
self.run_gdb_turnaround(self.exe, self.function, self.count)
print("gdb turnaround benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
def run_lldb_turnaround(self, exe, function, count):
import pexpect
def run_one_round():
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
"%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline("breakpoint set -F %s" % function)
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = "(lldb) "
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count + 1):
# Ignore the turnaround time of the first 'invoke lldb and run to the
# breakpoint' round.
if i == 0:
run_one_round()
else:
with self.stopwatch:
run_one_round()
self.child.sendline("quit")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.lldb_avg = self.stopwatch.avg()
self.child = None
def run_gdb_turnaround(self, exe, function, count):
import pexpect
def run_one_round():
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn("gdb --nx %s" % exe)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline("break %s" % function)
child.expect_exact(prompt)
child.sendline("run")
child.expect_exact(prompt)
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = "(gdb) "
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count + 1):
# Ignore the turnaround time of the first 'invoke gdb and run to the
# breakpoint' round.
if i == 0:
run_one_round()
else:
with self.stopwatch:
run_one_round()
self.child.sendline("quit")
self.child.expect_exact("The program is running. Exit anyway?")
self.child.sendline("y")
try:
self.child.expect(pexpect.EOF)
except:
pass
self.gdb_avg = self.stopwatch.avg()
self.child = None