"""
A stress-test of sorts for LLDB's handling of threads in the inferior.

This test sets a breakpoint in the main thread where test parameters (numbers of
threads) can be adjusted, runs the inferior to that point, and modifies the
locals that control the event thread counts. This test also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a
watchpoint on a global modified in watchpoint_func. The inferior is continued
until exit or a crash takes place, and the number of events seen by LLDB is
verified to match the expected number of events.
"""

import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class ConcurrentEventsBase(TestBase):
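    """Shared machinery for LLDB's concurrent-events stress tests.

    Concrete test cases derive from this class, build the inferior, and call
    do_thread_actions() with the mix of worker threads they want to exercise.
    A hypothetical subclass (the class and parameter choice here are
    illustrative, not taken from the real test files) would look roughly like:

        class ConcurrentBreakpointAndWatchpoint(ConcurrentEventsBase):
            def test(self):
                self.build()
                self.do_thread_actions(
                    num_breakpoint_threads=1, num_watchpoint_threads=1
                )
    """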
    # Concurrency is the primary test factor here, not debug info variants.
    NO_DEBUG_INFO_TESTCASE = True

    def setUp(self):
        # Call super's setUp().
        super(ConcurrentEventsBase, self).setUp()
        # Find the line numbers for our breakpoints.
        self.filename = "main.cpp"
        self.thread_breakpoint_line = line_number(
            self.filename, "// Set breakpoint here"
        )
        self.setup_breakpoint_line = line_number(
            self.filename, "// Break here and adjust num"
        )
        self.finish_breakpoint_line = line_number(
            self.filename, "// Break here and verify one thread is active"
        )

    def describe_threads(self):
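        """Return a human-readable description of every thread in the inferior:
        its index, stopped/running state, its stop reason (including breakpoint
        or watchpoint hit counts and signal names), and a dump of its frames.
        Used to build detailed assertion-failure messages below."""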
        ret = []
        for x in self.inferior_process:
            id = x.GetIndexID()
            reason = x.GetStopReason()
            status = "stopped" if x.IsStopped() else "running"
            reason_str = lldbutil.stop_reason_to_str(reason)
            if reason == lldb.eStopReasonBreakpoint:
                bpid = x.GetStopReasonDataAtIndex(0)
                bp = self.inferior_target.FindBreakpointByID(bpid)
                reason_str = "%s hit %d times" % (
                    lldbutil.get_description(bp),
                    bp.GetHitCount(),
                )
            elif reason == lldb.eStopReasonWatchpoint:
                watchid = x.GetStopReasonDataAtIndex(0)
                watch = self.inferior_target.FindWatchpointByID(watchid)
                reason_str = "%s hit %d times" % (
                    lldbutil.get_description(watch),
                    watch.GetHitCount(),
                )
            elif reason == lldb.eStopReasonSignal:
                signals = self.inferior_process.GetUnixSignals()
                signal_name = signals.GetSignalAsCString(x.GetStopReasonDataAtIndex(0))
                reason_str = "signal %s" % signal_name

            location = "\t".join(
                [
                    lldbutil.get_description(x.GetFrameAtIndex(i))
                    for i in range(x.GetNumFrames())
                ]
            )
            ret.append(
                "thread %d %s due to %s at\n\t%s" % (id, status, reason_str, location)
            )
        return ret

    def add_breakpoint(self, line, descriptions):
        """Add a breakpoint at self.filename:line, append its description to
        descriptions, and return the resulting LLDB SBBreakpoint object.
        """

        bpno = lldbutil.run_break_set_by_file_and_line(
            self, self.filename, line, num_expected_locations=-1
        )
        bp = self.inferior_target.FindBreakpointByID(bpno)
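        # The caller later matches these strings against "breakpoint list -f"
        # output, so record the file/line form that command prints.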
        descriptions.append(": file = 'main.cpp', line = %d" % line)
        return bp

    def inferior_done(self):
        """Return true if the inferior is done executing all the event threads
        (and is stopped at self.finish_breakpoint), or has terminated execution.
        """
        return (
            self.finish_breakpoint.GetHitCount() > 0
            or self.crash_count > 0
            or self.inferior_process.GetState() == lldb.eStateExited
        )

    def count_signaled_threads(self):
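        """Return the number of threads whose current stop reason is delivery
        of SIGUSR1, the signal the inferior's signal threads are expected to
        raise."""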
        count = 0
        for thread in self.inferior_process:
            if (
                thread.GetStopReason() == lldb.eStopReasonSignal
                and thread.GetStopReasonDataAtIndex(0)
                == self.inferior_process.GetUnixSignals().GetSignalNumberFromName(
                    "SIGUSR1"
                )
            ):
                count += 1
        return count

    def do_thread_actions(
        self,
        num_breakpoint_threads=0,
        num_signal_threads=0,
        num_watchpoint_threads=0,
        num_crash_threads=0,
        num_delay_breakpoint_threads=0,
        num_delay_signal_threads=0,
        num_delay_watchpoint_threads=0,
        num_delay_crash_threads=0,
    ):
        """Sets a breakpoint in the main thread where test parameters (numbers of threads) can be adjusted, runs the inferior
        to that point, and modifies the locals that control the event thread counts. Also sets a breakpoint in
        breakpoint_func (the function executed by each 'breakpoint' thread) and a watchpoint on a global modified in
        watchpoint_func. The inferior is continued until exit or a crash takes place, and the number of events seen by LLDB
        is verified to match the expected number of events.
        """
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

        # Get the target
        self.inferior_target = self.dbg.GetSelectedTarget()

        expected_bps = []

        # Initialize all the breakpoints (main thread/aux thread)
        self.setup_breakpoint = self.add_breakpoint(
            self.setup_breakpoint_line, expected_bps
        )
        self.finish_breakpoint = self.add_breakpoint(
            self.finish_breakpoint_line, expected_bps
        )

        # Set the thread breakpoint
        if num_breakpoint_threads + num_delay_breakpoint_threads > 0:
            self.thread_breakpoint = self.add_breakpoint(
                self.thread_breakpoint_line, expected_bps
            )

        # Verify breakpoints
        self.expect(
            "breakpoint list -f",
            "Breakpoint locations shown correctly",
            substrs=expected_bps,
        )

        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)

        # Check we are at line self.setup_breakpoint
        self.expect(
            "thread backtrace",
            STOPPED_DUE_TO_BREAKPOINT,
            substrs=["stop reason = breakpoint 1."],
        )

        # Initialize the (single) watchpoint on the global variable (g_watchme)
        if num_watchpoint_threads + num_delay_watchpoint_threads > 0:
            # The concurrent tests have multiple threads modifying a variable
            # with the same value. The default "modify" style watchpoint will
            # only report this as 1 hit for all threads, because they all wrote
            # the same value. The testsuite needs "write" style watchpoints to
            # get the correct number of hits reported.
            self.runCmd("watchpoint set variable -w write g_watchme")
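            # Only one watchpoint was set, so this loop simply records it and
            # checks its description.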
            for w in self.inferior_target.watchpoint_iter():
                self.thread_watchpoint = w
                self.assertTrue(
                    "g_watchme" in str(self.thread_watchpoint),
                    "Watchpoint location not shown correctly",
                )

        # Get the process
        self.inferior_process = self.inferior_target.GetProcess()

        # We should be stopped at the setup site where we can set the number of
        # threads doing each action (break/crash/signal/watch)
        self.assertEqual(
            self.inferior_process.GetNumThreads(),
            1,
            "Expected to stop before any additional threads are spawned.",
        )
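
        # While stopped at the setup breakpoint, overwrite the locals in the
        # inferior's main thread that control how many worker threads of each
        # kind get spawned.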
        self.runCmd("expr num_breakpoint_threads=%d" % num_breakpoint_threads)
        self.runCmd("expr num_crash_threads=%d" % num_crash_threads)
        self.runCmd("expr num_signal_threads=%d" % num_signal_threads)
        self.runCmd("expr num_watchpoint_threads=%d" % num_watchpoint_threads)

        self.runCmd(
            "expr num_delay_breakpoint_threads=%d" % num_delay_breakpoint_threads
        )
        self.runCmd("expr num_delay_crash_threads=%d" % num_delay_crash_threads)
        self.runCmd("expr num_delay_signal_threads=%d" % num_delay_signal_threads)
        self.runCmd(
            "expr num_delay_watchpoint_threads=%d" % num_delay_watchpoint_threads
        )

        # Continue the inferior so threads are spawned
        self.runCmd("continue")

        # Make sure we see all the threads. The inferior program's threads all
        # synchronize with a pseudo-barrier; that is, the inferior program
        # ensures all threads are started and running before any thread
        # triggers its 'event'.
        num_threads = self.inferior_process.GetNumThreads()
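        # Every requested worker thread should now exist, plus one for the
        # inferior's main thread (the trailing "+ 1" below).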
        expected_num_threads = (
            num_breakpoint_threads
            + num_delay_breakpoint_threads
            + num_signal_threads
            + num_delay_signal_threads
            + num_watchpoint_threads
            + num_delay_watchpoint_threads
            + num_crash_threads
            + num_delay_crash_threads
            + 1
        )
        self.assertEqual(
            num_threads,
            expected_num_threads,
            "Expected to see %d threads, but seeing %d. Details:\n%s"
            % (expected_num_threads, num_threads, "\n\t".join(self.describe_threads())),
        )
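
        # Tally the signal stops and crashed threads seen at this first stop;
        # the loop below keeps accumulating them after every continue.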
        self.signal_count = self.count_signaled_threads()
        self.crash_count = len(
            lldbutil.get_crashed_threads(self, self.inferior_process)
        )

        # Run to completion (or crash)
        while not self.inferior_done():
            if self.TraceOn():
                self.runCmd("thread backtrace all")
            self.runCmd("continue")
            self.signal_count += self.count_signaled_threads()
            self.crash_count += len(
                lldbutil.get_crashed_threads(self, self.inferior_process)
            )

        if num_crash_threads > 0 or num_delay_crash_threads > 0:
            # Expecting a crash
            self.assertTrue(
                self.crash_count > 0,
                "Expecting at least one thread to crash. Details: %s"
                % "\n\t".join(self.describe_threads()),
            )

            # Ensure the zombie process is reaped
            self.runCmd("process kill")

        elif num_crash_threads == 0 and num_delay_crash_threads == 0:
            # There should be a single active thread (the main one) which hit
            # the breakpoint after joining
            self.assertEqual(
                1,
                self.finish_breakpoint.GetHitCount(),
                "Expected main thread (finish) breakpoint to be hit once",
            )

            # There should be a single active thread (the main one) which hit
            # the breakpoint after joining. Depending on the pthread
            # implementation we may have a worker thread finishing the pthread_join()
            # after it has returned. Filter the threads to only count those
            # with user functions on them from our test case file,
            # lldb/test/API/functionalities/thread/concurrent_events/main.cpp
            user_code_funcnames = [
                "breakpoint_func",
                "crash_func",
                "do_action_args",
                "dotest",
                "main",
                "register_signal_handler",
                "signal_func",
                "sigusr1_handler",
                "start_threads",
                "watchpoint_func",
            ]
            num_threads_with_usercode = 0
            for t in self.inferior_process.threads:
                thread_has_user_code = False
                for f in t.frames:
                    for funcname in user_code_funcnames:
                        if funcname in f.GetDisplayFunctionName():
                            thread_has_user_code = True
                            break
                if thread_has_user_code:
                    num_threads_with_usercode += 1

            self.assertEqual(
                1,
                num_threads_with_usercode,
                "Expecting 1 thread but seeing %d. Details:%s"
                % (num_threads_with_usercode, "\n\t".join(self.describe_threads())),
            )
            self.runCmd("continue")

        # The inferior process should have exited without crashing
        self.assertEqual(
            0, self.crash_count, "Unexpected thread(s) in crashed state"
        )
        self.assertEqual(
            self.inferior_process.GetState(), lldb.eStateExited, PROCESS_EXITED
        )

        # Verify the number of actions that took place matches the expected numbers
        expected_breakpoint_threads = (
            num_delay_breakpoint_threads + num_breakpoint_threads
        )
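        # self.thread_breakpoint is only created when breakpoint threads were
        # requested, so only read its hit count in that case.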
        breakpoint_hit_count = (
            self.thread_breakpoint.GetHitCount()
            if expected_breakpoint_threads > 0
            else 0
        )
        self.assertEqual(
            expected_breakpoint_threads,
            breakpoint_hit_count,
            "Expected %d breakpoint hits, but got %d"
            % (expected_breakpoint_threads, breakpoint_hit_count),
        )

        expected_signal_threads = num_delay_signal_threads + num_signal_threads
        self.assertEqual(
            expected_signal_threads,
            self.signal_count,
            "Expected %d stops due to signal delivery, but got %d"
            % (expected_signal_threads, self.signal_count),
        )

        expected_watchpoint_threads = (
            num_delay_watchpoint_threads + num_watchpoint_threads
        )
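        # Likewise, self.thread_watchpoint only exists when watchpoint threads
        # were requested.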
        watchpoint_hit_count = (
            self.thread_watchpoint.GetHitCount()
            if expected_watchpoint_threads > 0
            else 0
        )
        self.assertEqual(
            expected_watchpoint_threads,
            watchpoint_hit_count,
            "Expected %d watchpoint hits, got %d"
            % (expected_watchpoint_threads, watchpoint_hit_count),
        )