"""
|
2019-01-19 08:50:56 +00:00
|
|
|
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2015-12-08 01:15:44 +00:00
|
|
|
|
|
|
|
Provides the LLDBTestResult class, which holds information about progress
|
|
|
|
and results of a single test run.
|
|
|
|
"""
|
|
|
|
|
|
|
|
# System modules
import os
import traceback

# Third-party modules
import unittest

# LLDB Modules
from . import configuration
from lldbsuite.test_event import build_exception

class LLDBTestResult(unittest.TextTestResult):
    """
    Enforce a singleton pattern to allow introspection of test progress.

    Override the addError(), addFailure(), and addExpectedFailure() methods
    so that each test instance can track its own failure/error status. The
    LLDB test framework uses this to emit detailed trace messages to a log
    file for easier human inspection of test failures/errors.
    """

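    # Illustrative wiring only (the actual setup lives in the test driver, e.g.
    # dotest.py): the driver is expected to create exactly one instance of this
    # class by handing it to the standard runner as `resultclass`, roughly:
    #
    #   runner = unittest.TextTestRunner(
    #       stream=sys.stderr, verbosity=2, resultclass=LLDBTestResult
    #   )
    #   runner.run(configuration.suite)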
    __singleton__ = None
    __ignore_singleton__ = False

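    # Best-effort terminal size query: try the TIOCGWINSZ ioctl on stdin,
    # stdout, and stderr, then on the controlling terminal, and finally fall
    # back to the LINES/COLUMNS environment variables (default 25x80).
    # Returns a (width, height) tuple.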
    @staticmethod
    def getTerminalSize():
        import os

        env = os.environ

        def ioctl_GWINSZ(fd):
            try:
                import fcntl
                import termios
                import struct

                cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
            except:
                return
            return cr

        cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
        if not cr:
            try:
                fd = os.open(os.ctermid(), os.O_RDONLY)
                cr = ioctl_GWINSZ(fd)
                os.close(fd)
            except:
                pass
        if not cr:
            cr = (env.get("LINES", 25), env.get("COLUMNS", 80))
        return int(cr[1]), int(cr[0])

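    # The constructor enforces the singleton, registers itself as
    # configuration.test_result, and precomputes the counter format string.
    # For example (illustrative), a suite of 1234 tests gives counterWidth == 4,
    # so self.fmt == "%4d: " and the indentation string is six spaces wide.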
    def __init__(self, *args):
        if not LLDBTestResult.__ignore_singleton__ and LLDBTestResult.__singleton__:
            raise Exception("LLDBTestResult instantiated more than once")
        super(LLDBTestResult, self).__init__(*args)
        LLDBTestResult.__singleton__ = self
        # Now put this singleton into the lldb module namespace.
        configuration.test_result = self
        # Computes the format string for displaying the counter.
        counterWidth = len(str(configuration.suite.countTestCases()))
        self.fmt = "%" + str(counterWidth) + "d: "
        self.indentation = " " * (counterWidth + 2)
        # This counts from 1 .. suite.countTestCases().
        self.counter = 0
        (width, height) = LLDBTestResult.getTerminalSize()

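    # Builds the short "<compiler>-<arch>" tag used in the PASS/FAIL/XFAIL
    # lines, e.g. "clang-x86_64" (illustrative); either half is empty when the
    # test object does not expose the corresponding accessor.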
    def _config_string(self, test):
        compiler = getattr(test, "getCompiler", None)
        arch = getattr(test, "getArchitecture", None)
        return "%s-%s" % (compiler() if compiler else "", arch() if arch else "")

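    # The appended suffix has the form "Config=<arch>-<compiler>", e.g.
    # "Config=x86_64-clang" (illustrative values).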
    def _exc_info_to_string(self, err, test):
        """Overrides superclass TestResult's method in order to append
        our test config info string to the exception info string."""
        if hasattr(test, "getArchitecture") and hasattr(test, "getCompiler"):
            return "%sConfig=%s-%s" % (
                super(LLDBTestResult, self)._exc_info_to_string(err, test),
                test.getArchitecture(),
                test.getCompiler(),
            )
        else:
            return super(LLDBTestResult, self)._exc_info_to_string(err, test)

    def getDescription(self, test):
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return "\n".join((str(test), self.indentation + doc_first_line))
        else:
            return str(test)

    def _getTestPath(self, test):
        # Use test.test_filename if the test was created with
        # lldbinline.MakeInlineTest().
        if test is None:
            return ""
        elif hasattr(test, "test_filename"):
            return test.test_filename
        else:
            import inspect

            return inspect.getsourcefile(test.__class__)

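    # A "categories" file is expected to hold a single comma-separated line of
    # category names, e.g. (illustrative):
    #
    #   dataformatters,expression
    #
    # Every directory between the test and the test-suite root may contribute
    # one such file; all of them are merged below.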
    def _getFileBasedCategories(self, test):
        """
        Returns the list of categories to which this test case belongs by
        collecting the values of "categories" files. We start at the folder
        the test is in and traverse the hierarchy upwards until the
        test-suite root directory.
        """
        start_path = self._getTestPath(test)

        import os.path

        folder = os.path.dirname(start_path)

        from lldbsuite import lldb_test_root as test_root

        if test_root != os.path.commonprefix([folder, test_root]):
            raise Exception(
                "The test file %s is outside the test root directory" % start_path
            )

        categories = set()
        while not os.path.samefile(folder, test_root):
            categories_file_name = os.path.join(folder, "categories")
            if os.path.exists(categories_file_name):
                with open(categories_file_name, "r") as categories_file:
                    categories_str = categories_file.readline().strip()
                categories.update(categories_str.split(","))
            folder = os.path.dirname(folder)

        return list(categories)

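    # Categories are merged from three sources: the test-case class (its
    # `categories` attribute), the test method itself (typically set by the
    # add_test_categories decorator), and the "categories" files on disk.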
    def getCategoriesForTest(self, test):
        """
        Gets all the categories for the currently running test method in the
        test case.
        """
        test_categories = []
        test_categories.extend(getattr(test, "categories", []))

        test_method = getattr(test, test._testMethodName)
        if test_method is not None:
            test_categories.extend(getattr(test_method, "categories", []))

        test_categories.extend(self._getFileBasedCategories(test))

        return test_categories

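    # Forcibly mark the test method as skipped by setting the dunder attributes
    # that unittest's skip machinery checks, so the standard runner reports the
    # test as skipped instead of running it.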
    def hardMarkAsSkipped(self, test):
        getattr(test, test._testMethodName).__func__.__unittest_skip__ = True
        getattr(
            test, test._testMethodName
        ).__func__.__unittest_skip_why__ = (
            "test case does not fall in any category of interest for this run"
        )

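    # Entries in the exclusion list are treated as regular expressions and
    # matched with re.search against the given name (typically the test id).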
    def checkExclusion(self, exclusion_list, name):
        if exclusion_list:
            import re

            for item in exclusion_list:
                if re.search(item, name):
                    return True
        return False

    def checkCategoryExclusion(self, exclusion_list, test):
        return not set(exclusion_list).isdisjoint(self.getCategoriesForTest(test))

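    # Runs before each test method: tests matching the category- or name-based
    # skip lists are hard-marked as skipped here, and the counter shown in
    # verbose output is advanced.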
    def startTest(self, test):
        if configuration.shouldSkipBecauseOfCategories(self.getCategoriesForTest(test)):
            self.hardMarkAsSkipped(test)
        if self.checkExclusion(configuration.skip_tests, test.id()):
            self.hardMarkAsSkipped(test)

        self.counter += 1
        test.test_number = self.counter
        if self.showAll:
            self.stream.write(self.fmt % self.counter)
        super(LLDBTestResult, self).startTest(test)

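    # A pass from a test that matches an xfail list (by name or by category) is
    # re-routed to addUnexpectedSuccess so it is reported as XPASS, not PASS.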
    def addSuccess(self, test):
        if self.checkExclusion(
            configuration.xfail_tests, test.id()
        ) or self.checkCategoryExclusion(configuration.xfail_categories, test):
            self.addUnexpectedSuccess(test)
            return

        super(LLDBTestResult, self).addSuccess(test)
        self.stream.write(
            "PASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
        )

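    # err_tuple is the (type, value, traceback) triple unittest hands to
    # addError; a BuildError means the test's binaries failed to build rather
    # than the test itself erroring out.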
    def _isBuildError(self, err_tuple):
        exception = err_tuple[1]
        return isinstance(exception, build_exception.BuildError)

    def _saveBuildErrorTuple(self, test, err):
        # Adjust the error description so it prints the build command and build error
        # rather than an uninformative Python backtrace.
        build_error = err[1]
        error_description = "{}\nTest Directory:\n{}".format(
            str(build_error), os.path.dirname(self._getTestPath(test))
        )
        self.errors.append((test, error_description))
        self._mirrorOutput = True

    def addError(self, test, err):
        configuration.sdir_has_content = True
        if self._isBuildError(err):
            self._saveBuildErrorTuple(test, err)
        else:
            super(LLDBTestResult, self).addError(test, err)

        method = getattr(test, "markError", None)
        if method:
            method()
        self.stream.write(
            "FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
        )

    def addCleanupError(self, test, err):
        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addCleanupError(test, err)
        method = getattr(test, "markCleanupError", None)
        if method:
            method()
        self.stream.write(
            "CLEANUP ERROR: LLDB (%s) :: %s\n%s\n"
            % (self._config_string(test), str(test), traceback.format_exc())
        )

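    # Failures from tests on an xfail list are re-routed to addExpectedFailure;
    # genuine failures are additionally tallied per category when
    # category-based reporting is enabled.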
    def addFailure(self, test, err):
        if self.checkExclusion(
            configuration.xfail_tests, test.id()
        ) or self.checkCategoryExclusion(configuration.xfail_categories, test):
            self.addExpectedFailure(test, err)
            return

        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addFailure(test, err)
        method = getattr(test, "markFailure", None)
        if method:
            method()
        self.stream.write(
            "FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
        )
        if configuration.use_categories:
            test_categories = self.getCategoriesForTest(test)
            for category in test_categories:
                if category in configuration.failures_per_category:
                    configuration.failures_per_category[category] = (
                        configuration.failures_per_category[category] + 1
                    )
                else:
                    configuration.failures_per_category[category] = 1

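    # Note: since the switch from the vendored unittest2 to the standard
    # unittest module, expected failures no longer carry a "bugnumber" reason;
    # only the err tuple is recorded.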
    def addExpectedFailure(self, test, err):
        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addExpectedFailure(test, err)
        method = getattr(test, "markExpectedFailure", None)
        if method:
            method(err)
        self.stream.write(
            "XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
        )

    def addSkip(self, test, reason):
        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addSkip(test, reason)
        method = getattr(test, "markSkippedTest", None)
        if method:
            method()
        self.stream.write(
            "UNSUPPORTED: LLDB (%s) :: %s (%s) \n"
            % (self._config_string(test), str(test), reason)
        )

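    # An unexpected success is a test that was expected to fail (xfail) but
    # passed; it is surfaced as XPASS in the stream output.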
    def addUnexpectedSuccess(self, test):
        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addUnexpectedSuccess(test)
        method = getattr(test, "markUnexpectedSuccess", None)
        if method:
            method()
        self.stream.write(
            "XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))
        )