Adding unit tests and docstrings for editor test files

Signed-off-by: evanchia <evanchia@amazon.com>
monroegm-disable-blank-issue-2
evanchia 4 years ago
parent 91925d0f65
commit 8ab9f89b46

@ -3,9 +3,7 @@ Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
"""
Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests.
"""
@ -15,22 +13,41 @@ import inspect
__test__ = False
def pytest_addoption(parser):
# type (argparse.ArgumentParser) -> None
"""
Options when running tests in batches or parallel.
:param parser: The ArgumentParser object
:return: None
"""
parser.addoption("--no-editor-batch", action="store_true", help="Don't batch multiple tests in single editor")
parser.addoption("--no-editor-parallel", action="store_true", help="Don't run multiple editors in parallel")
parser.addoption("--editors-parallel", type=int, action="store", help="Override the number editors to run at the same time")
# Create a custom item collection if the class defines a pytest_custom_makeitem function
# This is used for automatically generating test functions with a custom collector
def pytest_pycollect_makeitem(collector, name, obj):
# type (PyCollector, str, object) -> Collector
"""
Create a custom item collection if the class defines a pytest_custom_makeitem function. This is used for
automatically generating test functions with a custom collector.
:param collector: The Python test collector
:param name: Name of the collector
:param obj: The custom collector, normally an EditorTestSuite.EditorTestClass object
:return: Returns the custom collector
"""
if inspect.isclass(obj):
for base in obj.__bases__:
if hasattr(base, "pytest_custom_makeitem"):
return base.pytest_custom_makeitem(collector, name, obj)
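To make the hook's contract concrete, here is a minimal, hypothetical base class it would detect; the real base is provided by EditorTestSuite, so the class and names below are illustrative only.

import pytest

# Hypothetical base class: any subclass of it gets collected through
# pytest_custom_makeitem instead of pytest's default item factory.
class CustomCollectedBase:
    @classmethod
    def pytest_custom_makeitem(cls, collector, name, obj):
        # Delegate to pytest's stock class collector; a real implementation
        # (e.g. EditorTestSuite) would return its own Collector subclass here.
        return pytest.Class.from_parent(collector, name=name)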
# Add custom modification of items.
# This is used for adding the runners into the item list
@pytest.hookimpl(hookwrapper=True)
def pytest_collection_modifyitems(session, items, config):
# type (Session, list, Config) -> None
"""
Add custom modification of items. This is used for adding the runners into the item list.
:param session: The Pytest Session
:param items: The test case functions
:param config: The Pytest Config object
:return: None
"""
all_classes = set()
for item in items:
all_classes.add(item.instance.__class__)
@ -40,4 +57,4 @@ def pytest_collection_modifyitems(session, items, config):
for cls in all_classes:
if hasattr(cls, "pytest_custom_modify_items"):
cls.pytest_custom_modify_items(session, items, config)

@ -55,8 +55,16 @@ def pytest_configure(config):
ly_test_tools._internal.pytest_plugin.build_directory = _get_build_directory(config)
ly_test_tools._internal.pytest_plugin.output_path = _get_output_path(config)
def pytest_pycollect_makeitem(collector, name, obj):
# type (PyCollector, str, object) -> Collector
"""
Create a custom item collection if the class defines a pytest_custom_makeitem function. This is used for
automatically generating test functions with a custom collector.
:param collector: The Python test collector
:param name: Name of the collector
:param obj: The custom collector, normally an EditorTestSuite.EditorTestClass object
:return: Returns the custom collector
"""
import inspect
if inspect.isclass(obj):
for base in obj.__bases__:

@ -25,16 +25,16 @@ import re
import ly_test_tools.environment.file_system as file_system
import ly_test_tools.environment.waiter as waiter
import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.o3de.editor_test_utils as editor_utils
from ly_test_tools.o3de.asset_processor import AssetProcessor
from ly_test_tools.launchers.exceptions import WaitTimeoutError
from . import editor_test_utils as editor_utils
# This file provides editor testing functionality to easily write automated editor tests for O3DE.
# For using these utilities, you can subclass your test suite from EditorTestSuite; this allows an easy way of specifying
# python test scripts that the editor will run without needing to write any boilerplate code.
# It supports out of the box parallelization (running multiple editor instances at once), batching (running multiple tests in the same editor instance) and
# crash detection.
# For using these utilities, you can subclass your test suite from EditorTestSuite; this allows an easy way of
# specifying python test scripts that the editor will run without needing to write any boilerplate code.
# It supports out of the box parallelization (running multiple editor instances at once), batching (running multiple tests
# in the same editor instance) and crash detection.
# Usage example:
# class MyTestSuite(EditorTestSuite):
#
@ -48,7 +48,8 @@ from . import editor_test_utils as editor_utils
# from . import yet_another_script_to_be_run_by_editor as test_module
#
#
# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, parallelizing/batching as required
# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests,
# parallelizing/batching as required
# This file contains no tests, but this flag makes sure it won't be picked up by the runner since the file name ends with _test
__test__ = False
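For reference, a slightly fuller, hedged version of the usage example sketched in the comment above; the imported module names are placeholders for editor-side python scripts.

# Illustrative only; the imported modules are placeholders.
class MyTestSuite(EditorTestSuite):

    # Runs alone in its own editor instance.
    class MyFirstTest(EditorSingleTest):
        from . import script_to_be_run_by_editor as test_module

    # Eligible for batching and/or parallelization with other shared tests.
    class MySecondTest(EditorSharedTest):
        from . import yet_another_script_to_be_run_by_editor as test_module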
@ -109,12 +110,22 @@ class EditorBatchedTest(EditorSharedTest):
class Result:
class Base:
def get_output_str(self):
# type () -> str
"""
Checks if the output attribute exists and returns it.
:return: Either the output string or a no output message
"""
if hasattr(self, "output") and self.output is not None:
return self.output
else:
return "-- No output --"
def get_editor_log_str(self):
# type () -> str
"""
Checks if the editor_log attribute exists and returns it.
:return: Either the editor_log string or a no output message
"""
if hasattr(self, "editor_log") and self.editor_log is not None:
return self.editor_log
else:
@ -122,7 +133,15 @@ class Result:
class Pass(Base):
@classmethod
def create(cls, test_spec : EditorTestBase, output : str, editor_log : str):
def create(cls, test_spec, output, editor_log):
# type (EditorTestBase, str, str) -> Pass
"""
Creates a Pass object with a given test spec, output string, and editor log string.
:test_spec: The type of EditorTestBase
:output: The test output
:editor_log: The editor log's output
:return: the Pass object
"""
r = cls()
r.test_spec = test_spec
r.output = output
@ -141,7 +160,15 @@ class Result:
class Fail(Base):
@classmethod
def create(cls, test_spec : EditorTestBase, output, editor_log : str):
def create(cls, test_spec, output, editor_log):
# type (EditorTestBase, str, str) -> Fail
"""
Creates a Fail object with a given test spec, output string, and editor log string.
:test_spec: The type of EditorTestBase
:output: The test output
:editor_log: The editor log's output
:return: the Fail object
"""
r = cls()
r.test_spec = test_spec
r.output = output
@ -164,7 +191,18 @@ class Result:
class Crash(Base):
@classmethod
def create(cls, test_spec : EditorTestBase, output : str, ret_code : int, stacktrace : str, editor_log : str):
def create(cls, test_spec, output, ret_code, stacktrace, editor_log):
# type (EditorTestBase, str, int, str, str) -> Crash
"""
Creates a Crash object with a given test spec, output string, and editor log string. This also includes the
return code and stacktrace.
:test_spec: The type of EditorTestBase
:output: The test output
:ret_code: The test's return code
:stacktrace: The test's stacktrace if available
:editor_log: The editor log's output
:return: The Crash object
"""
r = cls()
r.output = output
r.test_spec = test_spec
@ -174,7 +212,7 @@ class Result:
return r
def __str__(self):
stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
stacktrace_str = "-- No stacktrace data found --\n" if not self.stacktrace else self.stacktrace
output = (
f"Test CRASHED, return code {hex(self.ret_code)}\n"
f"---------------\n"
@ -190,12 +228,21 @@ class Result:
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
crash_str = "-- No crash information found --"
return output
class Timeout(Base):
@classmethod
def create(cls, test_spec : EditorTestBase, output : str, time_secs : float, editor_log : str):
def create(cls, test_spec, output, time_secs, editor_log):
# type (EditorTestBase, str, float, str) -> Timeout
"""
Creates a Timeout object with a given test spec, output string, and editor log string. The timeout time
should be provided in seconds
:test_spec: The type of EditorTestBase
:output: The test output
:time_secs: The timeout duration in seconds
:editor_log: The editor log's output
:return: The Timeout object
"""
r = cls()
r.output = output
r.test_spec = test_spec
@ -219,14 +266,23 @@ class Result:
class Unknown(Base):
@classmethod
def create(cls, test_spec : EditorTestBase, output : str, extra_info : str, editor_log : str):
def create(cls, test_spec, output, extra_info, editor_log):
# type (EditorTestBase, str, str , str) -> Unknown
"""
Creates an Unknown test results object if something goes wrong.
:test_spec: The type of EditorTestBase
:output: The test output
:extra_info: Any extra information as a string
:editor_log: The editor log's output
:return: The Unknown object
"""
r = cls()
r.output = output
r.test_spec = test_spec
r.editor_log = editor_log
r.extra_info = extra_info
return r
def __str__(self):
output = (
f"Unknown test result, possible cause: {self.extra_info}\n"
@ -263,6 +319,18 @@ class EditorTestSuite():
@pytest.fixture(scope="class")
def editor_test_data(self, request):
# type (request) -> TestData
"""
Yields a TestData object to capture the test results and an AssetProcessor object.
:request: The pytest request
:yield: The TestData object
"""
self._editor_test_data(request)
def _editor_test_data(self, request):
"""
A wrapper function for unit testing to call directly
"""
class TestData():
def __init__(self):
self.results = {} # Dict of str(test_spec.__name__) -> Result
@ -445,6 +513,10 @@ class EditorTestSuite():
@classmethod
def pytest_custom_modify_items(cls, session, items, config):
# type () -> None
"""
"""
# Add here the runners functions and filter the tests that will be run.
# The runners will be added if they have any selected tests
new_items = []
@ -463,23 +535,53 @@ class EditorTestSuite():
@classmethod
def get_single_tests(cls):
# type () -> list
"""
Grabs all of the tests that subclass EditorSingleTest from the EditorTestSuite class
Usage example:
class MyTestSuite(EditorTestSuite):
class MyFirstTest(EditorSingleTest):
from . import script_to_be_run_by_editor as test_module
:return: The list of single tests
"""
single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
return single_tests
@classmethod
def get_shared_tests(cls):
# type () -> list
"""
Grabs all of the EditorSharedTests from the EditorTestSuite
Usage example:
class MyTestSuite(EditorTestSuite):
class MyFirstTest(EditorSharedTest):
from . import script_to_be_run_by_editor as test_module
:return: The list of shared tests
"""
shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
return shared_tests
@classmethod
def get_session_shared_tests(cls, session):
# type (Session) -> list[EditorTestBase]
"""
Filters and returns all of the shared tests in a given session.
:session: The test session
:return: The list of tests
"""
shared_tests = cls.get_shared_tests()
return cls.filter_session_shared_tests(session, shared_tests)
@staticmethod
def filter_session_shared_tests(session_items, shared_tests):
# Retrieve the test sub-set that was collected
# this can be less than the original set if tests were overridden via the -k argument or similar
# type (list, list) -> list[EditorTestBase]
"""
Retrieve the subset of tests that was collected; this can be less than the original set if tests were overridden
via the -k argument or similar.
:session_items: The tests in a session to run
:shared_tests: All of the shared tests
:return: The list of filtered tests
"""
def will_run(item):
try:
skipping_pytest_runtest_setup(item)
@ -488,13 +590,20 @@ class EditorTestSuite():
return False
session_items_by_name = { item.originalname:item for item in session_items }
selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and will_run(session_items_by_name[test.__name__])]
selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and
will_run(session_items_by_name[test.__name__])]
return selected_shared_tests
@staticmethod
def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False):
# Retrieve the test sub-set that was collected
# this can be less than the original set if tests were overridden via the -k argument or similar
# type (list, bool, bool) -> list
"""
Filters and returns all tests based on whether they are batchable and/or parallelizable
:shared_tests: All shared tests
:is_batchable: Filter to batchable tests
:is_parallelizable: Filter to parallelizable tests
:return: The list of filtered tests
"""
return [
t for t in shared_tests if (
getattr(t, "is_batchable", None) is is_batchable
@ -504,9 +613,15 @@ class EditorTestSuite():
]
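For illustration, a hedged sketch of how the four batchable/parallelizable combinations can be selected with this filter; the variable names are placeholders, and the actual wiring lives in pytest_custom_modify_items and the runner functions.

# shared_tests would typically come from get_session_shared_tests(session).
batched_parallel = EditorTestSuite.filter_shared_tests(shared_tests, is_batchable=True, is_parallelizable=True)
batched_only = EditorTestSuite.filter_shared_tests(shared_tests, is_batchable=True, is_parallelizable=False)
parallel_only = EditorTestSuite.filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=True)
neither = EditorTestSuite.filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False)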
### Utils ###
# Prepares the asset processor for the test
def _prepare_asset_processor(self, workspace, editor_test_data):
# type (AbstractWorkspace, TestData) -> None
"""
Prepares the asset processor for the test depending on whether or not the process is open and if the current
test owns it.
:workspace: The workspace object in case an AssetProcessor object needs to be created
:editor_test_data: The test data from calling editor_test_data()
:return: None
"""
try:
# Start-up an asset processor if we are not running one
# If another AP process exist, don't kill it, as we don't own it
@ -525,14 +640,29 @@ class EditorTestSuite():
raise ex
def _setup_editor_test(self, editor, workspace, editor_test_data):
# type(Editor, AbstractWorkspace, TestData) -> None
"""
Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring the
editor settings.
:editor: The launcher Editor object
:workspace: The test Workspace object
:editor_test_data: The TestData from calling editor_test_data()
:return: None
"""
self._prepare_asset_processor(workspace, editor_test_data)
editor_utils.kill_all_ly_processes(include_asset_processor=False)
editor.configure_settings()
# Utility function for parsing the output information from the editor.
# It deserializes the JSON content printed in the output for every test and returns that information.
@staticmethod
def _get_results_using_output(test_spec_list, output, editor_log_content):
# type(list, str, str) -> dict{str: Result}
"""
Utility function for parsing the output information from the editor. It deserializes the JSON content printed in
the output for every test and returns that information.
:test_spec_list: The list of EditorTests
:output: The editor output from Editor.get_output()
:editor_log_content: The contents of the editor log as a string
:return: A dict of the tests and their respective Result objects
"""
results = {}
pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
out_matches = pattern.finditer(output)
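As a worked illustration of the parsing above (the sample string is made up), each test run prints its report between JSON_START( and )JSON_END markers, which this pattern extracts.

import json
import re

# Made-up sample of what one test's report looks like inside the editor output.
sample_output = 'editor noise JSON_START({"name": "my_test", "success": true})JSON_END more noise'
pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
for m in pattern.finditer(sample_output):
    elem = json.loads(m.groups()[0])      # {'name': 'my_test', 'success': True}
    print(elem["name"], elem["success"])  # -> my_test True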
@ -541,7 +671,8 @@ class EditorTestSuite():
try:
elem = json.loads(m.groups()[0])
found_jsons[elem["name"]] = elem
except Exception:
except Exception as e:
raise e
continue # Avoid failing if the output data is corrupt
# Try to find the element in the log, this is used for cutting the log contents later
@ -558,7 +689,9 @@ class EditorTestSuite():
for test_spec in test_spec_list:
name = editor_utils.get_module_filename(test_spec.test_module)
if name not in found_jsons.keys():
results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content)
results[test_spec.__name__] = Result.Unknown.create(test_spec, output,
"Couldn't find any test run information on stdout",
editor_log_content)
else:
result = None
json_result = found_jsons[name]
@ -573,7 +706,7 @@ class EditorTestSuite():
cur_log = editor_log_content[log_start : end]
log_start = end
if json_result["success"]:
if "success" in json_result.keys():
result = Result.Pass.create(test_spec, json_output, cur_log)
else:
result = Result.Fail.create(test_spec, json_output, cur_log)
@ -581,9 +714,15 @@ class EditorTestSuite():
return results
# Fails the test if the test result is not a PASS, specifying the information
@staticmethod
def _report_result(name : str, result : Result.Base):
def _report_result(name, result):
# type (str, Result) -> None
"""
Fails the test if the test result is not a PASS, specifying the information
:name: Name of the test
:result: The Result object which denotes if the test passed or not
:return: None
"""
if isinstance(result, Result.Pass):
output_str = f"Test {name}:\n{str(result)}"
print(output_str)
@ -592,10 +731,19 @@ class EditorTestSuite():
pytest.fail(error_str)
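A small, hypothetical usage sketch of the reporting helper; test_spec and the strings are placeholders, a Pass result simply prints, anything else ends in pytest.fail.

# Hypothetical values for illustration only.
passing = Result.Pass.create(test_spec, "test output", "editor log")
EditorTestSuite._report_result("MyFirstTest", passing)    # prints the result and returns

failing = Result.Fail.create(test_spec, "test output", "editor log")
EditorTestSuite._report_result("MySecondTest", failing)   # calls pytest.fail() with the details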
### Running tests ###
# Starts the editor with the given test and returns a result dict with a single element specifying the result
def _exec_editor_test(self, request, workspace, editor, run_id : int, log_name : str,
test_spec : EditorTestBase, cmdline_args : List[str] = []):
def _exec_editor_test(self, request, workspace, editor, run_id, log_name, test_spec, cmdline_args = []):
# type (Request, AbstractWorkspace, Editor, int, str, EditorTestBase, list[str] -> dict{str: Result}
"""
Starts the editor with the given test and returns a result dict with a single element specifying the result
:request: The pytest request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:run_id: The unique run id
:log_name: The name of the editor log to retrieve
:test_spec: The type of EditorTestBase
:cmdline_args: Any additional command line args
:return: a dictionary of Result objects
"""
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
@ -629,12 +777,14 @@ class EditorTestSuite():
else:
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output
(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
editor_utils.cycle_crash_report(run_id, workspace)
else:
test_result = Result.Fail.create(test_spec, output, editor_log_content)
except WaitTimeoutError:
editor.kill()
output = editor.get_output()
editor.kill()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content)
@ -643,11 +793,21 @@ class EditorTestSuite():
results[test_spec.__name__] = test_result
return results
# Starts an editor executable with a list of tests and returns a dict of the result of every test run within that editor
# instance. In case of failure this function also parses the editor output to find out which specific tests failed
def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str,
test_spec_list : List[EditorTestBase], cmdline_args=[]):
def _exec_editor_multitest(self, request, workspace, editor, run_id, log_name, test_spec_list, cmdline_args=[]):
# type (Request, AbstractWorkspace, Editor, int, str, list[EditorTestBase], list[str]) -> dict{str: Result}
"""
Starts an editor executable with a list of tests and returns a dict of the result of every test run within that
editor instance. In case of failure this function also parses the editor output to find out which specific tests
failed.
:request: The pytest request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:run_id: The unique run id
:log_name: The name of the editor log to retrieve
:test_spec_list: A list of EditorTestBase tests to run
:cmdline_args: Any additional command line args
:return: A dict of Result objects
"""
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
if self.use_null_renderer:
test_cmdline_args += ["-rhi=null"]
@ -660,7 +820,8 @@ class EditorTestSuite():
editor_utils.cycle_crash_report(run_id, workspace)
results = {}
test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list)
test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for
test_spec in test_spec_list)
cmdline = [
"--runpythontest", test_filenames_str,
"-logfile", f"@log@/{log_name}",
@ -685,7 +846,8 @@ class EditorTestSuite():
# Scrape the output to attempt to find out which tests failed.
# This function should always populate the result list; if it can't find a result for a test, it will be of the "Unknown" type
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results" \
"don't match the tests ran"
# If the editor crashed, find out in which test it happened and update the results
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
@ -695,50 +857,67 @@ class EditorTestSuite():
if isinstance(result, Result.Unknown):
if not crashed_result:
# The first test with "Unknown" result (no data in output) is likely the one that crashed
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
crash_error = editor_utils.retrieve_crash_output(run_id, workspace,
self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code,
crash_error, result.editor_log)
crashed_result = result
else:
# If there are remaining "Unknown" results, these couldn't execute because of the crash, update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed"
# If there are remaining "Unknown" results, these couldn't execute because of the crash,
# update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test " \
f"'{crashed_result.test_spec.__name__}' crashed " \
f"before this test could be executed"
# if all the tests ran, the one that has caused the crash is the last test
if not crashed_result:
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, crash_error, crashed_result.editor_log)
results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code,
crash_error, crashed_result.editor_log)
except WaitTimeoutError:
editor.kill()
output = editor.get_output()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# The editor timed out when running the tests, get the data from the output to find out which ones ran
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results " \
"don't match the tests ran"
# Similar logic here as crashes, the first test that has no result is the one that timed out
timed_out_result = None
for test_spec_name, result in results.items():
if isinstance(result, Result.Unknown):
if not timed_out_result:
results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log)
results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output,
self.timeout_editor_shared_test,
result.editor_log)
timed_out_result = result
else:
# If there are remaining "Unknown" results, these couldn't execute because of the timeout, update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test '{timed_out_result.test_spec.__name__}' timed out before this test could be executed"
# If there are remaining "Unknown" results, these couldn't execute because of the timeout,
# update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test " \
f"'{timed_out_result.test_spec.__name__}' timed out " \
f"before this test could be executed"
# if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor
if not timed_out_result:
results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log)
results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec,
results[test_spec_name].output,
self.timeout_editor_shared_test, result.editor_log)
return results
# Runs a single test (one editor, one test) with the given specs
def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec : EditorSingleTest):
def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec):
# type (Request, AbstractWorkspace, Editor, TestData, EditorSingleTest) -> None
"""
Runs a single test (one editor, one test) with the given specs
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec: The test class that should be a subclass of EditorSingleTest
:return: None
"""
self._setup_editor_test(editor, workspace, editor_test_data)
extra_cmdline_args = []
if hasattr(test_spec, "extra_cmdline_args"):
@ -749,18 +928,39 @@ class EditorTestSuite():
test_name, test_result = next(iter(results.items()))
self._report_result(test_name, test_result)
# Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list, extra_cmdline_args=[]):
# type (Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None
"""
Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if not test_spec_list:
return
self._setup_editor_test(editor, workspace, editor_test_data)
results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args)
results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list,
extra_cmdline_args)
assert results is not None
editor_test_data.results.update(results)
# Runs multiple editors with one test on each editor (multiple editors, one test each)
def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list, extra_cmdline_args=[]):
# type(Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None
"""
Runs multiple editors with one test on each editor (multiple editors, one test each)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if not test_spec_list:
return
@ -778,7 +978,8 @@ class EditorTestSuite():
for i in range(total_threads):
def make_func(test_spec, index, my_editor):
def run(request, workspace, extra_cmdline_args):
results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", test_spec, extra_cmdline_args)
results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log",
test_spec, extra_cmdline_args)
assert results is not None
results_per_thread[index] = results
return run
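The make_func wrapper above exists so that each thread binds its own test_spec, index, and editor; below is a stand-alone sketch (not project code) of the late-binding pitfall it avoids.

# Closures created in a loop capture the variable, not its current value.
funcs_bad = [lambda: i for i in range(3)]
print([f() for f in funcs_bad])    # [2, 2, 2] -- every closure sees the final i

# Wrapping in a factory, as make_func does, binds the value per iteration.
def make(value):
    def run():
        return value
    return run

funcs_good = [make(i) for i in range(3)]
print([f() for f in funcs_good])   # [0, 1, 2]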
@ -796,8 +997,19 @@ class EditorTestSuite():
for result in results_per_thread:
editor_test_data.results.update(result)
# Runs multiple editors with a batch of tests for each editor (multiple editors, multiple tests each)
def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list,
extra_cmdline_args=[]):
# type(Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str] -> None
"""
Runs multiple editors with a batch of tests for each editor (multiple editors, multiple tests each)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if not test_spec_list:
return
@ -813,7 +1025,9 @@ class EditorTestSuite():
def run(request, workspace, extra_cmdline_args):
results = None
if len(test_spec_list_for_editor) > 0:
results = self._exec_editor_multitest(request, workspace, my_editor, index+1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args)
results = self._exec_editor_multitest(request, workspace, my_editor, index+1,
f"editor_test.log", test_spec_list_for_editor,
extra_cmdline_args)
assert results is not None
else:
results = {}
@ -833,8 +1047,13 @@ class EditorTestSuite():
for result in results_per_thread:
editor_test_data.results.update(result)
# Retrieves the number of parallel editors to use, honoring the command line override if provided
def _get_number_parallel_editors(self, request):
# type(Request) -> int
"""
Retrieves the number of parallel editors to use, honoring the command line override if provided
:request: The Pytest Request
:return: The number of parallel editors to use
"""
parallel_editors_value = request.config.getoption("--editors-parallel", None)
if parallel_editors_value:
return int(parallel_editors_value)

@ -3,6 +3,8 @@ Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Utility functions for the editor_test module
"""
import os
@ -15,6 +17,12 @@ import ly_test_tools.environment.waiter as waiter
logger = logging.getLogger(__name__)
def kill_all_ly_processes(include_asset_processor=True):
# type (bool) -> None
"""
Kills all common O3DE processes such as the Editor, Game Launchers, and Asset Processor.
:param include_asset_processor: Boolean flag whether or not to kill the AP
:return: None
"""
LY_PROCESSES = [
'Editor', 'Profiler', 'RemoteConsole',
]
@ -47,7 +55,8 @@ def get_module_filename(testcase_module):
"""
return os.path.splitext(os.path.basename(testcase_module.__file__))[0]
def retrieve_log_path(run_id : int, workspace):
def retrieve_log_path(run_id, workspace):
# type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> str
"""
Return the log/project path for this test run.
:param run_id: editor id that will be used for differentiating paths
@ -56,7 +65,8 @@ def retrieve_log_path(run_id : int, workspace):
"""
return os.path.join(workspace.paths.project(), "user", f"log_test_{run_id}")
def retrieve_crash_output(run_id : int, workspace, timeout : float):
def retrieve_crash_output(run_id, workspace, timeout):
# type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager, float) -> str
"""
returns the crash output string for the given test run.
:param run_id: editor id that will be used for differentiating paths
@ -79,7 +89,8 @@ def retrieve_crash_output(run_id : int, workspace, timeout : float):
crash_info += f"\n{str(ex)}"
return crash_info
def cycle_crash_report(run_id : int, workspace):
def cycle_crash_report(run_id, workspace):
# type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> None
"""
Attempts to rename error.log and error.dmp (crash files) into new names with a timestamp appended.
:param run_id: editor id that will be used for differentiating paths
@ -99,10 +110,12 @@ def cycle_crash_report(run_id : int, workspace):
except Exception as ex:
logger.warning(f"Couldn't cycle file {filepath}. Error: {str(ex)}")
def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout=10):
def retrieve_editor_log_content(run_id, log_name, workspace, timeout=10):
# type (int , str, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager, int) -> str
"""
Retrieves the contents of the given editor log file.
:param run_id: editor id that will be used for differentiating paths
:log_name: The name of the editor log to retrieve
:param workspace: Workspace fixture
:timeout: Maximum time to wait for the log file to appear
:return str: The contents of the log
@ -124,7 +137,8 @@ def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout
editor_info = f"-- Error reading editor.log: {str(ex)} --"
return editor_info
def retrieve_last_run_test_index_from_output(test_spec_list, output : str):
def retrieve_last_run_test_index_from_output(test_spec_list, output):
# type (list, str) -> int
"""
Finds out which test was run last by inspecting the given output.
This is used for determining which batched test has crashed the editor

@ -0,0 +1,158 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import unittest.mock as mock
import unittest
import ly_test_tools.o3de.editor_test_utils as editor_test_utils
pytestmark = pytest.mark.SUITE_smoke
class TestEditorTestUtils(unittest.TestCase):
@mock.patch('ly_test_tools.environment.process_utils.kill_processes_named')
def test_KillAllLyProcesses_IncludeAP_CallsCorrectly(self, under_test):
process_list = ['Editor', 'Profiler', 'RemoteConsole', 'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder']
editor_test_utils.kill_all_ly_processes(include_asset_processor=True)
under_test.assert_called_once_with(process_list, ignore_extensions=True)
@mock.patch('ly_test_tools.environment.process_utils.kill_processes_named')
def test_KillAllLyProcesses_NotIncludeAP_CallsCorrectly(self, under_test):
process_list = ['Editor', 'Profiler', 'RemoteConsole']
editor_test_utils.kill_all_ly_processes(include_asset_processor=False)
under_test.assert_called_once_with(process_list, ignore_extensions=True)
def test_GetTestcaseModuleFilepath_NoExtension_ReturnsPYExtension(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test')
mock_module.__file__ = file_path
assert file_path + '.py' == editor_test_utils.get_testcase_module_filepath(mock_module)
def test_GetTestcaseModuleFilepath_PYExtension_ReturnsPYExtension(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test.py')
mock_module.__file__ = file_path
assert file_path == editor_test_utils.get_testcase_module_filepath(mock_module)
def test_GetModuleFilename_PythonModule_ReturnsFilename(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test.py')
mock_module.__file__ = file_path
assert 'under_test' == editor_test_utils.get_module_filename(mock_module)
def test_RetrieveLogPath_NormalProject_ReturnsLogPath(self):
mock_workspace = mock.MagicMock()
mock_workspace.paths.project.return_value = 'mock_project_path'
expected = os.path.join('mock_project_path', 'user', 'log_test_0')
assert expected == editor_test_utils.retrieve_log_path(0, mock_workspace)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveCrashOutput_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_log = 'mock crash info'
with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
assert mock_log == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveCrashOutput_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
expected = "-- No crash log available --\n[Errno 2] No such file or directory: 'mock_log_path\\\\error.log'"
assert expected == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0)
@mock.patch('os.rename')
@mock.patch('os.path.getmtime')
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('os.path.exists')
def test_CycleCrashReport_LogExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_getmtime,
under_test):
mock_exists.side_effect = [True, False]
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_getmtime.return_value = 1
editor_test_utils.cycle_crash_report(0, mock_workspace)
under_test.assert_called_once_with(os.path.join('mock_log_path', 'error.log'),
os.path.join('mock_log_path', 'error_1969_12_31_16_00_01.log'))
@mock.patch('os.rename')
@mock.patch('os.path.getmtime')
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('os.path.exists')
def test_CycleCrashReport_DmpExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_getmtime,
under_test):
mock_exists.side_effect = [False, True]
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_getmtime.return_value = 1
editor_test_utils.cycle_crash_report(0, mock_workspace)
under_test.assert_called_once_with(os.path.join('mock_log_path', 'error.dmp'),
os.path.join('mock_log_path', 'error_1969_12_31_16_00_01.dmp'))
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveEditorLogContent_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_logname = 'mock_log.log'
mock_workspace = mock.MagicMock()
mock_log = 'mock log info'
with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
assert f'[editor.log] {mock_log}' == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveEditorLogContent_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_logname = 'mock_log.log'
mock_workspace = mock.MagicMock()
expected = f"-- Error reading editor.log: [Errno 2] No such file or directory: 'mock_log_path\\\\mock_log.log' --"
assert expected == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace)
def test_RetrieveLastRunTestIndexFromOutput_SecondTestFailed_Returns0(self):
mock_test = mock.MagicMock()
mock_test.__name__ = 'mock_test_name'
mock_test_list = [mock_test]
mock_editor_output = 'mock_test_name\n' \
'mock_test_name_1'
assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)
def test_RetrieveLastRunTestIndexFromOutput_TenthTestFailed_Returns9(self):
mock_test_list = []
mock_editor_output = ''
for x in range(10):
mock_test = mock.MagicMock()
mock_test.__name__ = f'mock_test_name_{x}'
mock_test_list.append(mock_test)
mock_editor_output += f'{mock_test.__name__}\n'
mock_editor_output += 'mock_test_name_x'
assert 9 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)
def test_RetrieveLastRunTestIndexFromOutput_FirstItemFailed_Returns0(self):
mock_test_list = []
mock_editor_output = ''
for x in range(10):
mock_test = mock.MagicMock()
mock_test.__name__ = f'mock_test_name_{x}'
mock_test_list.append(mock_test)
assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)

@ -369,3 +369,14 @@ class TestFixtures(object):
mock_request.addfinalizer.call_args[0][0]()
mock_stop.assert_called_once()
@mock.patch('inspect.isclass', mock.MagicMock(return_value=True))
def test_PytestPycollectMakeitem_ValidArgs_CallsCorrectly(self):
mock_collector = mock.MagicMock()
mock_name = mock.MagicMock()
mock_obj = mock.MagicMock()
mock_base = mock.MagicMock()
mock_obj.__bases__ = [mock_base]
test_tools_fixtures.pytest_pycollect_makeitem(mock_collector, mock_name, mock_obj)
mock_base.pytest_custom_makeitem.assert_called_once_with(mock_collector, mock_name, mock_obj)

File diff suppressed because it is too large

@ -0,0 +1,41 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import unittest.mock as mock
import unittest
import ly_test_tools._internal.pytest_plugin.editor_test as editor_test
pytestmark = pytest.mark.SUITE_smoke
class TestEditorTest(unittest.TestCase):
@mock.patch('inspect.isclass', mock.MagicMock(return_value=True))
def test_PytestPycollectMakeitem_ValidArgs_CallsCorrectly(self):
mock_collector = mock.MagicMock()
mock_name = mock.MagicMock()
mock_obj = mock.MagicMock()
mock_base = mock.MagicMock()
mock_obj.__bases__ = [mock_base]
editor_test.pytest_pycollect_makeitem(mock_collector, mock_name, mock_obj)
mock_base.pytest_custom_makeitem.assert_called_once_with(mock_collector, mock_name, mock_obj)
def test_PytestCollectionModifyitem_OneValidClass_CallsOnce(self):
mock_item = mock.MagicMock()
mock_class = mock.MagicMock()
mock_class.pytest_custom_modify_items = mock.MagicMock()
mock_item.instance.__class__ = mock_class
mock_session = mock.MagicMock()
mock_items = [mock_item, mock.MagicMock()]
mock_config = mock.MagicMock()
generator = editor_test.pytest_collection_modifyitems(mock_session, mock_items, mock_config)
for x in generator:
pass
assert mock_class.pytest_custom_modify_items.call_count == 1