diff --git a/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py index 15ba6690a1..50b89ab138 100644 --- a/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py +++ b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py @@ -15,6 +15,7 @@ import sys import importlib import re +import ly_test_tools from ly_test_tools import LAUNCHERS sys.path.append(os.path.dirname(os.path.abspath(__file__))) @@ -25,8 +26,15 @@ import ly_test_tools.environment.process_utils as process_utils import argparse, sys -@pytest.mark.SUITE_main -@pytest.mark.parametrize("launcher_platform", ['windows_editor']) +def get_editor_launcher_platform(): + if ly_test_tools.WINDOWS: + return "windows_editor" + elif ly_test_tools.LINUX: + return "linux_editor" + else: + return None + +@pytest.mark.parametrize("launcher_platform", [get_editor_launcher_platform()]) @pytest.mark.parametrize("project", ["AutomatedTesting"]) class TestEditorTest: @@ -69,7 +77,7 @@ class TestEditorTest: from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite @pytest.mark.SUITE_main - @pytest.mark.parametrize("launcher_platform", ['windows_editor']) + @pytest.mark.parametrize("launcher_platform", [{get_editor_launcher_platform()}]) @pytest.mark.parametrize("project", ["AutomatedTesting"]) class TestAutomation(EditorTestSuite): class test_single(EditorSingleTest): @@ -123,7 +131,7 @@ class TestEditorTest: from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite @pytest.mark.SUITE_main - @pytest.mark.parametrize("launcher_platform", ['windows_editor']) + @pytest.mark.parametrize("launcher_platform", [{get_editor_launcher_platform()}]) @pytest.mark.parametrize("project", ["AutomatedTesting"]) class TestAutomation(EditorTestSuite): {module_class_code} diff --git a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py index 80a562977b..1e9d4c3654 100644 --- a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py +++ b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py @@ -3,34 +3,49 @@ Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT -""" +Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests. This is not a set of +tools to directly invoke, but a plugin with functions intended to be called by only the Pytest framework. """ -Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests. -""" - +from __future__ import annotations import pytest import inspect __test__ = False -def pytest_addoption(parser): +def pytest_addoption(parser: argparse.ArgumentParser) -> None: + """ + Options when running editor tests in batches or parallel. 
+ :param parser: The ArgumentParser object + :return: None + """ parser.addoption("--no-editor-batch", action="store_true", help="Don't batch multiple tests in single editor") parser.addoption("--no-editor-parallel", action="store_true", help="Don't run multiple editors in parallel") parser.addoption("--editors-parallel", type=int, action="store", help="Override the number editors to run at the same time") -# Create a custom custom item collection if the class defines pytest_custom_makeitem function -# This is used for automtically generating test functions with a custom collector -def pytest_pycollect_makeitem(collector, name, obj): +def pytest_pycollect_makeitem(collector: PyCollector, name: str, obj: object) -> PyCollector: + """ + Create a custom custom item collection if the class defines pytest_custom_makeitem function. This is used for + automatically generating test functions with a custom collector. + :param collector: The Pytest collector + :param name: Name of the collector + :param obj: The custom collector, normally an EditorTestSuite.EditorTestClass object + :return: Returns the custom collector + """ if inspect.isclass(obj): for base in obj.__bases__: if hasattr(base, "pytest_custom_makeitem"): return base.pytest_custom_makeitem(collector, name, obj) -# Add custom modification of items. -# This is used for adding the runners into the item list @pytest.hookimpl(hookwrapper=True) -def pytest_collection_modifyitems(session, items, config): +def pytest_collection_modifyitems(session: Session, items: list[EditorTestBase], config: Config) -> None: + """ + Add custom modification of items. This is used for adding the runners into the item list. + :param session: The Pytest Session + :param items: The test case functions + :param config: The Pytest Config object + :return: None + """ all_classes = set() for item in items: all_classes.add(item.instance.__class__) @@ -40,4 +55,4 @@ def pytest_collection_modifyitems(session, items, config): for cls in all_classes: if hasattr(cls, "pytest_custom_modify_items"): cls.pytest_custom_modify_items(session, items, config) - + diff --git a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py index f4bda9b293..af29064766 100755 --- a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py +++ b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py @@ -55,14 +55,6 @@ def pytest_configure(config): ly_test_tools._internal.pytest_plugin.build_directory = _get_build_directory(config) ly_test_tools._internal.pytest_plugin.output_path = _get_output_path(config) - -def pytest_pycollect_makeitem(collector, name, obj): - import inspect - if inspect.isclass(obj): - for base in obj.__bases__: - if hasattr(base, "pytest_custom_makeitem"): - return base.pytest_custom_makeitem(collector, name, obj) - def _get_build_directory(config): """ Fetch and verify the cmake build directory CLI arg, without creating an error when unset diff --git a/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py b/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py index ac4ad621da..5e96f34e95 100755 --- a/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py +++ b/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py @@ -60,7 +60,7 @@ def create_editor(workspace, launcher_platform=ly_test_tools.HOST_OS_EDITOR, arg Editor is only officially supported on the Windows Platform. 
:param workspace: lumberyard workspace to use
-    :param launcher_platform: the platform to target for a launcher (i.e. 'windows_dedicated' for DedicatedWinLauncher)
+    :param launcher_platform: the platform to target for a launcher (i.e. 'windows_dedicated' for DedicatedWinLauncher)
     :param args: List of arguments to pass to the launcher's 'args' argument during construction
     :return: Editor instance
     """
diff --git a/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py b/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py
index 781977cf33..ae6ed16321 100644
--- a/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py
+++ b/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py
@@ -3,8 +3,29 @@
 Copyright (c) Contributors to the Open 3D Engine Project.
 For complete copyright and license terms please see the LICENSE at the root of this distribution.
 
 SPDX-License-Identifier: Apache-2.0 OR MIT
-"""
 
+This file provides editor testing functionality to easily write automated editor tests for O3DE.
+For using these utilities, you can subclass your test suite from EditorTestSuite; this allows an easy way of
+specifying python test scripts that the editor will run without needing to write any boilerplate code.
+It supports out-of-the-box parallelization (running multiple editor instances at once), batching (running multiple tests
+in the same editor instance) and crash detection.
+Usage example:
+   class MyTestSuite(EditorTestSuite):
+
+       class MyFirstTest(EditorSingleTest):
+           from . import script_to_be_run_by_editor as test_module
+
+       class MyTestInParallel_1(EditorParallelTest):
+           from . import another_script_to_be_run_by_editor as test_module
+
+       class MyTestInParallel_2(EditorParallelTest):
+           from . import yet_another_script_to_be_run_by_editor as test_module
+
+
+EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests,
+parallelizing/batching as required.
+"""
+from __future__ import annotations
 import pytest
 from _pytest.skipping import pytest_runtest_setup as skipping_pytest_runtest_setup
 
@@ -25,30 +46,11 @@
 import re
 
 import ly_test_tools.environment.file_system as file_system
 import ly_test_tools.environment.waiter as waiter
 import ly_test_tools.environment.process_utils as process_utils
+import ly_test_tools.o3de.editor_test
+import ly_test_tools.o3de.editor_test_utils as editor_utils
 from ly_test_tools.o3de.asset_processor import AssetProcessor
 from ly_test_tools.launchers.exceptions import WaitTimeoutError
-from . import editor_test_utils as editor_utils
-
-# This file provides editor testing functionality to easily write automated editor tests for O3DE.
-# For using these utilities, you can subclass your test suite from EditorTestSuite, this allows an easy way of specifying
-# python test scripts that the editor will run without needing to write any boilerplace code.
-# It supports out of the box parallelization(running multiple editor instances at once), batching(running multiple tests in the same editor instance) and
-# crash detection.
-# Usage example:
-#    class MyTestSuite(EditorTestSuite):
-#
-#        class MyFirstTest(EditorSingleTest):
-#            from . import script_to_be_run_by_editor as test_module
-#
-#        class MyTestInParallel_1(EditorParallelTest):
-#            from . import another_script_to_be_run_by_editor as test_module
-#
-#        class MyTestInParallel_2(EditorParallelTest):
-#            from . import yet_another_script_to_be_run_by_editor as test_module
-#
-#
-# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, parallelizing/batching as required
 
 # This file contains no tests, but with this we make sure it won't be picked up by the runner since the file ends with _test
 __test__ = False
@@ -109,12 +111,22 @@ class EditorBatchedTest(EditorSharedTest):
 
 class Result:
     class Base:
         def get_output_str(self):
+            # type: () -> str
+            """
+            Checks if the output attribute exists and returns it.
+            :return: Either the output string or a no output message
+            """
             if hasattr(self, "output") and self.output is not None:
                 return self.output
             else:
                 return "-- No output --"
 
         def get_editor_log_str(self):
+            # type: () -> str
+            """
+            Checks if the editor_log attribute exists and returns it.
+            :return: Either the editor_log string or a no output message
+            """
             if hasattr(self, "editor_log") and self.editor_log is not None:
                 return self.editor_log
             else:
@@ -122,7 +134,14 @@ class Result:
 
     class Pass(Base):
         @classmethod
-        def create(cls, test_spec : EditorTestBase, output : str, editor_log : str):
+        def create(cls, test_spec: EditorTestBase, output: str, editor_log: str) -> Pass:
+            """
+            Creates a Pass object with a given test spec, output string, and editor log string.
+            :test_spec: The type of EditorTestBase
+            :output: The test output
+            :editor_log: The editor log's output
+            :return: The Pass object
+            """
             r = cls()
             r.test_spec = test_spec
             r.output = output
@@ -141,7 +160,14 @@ class Result:
 
     class Fail(Base):
         @classmethod
-        def create(cls, test_spec : EditorTestBase, output, editor_log : str):
+        def create(cls, test_spec: EditorTestBase, output: str, editor_log: str) -> Fail:
+            """
+            Creates a Fail object with a given test spec, output string, and editor log string.
+            :test_spec: The type of EditorTestBase
+            :output: The test output
+            :editor_log: The editor log's output
+            :return: The Fail object
+            """
             r = cls()
             r.test_spec = test_spec
             r.output = output
@@ -164,7 +190,17 @@ class Result:
 
     class Crash(Base):
         @classmethod
-        def create(cls, test_spec : EditorTestBase, output : str, ret_code : int, stacktrace : str, editor_log : str):
+        def create(cls, test_spec: EditorTestBase, output: str, ret_code: int, stacktrace: str, editor_log: str) -> Crash:
+            """
+            Creates a Crash object with a given test spec, output string, and editor log string. This also includes the
+            return code and stacktrace.
+            :test_spec: The type of EditorTestBase
+            :output: The test output
+            :ret_code: The test's return code
+            :stacktrace: The test's stacktrace if available
+            :editor_log: The editor log's output
+            :return: The Crash object
+            """
             r = cls()
             r.output = output
             r.test_spec = test_spec
@@ -190,12 +226,20 @@ class Result:
                 f"--------------\n"
                 f"{self.get_editor_log_str()}\n"
             )
-            crash_str = "-- No crash information found --"
             return output
 
     class Timeout(Base):
         @classmethod
-        def create(cls, test_spec : EditorTestBase, output : str, time_secs : float, editor_log : str):
+        def create(cls, test_spec: EditorTestBase, output: str, time_secs: float, editor_log: str) -> Timeout:
+            """
+            Creates a Timeout object with a given test spec, output string, and editor log string.
The timeout time + should be provided in seconds + :test_spec: The type of EditorTestBase + :output: The test output + :time_secs: The timeout duration in seconds + :editor_log: The editor log's output + :return: The Timeout object + """ r = cls() r.output = output r.test_spec = test_spec @@ -219,14 +263,22 @@ class Result: class Unknown(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output : str, extra_info : str, editor_log : str): + def create(cls, test_spec: EditorTestBase, output: str, extra_info: str, editor_log: str) -> Unknown: + """ + Creates an Unknown test results object if something goes wrong. + :test_spec: The type of EditorTestBase + :output: The test output + :extra_info: Any extra information as a string + :editor_log: The editor log's output + :return: The Unknown object + """ r = cls() r.output = output r.test_spec = test_spec r.editor_log = editor_log r.extra_info = extra_info return r - + def __str__(self): output = ( f"Unknown test result, possible cause: {self.extra_info}\n" @@ -262,7 +314,19 @@ class EditorTestSuite(): _TEST_FAIL_RETCODE = 0xF # Return code for test failure @pytest.fixture(scope="class") - def editor_test_data(self, request): + def editor_test_data(self, request: Request) -> TestData: + """ + Yields a per-testsuite structure to store the data of each test result and an AssetProcessor object that will be + re-used on the whole suite + :request: The Pytest request + :yield: The TestData object + """ + yield from self._editor_test_data(request) + + def _editor_test_data(self, request: Request) -> TestData: + """ + A wrapper function for unit testing to call directly + """ class TestData(): def __init__(self): self.results = {} # Dict of str(test_spec.__name__) -> Result @@ -444,9 +508,15 @@ class EditorTestSuite(): return EditorTestSuite.EditorTestClass(name, collector) @classmethod - def pytest_custom_modify_items(cls, session, items, config): - # Add here the runners functions and filter the tests that will be run. - # The runners will be added if they have any selected tests + def pytest_custom_modify_items(cls, session: Session, items: list[EditorTestBase], config: Config) -> None: + """ + Adds the runners' functions and filters the tests that will run. The runners will be added if they have any + selected tests + :param session: The Pytest Session + :param items: The test case functions + :param config: The Pytest Config object + :return: None + """ new_items = [] for runner in cls._runners: runner.tests[:] = cls.filter_session_shared_tests(items, runner.tests) @@ -462,24 +532,50 @@ class EditorTestSuite(): items[:] = items + new_items @classmethod - def get_single_tests(cls): + def get_single_tests(cls) -> list[EditorSingleTest]: + """ + Grabs all of the EditorSingleTests subclassed tests from the EditorTestSuite class + Usage example: + class MyTestSuite(EditorTestSuite): + class MyFirstTest(EditorSingleTest): + from . import script_to_be_run_by_editor as test_module + :return: The list of single tests + """ single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)] return single_tests @classmethod - def get_shared_tests(cls): + def get_shared_tests(cls) -> list[EditorSharedTest]: + """ + Grabs all of the EditorSharedTests from the EditorTestSuite + Usage example: + class MyTestSuite(EditorTestSuite): + class MyFirstTest(EditorSharedTest): + from . 
import script_to_be_run_by_editor as test_module
+        :return: The list of shared tests
+        """
         shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
         return shared_tests
 
     @classmethod
-    def get_session_shared_tests(cls, session):
+    def get_session_shared_tests(cls, session: Session) -> list[EditorTestBase]:
+        """
+        Filters and returns all of the shared tests in a given session.
+        :session: The test session
+        :return: The list of tests
+        """
         shared_tests = cls.get_shared_tests()
         return cls.filter_session_shared_tests(session, shared_tests)
 
     @staticmethod
-    def filter_session_shared_tests(session_items, shared_tests):
-        # Retrieve the test sub-set that was collected
-        # this can be less than the original set if were overriden via -k argument or similars
+    def filter_session_shared_tests(session_items: list[EditorTestBase], shared_tests: list[EditorSharedTest]) -> list[EditorTestBase]:
+        """
+        Retrieves the test sub-set that was collected; this can be less than the original set if tests were
+        overridden via the -k argument or similar.
+        :session_items: The tests in a session to run
+        :shared_tests: All of the shared tests
+        :return: The list of filtered tests
+        """
         def will_run(item):
             try:
                 skipping_pytest_runtest_setup(item)
@@ -488,13 +584,20 @@ class EditorTestSuite():
                 return False
 
         session_items_by_name = { item.originalname:item for item in session_items }
-        selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and will_run(session_items_by_name[test.__name__])]
+        selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and
+                                 will_run(session_items_by_name[test.__name__])]
         return selected_shared_tests
 
     @staticmethod
-    def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False):
-        # Retrieve the test sub-set that was collected
-        # this can be less than the original set if were overriden via -k argument or similars
+    def filter_shared_tests(shared_tests: list[EditorSharedTest], is_batchable: bool = False,
+                            is_parallelizable: bool = False) -> list[EditorSharedTest]:
+        """
+        Filters and returns all tests based on whether they are batchable and/or parallelizable.
+        :shared_tests: All shared tests
+        :is_batchable: Filter to batchable tests
+        :is_parallelizable: Filter to parallelizable tests
+        :return: The list of filtered tests
+        """
         return [
             t for t in shared_tests if (
                 getattr(t, "is_batchable", None) is is_batchable
@@ -504,9 +607,14 @@ class EditorTestSuite():
             )
         ]
 
     ### Utils ###
-
-    # Prepares the asset processor for the test
-    def _prepare_asset_processor(self, workspace, editor_test_data):
+    def _prepare_asset_processor(self, workspace: AbstractWorkspace, editor_test_data: TestData) -> None:
+        """
+        Prepares the asset processor for the test depending on whether or not the process is open and if the current
+        test owns it.
+        :workspace: The workspace object in case an AssetProcessor object needs to be created
+        :editor_test_data: The test data from calling editor_test_data()
+        :return: None
+        """
         try:
             # Start-up an asset processor if we are not running one
             # If another AP process exist, don't kill it, as we don't own it
@@ -524,15 +632,28 @@ class EditorTestSuite():
                 editor_test_data.asset_processor = None
             raise ex
 
-    def _setup_editor_test(self, editor, workspace, editor_test_data):
+    def _setup_editor_test(self, editor: Editor, workspace: AbstractWorkspace, editor_test_data: TestData) -> None:
+        """
+        Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring the
+        editor settings.
+        :editor: The launcher Editor object
+        :workspace: The test Workspace object
+        :editor_test_data: The TestData from calling editor_test_data()
+        :return: None
+        """
         self._prepare_asset_processor(workspace, editor_test_data)
         editor_utils.kill_all_ly_processes(include_asset_processor=False)
         editor.configure_settings()
 
-    # Utility function for parsing the output information from the editor.
-    # It deserializes the JSON content printed in the output for every test and returns that information.
     @staticmethod
-    def _get_results_using_output(test_spec_list, output, editor_log_content):
+    def _get_results_using_output(test_spec_list: list[EditorTestBase], output: str, editor_log_content: str) -> dict[str, Result]:
+        """
+        Utility function for parsing the output information from the editor. It deserializes the JSON content printed
+        in the output for every test and returns that information.
+        :test_spec_list: The list of EditorTests
+        :output: The output from Editor.get_output()
+        :editor_log_content: The contents of the editor log as a string
+        :return: A dict of the tests and their respective Result objects
+        """
         results = {}
         pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
         out_matches = pattern.finditer(output)
@@ -558,7 +679,9 @@ class EditorTestSuite():
         for test_spec in test_spec_list:
             name = editor_utils.get_module_filename(test_spec.test_module)
             if name not in found_jsons.keys():
-                results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content)
+                results[test_spec.__name__] = Result.Unknown.create(test_spec, output,
+                                                                    "Couldn't find any test run information on stdout",
+                                                                    editor_log_content)
             else:
                 result = None
                 json_result = found_jsons[name]
@@ -581,9 +704,14 @@ class EditorTestSuite():
 
         return results
 
-    # Fails the test if the test result is not a PASS, specifying the information
     @staticmethod
-    def _report_result(name : str, result : Result.Base):
+    def _report_result(name: str, result: Result) -> None:
+        """
+        Fails the test if the test result is not a PASS, specifying the failure information.
+        :name: Name of the test
+        :result: The Result object which denotes if the test passed or not
+        :return: None
+        """
         if isinstance(result, Result.Pass):
             output_str = f"Test {name}:\n{str(result)}"
             print(output_str)
@@ -592,10 +720,19 @@ class EditorTestSuite():
             pytest.fail(error_str)
 
     ### Running tests ###
-    # Starts the editor with the given test and retuns an result dict with a single element specifying the result
-    def _exec_editor_test(self, request, workspace, editor, run_id : int, log_name : str,
-                          test_spec : EditorTestBase, cmdline_args : List[str] = []):
-
+    def _exec_editor_test(self, request: Request, workspace: AbstractWorkspace, editor: Editor, run_id: int,
+                          log_name: str, test_spec: EditorTestBase, cmdline_args: list[str] = []) -> dict[str, Result]:
+        """
+        Starts the editor with the given test and returns a result dict with a single element specifying the result.
+        :request: The pytest request
+        :workspace: The LyTestTools Workspace object
+        :editor: The LyTestTools Editor object
+        :run_id: The unique run id
+        :log_name: The name of the editor log to retrieve
+        :test_spec: The type of EditorTestBase
+        :cmdline_args: Any additional command line args
+        :return: A dictionary of Result objects
+        """
         test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
         test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
         if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
@@ -629,12 +766,14 @@ class EditorTestSuite():
             else:
                 has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
                 if has_crashed:
-                    test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
+                    test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output
+                                                      (run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
                     editor_utils.cycle_crash_report(run_id, workspace)
                 else:
                     test_result = Result.Fail.create(test_spec, output, editor_log_content)
         except WaitTimeoutError:
-            editor.kill()
+            output = editor.get_output()
+            editor.kill()
            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
            test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content)
 
@@ -643,11 +782,21 @@ class EditorTestSuite():
         results[test_spec.__name__] = test_result
         return results
 
-    # Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that editor
-    # instance. In case of failure this function also parses the editor output to find out what specific tests failed
-    def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str,
-                               test_spec_list : List[EditorTestBase], cmdline_args=[]):
-
+    def _exec_editor_multitest(self, request: Request, workspace: AbstractWorkspace, editor: Editor, run_id: int, log_name: str,
+                               test_spec_list: list[EditorTestBase], cmdline_args: list[str] = []) -> dict[str, Result]:
+        """
+        Starts an editor executable with a list of tests and returns a dict of the result of every test run within that
+        editor instance. In case of failure this function also parses the editor output to find out what specific tests
+        failed.
+        :request: The pytest request
+        :workspace: The LyTestTools Workspace object
+        :editor: The LyTestTools Editor object
+        :run_id: The unique run id
+        :log_name: The name of the editor log to retrieve
+        :test_spec_list: A list of EditorTestBase tests to run in the same editor instance
+        :cmdline_args: Any additional command line args
+        :return: A dict of Result objects
+        """
         test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
         if self.use_null_renderer:
             test_cmdline_args += ["-rhi=null"]
@@ -695,50 +844,66 @@ class EditorTestSuite():
                     if isinstance(result, Result.Unknown):
                         if not crashed_result:
                             # The first test with "Unknown" result (no data in output) is likely the one that crashed
-                            crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
+                            crash_error = editor_utils.retrieve_crash_output(run_id, workspace,
+                                                                             self._TIMEOUT_CRASH_LOG)
                            editor_utils.cycle_crash_report(run_id, workspace)
-                            results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
+                            results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code,
+                                                                          crash_error, result.editor_log)
                            crashed_result = result
                         else:
-                            # If there are remaning "Unknown" results, these couldn't execute because of the crash, update with info about the offender
-                            results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed"
-
+                            # If there are remaining "Unknown" results, these couldn't execute because of the crash,
+                            # update with info about the offender
+                            results[test_spec_name].extra_info = f"This test has unknown result, " \
+                                                                 f"test '{crashed_result.test_spec.__name__}' " \
+                                                                 f"crashed before this test could be executed"
                # if all the tests ran, the one that has caused the crash is the last test
                if not crashed_result:
                    crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
                    editor_utils.cycle_crash_report(run_id, workspace)
-                    results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, crash_error, crashed_result.editor_log)
-
-
+                    results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code,
+                                                                  crash_error, crashed_result.editor_log)
         except WaitTimeoutError:
             editor.kill()
-            output = editor.get_output()
             editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
             # The editor timed out when running the tests, get the data from the output to find out which ones ran
             results = self._get_results_using_output(test_spec_list, output, editor_log_content)
             assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
-
             # Similar logic here as crashes, the first test that has no result is the one that timed out
             timed_out_result = None
             for test_spec_name, result in results.items():
                 if isinstance(result, Result.Unknown):
                     if not timed_out_result:
-                        results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log)
+                        results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output,
                                                                        self.timeout_editor_shared_test,
+                                                                        result.editor_log)
                        timed_out_result = result
                    else:
-                        # If there are remaning "Unknown" results, these couldn't execute because of the timeout, update with info about the offender
-                        results[test_spec_name].extra_info = f"This test has unknown result, test 
'{timed_out_result.test_spec.__name__}' timed out before this test could be executed" - + # If there are remaning "Unknown" results, these couldn't execute because of the timeout, + # update with info about the offender + results[test_spec_name].extra_info = f"This test has unknown result, test " \ + f"'{timed_out_result.test_spec.__name__}' timed out " \ + f"before this test could be executed" # if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor if not timed_out_result: - results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log) + results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, + results[test_spec_name].output, + self.timeout_editor_shared_test, result.editor_log) return results - # Runs a single test (one editor, one test) with the given specs - def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec : EditorSingleTest): + def _run_single_test(self, request: Request, workspace: AbstractWorkspace, editor: Editor, + editor_test_data: TestData, test_spec: EditorSingleTest) -> None: + """ + Runs a single test (one editor, one test) with the given specs + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec: The test class that should be a subclass of EditorSingleTest + :return: None + """ self._setup_editor_test(editor, workspace, editor_test_data) extra_cmdline_args = [] if hasattr(test_spec, "extra_cmdline_args"): @@ -749,18 +914,39 @@ class EditorTestSuite(): test_name, test_result = next(iter(results.items())) self._report_result(test_name, test_result) - # Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests) - def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_batched_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData, + test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = []) -> None: + """ + Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return self._setup_editor_test(editor, workspace, editor_test_data) - results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args) + results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, + extra_cmdline_args) assert results is not None editor_test_data.results.update(results) - # Runs multiple editors with one test on each editor (multiple editor, one test each) - def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_parallel_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData, + test_spec_list: list[EditorSharedTest], extra_cmdline_args: 
list[str] = []) -> None: + """ + Runs multiple editors with one test on each editor (multiple editor, one test each) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return @@ -778,7 +964,8 @@ class EditorTestSuite(): for i in range(total_threads): def make_func(test_spec, index, my_editor): def run(request, workspace, extra_cmdline_args): - results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", test_spec, extra_cmdline_args) + results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", + test_spec, extra_cmdline_args) assert results is not None results_per_thread[index] = results return run @@ -796,8 +983,18 @@ class EditorTestSuite(): for result in results_per_thread: editor_test_data.results.update(result) - # Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each) - def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_parallel_batched_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData, + test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = []) -> None: + """ + Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return @@ -813,7 +1010,9 @@ class EditorTestSuite(): def run(request, workspace, extra_cmdline_args): results = None if len(test_spec_list_for_editor) > 0: - results = self._exec_editor_multitest(request, workspace, my_editor, index+1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args) + results = self._exec_editor_multitest(request, workspace, my_editor, index+1, + f"editor_test.log", test_spec_list_for_editor, + extra_cmdline_args) assert results is not None else: results = {} @@ -833,8 +1032,12 @@ class EditorTestSuite(): for result in results_per_thread: editor_test_data.results.update(result) - # Retrieves the number of parallel preference cmdline overrides - def _get_number_parallel_editors(self, request): + def _get_number_parallel_editors(self, request: Request) -> int: + """ + Retrieves the number of parallel preference cmdline overrides + :request: The Pytest Request + :return: The number of parallel editors to use + """ parallel_editors_value = request.config.getoption("--editors-parallel", None) if parallel_editors_value: return int(parallel_editors_value) diff --git a/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py b/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py index feff78d866..35fbe93d37 100644 --- a/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py +++ b/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py @@ -3,8 +3,10 @@ Copyright (c) Contributors to the Open 3D Engine Project. 
For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT -""" +Utility functions mostly for the editor_test module. They can also be used for assisting Editor tests. +""" +from __future__ import annotations import os import time import logging @@ -14,7 +16,13 @@ import ly_test_tools.environment.waiter as waiter logger = logging.getLogger(__name__) -def kill_all_ly_processes(include_asset_processor=True): +def kill_all_ly_processes(include_asset_processor: bool = True) -> None: + """ + Kills all common O3DE processes such as the Editor, Game Launchers, and optionally Asset Processor. Defaults to + killing the Asset Processor. + :param include_asset_processor: Boolean flag whether or not to kill the AP + :return: None + """ LY_PROCESSES = [ 'Editor', 'Profiler', 'RemoteConsole', ] @@ -27,8 +35,7 @@ def kill_all_ly_processes(include_asset_processor=True): else: process_utils.kill_processes_named(LY_PROCESSES, ignore_extensions=True) -def get_testcase_module_filepath(testcase_module): - # type: (Module) -> str +def get_testcase_module_filepath(testcase_module: Module) -> str: """ return the full path of the test module using always '.py' extension :param testcase_module: The testcase python module being tested @@ -36,8 +43,7 @@ def get_testcase_module_filepath(testcase_module): """ return os.path.splitext(testcase_module.__file__)[0] + ".py" -def get_module_filename(testcase_module): - # type: (Module) -> str +def get_module_filename(testcase_module: Module): """ return The filename of the module without path Note: This is differs from module.__name__ in the essence of not having the package directory. @@ -47,7 +53,7 @@ def get_module_filename(testcase_module): """ return os.path.splitext(os.path.basename(testcase_module.__file__))[0] -def retrieve_log_path(run_id : int, workspace): +def retrieve_log_path(run_id: int, workspace: AbstractWorkspaceManager) -> str: """ return the log/ project path for this test run. :param run_id: editor id that will be used for differentiating paths @@ -56,7 +62,7 @@ def retrieve_log_path(run_id : int, workspace): """ return os.path.join(workspace.paths.project(), "user", f"log_test_{run_id}") -def retrieve_crash_output(run_id : int, workspace, timeout : float): +def retrieve_crash_output(run_id: int, workspace: AbstractWorkspaceManager, timeout: float = 10) -> str: """ returns the crash output string for the given test run. :param run_id: editor id that will be used for differentiating paths @@ -79,7 +85,7 @@ def retrieve_crash_output(run_id : int, workspace, timeout : float): crash_info += f"\n{str(ex)}" return crash_info -def cycle_crash_report(run_id : int, workspace): +def cycle_crash_report(run_id: int, workspace: AbstractWorkspaceManager) -> None: """ Attempts to rename error.log and error.dmp(crash files) into new names with the timestamp on it. :param run_id: editor id that will be used for differentiating paths @@ -99,10 +105,11 @@ def cycle_crash_report(run_id : int, workspace): except Exception as ex: logger.warning(f"Couldn't cycle file {filepath}. Error: {str(ex)}") -def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout=10): +def retrieve_editor_log_content(run_id: int, log_name: str, workspace: AbstractWorkspaceManager, timeout: int = 10) -> str: """ Retrieves the contents of the given editor log file. 
:param run_id: editor id that will be used for differentiating paths + :log_name: The name of the editor log to retrieve :param workspace: Workspace fixture :timeout: Maximum time to wait for the log file to appear :return str: The contents of the log @@ -124,7 +131,7 @@ def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout editor_info = f"-- Error reading editor.log: {str(ex)} --" return editor_info -def retrieve_last_run_test_index_from_output(test_spec_list, output : str): +def retrieve_last_run_test_index_from_output(test_spec_list: list[EditorTestBase], output: str) -> int: """ Finds out what was the last test that was run by inspecting the input. This is used for determining what was the batched test has crashed the editor diff --git a/Tools/LyTestTools/tests/unit/test_editor_test_utils.py b/Tools/LyTestTools/tests/unit/test_editor_test_utils.py new file mode 100644 index 0000000000..bf7fae7189 --- /dev/null +++ b/Tools/LyTestTools/tests/unit/test_editor_test_utils.py @@ -0,0 +1,162 @@ +""" +Copyright (c) Contributors to the Open 3D Engine Project. +For complete copyright and license terms please see the LICENSE at the root of this distribution. + +SPDX-License-Identifier: Apache-2.0 OR MIT +""" +import pytest +import os +import unittest.mock as mock +import unittest + +import ly_test_tools.o3de.editor_test_utils as editor_test_utils + +pytestmark = pytest.mark.SUITE_smoke + +class TestEditorTestUtils(unittest.TestCase): + + @mock.patch('ly_test_tools.environment.process_utils.kill_processes_named') + def test_KillAllLyProcesses_IncludeAP_CallsCorrectly(self, mock_kill_processes_named): + process_list = ['Editor', 'Profiler', 'RemoteConsole', 'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder'] + + editor_test_utils.kill_all_ly_processes(include_asset_processor=True) + mock_kill_processes_named.assert_called_once_with(process_list, ignore_extensions=True) + + @mock.patch('ly_test_tools.environment.process_utils.kill_processes_named') + def test_KillAllLyProcesses_NotIncludeAP_CallsCorrectly(self, mock_kill_processes_named): + process_list = ['Editor', 'Profiler', 'RemoteConsole'] + ap_process_list = ['AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder'] + + editor_test_utils.kill_all_ly_processes(include_asset_processor=False) + mock_kill_processes_named.assert_called_once() + assert ap_process_list not in mock_kill_processes_named.call_args[0] + + def test_GetTestcaseModuleFilepath_NoExtension_ReturnsPYExtension(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test') + mock_module.__file__ = file_path + + assert file_path + '.py' == editor_test_utils.get_testcase_module_filepath(mock_module) + + def test_GetTestcaseModuleFilepath_PYExtension_ReturnsPYExtension(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test.py') + mock_module.__file__ = file_path + + assert file_path == editor_test_utils.get_testcase_module_filepath(mock_module) + + def test_GetModuleFilename_PythonModule_ReturnsFilename(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test.py') + mock_module.__file__ = file_path + + assert 'under_test' == editor_test_utils.get_module_filename(mock_module) + + def test_RetrieveLogPath_NormalProject_ReturnsLogPath(self): + mock_workspace = mock.MagicMock() + mock_workspace.paths.project.return_value = 'mock_project_path' + expected = os.path.join('mock_project_path', 'user', 'log_test_0') + + assert expected == 
editor_test_utils.retrieve_log_path(0, mock_workspace) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveCrashOutput_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_log = 'mock crash info' + + with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file: + assert mock_log == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveCrashOutput_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + expected = "-- No crash log available --\n[Errno 2] No such file or directory: 'mock_log_path\\\\error.log'" + + assert expected == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0) + + @mock.patch('os.path.getmtime', mock.MagicMock()) + @mock.patch('os.rename') + @mock.patch('time.strftime') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('os.path.exists') + def test_CycleCrashReport_DmpExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_strftime, + mock_rename): + mock_exists.side_effect = [False, True] + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_strftime.return_value = 'mock_strftime' + + editor_test_utils.cycle_crash_report(0, mock_workspace) + mock_rename.assert_called_once_with(os.path.join('mock_log_path', 'error.dmp'), + os.path.join('mock_log_path', 'error_mock_strftime.dmp')) + + @mock.patch('os.path.getmtime', mock.MagicMock()) + @mock.patch('os.rename') + @mock.patch('time.strftime') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('os.path.exists') + def test_CycleCrashReport_LogExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_strftime, + mock_rename): + mock_exists.side_effect = [True, False] + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_strftime.return_value = 'mock_strftime' + + editor_test_utils.cycle_crash_report(0, mock_workspace) + mock_rename.assert_called_once_with(os.path.join('mock_log_path', 'error.log'), + os.path.join('mock_log_path', 'error_mock_strftime.log')) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveEditorLogContent_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_logname = 'mock_log.log' + mock_workspace = mock.MagicMock() + mock_log = 'mock log info' + + with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file: + assert f'[editor.log] {mock_log}' == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveEditorLogContent_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_logname = 'mock_log.log' + mock_workspace = mock.MagicMock() 
+ expected = f"-- Error reading editor.log" + + assert expected in editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace) + + def test_RetrieveLastRunTestIndexFromOutput_SecondTestFailed_Returns0(self): + mock_test = mock.MagicMock() + mock_test.__name__ = 'mock_test_name' + mock_test_list = [mock_test] + mock_editor_output = 'mock_test_name\n' \ + 'mock_test_name_1' + + assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) + + def test_RetrieveLastRunTestIndexFromOutput_TenthTestFailed_Returns9(self): + mock_test_list = [] + mock_editor_output = '' + for x in range(10): + mock_test = mock.MagicMock() + mock_test.__name__ = f'mock_test_name_{x}' + mock_test_list.append(mock_test) + mock_editor_output += f'{mock_test.__name__}\n' + mock_editor_output += 'mock_test_name_x' + assert 9 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) + + def test_RetrieveLastRunTestIndexFromOutput_FirstItemFailed_Returns0(self): + mock_test_list = [] + mock_editor_output = '' + for x in range(10): + mock_test = mock.MagicMock() + mock_test.__name__ = f'mock_test_name_{x}' + mock_test_list.append(mock_test) + + assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) diff --git a/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py b/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py new file mode 100644 index 0000000000..3f6c23ea3d --- /dev/null +++ b/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py @@ -0,0 +1,1099 @@ +""" +Copyright (c) Contributors to the Open 3D Engine Project. +For complete copyright and license terms please see the LICENSE at the root of this distribution. + +SPDX-License-Identifier: Apache-2.0 OR MIT +""" +import unittest + +import pytest +import unittest.mock as mock + +import ly_test_tools +import ly_test_tools.o3de.editor_test as editor_test + +pytestmark = pytest.mark.SUITE_smoke + +class TestEditorTestBase(unittest.TestCase): + + def test_EditorSharedTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorSharedTest() + assert mock_editorsharedtest.is_batchable == True + assert mock_editorsharedtest.is_parallelizable == True + + def test_EditorParallelTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorParallelTest() + assert mock_editorsharedtest.is_batchable == False + assert mock_editorsharedtest.is_parallelizable == True + + def test_EditorBatchedTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorBatchedTest() + assert mock_editorsharedtest.is_batchable == True + assert mock_editorsharedtest.is_parallelizable == False + +class TestResultBase(unittest.TestCase): + + def setUp(self): + self.mock_result = editor_test.Result.Base() + + def test_GetOutputStr_HasOutput_ReturnsCorrectly(self): + self.mock_result.output = 'expected output' + assert self.mock_result.get_output_str() == 'expected output' + + def test_GetOutputStr_NoOutput_ReturnsCorrectly(self): + self.mock_result.output = None + assert self.mock_result.get_output_str() == '-- No output --' + + def test_GetEditorLogStr_HasOutput_ReturnsCorrectly(self): + self.mock_result.editor_log = 'expected log output' + assert self.mock_result.get_editor_log_str() == 'expected log output' + + def test_GetEditorLogStr_NoOutput_ReturnsCorrectly(self): + self.mock_result.editor_log = None + assert self.mock_result.get_editor_log_str() == '-- No editor log found --' + +class 
TestResultPass(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + + mock_pass = editor_test.Result.Pass.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = mock.MagicMock() + + mock_pass = editor_test.Result.Pass.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_output in str(mock_pass) + +class TestResultFail(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + + mock_pass = editor_test.Result.Fail.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + + mock_pass = editor_test.Result.Fail.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_output in str(mock_pass) + assert mock_editor_log in str(mock_pass) + +class TestResultCrash(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_ret_code = mock.MagicMock() + mock_stacktrace = mock.MagicMock() + + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_ret_code, mock_stacktrace, + mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.ret_code == mock_ret_code + assert mock_pass.stacktrace == mock_stacktrace + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_return_code = 0 + mock_stacktrace = 'mock stacktrace' + + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_return_code, mock_stacktrace, + mock_editor_log) + assert mock_stacktrace in str(mock_pass) + assert mock_output in str(mock_pass) + assert mock_editor_log in str(mock_pass) + + def test_Str_MissingStackTrace_ReturnsCorrectly(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_return_code = 0 + mock_stacktrace = None + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_return_code, mock_stacktrace, + mock_editor_log) + assert mock_output in str(mock_pass) + assert mock_editor_log in str(mock_pass) + +class TestResultTimeout(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_timeout = mock.MagicMock() + + mock_pass = editor_test.Result.Timeout.create(mock_test_spec, mock_output, mock_timeout, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.time_secs == mock_timeout + + def 
test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_timeout = 0 + + mock_pass = editor_test.Result.Timeout.create(mock_test_spec, mock_output, mock_timeout, mock_editor_log) + assert mock_output in str(mock_pass) + assert mock_editor_log in str(mock_pass) + +class TestResultUnknown(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_extra_info = mock.MagicMock() + + mock_pass = editor_test.Result.Unknown.create(mock_test_spec, mock_output, mock_extra_info, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.extra_info == mock_extra_info + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_extra_info = 'mock extra info' + + mock_pass = editor_test.Result.Unknown.create(mock_test_spec, mock_output, mock_extra_info, mock_editor_log) + assert mock_output in str(mock_pass) + assert mock_editor_log in str(mock_pass) + +class TestEditorTestSuite(unittest.TestCase): + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_EditorTestData_ValidAP_TeardownAPOnce(self, mock_kill_processes): + mock_editor_test_suite = editor_test.EditorTestSuite() + mock_test_data_generator = mock_editor_test_suite._editor_test_data(mock.MagicMock()) + mock_asset_processor = mock.MagicMock() + for test_data in mock_test_data_generator: + test_data.asset_processor = mock_asset_processor + mock_asset_processor.stop.assert_called_once_with(1) + mock_asset_processor.teardown.assert_called() + assert test_data.asset_processor is None + mock_kill_processes.assert_called_once_with(include_asset_processor=True) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_EditorTestData_NoAP_NoTeardownAP(self, mock_kill_processes): + mock_editor_test_suite = editor_test.EditorTestSuite() + mock_test_data_generator = mock_editor_test_suite._editor_test_data(mock.MagicMock()) + for test_data in mock_test_data_generator: + test_data.asset_processor = None + mock_kill_processes.assert_called_once_with(include_asset_processor=False) + + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite.filter_session_shared_tests') + def test_PytestCustomModifyItems_FunctionsMatch_AddsRunners(self, mock_filter_tests): + class MockTestSuite(editor_test.EditorTestSuite): + pass + mock_func_1 = mock.MagicMock() + mock_test = mock.MagicMock() + runner_1 = editor_test.EditorTestSuite.Runner('mock_runner_1', mock_func_1, [mock_test]) + mock_run_pytest_func = mock.MagicMock() + runner_1.run_pytestfunc = mock_run_pytest_func + mock_result_pytestfuncs = [mock.MagicMock()] + runner_1.result_pytestfuncs = mock_result_pytestfuncs + mock_items = [] + mock_items.extend(mock_result_pytestfuncs) + + MockTestSuite._runners = [runner_1] + mock_test_1 = mock.MagicMock() + mock_test_2 = mock.MagicMock() + mock_filter_tests.return_value = [mock_test_1, mock_test_2] + + MockTestSuite.pytest_custom_modify_items(mock.MagicMock(), mock_items, mock.MagicMock()) + assert mock_items == [mock_run_pytest_func, mock_result_pytestfuncs[0]] + + def test_GetSingleTests_NoSingleTests_EmptyList(self): + class MockTestSuite(editor_test.EditorTestSuite): + pass 
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_single_tests()
+        assert len(tests) == 0
+
+    def test_GetSingleTests_OneSingleTests_ReturnsOne(self):
+        class MockTestSuite(editor_test.EditorTestSuite):
+            class MockSingleTest(editor_test.EditorSingleTest):
+                pass
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_single_tests()
+        assert len(tests) == 1
+        assert tests[0].__name__ == "MockSingleTest"
+        assert issubclass(tests[0], editor_test.EditorSingleTest)
+
+    def test_GetSingleTests_AllTests_ReturnsOnlySingles(self):
+        class MockTestSuite(editor_test.EditorTestSuite):
+            class MockSingleTest(editor_test.EditorSingleTest):
+                pass
+            class MockAnotherSingleTest(editor_test.EditorSingleTest):
+                pass
+            class MockNotSingleTest(editor_test.EditorSharedTest):
+                pass
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_single_tests()
+        assert len(tests) == 2
+        for test in tests:
+            assert issubclass(test, editor_test.EditorSingleTest)
+
+    def test_GetSharedTests_NoSharedTests_EmptyList(self):
+        class MockTestSuite(editor_test.EditorTestSuite):
+            pass
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_shared_tests()
+        assert len(tests) == 0
+
+    def test_GetSharedTests_OneSharedTests_ReturnsOne(self):
+        class MockTestSuite(editor_test.EditorTestSuite):
+            class MockSharedTest(editor_test.EditorSharedTest):
+                pass
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_shared_tests()
+        assert len(tests) == 1
+        assert tests[0].__name__ == 'MockSharedTest'
+        assert issubclass(tests[0], editor_test.EditorSharedTest)
+
+    def test_GetSharedTests_AllTests_ReturnsOnlyShared(self):
+        class MockTestSuite(editor_test.EditorTestSuite):
+            class MockSharedTest(editor_test.EditorSharedTest):
+                pass
+            class MockAnotherSharedTest(editor_test.EditorSharedTest):
+                pass
+            class MockNotSharedTest(editor_test.EditorSingleTest):
+                pass
+        mock_test_suite = MockTestSuite()
+        tests = mock_test_suite.get_shared_tests()
+        assert len(tests) == 2
+        for test in tests:
+            assert issubclass(test, editor_test.EditorSharedTest)
+
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite.filter_session_shared_tests')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite.get_shared_tests')
+    def test_GetSessionSharedTests_Valid_CallsCorrectly(self, mock_get_shared_tests, mock_filter_session):
+        editor_test.EditorTestSuite.get_session_shared_tests(mock.MagicMock())
+        assert mock_get_shared_tests.called
+        assert mock_filter_session.called
+
+    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock())
+    def test_FilterSessionSharedTests_OneSharedTest_ReturnsOne(self):
+        def mock_test():
+            pass
+        mock_test.originalname = 'mock_test'
+        mock_test.__name__ = mock_test.originalname
+        mock_session_items = [mock_test]
+        mock_shared_tests = [mock_test]
+
+        selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
+        assert selected_tests == mock_session_items
+        assert len(selected_tests) == 1
+
+    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock())
+    def test_FilterSessionSharedTests_ManyTests_ReturnsCorrectTests(self):
+        def mock_test():
+            pass
+        def mock_test_2():
+            pass
+        def mock_test_3():
+            pass
+        mock_test.originalname = 'mock_test'
+        mock_test.__name__ = mock_test.originalname
+        mock_test_2.originalname = 'mock_test_2'
+        mock_test_2.__name__ = mock_test_2.originalname
+        mock_test_3.originalname = 'mock_test_3'
+        mock_test_3.__name__ = mock_test_3.originalname
+        mock_session_items = [mock_test, mock_test_2]
+        mock_shared_tests = [mock_test, mock_test_2, mock_test_3]
+
+        selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
+        assert selected_tests == mock_session_items
+
+    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup')
+    def test_FilterSessionSharedTests_SkipOneTest_ReturnsCorrectTests(self, mock_skip):
+        def mock_test():
+            pass
+        def mock_test_2():
+            pass
+        def mock_test_3():
+            pass
+        mock_skip.side_effect = [True, Exception]
+        mock_test.originalname = 'mock_test'
+        mock_test.__name__ = mock_test.originalname
+        mock_test_2.originalname = 'mock_test_2'
+        mock_test_2.__name__ = mock_test_2.originalname
+        mock_test_3.originalname = 'mock_test_3'
+        mock_test_3.__name__ = mock_test_3.originalname
+        mock_session_items = [mock_test, mock_test_2]
+        mock_shared_tests = [mock_test, mock_test_2, mock_test_3]
+
+        selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
+        assert selected_tests == [mock_test]
+
+    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock(side_effect=Exception))
+    def test_FilterSessionSharedTests_ExceptionDuringSkipSetup_SkipsAddingTest(self):
+        def mock_test():
+            pass
+        mock_test.originalname = 'mock_test'
+        mock_test.__name__ = mock_test.originalname
+        mock_session_items = [mock_test]
+        mock_shared_tests = [mock_test]
+
+        selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
+        assert len(selected_tests) == 0
+
+    def test_FilterSharedTests_TrueParams_ReturnsTrueTests(self):
+        mock_test = mock.MagicMock()
+        mock_test.is_batchable = True
+        mock_test.is_parallelizable = True
+        mock_test_2 = mock.MagicMock()
+        mock_test_2.is_batchable = False
+        mock_test_2.is_parallelizable = False
+        mock_shared_tests = [mock_test, mock_test_2]
+
+        filtered_tests = editor_test.EditorTestSuite.filter_shared_tests(
+            mock_shared_tests, is_batchable=True, is_parallelizable=True)
+        assert filtered_tests == [mock_test]
+
+    def test_FilterSharedTests_FalseParams_ReturnsFalseTests(self):
+        mock_test = mock.MagicMock()
+        mock_test.is_batchable = True
+        mock_test.is_parallelizable = True
+        mock_test_2 = mock.MagicMock()
+        mock_test_2.is_batchable = False
+        mock_test_2.is_parallelizable = False
+        mock_shared_tests = [mock_test, mock_test_2]
+
+        filtered_tests = editor_test.EditorTestSuite.filter_shared_tests(
+            mock_shared_tests, is_batchable=False, is_parallelizable=False)
+        assert filtered_tests == [mock_test_2]
+
+class TestUtils(unittest.TestCase):
+
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes')
+    def test_PrepareAssetProcessor_APExists_StartsAP(self, mock_kill_processes):
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor_data = mock.MagicMock()
+        mock_ap = mock.MagicMock()
+        mock_editor_data.asset_processor = mock_ap
+
+        mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data)
+        assert mock_ap.start.called
+        assert not mock_kill_processes.called
+
+    @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start')
+    @mock.patch('ly_test_tools.environment.process_utils.process_exists')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes')
+    def test_PrepareAssetProcessor_NoAP_KillAndCreateAP(self, mock_kill_processes, mock_proc_exists, mock_start):
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor_data = mock.MagicMock()
+        mock_editor_data.asset_processor = None
+        mock_proc_exists.return_value = False
+
+        mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data)
+        mock_kill_processes.assert_called_with(include_asset_processor=True)
+        assert isinstance(mock_editor_data.asset_processor, ly_test_tools.o3de.asset_processor.AssetProcessor)
+        assert mock_start.called
+
+    @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start')
+    @mock.patch('ly_test_tools.environment.process_utils.process_exists')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes')
+    def test_PrepareAssetProcessor_NoAPButProcExists_NoKill(self, mock_kill_processes, mock_proc_exists, mock_start):
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor_data = mock.MagicMock()
+        mock_editor_data.asset_processor = None
+        mock_proc_exists.return_value = True
+
+        mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data)
+        mock_kill_processes.assert_called_with(include_asset_processor=False)
+        assert not mock_start.called
+        assert mock_editor_data.asset_processor is None
+
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._prepare_asset_processor')
+    def test_SetupEditorTest_ValidArgs_CallsCorrectly(self, mock_prepare_ap, mock_kill_processes):
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_editor = mock.MagicMock()
+        mock_test_suite._setup_editor_test(mock_editor, mock.MagicMock(), mock.MagicMock())
+
+        assert mock_editor.configure_settings.called
+        assert mock_prepare_ap.called
+        mock_kill_processes.assert_called_once_with(include_asset_processor=False)
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename')
+    def test_GetResultsUsingOutput_ValidJsonSuccess_CreatesPassResult(self, mock_get_module, mock_create):
+        mock_get_module.return_value = 'mock_module_name'
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_test = mock.MagicMock()
+        mock_test.__name__ = 'mock_test_name'
+        mock_test_list = [mock_test]
+        mock_output = 'JSON_START(' \
+                      '{"name": "mock_module_name", "output": "mock_std_out", "success": "mock_success_data"}' \
+                      ')JSON_END'
+        mock_pass = mock.MagicMock()
+        mock_create.return_value = mock_pass
+
+        results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, '')
+        assert mock_create.called
+        assert len(results) == 1
+        assert results[mock_test.__name__] == mock_pass
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename')
+    def test_GetResultsUsingOutput_ValidJsonFail_CreatesFailResult(self, mock_get_module, mock_create):
+        mock_get_module.return_value = 'mock_module_name'
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_test = mock.MagicMock()
+        mock_test.__name__ = 'mock_test_name'
+        mock_test_list = [mock_test]
+        mock_output = 'JSON_START(' \
+                      '{"name": "mock_module_name", "output": "mock_std_out", "failed": "mock_fail_data", "success": ""}' \
+                      ')JSON_END'
+        mock_fail = mock.MagicMock()
+        mock_create.return_value = mock_fail
+
+        results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, '')
+        assert mock_create.called
+        assert len(results) == 1
+        assert results[mock_test.__name__] == mock_fail
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Unknown.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename')
+    def test_GetResultsUsingOutput_ModuleNotInLog_CreatesUnknownResult(self, mock_get_module, mock_create):
+        mock_get_module.return_value = 'different_module_name'
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_test = mock.MagicMock()
+        mock_test.__name__ = 'mock_test_name'
+        mock_test_list = [mock_test]
+        mock_output = 'JSON_START(' \
+                      '{"name": "mock_module_name", "output": "mock_std_out", "failed": "mock_fail_data"}' \
+                      ')JSON_END'
+        mock_unknown = mock.MagicMock()
+        mock_create.return_value = mock_unknown
+
+        results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, '')
+        assert mock_create.called
+        assert len(results) == 1
+        assert results[mock_test.__name__] == mock_unknown
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Unknown.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename')
+    def test_GetResultsUsingOutput_MultipleTests_CreatesCorrectResults(self, mock_get_module, mock_create_unknown,
+                                                                       mock_create_fail, mock_create_pass):
+        mock_get_module.side_effect = ['mock_module_name_pass', 'mock_module_name_fail', 'different_module_name']
+        mock_test_suite = editor_test.EditorTestSuite()
+        mock_test_pass = mock.MagicMock()
+        mock_test_pass.__name__ = 'mock_test_name_pass'
+        mock_test_fail = mock.MagicMock()
+        mock_test_fail.__name__ = 'mock_test_name_fail'
+        mock_test_unknown = mock.MagicMock()
+        mock_test_unknown.__name__ = 'mock_test_name_unknown'
+        mock_test_list = [mock_test_pass, mock_test_fail, mock_test_unknown]
+        mock_output = 'JSON_START(' \
+                      '{"name": "mock_module_name_pass", "output": "mock_std_out", "success": "mock_success_data"}' \
+                      ')JSON_END' \
+                      'JSON_START(' \
+                      '{"name": "mock_module_name_fail", "output": "mock_std_out", "failed": "mock_fail_data", "success": ""}' \
+                      ')JSON_END' \
+                      'JSON_START(' \
+                      '{"name": "mock_module_name_unknown", "output": "mock_std_out", "failed": "mock_fail_data", "success": ""}' \
+                      ')JSON_END'
+        mock_editor_log = 'JSON_START(' \
+                          '{"name": "mock_module_name_pass"}' \
+                          ')JSON_END' \
+                          'JSON_START(' \
+                          '{"name": "mock_module_name_fail"}' \
+                          ')JSON_END'
+        mock_unknown = mock.MagicMock()
+        mock_pass = mock.MagicMock()
+        mock_fail = mock.MagicMock()
+        mock_create_unknown.return_value = mock_unknown
+        mock_create_pass.return_value = mock_pass
+        mock_create_fail.return_value = mock_fail
+
+        results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, mock_editor_log)
+        mock_create_pass.assert_called_with(
+            mock_test_pass, 'mock_std_out', 'JSON_START({"name": "mock_module_name_pass"})JSON_END')
+        mock_create_fail.assert_called_with(
+            mock_test_fail, 'mock_std_out', 'JSON_START({"name": "mock_module_name_fail"})JSON_END')
+        mock_create_unknown.assert_called_with(
+            mock_test_unknown, mock_output, "Couldn't find any test run information on stdout", mock_editor_log)
+        assert len(results) == 3
+        assert results[mock_test_pass.__name__] == mock_pass
+        assert results[mock_test_fail.__name__] == mock_fail
+        assert results[mock_test_unknown.__name__] == mock_unknown
+
+    @mock.patch('builtins.print')
+    def test_ReportResult_TestPassed_ReportsCorrectly(self, mock_print):
+        mock_test_name = 'mock name'
+        mock_pass = ly_test_tools.o3de.editor_test.Result.Pass()
+        ly_test_tools.o3de.editor_test.EditorTestSuite._report_result(mock_test_name, mock_pass)
+        mock_print.assert_called_with(f'Test {mock_test_name}:\nTest Passed\n------------\n| Output |\n------------\n'
+                                      f'-- No output --\n')
+
+    @mock.patch('pytest.fail')
+    def test_ReportResult_TestFailed_FailsCorrectly(self, mock_pytest_fail):
+        mock_fail = ly_test_tools.o3de.editor_test.Result.Fail()
+
+        ly_test_tools.o3de.editor_test.EditorTestSuite._report_result('mock_test_name', mock_fail)
+        mock_pytest_fail.assert_called_with('Test mock_test_name:\nTest FAILED\n------------\n| Output |'
+                                            '\n------------\n-- No output --\n--------------\n| Editor log |'
+                                            '\n--------------\n-- No editor log found --\n')
+
+class TestRunningTests(unittest.TestCase):
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorTest_TestSucceeds_ReturnsPass(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                     mock_retrieve_log, mock_retrieve_editor_log,
+                                                     mock_get_output_results, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_editor.get_returncode.return_value = 0
+        mock_get_output_results.return_value = {}
+        mock_pass = mock.MagicMock()
+        mock_create.return_value = mock_pass
+
+        results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                    'mock_log_name', mock_test_spec, [])
+        assert mock_cycle_crash.called
+        assert mock_editor.start.called
+        assert mock_create.called
+        assert results == {mock_test_spec.__name__: mock_pass}
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorTest_TestFails_ReturnsFail(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                  mock_retrieve_log, mock_retrieve_editor_log,
+                                                  mock_get_output_results, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_editor.get_returncode.return_value = 15
+        mock_get_output_results.return_value = {}
+        mock_fail = mock.MagicMock()
+        mock_create.return_value = mock_fail
+
+        results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                    'mock_log_name', mock_test_spec, [])
+        assert mock_cycle_crash.called
+        assert mock_editor.start.called
+        assert mock_create.called
+        assert results == {mock_test_spec.__name__: mock_fail}
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorTest_TestCrashes_ReturnsCrash(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                     mock_retrieve_log, mock_retrieve_editor_log,
+                                                     mock_get_output_results, mock_retrieve_crash, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_editor.get_returncode.return_value = 1
+        mock_get_output_results.return_value = {}
+        mock_crash = mock.MagicMock()
+        mock_create.return_value = mock_crash
+
+        results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                    'mock_log_name', mock_test_spec, [])
+        assert mock_cycle_crash.call_count == 2
+        assert mock_editor.start.called
+        assert mock_retrieve_crash.called
+        assert mock_create.called
+        assert results == {mock_test_spec.__name__: mock_crash}
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Timeout.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorTest_TestTimeout_ReturnsTimeout(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                       mock_retrieve_log, mock_retrieve_editor_log,
+                                                       mock_get_output_results, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_editor.wait.side_effect = ly_test_tools.launchers.exceptions.WaitTimeoutError()
+        mock_get_output_results.return_value = {}
+        mock_timeout = mock.MagicMock()
+        mock_create.return_value = mock_timeout
+
+        results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                    'mock_log_name', mock_test_spec, [])
+        assert mock_cycle_crash.called
+        assert mock_editor.start.called
+        assert mock_editor.kill.called
+        assert mock_create.called
+        assert results == {mock_test_spec.__name__: mock_timeout}
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorMultitest_AllTestsPass_ReturnsPasses(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                            mock_retrieve_log, mock_retrieve_editor_log, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_editor.get_returncode.return_value = 0
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_test_spec_2 = mock.MagicMock()
+        mock_test_spec_2.__name__ = 'mock_test_name_2'
+        mock_test_spec_list = [mock_test_spec, mock_test_spec_2]
+        mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2']
+        mock_pass = mock.MagicMock()
+        mock_pass_2 = mock.MagicMock()
+        mock_create.side_effect = [mock_pass, mock_pass_2]
+
+        results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                         'mock_log_name', mock_test_spec_list, [])
+        assert results == {mock_test_spec.__name__: mock_pass, mock_test_spec_2.__name__: mock_pass_2}
+        assert mock_cycle_crash.called
+        assert mock_create.call_count == 2
+
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorMultitest_OneFailure_CallsCorrectFunc(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                             mock_retrieve_log, mock_retrieve_editor_log, mock_get_results):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_editor.get_returncode.return_value = 15
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec_2 = mock.MagicMock()
+        mock_test_spec_list = [mock_test_spec, mock_test_spec_2]
+        mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2']
+        mock_get_results.return_value = {'mock_test_name': mock.MagicMock(), 'mock_test_name_2': mock.MagicMock()}
+
+        results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                         'mock_log_name', mock_test_spec_list, [])
+        assert mock_cycle_crash.called
+        assert mock_get_results.called
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorMultitest_OneCrash_ReportsOnUnknownResult(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                                 mock_retrieve_log, mock_retrieve_editor_log,
+                                                                 mock_get_results, mock_retrieve_crash, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_editor.get_returncode.return_value = 1
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_test_spec_2 = mock.MagicMock()
+        mock_test_spec_2.__name__ = 'mock_test_name_2'
+        mock_test_spec_list = [mock_test_spec, mock_test_spec_2]
+        mock_unknown_result = ly_test_tools.o3de.editor_test.Result.Unknown()
+        mock_unknown_result.test_spec = mock.MagicMock()
+        mock_unknown_result.editor_log = mock.MagicMock()
+        mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2']
+        mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result,
+                                         mock_test_spec_2.__name__: mock.MagicMock()}
+        mock_crash = mock.MagicMock()
+        mock_create.return_value = mock_crash
+
+        results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                         'mock_log_name', mock_test_spec_list, [])
+        assert mock_cycle_crash.call_count == 2
+        assert mock_get_results.called
+        assert results[mock_test_spec.__name__] == mock_crash
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorMultitest_ManyUnknown_ReportsUnknownResults(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                                   mock_retrieve_log, mock_retrieve_editor_log,
+                                                                   mock_get_results, mock_retrieve_crash, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_editor.get_returncode.return_value = 1
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_test_spec_2 = mock.MagicMock()
+        mock_test_spec_2.__name__ = 'mock_test_name_2'
+        mock_test_spec_list = [mock_test_spec, mock_test_spec_2]
+        mock_unknown_result = ly_test_tools.o3de.editor_test.Result.Unknown()
+        mock_unknown_result.__name__ = 'mock_test_name'
+        mock_unknown_result.test_spec = mock.MagicMock()
+        mock_unknown_result.test_spec.__name__ = 'mock_test_spec_name'
+        mock_unknown_result.editor_log = mock.MagicMock()
+        mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2']
+        mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result,
+                                         mock_test_spec_2.__name__: mock_unknown_result}
+        mock_crash = mock.MagicMock()
+        mock_create.return_value = mock_crash
+
+        results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                         'mock_log_name', mock_test_spec_list, [])
+        assert mock_cycle_crash.call_count == 2
+        assert mock_get_results.called
+        assert results[mock_test_spec.__name__] == mock_crash
+        assert results[mock_test_spec_2.__name__].extra_info
+
+    @mock.patch('ly_test_tools.o3de.editor_test.Result.Timeout.create')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath')
+    @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report')
+    def test_ExecEditorMultitest_EditorTimeout_ReportsCorrectly(self, mock_cycle_crash, mock_get_testcase_filepath,
+                                                                mock_retrieve_log, mock_retrieve_editor_log,
+                                                                mock_get_results, mock_create):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_workspace = mock.MagicMock()
+        mock_editor = mock.MagicMock()
+        mock_editor.wait.side_effect = ly_test_tools.launchers.exceptions.WaitTimeoutError()
+        mock_test_spec = mock.MagicMock()
+        mock_test_spec.__name__ = 'mock_test_name'
+        mock_test_spec_2 = mock.MagicMock()
+        mock_test_spec_2.__name__ = 'mock_test_name_2'
+        mock_test_spec_list = [mock_test_spec, mock_test_spec_2]
+        mock_unknown_result = ly_test_tools.o3de.editor_test.Result.Unknown()
+        mock_unknown_result.test_spec = mock.MagicMock()
+        mock_unknown_result.test_spec.__name__ = 'mock_test_spec_name'
+        mock_unknown_result.output = mock.MagicMock()
+        mock_unknown_result.editor_log = mock.MagicMock()
+        mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2']
+        mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result,
+                                         mock_test_spec_2.__name__: mock_unknown_result}
+        mock_timeout = mock.MagicMock()
+        mock_create.return_value = mock_timeout
+
+        results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0,
+                                                         'mock_log_name', mock_test_spec_list, [])
+        assert mock_cycle_crash.called
+        assert mock_get_results.called
+        assert results[mock_test_spec_2.__name__].extra_info
+        assert results[mock_test_spec.__name__] == mock_timeout
+
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._report_result')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._exec_editor_test')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunSingleTest_ValidTest_ReportsResults(self, mock_setup_test, mock_exec_editor_test, mock_report_result):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_test_data = mock.MagicMock()
+        mock_test_spec = mock.MagicMock()
+        mock_result = mock.MagicMock()
+        mock_test_name = 'mock_test_result'
+        mock_exec_editor_test.return_value = {mock_test_name: mock_result}
+
+        mock_test_suite._run_single_test(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
+                                         mock_test_spec)
+
+        assert mock_setup_test.called
+        assert mock_exec_editor_test.called
+        assert mock_test_data.results.update.called
+        mock_report_result.assert_called_with(mock_test_name, mock_result)
+
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._exec_editor_multitest')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunBatchedTests_ValidTests_CallsCorrectly(self, mock_setup_test, mock_exec_multitest):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_batched_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
+                                           mock.MagicMock(), [])
+
+        assert mock_setup_test.called
+        assert mock_exec_multitest.called
+        assert mock_test_data.results.update.called
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 2
+        mock_test_spec_list = [mock.MagicMock(), mock.MagicMock()]
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
+                                            mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
+        assert mock_thread.call_count == len(mock_test_spec_list)
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelTests_TenTestsAndTwoEditors_TenThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 2
+        mock_test_spec_list = []
+        for i in range(10):
+            mock_test_spec_list.append(mock.MagicMock())
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
+                                            mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
+        assert mock_thread.call_count == len(mock_test_spec_list)
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelTests_TenTestsAndThreeEditors_TenThreads(self, mock_setup_test, mock_get_num_editors,
+                                                                 mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 3
+        mock_test_spec_list = []
+        for i in range(10):
+            mock_test_spec_list.append(mock.MagicMock())
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
+                                            mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
+        assert mock_thread.call_count == len(mock_test_spec_list)
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelBatchedTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors,
+                                                                   mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 2
+        mock_test_spec_list = [mock.MagicMock(), mock.MagicMock()]
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_batched_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
+                                                    mock_test_data, mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
+        assert mock_thread.call_count == len(mock_test_spec_list)
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelBatchedTests_TenTestsAndTwoEditors_2Threads(self, mock_setup_test, mock_get_num_editors,
+                                                                    mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 2
+        mock_test_spec_list = []
+        for i in range(10):
+            mock_test_spec_list.append(mock.MagicMock())
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_batched_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
+                                                    mock_test_data, mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == 2
+        assert mock_thread.call_count == 2
+
+    @mock.patch('threading.Thread')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
+    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
+    def test_RunParallelBatchedTests_TenTestsAndThreeEditors_ThreeThreads(self, mock_setup_test, mock_get_num_editors,
+                                                                          mock_thread):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_get_num_editors.return_value = 3
+        mock_test_spec_list = []
+        for i in range(10):
+            mock_test_spec_list.append(mock.MagicMock())
+        mock_test_data = mock.MagicMock()
+
+        mock_test_suite._run_parallel_batched_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
+                                                    mock_test_data, mock_test_spec_list, [])
+
+        assert mock_setup_test.called
+        assert mock_test_data.results.update.call_count == 3
+        assert mock_thread.call_count == 3
+
+    def test_GetNumberParallelEditors_ConfigExists_ReturnsConfig(self):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_request = mock.MagicMock()
+        mock_request.config.getoption.return_value = 1
+
+        num_of_editors = mock_test_suite._get_number_parallel_editors(mock_request)
+        assert num_of_editors == 1
+
+    def test_GetNumberParallelEditors_ConfigNotExists_ReturnsDefault(self):
+        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
+        mock_request = mock.MagicMock()
+        mock_request.config.getoption.return_value = None
+
+        num_of_editors = mock_test_suite._get_number_parallel_editors(mock_request)
+        assert num_of_editors == mock_test_suite.get_number_parallel_editors()
+
+@mock.patch('_pytest.python.Class.collect')
+class TestEditorTestClass(unittest.TestCase):
+
+    def setUp(self):
+        mock_name = mock.MagicMock()
+        mock_collector = mock.MagicMock()
+        self.mock_test_class = ly_test_tools.o3de.editor_test.EditorTestSuite.EditorTestClass(mock_name, mock_collector)
+        self.mock_test_class.obj = mock.MagicMock()
+        single_1 = mock.MagicMock()
+        single_1.__name__ = 'single_1_name'
+        single_2 = mock.MagicMock()
+        single_2.__name__ = 'single_2_name'
+        self.mock_test_class.obj.get_single_tests.return_value = [single_1, single_2]
+        batch_1 = mock.MagicMock()
+        batch_1.__name__ = 'batch_1_name'
+        batch_2 = mock.MagicMock()
+        batch_2.__name__ = 'batch_2_name'
+        parallel_1 = mock.MagicMock()
+        parallel_1.__name__ = 'parallel_1_name'
+        parallel_2 = mock.MagicMock()
+        parallel_2.__name__ = 'parallel_2_name'
+        both_1 = mock.MagicMock()
+        both_1.__name__ = 'both_1_name'
+        both_2 = mock.MagicMock()
+        both_2.__name__ = 'both_2_name'
+        self.mock_test_class.obj.filter_shared_tests.side_effect = [ [batch_1, batch_2],
+                                                                     [parallel_1, parallel_2],
+                                                                     [both_1, both_2] ]
+
+    def test_Collect_NoParallelNoBatched_RunsAsSingleTests(self, mock_collect):
+        self.mock_test_class.config.getoption.return_value = True
+        self.mock_test_class.collect()
+        assert self.mock_test_class.obj.single_1_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.single_2_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.batch_1_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.batch_2_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.parallel_1_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.parallel_2_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.both_1_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.both_2_name.__name__ == 'single_run'
+
+    def test_Collect_AllValidTests_RunsAsIntended(self, mock_collect):
+        self.mock_test_class.config.getoption.return_value = False
+        self.mock_test_class.collect()
+        assert self.mock_test_class.obj.single_1_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.single_2_name.__name__ == 'single_run'
+        assert self.mock_test_class.obj.batch_1_name.__name__ == 'result'
+        assert self.mock_test_class.obj.batch_2_name.__name__ == 'result'
+        assert self.mock_test_class.obj.parallel_1_name.__name__ == 'result'
+        assert self.mock_test_class.obj.parallel_2_name.__name__ == 'result'
+        assert self.mock_test_class.obj.both_1_name.__name__ == 'result'
+        assert self.mock_test_class.obj.both_2_name.__name__ == 'result'
+
+    def test_Collect_AllValidTests_CallsCollect(self, mock_collect):
+        self.mock_test_class.collect()
+        assert mock_collect.called
+
+    def test_Collect_NormalCollection_ReturnsFilteredRuns(self, mock_collect):
+        mock_run = mock.MagicMock()
+        mock_run.obj.marks = {"run_type": 'run_shared'}
+        mock_run_2 = mock.MagicMock()
+        mock_run_2.obj.marks = {"run_type": 'result'}
+        mock_instance = mock.MagicMock()
+        mock_instance.collect.return_value = [mock_run, mock_run_2]
+        mock_collect.return_value = [mock_instance]
+
+        collection = self.mock_test_class.collect()
+        assert collection == [mock_run_2]
+
+    def test_Collect_NormalRun_ReturnsRunners(self, mock_collect):
+        self.mock_test_class.collect()
+        runners = self.mock_test_class.obj._runners
+
+        assert runners[0].name == 'run_batched_tests'
+        assert runners[1].name == 'run_parallel_tests'
+        assert runners[2].name == 'run_parallel_batched_tests'
+
+    def test_Collect_NormalCollection_StoresRunners(self, mock_collect):
+        mock_runner = mock.MagicMock()
+        mock_run = mock.MagicMock()
+        mock_run.obj.marks = {"run_type": 'run_shared'}
+        mock_run.function.marks = {"runner": mock_runner}
+        mock_runner_2 = mock.MagicMock()
+        mock_runner_2.result_pytestfuncs = []
+        mock_run_2 = mock.MagicMock()
+        mock_run_2.obj.marks = {"run_type": 'result'}
+        mock_run_2.function.marks = {"runner": mock_runner_2}
+        mock_instance = mock.MagicMock()
+        mock_instance.collect.return_value = [mock_run, mock_run_2]
+        mock_collect.return_value = [mock_instance]
+
+        self.mock_test_class.collect()
+
+        assert mock_runner.run_pytestfunc == mock_run
+        assert mock_run_2 in mock_runner_2.result_pytestfuncs
diff --git a/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py b/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py
new file mode 100644
index 0000000000..10c779e546
--- /dev/null
+++ b/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py
@@ -0,0 +1,41 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+import pytest
+import os
+import unittest.mock as mock
+import unittest
+
+import ly_test_tools._internal.pytest_plugin.editor_test as editor_test
+
+pytestmark = pytest.mark.SUITE_smoke
+
+class TestEditorTest(unittest.TestCase):
+
+    @mock.patch('inspect.isclass', mock.MagicMock(return_value=True))
+    def test_PytestPycollectMakeitem_ValidArgs_CallsCorrectly(self):
+        mock_collector = mock.MagicMock()
+        mock_name = mock.MagicMock()
+        mock_obj = mock.MagicMock()
+        mock_base = mock.MagicMock()
+        mock_obj.__bases__ = [mock_base]
+
+        editor_test.pytest_pycollect_makeitem(mock_collector, mock_name, mock_obj)
+        mock_base.pytest_custom_makeitem.assert_called_once_with(mock_collector, mock_name, mock_obj)
+
+    def test_PytestCollectionModifyitem_OneValidClass_CallsOnce(self):
+        mock_item = mock.MagicMock()
+        mock_class = mock.MagicMock()
+        mock_class.pytest_custom_modify_items = mock.MagicMock()
+        mock_item.instance.__class__ = mock_class
+        mock_session = mock.MagicMock()
+        mock_items = [mock_item, mock.MagicMock()]
+        mock_config = mock.MagicMock()
+
+        generator = editor_test.pytest_collection_modifyitems(mock_session, mock_items, mock_config)
+        for x in generator:
+            pass
+        assert mock_class.pytest_custom_modify_items.call_count == 1