From 8ab9f89b46866cc66735260416f35bdba51dfb1e Mon Sep 17 00:00:00 2001 From: evanchia Date: Wed, 13 Oct 2021 12:43:54 -0700 Subject: [PATCH] Adding unit tests and docstrings for editor test files Signed-off-by: evanchia --- .../_internal/pytest_plugin/editor_test.py | 31 +- .../pytest_plugin/test_tools_fixtures.py | 10 +- .../ly_test_tools/o3de/editor_test.py | 357 ++++-- .../ly_test_tools/o3de/editor_test_utils.py | 24 +- .../tests/unit/test_editor_test_utils.py | 158 +++ Tools/LyTestTools/tests/unit/test_fixtures.py | 11 + .../tests/unit/test_o3de_editor_test.py | 1017 +++++++++++++++++ .../unit/test_pytest_plugin_editor_test.py | 41 + 8 files changed, 1567 insertions(+), 82 deletions(-) create mode 100644 Tools/LyTestTools/tests/unit/test_editor_test_utils.py create mode 100644 Tools/LyTestTools/tests/unit/test_o3de_editor_test.py create mode 100644 Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py diff --git a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py index 80a562977b..2df5810047 100644 --- a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py +++ b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py @@ -3,9 +3,7 @@ Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT -""" -""" Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests. """ @@ -15,22 +13,41 @@ import inspect __test__ = False def pytest_addoption(parser): + # type (argparse.ArgumentParser) -> None + """ + Options when running tests in batches or parallel. 
+ :param parser: The ArgumentParser object + :return: None + """ parser.addoption("--no-editor-batch", action="store_true", help="Don't batch multiple tests in single editor") parser.addoption("--no-editor-parallel", action="store_true", help="Don't run multiple editors in parallel") parser.addoption("--editors-parallel", type=int, action="store", help="Override the number editors to run at the same time") -# Create a custom custom item collection if the class defines pytest_custom_makeitem function -# This is used for automtically generating test functions with a custom collector def pytest_pycollect_makeitem(collector, name, obj): + # type (PyCollector, str, object) -> Collector + """ + Create a custom custom item collection if the class defines pytest_custom_makeitem function. This is used for + automtically generating test functions with a custom collector. + :param collector: The Python test collector + :param name: Name of the collector + :param obj: The custom collector, normally an EditorTestSuite.EditorTestClass object + :return: Returns the custom collector + """ if inspect.isclass(obj): for base in obj.__bases__: if hasattr(base, "pytest_custom_makeitem"): return base.pytest_custom_makeitem(collector, name, obj) -# Add custom modification of items. -# This is used for adding the runners into the item list @pytest.hookimpl(hookwrapper=True) def pytest_collection_modifyitems(session, items, config): + # type (Session, list, Config) -> None + """ + Add custom modification of items. This is used for adding the runners into the item list. 
+ :param session: The Pytest Session + :param items: The test case functions + :param config: The Pytest Config object + :return: None + """ all_classes = set() for item in items: all_classes.add(item.instance.__class__) @@ -40,4 +57,4 @@ def pytest_collection_modifyitems(session, items, config): for cls in all_classes: if hasattr(cls, "pytest_custom_modify_items"): cls.pytest_custom_modify_items(session, items, config) - + diff --git a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py index f4bda9b293..3115c28406 100755 --- a/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py +++ b/Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py @@ -55,8 +55,16 @@ def pytest_configure(config): ly_test_tools._internal.pytest_plugin.build_directory = _get_build_directory(config) ly_test_tools._internal.pytest_plugin.output_path = _get_output_path(config) - def pytest_pycollect_makeitem(collector, name, obj): + # type (PyCollector, str, object) -> Collector + """ + Create a custom custom item collection if the class defines pytest_custom_makeitem function. This is used for + automtically generating test functions with a custom collector. 
+ :param collector: The Python test collector + :param name: Name of the collector + :param obj: The custom collector, normally an EditorTestSuite.EditorTestClass object + :return: Returns the custom collector + """ import inspect if inspect.isclass(obj): for base in obj.__bases__: diff --git a/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py b/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py index 781977cf33..39413c22a3 100644 --- a/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py +++ b/Tools/LyTestTools/ly_test_tools/o3de/editor_test.py @@ -25,16 +25,16 @@ import re import ly_test_tools.environment.file_system as file_system import ly_test_tools.environment.waiter as waiter import ly_test_tools.environment.process_utils as process_utils +import ly_test_tools.o3de.editor_test_utils as editor_utils from ly_test_tools.o3de.asset_processor import AssetProcessor from ly_test_tools.launchers.exceptions import WaitTimeoutError -from . import editor_test_utils as editor_utils # This file provides editor testing functionality to easily write automated editor tests for O3DE. -# For using these utilities, you can subclass your test suite from EditorTestSuite, this allows an easy way of specifying -# python test scripts that the editor will run without needing to write any boilerplace code. -# It supports out of the box parallelization(running multiple editor instances at once), batching(running multiple tests in the same editor instance) and -# crash detection. +# For using these utilities, you can subclass your test suite from EditorTestSuite, this allows an easy way of +# specifying python test scripts that the editor will run without needing to write any boilerplace code. +# It supports out of the box parallelization(running multiple editor instances at once), batching(running multiple tests +# in the same editor instance) and crash detection. # Usage example: # class MyTestSuite(EditorTestSuite): # @@ -48,7 +48,8 @@ from . 
import editor_test_utils as editor_utils # from . import yet_another_script_to_be_run_by_editor as test_module # # -# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, parallelizing/batching as required +# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, +# parallelizing/batching as required # This file contains no tests, but with this we make sure it won't be picked up by the runner since the file ends with _test __test__ = False @@ -109,12 +110,22 @@ class EditorBatchedTest(EditorSharedTest): class Result: class Base: def get_output_str(self): + # type () -> str + """ + Checks if the output attribute exists and returns it. + :return: Either the output string or a no output message + """ if hasattr(self, "output") and self.output is not None: return self.output else: return "-- No output --" def get_editor_log_str(self): + # type () -> str + """ + Checks if the editor_log attribute exists and returns it. + :return: Either the editor_log string or a no output message + """ if hasattr(self, "editor_log") and self.editor_log is not None: return self.editor_log else: @@ -122,7 +133,15 @@ class Result: class Pass(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output : str, editor_log : str): + def create(cls, test_spec, output, editor_log): + # type (EditorTestBase, str, str) -> Pass + """ + Creates a Pass object with a given test spec, output string, and editor log string. 
+ :test_spec: The type of EditorTestBase + :output: The test output + :editor_log: The editor log's output + :return: the Pass object + """ r = cls() r.test_spec = test_spec r.output = output @@ -141,7 +160,15 @@ class Result: class Fail(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output, editor_log : str): + def create(cls, test_spec, output, editor_log): + # type (EditorTestBase, str, str) -> Fail + """ + Creates a Fail object with a given test spec, output string, and editor log string. + :test_spec: The type of EditorTestBase + :output: The test output + :editor_log: The editor log's output + :return: the Fail object + """ r = cls() r.test_spec = test_spec r.output = output @@ -164,7 +191,18 @@ class Result: class Crash(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output : str, ret_code : int, stacktrace : str, editor_log : str): + def create(cls, test_spec, output, ret_code, stacktrace, editor_log): + # type (EditorTestBase, str, int, str, str) -> Crash + """ + Creates a Crash object with a given test spec, output string, and editor log string. This also includes the + return code and stacktrace. 
+ :test_spec: The type of EditorTestBase + :output: The test output + :ret_code: The test's return code + :stacktrace: The test's stacktrace if available + :editor_log: The editor log's output + :return: The Crash object + """ r = cls() r.output = output r.test_spec = test_spec @@ -174,7 +212,7 @@ class Result: return r def __str__(self): - stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace + stacktrace_str = "-- No stacktrace data found --\n" if not self.stacktrace else self.stacktrace output = ( f"Test CRASHED, return code {hex(self.ret_code)}\n" f"---------------\n" @@ -190,12 +228,21 @@ class Result: f"--------------\n" f"{self.get_editor_log_str()}\n" ) - crash_str = "-- No crash information found --" return output class Timeout(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output : str, time_secs : float, editor_log : str): + def create(cls, test_spec, output, time_secs, editor_log): + # type (EditorTestBase, str, float, str) -> Timeout + """ + Creates a Timeout object with a given test spec, output string, and editor log string. The timeout time + should be provided in seconds + :test_spec: The type of EditorTestBase + :output: The test output + :time_secs: The timeout duration in seconds + :editor_log: The editor log's output + :return: The Timeout object + """ r = cls() r.output = output r.test_spec = test_spec @@ -219,14 +266,23 @@ class Result: class Unknown(Base): @classmethod - def create(cls, test_spec : EditorTestBase, output : str, extra_info : str, editor_log : str): + def create(cls, test_spec, output, extra_info, editor_log): + # type (EditorTestBase, str, str , str) -> Unknown + """ + Creates an Unknown test results object if something goes wrong. 
+ :test_spec: The type of EditorTestBase + :output: The test output + :extra_info: Any extra information as a string + :editor_log: The editor log's output + :return: The Unknown object + """ r = cls() r.output = output r.test_spec = test_spec r.editor_log = editor_log r.extra_info = extra_info return r - + def __str__(self): output = ( f"Unknown test result, possible cause: {self.extra_info}\n" @@ -263,6 +319,18 @@ class EditorTestSuite(): @pytest.fixture(scope="class") def editor_test_data(self, request): + # type (request) -> TestData + """ + Yields a generator to capture the test results and an AssetProcessor object. + :request: The pytest request + :yield: The TestData object + """ + self._editor_test_data(request) + + def _editor_test_data(self, request): + """ + A wrapper function for unit testing to call directly + """ class TestData(): def __init__(self): self.results = {} # Dict of str(test_spec.__name__) -> Result @@ -445,6 +513,10 @@ class EditorTestSuite(): @classmethod def pytest_custom_modify_items(cls, session, items, config): + # type () -> None + """ + + """ # Add here the runners functions and filter the tests that will be run. # The runners will be added if they have any selected tests new_items = [] @@ -463,23 +535,53 @@ class EditorTestSuite(): @classmethod def get_single_tests(cls): + # type () -> list + """ + Grabs all of the EditorSingleTests subclassed tests from the EditorTestSuite class + Usage example: + class MyTestSuite(EditorTestSuite): + class MyFirstTest(EditorSingleTest): + from . import script_to_be_run_by_editor as test_module + :return: The list of single tests + """ single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)] return single_tests @classmethod def get_shared_tests(cls): + # type () -> list + """ + Grabs all of the EditorSharedTests from the EditorTestSuite + Usage example: + class MyTestSuite(EditorTestSuite): + class MyFirstTest(EditorSharedTest): + from . 
import script_to_be_run_by_editor as test_module + :return: The list of shared tests + """ shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)] return shared_tests @classmethod def get_session_shared_tests(cls, session): + # type (Session) -> list[EditorTestBase] + """ + Filters and returns all of the shared tests in a given session. + :session: The test session + :return: The list of tests + """ shared_tests = cls.get_shared_tests() return cls.filter_session_shared_tests(session, shared_tests) @staticmethod def filter_session_shared_tests(session_items, shared_tests): - # Retrieve the test sub-set that was collected - # this can be less than the original set if were overriden via -k argument or similars + # type (list, list) -> list[EditorTestBase] + """ + Retrieve the test sub-set that was collected this can be less than the original set if were overriden via -k + argument or similars + :session_items: The tests in a session to run + :shared_tests: All of the shared tests + :return: The list of filtered tests + """ def will_run(item): try: skipping_pytest_runtest_setup(item) @@ -488,13 +590,20 @@ class EditorTestSuite(): return False session_items_by_name = { item.originalname:item for item in session_items } - selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and will_run(session_items_by_name[test.__name__])] + selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and + will_run(session_items_by_name[test.__name__])] return selected_shared_tests @staticmethod def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False): - # Retrieve the test sub-set that was collected - # this can be less than the original set if were overriden via -k argument or similars + # type (list, bool, bool) -> list + """ + Filters and returns all tests based off of if they are batchable and/or 
parallelizable + :shared_tests: All shared tests + :is_batchable: Filter to batchable tests + :is_parallelizable: Filter to parallelizable tests + :return: The list of filtered tests + """ return [ t for t in shared_tests if ( getattr(t, "is_batchable", None) is is_batchable @@ -504,9 +613,15 @@ class EditorTestSuite(): ] ### Utils ### - - # Prepares the asset processor for the test def _prepare_asset_processor(self, workspace, editor_test_data): + # type (AbstractWorkspace, TestData) -> None + """ + Prepares the asset processor for the test depending on whether or not the process is open and if the current + test owns it. + :workspace: The workspace object in case an AssetProcessor object needs to be created + :editor_test_data: The test data from calling editor_test_data() + :return: None + """ try: # Start-up an asset processor if we are not running one # If another AP process exist, don't kill it, as we don't own it @@ -525,14 +640,29 @@ class EditorTestSuite(): raise ex def _setup_editor_test(self, editor, workspace, editor_test_data): + # type(Editor, AbstractWorkspace, TestData) -> None + """ + Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring + :editor: The launcher Editor object + :workspace: The test Workspace object + :editor_test_data: The TestData from calling editor_test_data() + :return: None + """ self._prepare_asset_processor(workspace, editor_test_data) editor_utils.kill_all_ly_processes(include_asset_processor=False) editor.configure_settings() - # Utility function for parsing the output information from the editor. - # It deserializes the JSON content printed in the output for every test and returns that information. @staticmethod def _get_results_using_output(test_spec_list, output, editor_log_content): + # type(list, str, str) -> dict{str: Result} + """ + Utility function for parsing the output information from the editor. 
It deserializes the JSON content printed in + the output for every test and returns that information. + :test_spec_list: The list of EditorTests + :output: The Editor from Editor.get_output() + :editor_log_content: The contents of the editor log as a string + :return: A dict of the tests and their respective Result objects + """ results = {} pattern = re.compile(r"JSON_START\((.+?)\)JSON_END") out_matches = pattern.finditer(output) @@ -541,7 +671,8 @@ class EditorTestSuite(): try: elem = json.loads(m.groups()[0]) found_jsons[elem["name"]] = elem - except Exception: + except Exception as e: + raise e continue # Avoid to fail if the output data is corrupt # Try to find the element in the log, this is used for cutting the log contents later @@ -558,7 +689,9 @@ class EditorTestSuite(): for test_spec in test_spec_list: name = editor_utils.get_module_filename(test_spec.test_module) if name not in found_jsons.keys(): - results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content) + results[test_spec.__name__] = Result.Unknown.create(test_spec, output, + "Couldn't find any test run information on stdout", + editor_log_content) else: result = None json_result = found_jsons[name] @@ -573,7 +706,7 @@ class EditorTestSuite(): cur_log = editor_log_content[log_start : end] log_start = end - if json_result["success"]: + if "success" in json_result.keys(): result = Result.Pass.create(test_spec, json_output, cur_log) else: result = Result.Fail.create(test_spec, json_output, cur_log) @@ -581,9 +714,15 @@ class EditorTestSuite(): return results - # Fails the test if the test result is not a PASS, specifying the information @staticmethod - def _report_result(name : str, result : Result.Base): + def _report_result(name, result): + # type (str, Result) -> None + """ + Fails the test if the test result is not a PASS, specifying the information + :name: Name of the test + :result: The Result object which 
denotes if the test passed or not + :return: None + """ if isinstance(result, Result.Pass): output_str = f"Test {name}:\n{str(result)}" print(output_str) @@ -592,10 +731,19 @@ class EditorTestSuite(): pytest.fail(error_str) ### Running tests ### - # Starts the editor with the given test and retuns an result dict with a single element specifying the result - def _exec_editor_test(self, request, workspace, editor, run_id : int, log_name : str, - test_spec : EditorTestBase, cmdline_args : List[str] = []): - + def _exec_editor_test(self, request, workspace, editor, run_id, log_name, test_spec, cmdline_args = []): + # type (Request, AbstractWorkspace, Editor, int, str, EditorTestBase, list[str] -> dict{str: Result} + """ + Starts the editor with the given test and retuns an result dict with a single element specifying the result + :request: The pytest request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :run_id: The unique run id + :log_name: The name of the editor log to retrieve + :test_spec: The type of EditorTestBase + :cmdline_args: Any additional command line args + :return: a dictionary of Result objects + """ test_cmdline_args = self.global_extra_cmdline_args + cmdline_args test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None) if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer): @@ -629,12 +777,14 @@ class EditorTestSuite(): else: has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE if has_crashed: - test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None) + test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output + (run_id, workspace, self._TIMEOUT_CRASH_LOG), None) editor_utils.cycle_crash_report(run_id, workspace) else: test_result = Result.Fail.create(test_spec, output, editor_log_content) except 
WaitTimeoutError: - editor.kill() + output = editor.get_output() + editor.kill() editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace) test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content) @@ -643,11 +793,21 @@ class EditorTestSuite(): results[test_spec.__name__] = test_result return results - # Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that editor - # instance. In case of failure this function also parses the editor output to find out what specific tests failed - def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str, - test_spec_list : List[EditorTestBase], cmdline_args=[]): - + def _exec_editor_multitest(self, request, workspace, editor, run_id, log_name, test_spec_list, cmdline_args=[]): + # type (Request, AbstractWorkspace, Editor, int, str, list[EditorTestBase], list[str]) -> dict{str: Result} + """ + Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that + editor instance. In case of failure this function also parses the editor output to find out what specific tests + failed. 
+ :request: The pytest request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :run_id: The unique run id + :log_name: The name of the editor log to retrieve + :test_spec_list: A list of EditorTestBase tests to run + :cmdline_args: Any additional command line args + :return: A dict of Result objects + """ test_cmdline_args = self.global_extra_cmdline_args + cmdline_args if self.use_null_renderer: test_cmdline_args += ["-rhi=null"] @@ -660,7 +820,8 @@ class EditorTestSuite(): editor_utils.cycle_crash_report(run_id, workspace) results = {} - test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list) + test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for + test_spec in test_spec_list) cmdline = [ "--runpythontest", test_filenames_str, "-logfile", f"@log@/{log_name}", @@ -685,7 +846,8 @@ class EditorTestSuite(): # Scrap the output to attempt to find out which tests failed. 
# This function should always populate the result list, if it didn't find it, it will have "Unknown" type of result results = self._get_results_using_output(test_spec_list, output, editor_log_content) - assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran" + assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results" \ + "don't match the tests ran" # If the editor crashed, find out in which test it happened and update the results has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE @@ -695,50 +857,67 @@ class EditorTestSuite(): if isinstance(result, Result.Unknown): if not crashed_result: # The first test with "Unknown" result (no data in output) is likely the one that crashed - crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG) + crash_error = editor_utils.retrieve_crash_output(run_id, workspace, + self._TIMEOUT_CRASH_LOG) editor_utils.cycle_crash_report(run_id, workspace) - results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log) + results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, + crash_error, result.editor_log) crashed_result = result else: - # If there are remaning "Unknown" results, these couldn't execute because of the crash, update with info about the offender - results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed" - + # If there are remaning "Unknown" results, these couldn't execute because of the crash, + # update with info about the offender + results[test_spec_name].extra_info = f"This test has unknown result, test " \ + f"'{crashed_result.test_spec.__name__}' crashed " \ + f"before this test could be executed" # if all the tests ran, the one that has caused the crash is the last test if 
not crashed_result: crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG) editor_utils.cycle_crash_report(run_id, workspace) - results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, crash_error, crashed_result.editor_log) - - + results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, + crash_error, crashed_result.editor_log) except WaitTimeoutError: editor.kill() - output = editor.get_output() editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace) # The editor timed out when running the tests, get the data from the output to find out which ones ran results = self._get_results_using_output(test_spec_list, output, editor_log_content) - assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran" - + assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results " \ + "don't match the tests ran" # Similar logic here as crashes, the first test that has no result is the one that timed out timed_out_result = None for test_spec_name, result in results.items(): if isinstance(result, Result.Unknown): if not timed_out_result: - results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log) + results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, + self.timeout_editor_shared_test, + result.editor_log) timed_out_result = result else: - # If there are remaning "Unknown" results, these couldn't execute because of the timeout, update with info about the offender - results[test_spec_name].extra_info = f"This test has unknown result, test '{timed_out_result.test_spec.__name__}' timed out before this test could be executed" - + # If there are remaning "Unknown" results, these couldn't execute because of the timeout, + # update with 
info about the offender + results[test_spec_name].extra_info = f"This test has unknown result, test " \ + f"'{timed_out_result.test_spec.__name__}' timed out " \ + f"before this test could be executed" # if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor if not timed_out_result: - results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log) + results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, + results[test_spec_name].output, + self.timeout_editor_shared_test, result.editor_log) return results - # Runs a single test (one editor, one test) with the given specs - def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec : EditorSingleTest): + def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec): + # type (Request, AbstractWorkspace, Editor, TestData, EditorSingleTest) -> None + """ + Runs a single test (one editor, one test) with the given specs + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec: The test class that should be a subclass of EditorSingleTest + :return: None + """ self._setup_editor_test(editor, workspace, editor_test_data) extra_cmdline_args = [] if hasattr(test_spec, "extra_cmdline_args"): @@ -749,18 +928,39 @@ class EditorTestSuite(): test_name, test_result = next(iter(results.items())) self._report_result(test_name, test_result) - # Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests) - def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list, 
extra_cmdline_args=[]): + # type (Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None + """ + Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return self._setup_editor_test(editor, workspace, editor_test_data) - results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args) + results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, + extra_cmdline_args) assert results is not None editor_test_data.results.update(results) - # Runs multiple editors with one test on each editor (multiple editor, one test each) - def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list, extra_cmdline_args=[]): + # type(Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None + """ + Runs multiple editors with one test on each editor (multiple editor, one test each) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return @@ -778,7 +978,8 @@ class EditorTestSuite(): for i in range(total_threads): def make_func(test_spec, index, my_editor): def 
run(request, workspace, extra_cmdline_args): - results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", test_spec, extra_cmdline_args) + results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", + test_spec, extra_cmdline_args) assert results is not None results_per_thread[index] = results return run @@ -796,8 +997,19 @@ class EditorTestSuite(): for result in results_per_thread: editor_test_data.results.update(result) - # Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each) - def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]): + def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list, + extra_cmdline_args=[]): + # type(Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str] -> None + """ + Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each) + :request: The Pytest Request + :workspace: The LyTestTools Workspace object + :editor: The LyTestTools Editor object + :editor_test_data: The TestData from calling editor_test_data() + :test_spec_list: A list of EditorSharedTest tests to run + :extra_cmdline_args: Any extra command line args in a list + :return: None + """ if not test_spec_list: return @@ -813,7 +1025,9 @@ class EditorTestSuite(): def run(request, workspace, extra_cmdline_args): results = None if len(test_spec_list_for_editor) > 0: - results = self._exec_editor_multitest(request, workspace, my_editor, index+1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args) + results = self._exec_editor_multitest(request, workspace, my_editor, index+1, + f"editor_test.log", test_spec_list_for_editor, + extra_cmdline_args) assert results is not None else: results = {} @@ -833,8 +1047,13 @@ class EditorTestSuite(): for result in 
results_per_thread: editor_test_data.results.update(result) - # Retrieves the number of parallel preference cmdline overrides def _get_number_parallel_editors(self, request): + # type(Request) -> int + """ + Retrieves the number of parallel preference cmdline overrides + :request: The Pytest Request + :return: The number of parallel editors to use + """ parallel_editors_value = request.config.getoption("--editors-parallel", None) if parallel_editors_value: return int(parallel_editors_value) diff --git a/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py b/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py index feff78d866..838f929cfa 100644 --- a/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py +++ b/Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py @@ -3,6 +3,8 @@ Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT + +Utility functions for the editor_test module """ import os @@ -15,6 +17,12 @@ import ly_test_tools.environment.waiter as waiter logger = logging.getLogger(__name__) def kill_all_ly_processes(include_asset_processor=True): + # type (bool) -> None + """ + Kills all common O3DE processes such as the Editor, Game Launchers, and Asset Processor. + :param include_asset_processor: Boolean flag whether or not to kill the AP + :return: None + """ LY_PROCESSES = [ 'Editor', 'Profiler', 'RemoteConsole', ] @@ -47,7 +55,8 @@ def get_module_filename(testcase_module): """ return os.path.splitext(os.path.basename(testcase_module.__file__))[0] -def retrieve_log_path(run_id : int, workspace): +def retrieve_log_path(run_id, workspace): + # type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> str """ return the log/ project path for this test run. 
:param run_id: editor id that will be used for differentiating paths @@ -56,7 +65,8 @@ def retrieve_log_path(run_id : int, workspace): """ return os.path.join(workspace.paths.project(), "user", f"log_test_{run_id}") -def retrieve_crash_output(run_id : int, workspace, timeout : float): +def retrieve_crash_output(run_id, workspace, timeout): + # type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager, float) -> str """ returns the crash output string for the given test run. :param run_id: editor id that will be used for differentiating paths @@ -79,7 +89,8 @@ def retrieve_crash_output(run_id : int, workspace, timeout : float): crash_info += f"\n{str(ex)}" return crash_info -def cycle_crash_report(run_id : int, workspace): +def cycle_crash_report(run_id, workspace): + # type (int, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> None """ Attempts to rename error.log and error.dmp(crash files) into new names with the timestamp on it. :param run_id: editor id that will be used for differentiating paths @@ -99,10 +110,12 @@ def cycle_crash_report(run_id : int, workspace): except Exception as ex: logger.warning(f"Couldn't cycle file {filepath}. Error: {str(ex)}") -def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout=10): +def retrieve_editor_log_content(run_id, log_name, workspace, timeout=10): + # type (int , str, ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager, int) -> str """ Retrieves the contents of the given editor log file. 
:param run_id: editor id that will be used for differentiating paths + :log_name: The name of the editor log to retrieve :param workspace: Workspace fixture :timeout: Maximum time to wait for the log file to appear :return str: The contents of the log @@ -124,7 +137,8 @@ def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout editor_info = f"-- Error reading editor.log: {str(ex)} --" return editor_info -def retrieve_last_run_test_index_from_output(test_spec_list, output : str): +def retrieve_last_run_test_index_from_output(test_spec_list, output): + # type (list, str) -> int """ Finds out what was the last test that was run by inspecting the input. This is used for determining what was the batched test has crashed the editor diff --git a/Tools/LyTestTools/tests/unit/test_editor_test_utils.py b/Tools/LyTestTools/tests/unit/test_editor_test_utils.py new file mode 100644 index 0000000000..134d1e1ad8 --- /dev/null +++ b/Tools/LyTestTools/tests/unit/test_editor_test_utils.py @@ -0,0 +1,158 @@ +""" +Copyright (c) Contributors to the Open 3D Engine Project. +For complete copyright and license terms please see the LICENSE at the root of this distribution. 
+ +SPDX-License-Identifier: Apache-2.0 OR MIT +""" +import pytest +import os +import unittest.mock as mock +import unittest + +import ly_test_tools.o3de.editor_test_utils as editor_test_utils + +pytestmark = pytest.mark.SUITE_smoke + +class TestEditorTestUtils(unittest.TestCase): + + @mock.patch('ly_test_tools.environment.process_utils.kill_processes_named') + def test_KillAllLyProcesses_IncludeAP_CallsCorrectly(self, under_test): + process_list = ['Editor', 'Profiler', 'RemoteConsole', 'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder'] + + editor_test_utils.kill_all_ly_processes(include_asset_processor=True) + under_test.assert_called_once_with(process_list, ignore_extensions=True) + + @mock.patch('ly_test_tools.environment.process_utils.kill_processes_named') + def test_KillAllLyProcesses_NotIncludeAP_CallsCorrectly(self, under_test): + process_list = ['Editor', 'Profiler', 'RemoteConsole'] + + editor_test_utils.kill_all_ly_processes(include_asset_processor=False) + under_test.assert_called_once_with(process_list, ignore_extensions=True) + + def test_GetTestcaseModuleFilepath_NoExtension_ReturnsPYExtension(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test') + mock_module.__file__ = file_path + + assert file_path + '.py' == editor_test_utils.get_testcase_module_filepath(mock_module) + + def test_GetTestcaseModuleFilepath_PYExtension_ReturnsPYExtension(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test.py') + mock_module.__file__ = file_path + + assert file_path == editor_test_utils.get_testcase_module_filepath(mock_module) + + def test_GetModuleFilename_PythonModule_ReturnsFilename(self): + mock_module = mock.MagicMock() + file_path = os.path.join('path', 'under_test.py') + mock_module.__file__ = file_path + + assert 'under_test' == editor_test_utils.get_module_filename(mock_module) + + def test_RetrieveLogPath_NormalProject_ReturnsLogPath(self): + mock_workspace = mock.MagicMock() + 
mock_workspace.paths.project.return_value = 'mock_project_path' + expected = os.path.join('mock_project_path', 'user', 'log_test_0') + + assert expected == editor_test_utils.retrieve_log_path(0, mock_workspace) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveCrashOutput_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_log = 'mock crash info' + + with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file: + assert mock_log == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveCrashOutput_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + expected = "-- No crash log available --\n[Errno 2] No such file or directory: 'mock_log_path\\\\error.log'" + + assert expected == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0) + + @mock.patch('os.rename') + @mock.patch('os.path.getmtime') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('os.path.exists') + def test_CycleCrashReport_LogExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_getmtime, + under_test): + mock_exists.side_effect = [True, False] + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_getmtime.return_value = 1 + + editor_test_utils.cycle_crash_report(0, mock_workspace) + under_test.assert_called_once_with(os.path.join('mock_log_path', 'error.log'), + os.path.join('mock_log_path', 'error_1969_12_31_16_00_01.log')) + + @mock.patch('os.rename') + 
@mock.patch('os.path.getmtime') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('os.path.exists') + def test_CycleCrashReport_DmpExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_getmtime, + under_test): + mock_exists.side_effect = [False, True] + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_workspace = mock.MagicMock() + mock_getmtime.return_value = 1 + + editor_test_utils.cycle_crash_report(0, mock_workspace) + under_test.assert_called_once_with(os.path.join('mock_log_path', 'error.dmp'), + os.path.join('mock_log_path', 'error_1969_12_31_16_00_01.dmp')) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveEditorLogContent_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_logname = 'mock_log.log' + mock_workspace = mock.MagicMock() + mock_log = 'mock log info' + + with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file: + assert f'[editor.log] {mock_log}' == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock()) + def test_RetrieveEditorLogContent_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path): + mock_retrieve_log_path.return_value = 'mock_log_path' + mock_logname = 'mock_log.log' + mock_workspace = mock.MagicMock() + expected = f"-- Error reading editor.log: [Errno 2] No such file or directory: 'mock_log_path\\\\mock_log.log' --" + + assert expected == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace) + + def test_RetrieveLastRunTestIndexFromOutput_SecondTestFailed_Returns0(self): + mock_test = mock.MagicMock() + mock_test.__name__ = 'mock_test_name' + 
mock_test_list = [mock_test] + mock_editor_output = 'mock_test_name\n' \ + 'mock_test_name_1' + + assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) + + def test_RetrieveLastRunTestIndexFromOutput_TenthTestFailed_Returns9(self): + mock_test_list = [] + mock_editor_output = '' + for x in range(10): + mock_test = mock.MagicMock() + mock_test.__name__ = f'mock_test_name_{x}' + mock_test_list.append(mock_test) + mock_editor_output += f'{mock_test.__name__}\n' + mock_editor_output += 'mock_test_name_x' + assert 9 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) + + def test_RetrieveLastRunTestIndexFromOutput_FirstItemFailed_Returns0(self): + mock_test_list = [] + mock_editor_output = '' + for x in range(10): + mock_test = mock.MagicMock() + mock_test.__name__ = f'mock_test_name_{x}' + mock_test_list.append(mock_test) + + assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output) diff --git a/Tools/LyTestTools/tests/unit/test_fixtures.py b/Tools/LyTestTools/tests/unit/test_fixtures.py index 32dc204bd2..911e6fef91 100755 --- a/Tools/LyTestTools/tests/unit/test_fixtures.py +++ b/Tools/LyTestTools/tests/unit/test_fixtures.py @@ -369,3 +369,14 @@ class TestFixtures(object): mock_request.addfinalizer.call_args[0][0]() mock_stop.assert_called_once() + + @mock.patch('inspect.isclass', mock.MagicMock(return_value=True)) + def test_PytestPycollectMakeitem_ValidArgs_CallsCorrectly(self): + mock_collector = mock.MagicMock() + mock_name = mock.MagicMock() + mock_obj = mock.MagicMock() + mock_base = mock.MagicMock() + mock_obj.__bases__ = [mock_base] + + test_tools_fixtures.pytest_pycollect_makeitem(mock_collector, mock_name, mock_obj) + mock_base.pytest_custom_makeitem.assert_called_once_with(mock_collector, mock_name, mock_obj) diff --git a/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py 
b/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py new file mode 100644 index 0000000000..ef49934887 --- /dev/null +++ b/Tools/LyTestTools/tests/unit/test_o3de_editor_test.py @@ -0,0 +1,1017 @@ +""" +Copyright (c) Contributors to the Open 3D Engine Project. +For complete copyright and license terms please see the LICENSE at the root of this distribution. + +SPDX-License-Identifier: Apache-2.0 OR MIT +""" +import unittest + +import pytest +import unittest.mock as mock + +import ly_test_tools +import ly_test_tools.o3de.editor_test as editor_test + +pytestmark = pytest.mark.SUITE_smoke + +class TestEditorTestBase(unittest.TestCase): + + def test_EditorSharedTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorSharedTest() + assert mock_editorsharedtest.is_batchable == True + assert mock_editorsharedtest.is_parallelizable == True + + def test_EditorParallelTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorParallelTest() + assert mock_editorsharedtest.is_batchable == False + assert mock_editorsharedtest.is_parallelizable == True + + def test_EditorBatchedTest_Init_CorrectAttributes(self): + mock_editorsharedtest = editor_test.EditorBatchedTest() + assert mock_editorsharedtest.is_batchable == True + assert mock_editorsharedtest.is_parallelizable == False + +class TestBase(unittest.TestCase): + + def setUp(self): + self.mock_result = editor_test.Result.Base() + + def test_GetOutputStr_HasOutput_ReturnsCorrectly(self): + self.mock_result.output = 'expected output' + assert self.mock_result.get_output_str() == 'expected output' + + def test_GetOutputStr_NoOutput_ReturnsCorrectly(self): + self.mock_result.output = None + assert self.mock_result.get_output_str() == '-- No output --' + + def test_GetEditorLogStr_HasOutput_ReturnsCorrectly(self): + self.mock_result.editor_log = 'expected log output' + assert self.mock_result.get_editor_log_str() == 'expected log output' + + def 
test_GetEditorLogStr_NoOutput_ReturnsCorrectly(self): + self.mock_result.editor_log = None + assert self.mock_result.get_editor_log_str() == '-- No editor log found --' + +class TestPass(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + + mock_pass = editor_test.Result.Pass.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = mock.MagicMock() + expected = f"Test Passed\n"\ + f"------------\n"\ + f"| Output |\n"\ + f"------------\n"\ + f"{mock_output}\n" + + mock_pass = editor_test.Result.Pass.create(mock_test_spec, mock_output, mock_editor_log) + assert str(mock_pass) == expected + +class TestFail(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + + mock_pass = editor_test.Result.Fail.create(mock_test_spec, mock_output, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + expected = f"Test FAILED\n"\ + f"------------\n"\ + f"| Output |\n"\ + f"------------\n"\ + f"{mock_output}\n"\ + f"--------------\n"\ + f"| Editor log |\n"\ + f"--------------\n"\ + f"{mock_editor_log}\n" + + mock_pass = editor_test.Result.Fail.create(mock_test_spec, mock_output, mock_editor_log) + assert str(mock_pass) == expected + +class TestCrash(unittest.TestCase): + + def 
test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_ret_code = mock.MagicMock() + mock_stacktrace = mock.MagicMock() + + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_ret_code, mock_stacktrace, + mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.ret_code == mock_ret_code + assert mock_pass.stacktrace == mock_stacktrace + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_return_code = 0 + mock_stacktrace = 'mock stacktrace' + expected = f"Test CRASHED, return code {hex(mock_return_code)}\n"\ + f"---------------\n"\ + f"| Stacktrace |\n"\ + f"---------------\n"\ + f"{mock_stacktrace}"\ + f"------------\n" \ + f"| Output |\n" \ + f"------------\n" \ + f"{mock_output}\n" \ + f"--------------\n" \ + f"| Editor log |\n" \ + f"--------------\n" \ + f"{mock_editor_log}\n" + + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_return_code, mock_stacktrace, + mock_editor_log) + assert str(mock_pass) == expected + + def test_Str_MissingStackTrace_ReturnsCorrectly(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_return_code = 0 + mock_stacktrace = None + expected = f"Test CRASHED, return code {hex(mock_return_code)}\n"\ + f"---------------\n"\ + f"| Stacktrace |\n"\ + f"---------------\n"\ + f"-- No stacktrace data found --\n"\ + f"------------\n" \ + f"| Output |\n" \ + f"------------\n" \ + f"{mock_output}\n" \ + f"--------------\n" \ + f"| Editor log |\n" \ + f"--------------\n" \ + f"{mock_editor_log}\n" + + mock_pass = editor_test.Result.Crash.create(mock_test_spec, mock_output, mock_return_code, mock_stacktrace, 
+ mock_editor_log) + assert str(mock_pass) == expected + +class Timeout(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_timeout = mock.MagicMock() + + mock_pass = editor_test.Result.Timeout.create(mock_test_spec, mock_output, mock_timeout, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.time_secs == mock_timeout + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_timeout = 0 + expected = f"Test TIMED OUT after {mock_timeout} seconds\n"\ + f"------------\n" \ + f"| Output |\n" \ + f"------------\n" \ + f"{mock_output}\n" \ + f"--------------\n" \ + f"| Editor log |\n" \ + f"--------------\n" \ + f"{mock_editor_log}\n" + + mock_pass = editor_test.Result.Timeout.create(mock_test_spec, mock_output, mock_timeout, mock_editor_log) + assert str(mock_pass) == expected + +class Unknown(unittest.TestCase): + + def test_Create_ValidArgs_CorrectAttributes(self): + mock_test_spec = mock.MagicMock() + mock_output = mock.MagicMock() + mock_editor_log = mock.MagicMock() + mock_extra_info = mock.MagicMock() + + mock_pass = editor_test.Result.Unknown.create(mock_test_spec, mock_output, mock_extra_info, mock_editor_log) + assert mock_pass.test_spec == mock_test_spec + assert mock_pass.output == mock_output + assert mock_pass.editor_log == mock_editor_log + assert mock_pass.extra_info == mock_extra_info + + def test_Str_ValidString_ReturnsOutput(self): + mock_test_spec = mock.MagicMock() + mock_output = 'mock_output' + mock_editor_log = 'mock_editor_log' + mock_extra_info = 'mock extra info' + expected = f"Unknown test result, possible cause: {mock_extra_info}\n"\ + f"------------\n" \ + f"| Output |\n" \ + 
f"------------\n" \ + f"{mock_output}\n" \ + f"--------------\n" \ + f"| Editor log |\n" \ + f"--------------\n" \ + f"{mock_editor_log}\n" + + mock_pass = editor_test.Result.Unknown.create(mock_test_spec, mock_output, mock_extra_info, mock_editor_log) + assert str(mock_pass) == expected + +class TestEditorTestSuite(unittest.TestCase): + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_EditorTestData_ValidAP_TeardownProperly(self, mock_kill_processes): + mock_editor_test_suite = editor_test.EditorTestSuite() + mock_test_data_generator = mock_editor_test_suite._editor_test_data(mock.MagicMock()) + mock_asset_processor = mock.MagicMock() + for test_data in mock_test_data_generator: + test_data.asset_processor = mock_asset_processor + mock_asset_processor.stop.assert_called_once_with(1) + mock_asset_processor.teardown.assert_called() + assert test_data.asset_processor is None + mock_kill_processes.assert_called_once_with(include_asset_processor=True) + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_EditorTestData_NoAP_TeardownProperly(self, mock_kill_processes): + mock_editor_test_suite = editor_test.EditorTestSuite() + mock_test_data_generator = mock_editor_test_suite._editor_test_data(mock.MagicMock()) + for test_data in mock_test_data_generator: + test_data.asset_processor = None + mock_kill_processes.assert_called_once_with(include_asset_processor=False) + + def test_RunnerInit_ValidArgs_InitProperly(self): + mock_name = mock.MagicMock() + mock_func = mock.MagicMock() + mock_tests = mock.MagicMock() + + mock_runner = editor_test.EditorTestSuite.Runner(mock_name, mock_func, mock_tests) + mock_runner.name = mock_name + mock_runner.func = mock_func + mock_runner.tests = mock_tests + mock_runner.run_pytestfunc = None + mock_runner.result_pytestfuncs = [] + + def test_PytestCustomMakeitem_Called_ReturnsClass(self): + mock_test_class = 
editor_test.EditorTestSuite.pytest_custom_makeitem(mock.MagicMock(), mock.MagicMock(), + mock.MagicMock()) + assert isinstance(mock_test_class, editor_test.EditorTestSuite.EditorTestClass) + + def test_PytestCustomModifyItems(self): + pass + + def test_GetSingleTests_NoSingleTests_EmptyList(self): + class MockTestSuite(editor_test.EditorTestSuite): + pass + mock_test_suite = MockTestSuite() + tests = mock_test_suite.get_single_tests() + assert len(tests) == 0 + + def test_GetSingleTests_OneSingleTests_ReturnsOne(self): + class MockTestSuite(editor_test.EditorTestSuite): + class MockSingleTest(editor_test.EditorSingleTest): + pass + mock_test_suite = MockTestSuite() + tests = mock_test_suite.get_single_tests() + assert len(tests) == 1 + + + def test_GetSingleTests_AllTests_ReturnsOnlySingles(self): + class MockTestSuite(editor_test.EditorTestSuite): + class MockSingleTest(editor_test.EditorSingleTest): + pass + class MockAnotherSingleTest(editor_test.EditorSingleTest): + pass + class MockNotSingleTest(editor_test.EditorSharedTest): + pass + mock_test_suite = MockTestSuite() + tests = mock_test_suite.get_single_tests() + assert len(tests) == 2 + + def test_GetSharedTests_NoSharedTests_EmptyList(self): + class MockTestSuite(editor_test.EditorTestSuite): + pass + mock_test_suite = MockTestSuite() + tests = mock_test_suite.get_shared_tests() + assert len(tests) == 0 + + def test_GetSharedTests_OneSharedTests_ReturnsOne(self): + class MockTestSuite(editor_test.EditorTestSuite): + class MockSharedTest(editor_test.EditorSharedTest): + pass + mock_test_suite = MockTestSuite() + tests = mock_test_suite.get_shared_tests() + assert len(tests) == 1 + + def test_GetSharedTests_AllTests_ReturnsOnlyShared(self): + class MockTestSuite(editor_test.EditorTestSuite): + class MockSharedTest(editor_test.EditorSharedTest): + pass + class MockAnotherSharedTest(editor_test.EditorSharedTest): + pass + class MockNotSharedTest(editor_test.EditorSingleTest): + pass + mock_test_suite = 
MockTestSuite() + tests = mock_test_suite.get_shared_tests() + assert len(tests) == 2 + + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite.filter_session_shared_tests') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite.get_shared_tests') + def test_GetSessionSharedTests_Valid_CallsCorrectly(self, mock_get_shared_tests, mock_filter_session): + editor_test.EditorTestSuite.get_session_shared_tests(mock.MagicMock()) + assert mock_get_shared_tests.called + assert mock_filter_session.called + + @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock()) + def test_FilterSessionSharedTests_OneSharedTest_ReturnsOne(self): + def mock_test(): + pass + mock_session_items = mock.MagicMock() + mock_shared_tests = mock.MagicMock() + mock_test.originalname = 'mock_test' + mock_test.__name__ = mock_test.originalname + mock_session_items = [mock_test] + mock_shared_tests = [mock_test] + + selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests) + assert selected_tests == mock_session_items + + @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock()) + def test_FilterSessionSharedTests_ManyTests_ReturnsCorrectTests(self): + def mock_test(): + pass + def mock_test_2(): + pass + def mock_test_3(): + pass + mock_session_items = mock.MagicMock() + mock_shared_tests = mock.MagicMock() + mock_test.originalname = 'mock_test' + mock_test.__name__ = mock_test.originalname + mock_test_2.originalname = 'mock_test_2' + mock_test_2.__name__ = mock_test_2.originalname + mock_test_3.originalname = 'mock_test_3' + mock_test_3.__name__ = mock_test_3.originalname + mock_session_items = [mock_test, mock_test_2] + mock_shared_tests = [mock_test, mock_test_2, mock_test_3] + + selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests) + assert selected_tests == mock_session_items + + 
@mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock(side_effect=Exception)) + def test_FilterSessionSharedTests_SkippingPytestRaises_SkipsAddingTest(self): + def mock_test(): + pass + mock_session_items = mock.MagicMock() + mock_shared_tests = mock.MagicMock() + mock_test.originalname = 'mock_test' + mock_test.__name__ = mock_test.originalname + mock_session_items = [mock_test] + mock_shared_tests = [mock_test] + + selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests) + assert len(selected_tests) == 0 + + def test_FilterSharedTests_TrueParams_ReturnsTrueTests(self): + mock_test = mock.MagicMock() + mock_test.is_batchable = True + mock_test.is_parallelizable = True + mock_test_2 = mock.MagicMock() + mock_test_2.is_batchable = False + mock_test_2.is_parallelizable = False + mock_shared_tests = [mock_test, mock_test_2] + + filtered_tests = editor_test.EditorTestSuite.filter_shared_tests(mock_shared_tests, True, True) + assert filtered_tests == [mock_test] + + def test_FilterSharedTests_FalseParams_ReturnsFalseTests(self): + mock_test = mock.MagicMock() + mock_test.is_batchable = True + mock_test.is_parallelizable = True + mock_test_2 = mock.MagicMock() + mock_test_2.is_batchable = False + mock_test_2.is_parallelizable = False + mock_shared_tests = [mock_test, mock_test_2] + + filtered_tests = editor_test.EditorTestSuite.filter_shared_tests(mock_shared_tests, False, False) + assert filtered_tests == [mock_test_2] + +class TestUtils(unittest.TestCase): + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_PrepareAssetProcessor_APExists_StartsAP(self, mock_kill_processes): + mock_test_suite = editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor_data = mock.MagicMock() + mock_ap = mock.MagicMock() + mock_editor_data.asset_processor = mock_ap + + mock_test_suite._prepare_asset_processor(mock_workspace, 
mock_editor_data) + assert mock_ap.start.called + assert not mock_kill_processes.called + + @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start') + @mock.patch('ly_test_tools.environment.process_utils.process_exists') + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_PrepareAssetProcessor_NoAP_KillAndCreateAP(self, mock_kill_processes, mock_proc_exists, mock_start): + mock_test_suite = editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor_data = mock.MagicMock() + mock_editor_data.asset_processor = None + mock_proc_exists.return_value = False + + mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data) + mock_kill_processes.assert_called_with(include_asset_processor=True) + assert isinstance(mock_editor_data.asset_processor, ly_test_tools.o3de.asset_processor.AssetProcessor) + assert mock_start.called + + @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start') + @mock.patch('ly_test_tools.environment.process_utils.process_exists') + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_PrepareAssetProcessor_NoAPButProcExists_NoKill(self, mock_kill_processes, mock_proc_exists, mock_start): + mock_test_suite = editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor_data = mock.MagicMock() + mock_editor_data.asset_processor = None + mock_proc_exists.return_value = True + + mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data) + mock_kill_processes.assert_called_with(include_asset_processor=False) + assert not mock_start.called + assert mock_editor_data.asset_processor is None + + + @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start') + @mock.patch('ly_test_tools.environment.process_utils.process_exists') + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + def test_PrepareAssetProcessor_NoAPButProcExists_NoKill(self, mock_kill_processes, 
mock_proc_exists, mock_start): + mock_test_suite = editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor_data = mock.MagicMock() + mock_editor_data.asset_processor = None + mock_proc_exists.return_value = True + + mock_test_suite._prepare_asset_processor(mock_workspace, mock_editor_data) + mock_kill_processes.assert_called_with(include_asset_processor=False) + assert not mock_start.called + assert mock_editor_data.asset_processor is None + + @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._prepare_asset_processor') + def test_SetupEditorTest_ValidArgs_CallsCorrectly(self, mock_prepare_ap, mock_kill_processes): + mock_test_suite = editor_test.EditorTestSuite() + mock_editor = mock.MagicMock() + mock_test_suite._setup_editor_test(mock_editor, mock.MagicMock(), mock.MagicMock()) + + assert mock_editor.configure_settings.called + assert mock_prepare_ap.called + mock_kill_processes.assert_called_once_with(include_asset_processor=False) + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename') + def test_GetResultsUsingOutput_ValidJsonSuccess_CreatesPassResult(self, mock_get_module, mock_create): + mock_get_module.return_value = 'mock_module_name' + mock_test_suite = editor_test.EditorTestSuite() + mock_test = mock.MagicMock() + mock_test.__name__ = 'mock_test_name' + mock_test_list = [mock_test] + mock_output = 'JSON_START(' \ + '{"name": "mock_module_name", "output": "mock_std_out", "success": "mock_success_data"}' \ + ')JSON_END' + mock_editor_log = 'JSON_START(' \ + ')JSON_END' + + results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, mock_editor_log) + assert mock_create.called + assert len(results) == 1 + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename') 
+ def test_GetResultsUsingOutput_ValidJsonFail_CreatesFailResult(self, mock_get_module, mock_create): + mock_get_module.return_value = 'mock_module_name' + mock_test_suite = editor_test.EditorTestSuite() + mock_test = mock.MagicMock() + mock_test.__name__ = 'mock_test_name' + mock_test_list = [mock_test] + mock_output = 'JSON_START(' \ + '{"name": "mock_module_name", "output": "mock_std_out", "failed": "mock_fail_data"}' \ + ')JSON_END' + mock_editor_log = 'JSON_START(' \ + ')JSON_END' + + results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, mock_editor_log) + assert mock_create.called + assert len(results) == 1 + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Unknown.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename') + def test_GetResultsUsingOutput_ModuleNotInLog_CreatesUnknownResult(self, mock_get_module, mock_create): + mock_get_module.return_value = 'different_module_name' + mock_test_suite = editor_test.EditorTestSuite() + mock_test = mock.MagicMock() + mock_test.__name__ = 'mock_test_name' + mock_test_list = [mock_test] + mock_output = 'JSON_START(' \ + '{"name": "mock_module_name", "output": "mock_std_out", "failed": "mock_fail_data"}' \ + ')JSON_END' + mock_editor_log = 'JSON_START(' \ + ')JSON_END' + + results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, mock_editor_log) + assert mock_create.called + assert len(results) == 1 + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create') + @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create') + @mock.patch('ly_test_tools.o3de.editor_test.Result.Unknown.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_module_filename') + def test_GetResultsUsingOutput_MultipleTests_CreatesCorrectResults(self, mock_get_module, mock_create_unknown, + mock_create_fail, mock_create_pass): + mock_get_module.side_effect = ['mock_module_name_pass', 'mock_module_name_fail', 'different_module_name'] + 
mock_test_suite = editor_test.EditorTestSuite() + mock_test_pass = mock.MagicMock() + mock_test_pass.__name__ = 'mock_test_name_pass' + mock_test_fail = mock.MagicMock() + mock_test_fail.__name__ = 'mock_test_name_fail' + mock_test_unknown = mock.MagicMock() + mock_test_unknown.__name__ = 'mock_test_name_unknown' + mock_test_list = [mock_test_pass, mock_test_fail, mock_test_unknown] + mock_output = 'JSON_START(' \ + '{"name": "mock_module_name_pass", "output": "mock_std_out", "success": "mock_success_data"}' \ + ')JSON_END' \ + 'JSON_START(' \ + '{"name": "mock_module_name_fail", "output": "mock_std_out", "failed": "mock_fail_data"}' \ + ')JSON_END' \ + 'JSON_START(' \ + '{"name": "mock_module_name_unknown", "output": "mock_std_out", "failed": "mock_fail_data"}' \ + ')JSON_END' + mock_editor_log = 'JSON_START(' \ + '{"name": "mock_module_name_pass"}' \ + ')JSON_END' \ + 'JSON_START(' \ + '{"name": "mock_module_name_fail"}' \ + ')JSON_END' \ + + results = mock_test_suite._get_results_using_output(mock_test_list, mock_output, mock_editor_log) + mock_create_pass.assert_called_with( + mock_test_pass, 'mock_std_out', 'JSON_START({"name": "mock_module_name_pass"})JSON_END') + mock_create_fail.assert_called_with( + mock_test_fail, 'mock_std_out', 'JSON_START({"name": "mock_module_name_fail"})JSON_END') + mock_create_unknown.assert_called_with( + mock_test_unknown, mock_output, "Couldn't find any test run information on stdout", mock_editor_log) + assert len(results) == 3 + + @mock.patch('builtins.print') + def test_ReportResult_TestPassed_ReportsCorrectly(self, mock_print): + mock_test_name = 'mock name' + mock_pass = ly_test_tools.o3de.editor_test.Result.Pass() + ly_test_tools.o3de.editor_test.EditorTestSuite._report_result(mock_test_name, mock_pass) + mock_print.assert_called_with(f'Test {mock_test_name}:\nTest Passed\n------------\n| Output |\n------------\n' + f'-- No output --\n') + + @mock.patch('pytest.fail') + def test_ReportResult_TestFailed_FailsCorrectly(self, 
mock_pytest_fail): + mock_fail = ly_test_tools.o3de.editor_test.Result.Fail() + + ly_test_tools.o3de.editor_test.EditorTestSuite._report_result('mock_test_name', mock_fail) + mock_pytest_fail.assert_called_with('Test mock_test_name:\nTest FAILED\n------------\n| Output |' + '\n------------\n-- No output --\n--------------\n| Editor log |' + '\n--------------\n-- No editor log found --\n') + +class TestRunningTests(unittest.TestCase): + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorTest_TestSucceeds_ReturnsPass(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_output_results, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_editor.get_returncode.return_value = 0 + mock_get_output_results.return_value = {} + mock_pass = mock.MagicMock() + mock_create.return_value = mock_pass + + results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec, []) + assert mock_cycle_crash.called + assert mock_editor.start.called + assert mock_create.called + assert results == {mock_test_spec.__name__: mock_pass} + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Fail.create') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + 
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorTest_TestFails_ReturnsFail(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_output_results, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_editor.get_returncode.return_value = 15 + mock_get_output_results.return_value = {} + mock_fail = mock.MagicMock() + mock_create.return_value = mock_fail + + results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec, []) + assert mock_cycle_crash.called + assert mock_editor.start.called + assert mock_create.called + assert results == {mock_test_spec.__name__: mock_fail} + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorTest_TestCrashes_ReturnsCrash(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_output_results, mock_retrieve_crash, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + 
mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_editor.get_returncode.return_value = 1 + mock_get_output_results.return_value = {} + mock_crash = mock.MagicMock() + mock_create.return_value = mock_crash + + results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec, []) + assert mock_cycle_crash.call_count == 2 + assert mock_editor.start.called + assert mock_retrieve_crash.called + assert mock_create.called + assert results == {mock_test_spec.__name__: mock_crash} + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Timeout.create') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorTest_TestTimeout_ReturnsTimeout(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_output_results, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_editor.wait.side_effect = ly_test_tools.launchers.exceptions.WaitTimeoutError() + mock_get_output_results.return_value = {} + mock_timeout = mock.MagicMock() + mock_create.return_value = mock_timeout + + results = mock_test_suite._exec_editor_test(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec, []) + assert mock_cycle_crash.called + assert mock_editor.start.called + assert mock_editor.kill.called + assert 
mock_create.called + assert results == {mock_test_spec.__name__: mock_timeout} + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Pass.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorMultitest_AllTestsPass_ReturnsPasses(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_editor.get_returncode.return_value = 0 + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_test_spec_2 = mock.MagicMock() + mock_test_spec_2.__name__ = 'mock_test_name_2' + mock_test_spec_list = [mock_test_spec, mock_test_spec_2] + mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2'] + mock_pass = mock.MagicMock() + mock_pass_2 = mock.MagicMock() + mock_create.side_effect = [mock_pass, mock_pass_2] + + results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec_list, []) + assert results == {mock_test_spec.__name__: mock_pass, mock_test_spec_2.__name__: mock_pass_2} + assert mock_cycle_crash.called + assert mock_create.call_count == 2 + + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def 
test_ExecEditorMultitest_OneFailure_CallsCorrectFunc(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, mock_get_results): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_editor.get_returncode.return_value = 15 + mock_test_spec = mock.MagicMock() + mock_test_spec_2 = mock.MagicMock() + mock_test_spec_list = [mock_test_spec, mock_test_spec_2] + mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2'] + mock_get_results.return_value = {'mock_test_name': mock.MagicMock(), 'mock_test_name_2': mock.MagicMock()} + + results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec_list, []) + assert mock_cycle_crash.called + assert mock_get_results.called + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorMultitest_OneCrash_ReportsOnUnknownResult(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_results, mock_retrieve_crash, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_editor.get_returncode.return_value = 1 + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_test_spec_2 = mock.MagicMock() + mock_test_spec_2.__name__ = 'mock_test_name_2' + 
mock_test_spec_list = [mock_test_spec, mock_test_spec_2] + mock_unknown_result = ly_test_tools.o3de.editor_test.Result.Unknown() + mock_unknown_result.test_spec = mock.MagicMock() + mock_unknown_result.editor_log = mock.MagicMock() + mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2'] + mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result, + mock_test_spec_2.__name__: mock.MagicMock()} + mock_crash = mock.MagicMock() + mock_create.return_value = mock_crash + + results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec_list, []) + assert mock_cycle_crash.call_count == 2 + assert mock_get_results.called + assert results[mock_test_spec.__name__] == mock_crash + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Crash.create') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_crash_output') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorMultitest_ManyUnknown_ReportsUnknownResults(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_results, mock_retrieve_crash, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_editor.get_returncode.return_value = 1 + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_test_spec_2 = mock.MagicMock() + mock_test_spec_2.__name__ = 'mock_test_name_2' + mock_test_spec_list = [mock_test_spec, mock_test_spec_2] + mock_unknown_result = 
ly_test_tools.o3de.editor_test.Result.Unknown() + mock_unknown_result.__name__ = 'mock_test_name' + mock_unknown_result.test_spec = mock.MagicMock() + mock_unknown_result.test_spec.__name__ = 'mock_test_spec_name' + mock_unknown_result.editor_log = mock.MagicMock() + mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2'] + mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result, + mock_test_spec_2.__name__: mock_unknown_result} + mock_crash = mock.MagicMock() + mock_create.return_value = mock_crash + + results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec_list, []) + assert mock_cycle_crash.call_count == 2 + assert mock_get_results.called + assert results[mock_test_spec.__name__] == mock_crash + assert results[mock_test_spec_2.__name__].extra_info + + @mock.patch('ly_test_tools.o3de.editor_test.Result.Timeout.create') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content') + @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path') + @mock.patch('ly_test_tools.o3de.editor_test_utils.get_testcase_module_filepath') + @mock.patch('ly_test_tools.o3de.editor_test_utils.cycle_crash_report') + def test_ExecEditorMultitest_EditorTimeout_ReportsCorrectly(self, mock_cycle_crash, mock_get_testcase_filepath, + mock_retrieve_log, mock_retrieve_editor_log, + mock_get_results, mock_create): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_workspace = mock.MagicMock() + mock_editor = mock.MagicMock() + mock_editor.wait.side_effect = ly_test_tools.launchers.exceptions.WaitTimeoutError() + mock_test_spec = mock.MagicMock() + mock_test_spec.__name__ = 'mock_test_name' + mock_test_spec_2 = mock.MagicMock() + mock_test_spec_2.__name__ = 'mock_test_name_2' + mock_test_spec_list = [mock_test_spec, mock_test_spec_2] + 
mock_unknown_result = ly_test_tools.o3de.editor_test.Result.Unknown() + mock_unknown_result.test_spec = mock.MagicMock() + mock_unknown_result.test_spec.__name__ = 'mock_test_spec_name' + mock_unknown_result.output = mock.MagicMock() + mock_unknown_result.editor_log = mock.MagicMock() + mock_get_testcase_filepath.side_effect = ['mock_path', 'mock_path_2'] + mock_get_results.return_value = {mock_test_spec.__name__: mock_unknown_result, + mock_test_spec_2.__name__: mock_unknown_result} + mock_timeout = mock.MagicMock() + mock_create.return_value = mock_timeout + + results = mock_test_suite._exec_editor_multitest(mock.MagicMock(), mock_workspace, mock_editor, 0, + 'mock_log_name', mock_test_spec_list, []) + assert mock_cycle_crash.called + assert mock_get_results.called + assert results[mock_test_spec_2.__name__].extra_info + assert results[mock_test_spec.__name__] == mock_timeout + + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._report_result') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._exec_editor_test') + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test') + def test_RunSingleTest_ValidTest_ReportsResults(self, mock_setup_test, mock_exec_editor_test, mock_report_result): + mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite() + mock_test_data = mock.MagicMock() + mock_test_spec = mock.MagicMock() + mock_result = mock.MagicMock() + mock_test_name = 'mock_test_result' + mock_exec_editor_test.return_value = {mock_test_name: mock_result} + + mock_test_suite._run_single_test(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data, + mock_test_spec) + + assert mock_setup_test.called + assert mock_exec_editor_test.called + assert mock_test_data.results.update.called + mock_report_result.assert_called_with(mock_test_name, mock_result) + + @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._exec_editor_multitest') + 
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunBatchedTests_ValidTests_CallsCorrectly(self, mock_setup_test, mock_exec_multitest):
        """_run_batched_tests sets up the editor, runs the multitest, and stores results."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_batched_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                           mock.MagicMock(), [])

        assert mock_setup_test.called
        assert mock_exec_multitest.called
        assert mock_test_data.results.update.called

    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
        """Two tests with two parallel editors spawn one thread per test."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 2
        mock_test_spec_list = [mock.MagicMock(), mock.MagicMock()]
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelTests_TenTestsAndTwoEditors_TenThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
        """Ten tests with two parallel editors still spawn one thread per test overall."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 2
        mock_test_spec_list = []
        for i in range(10):
            mock_test_spec_list.append(mock.MagicMock())
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelTests_TenTestsAndThreeEditors_TenThreads(self, mock_setup_test, mock_get_num_editors,
                                                                 mock_thread):
        """Ten tests with three parallel editors still spawn one thread per test overall."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 3
        mock_test_spec_list = []
        for i in range(10):
            mock_test_spec_list.append(mock.MagicMock())
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    # NOTE(review): the three "RunParallelBatchedTests" tests below invoke
    # _run_parallel_tests, not a batched variant — confirm against
    # EditorTestSuite whether _run_parallel_batched_tests was intended.
    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelBatchedTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors,
                                                                   mock_thread):
        """Two batched tests with two parallel editors spawn one thread per test."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 2
        mock_test_spec_list = [mock.MagicMock(), mock.MagicMock()]
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelBatchedTests_TenTestsAndTwoEditors_TenThreads(self, mock_setup_test, mock_get_num_editors,
                                                                      mock_thread):
        """Ten batched tests with two parallel editors spawn one thread per test overall."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 2
        mock_test_spec_list = []
        for i in range(10):
            mock_test_spec_list.append(mock.MagicMock())
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    @mock.patch('threading.Thread')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
    @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
    def test_RunParallelBatchedTests_TenTestsAndThreeEditors_TenThreads(self, mock_setup_test, mock_get_num_editors,
                                                                        mock_thread):
        """Ten batched tests with three parallel editors spawn one thread per test overall."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_get_num_editors.return_value = 3
        mock_test_spec_list = []
        for i in range(10):
            mock_test_spec_list.append(mock.MagicMock())
        mock_test_data = mock.MagicMock()

        mock_test_suite._run_parallel_tests(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock_test_data,
                                            mock_test_spec_list, [])

        assert mock_setup_test.called
        assert mock_test_data.results.update.call_count == len(mock_test_spec_list)
        assert mock_thread.call_count == len(mock_test_spec_list)

    def test_GetNumberParallelEditors_ConfigExists_ReturnsConfig(self):
        """_get_number_parallel_editors honors the value from the pytest config."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_request = mock.MagicMock()
        mock_request.config.getoption.return_value = 1

        num_of_editors = mock_test_suite._get_number_parallel_editors(mock_request)
        assert num_of_editors == 1

    def test_GetNumberParallelEditors_ConfigNotExists_ReturnsDefault(self):
        """_get_number_parallel_editors falls back to the suite default when unset."""
        mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
        mock_request = mock.MagicMock()
        mock_request.config.getoption.return_value = None

        num_of_editors = mock_test_suite._get_number_parallel_editors(mock_request)
        assert num_of_editors == mock_test_suite.get_number_parallel_editors()
\ No newline at end of file
diff --git a/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py b/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py
new file mode 100644
index 0000000000..968c07aca6
--- /dev/null
+++ b/Tools/LyTestTools/tests/unit/test_pytest_plugin_editor_test.py
@@ -0,0 +1,41 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
+ +SPDX-License-Identifier: Apache-2.0 OR MIT +""" +import pytest +import os +import unittest.mock as mock +import unittest + +import ly_test_tools._internal.pytest_plugin.editor_test as editor_test + +pytestmark = pytest.mark.SUITE_smoke + +class TestEditorTest(unittest.TestCase): + + @mock.patch('inspect.isclass', mock.MagicMock(return_value=True)) + def test_PytestPycollectMakeitem_ValidArgs_CallsCorrectly(self): + mock_collector = mock.MagicMock() + mock_name = mock.MagicMock() + mock_obj = mock.MagicMock() + mock_base = mock.MagicMock() + mock_obj.__bases__ = [mock_base] + + editor_test.pytest_pycollect_makeitem(mock_collector, mock_name, mock_obj) + mock_base.pytest_custom_makeitem.assert_called_once_with(mock_collector, mock_name, mock_obj) + + def test_PytestCollectionModifyitem_OneValidClass_CallsOnce(self): + mock_item = mock.MagicMock() + mock_class = mock.MagicMock() + mock_class.pytest_custom_modify_items = mock.MagicMock() + mock_item.instance.__class__ = mock_class + mock_session = mock.MagicMock() + mock_items = [mock_item, mock.MagicMock()] + mock_config = mock.MagicMock() + + generator = editor_test.pytest_collection_modifyitems(mock_session, mock_items, mock_config) + for x in generator: + pass + assert mock_class.pytest_custom_modify_items.call_count == 1 \ No newline at end of file