From 64a20c45b56551d7f0841e2702cca7d05df61fc5 Mon Sep 17 00:00:00 2001
From: evanchia
Date: Tue, 19 Oct 2021 15:51:44 -0700
Subject: [PATCH] adding editor integ tests to AR

Signed-off-by: evanchia
---
 .../Gem/PythonTests/CMakeLists.txt            |   3 +
 .../editor_test_testing/CMakeLists.txt        |  38 +++
 .../TestSuiteLinux_Main.py                    | 267 ++++++++++++++++++
 ...Suite_Main.py => TestSuiteWindows_Main.py} |   7 +-
 .../launchers/launcher_helper.py              |   2 +-
 5 files changed, 315 insertions(+), 2 deletions(-)
 create mode 100644 AutomatedTesting/Gem/PythonTests/editor_test_testing/CMakeLists.txt
 create mode 100644 AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteLinux_Main.py
 rename AutomatedTesting/Gem/PythonTests/editor_test_testing/{TestSuite_Main.py => TestSuiteWindows_Main.py} (98%)

diff --git a/AutomatedTesting/Gem/PythonTests/CMakeLists.txt b/AutomatedTesting/Gem/PythonTests/CMakeLists.txt
index 466a4b1679..7af10dba66 100644
--- a/AutomatedTesting/Gem/PythonTests/CMakeLists.txt
+++ b/AutomatedTesting/Gem/PythonTests/CMakeLists.txt
@@ -58,3 +58,6 @@ add_subdirectory(smoke)
 
 ## AWS ##
 add_subdirectory(AWS)
+
+## Test Tools ##
+add_subdirectory(editor_test_testing)
diff --git a/AutomatedTesting/Gem/PythonTests/editor_test_testing/CMakeLists.txt b/AutomatedTesting/Gem/PythonTests/editor_test_testing/CMakeLists.txt
new file mode 100644
index 0000000000..fac6192673
--- /dev/null
+++ b/AutomatedTesting/Gem/PythonTests/editor_test_testing/CMakeLists.txt
@@ -0,0 +1,38 @@
+#
+# Copyright (c) Contributors to the Open 3D Engine Project.
+# For complete copyright and license terms please see the LICENSE at the root of this distribution.
+#
+# SPDX-License-Identifier: Apache-2.0 OR MIT
+#
+
+if(PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_HOST_TOOLS)
+
+    ly_add_pytest(
+        NAME AutomatedTesting::ParallelEditorTestsWindows
+        TEST_SUITE main
+        TEST_SERIAL
+        PATH ${CMAKE_CURRENT_LIST_DIR}/TestSuiteWindows_Main.py
+        PYTEST_MARKS "SUITE_main"
+        RUNTIME_DEPENDENCIES
+            AZ::AssetProcessor
+            Legacy::Editor
+            AutomatedTesting.Assets
+        COMPONENT
+            TestTools
+    )
+
+    ly_add_pytest(
+        NAME AutomatedTesting::ParallelEditorTestsLinux
+        TEST_SUITE main
+        TEST_SERIAL
+        PATH ${CMAKE_CURRENT_LIST_DIR}/TestSuiteLinux_Main.py
+        PYTEST_MARKS "SUITE_main"
+        RUNTIME_DEPENDENCIES
+            AZ::AssetProcessor
+            Legacy::Editor
+            AutomatedTesting.Assets
+        COMPONENT
+            TestTools
+    )
+
+endif()
diff --git a/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteLinux_Main.py b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteLinux_Main.py
new file mode 100644
index 0000000000..8686c43357
--- /dev/null
+++ b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteLinux_Main.py
@@ -0,0 +1,267 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+"""
+This suite contains the tests for editor_test utilities.
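+Each test below writes a temporary pytest suite with testdir.makepyfile, runs
+it through the editor_test framework, and asserts on the pass/fail/crash
+outcomes that the framework reports.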
+""" + +import pytest +import os +import sys +import importlib +import re + +import ly_test_tools +from ly_test_tools import LAUNCHERS + +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + +from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite, Result +from ly_test_tools.o3de.asset_processor import AssetProcessor +import ly_test_tools.environment.process_utils as process_utils + +import argparse, sys + +if ly_test_tools.LINUX: + pytestmark = pytest.mark.SUITE_main +else: + pytestmark = pytest.mark.skipif(not ly_test_tools.LINUX, reason="Only runs on Linux") + +@pytest.mark.parametrize("launcher_platform", ['linux_editor']) +@pytest.mark.parametrize("project", ["AutomatedTesting"]) +class TestEditorTest: + + args = None + path = None + @classmethod + def setup_class(cls): + TestEditorTest.args = sys.argv.copy() + build_dir_arg_index = TestEditorTest.args.index("--build-directory") + if build_dir_arg_index < 0: + print("Error: Must pass --build-directory argument in order to run this test") + sys.exit(-2) + + TestEditorTest.args[build_dir_arg_index+1] = os.path.abspath(TestEditorTest.args[build_dir_arg_index+1]) + TestEditorTest.args.append("-s") + TestEditorTest.path = os.path.dirname(os.path.abspath(__file__)) + cls._asset_processor = None + + def teardown_class(cls): + if cls._asset_processor: + cls._asset_processor.stop(1) + cls._asset_processor.teardown() + + # Test runs # + @classmethod + def _run_single_test(cls, testdir, workspace, module_name): + if cls._asset_processor is None: + if not process_utils.process_exists("AssetProcessor", ignore_extensions=True): + cls._asset_processor = AssetProcessor(workspace) + cls._asset_processor.start() + + testdir.makepyfile( + f""" + import pytest + import os + import sys + + from ly_test_tools import LAUNCHERS + from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite + + @pytest.mark.SUITE_main + @pytest.mark.parametrize("launcher_platform", ['linux_editor']) + @pytest.mark.parametrize("project", ["AutomatedTesting"]) + class TestAutomation(EditorTestSuite): + class test_single(EditorSingleTest): + import {module_name} as test_module + + """) + result = testdir.runpytest(*TestEditorTest.args[2:]) + + def get_class(module_name): + class test_single(EditorSingleTest): + test_module = importlib.import_module(module_name) + return test_single + + output = "".join(result.outlines) + extracted_results = EditorTestSuite._get_results_using_output([get_class(module_name)], output, output) + extracted_result = next(iter(extracted_results.items())) + return (extracted_result[1], result) + + def test_single_passing_test(self, request, workspace, launcher_platform, testdir): + (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Passes") + result.assert_outcomes(passed=1) + assert isinstance(extracted_result, Result.Pass) + + def test_single_failing_test(self, request, workspace, launcher_platform, testdir): + (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Fails") + result.assert_outcomes(failed=1) + assert isinstance(extracted_result, Result.Fail) + + def test_single_crashing_test(self, request, workspace, launcher_platform, testdir): + (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Crashes") + result.assert_outcomes(failed=1) + assert isinstance(extracted_result, Result.Unknown) + + @classmethod + def 
_run_shared_test(cls, testdir, module_class_code, extra_cmd_line=None): + if not extra_cmd_line: + extra_cmd_line = [] + + if cls._asset_processor is None: + if not process_utils.process_exists("AssetProcessor", ignore_extensions=True): + cls._asset_processor = AssetProcessor(workspace) + cls._asset_processor.start() + + testdir.makepyfile( + f""" + import pytest + import os + import sys + + from ly_test_tools import LAUNCHERS + from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite + + @pytest.mark.SUITE_main + @pytest.mark.parametrize("launcher_platform", ['linux_editor']) + @pytest.mark.parametrize("project", ["AutomatedTesting"]) + class TestAutomation(EditorTestSuite): + {module_class_code} + """) + result = testdir.runpytest(*TestEditorTest.args[2:] + extra_cmd_line) + return result + + def test_batched_two_passing(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass(EditorSharedTest): + import EditorTest_That_Passes as test_module + is_parallelizable = False + + class test_2(EditorSharedTest): + import EditorTest_That_PassesToo as test_module + is_parallelizable = False + """ + ) + # 2 Passes +1(batch runner) + result.assert_outcomes(passed=3) + + def test_batched_one_pass_one_fail(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass(EditorSharedTest): + import EditorTest_That_Passes as test_module + is_parallelizable = False + + class test_fail(EditorSharedTest): + import EditorTest_That_Fails as test_module + is_parallelizable = False + """ + ) + # 1 Fail, 1 Passes +1(batch runner) + result.assert_outcomes(passed=2, failed=1) + + def test_batched_one_pass_one_fail_one_crash(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass(EditorSharedTest): + import EditorTest_That_Passes as test_module + is_parallelizable = False + + class test_fail(EditorSharedTest): + import EditorTest_That_Fails as test_module + is_parallelizable = False + + class test_crash(EditorSharedTest): + import EditorTest_That_Crashes as test_module + is_parallelizable = False + """ + ) + # 2 Fail, 1 Passes + 1(batch runner) + result.assert_outcomes(passed=2, failed=2) + + def test_parallel_two_passing(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass_1(EditorSharedTest): + import EditorTest_That_Passes as test_module + is_batchable = False + + class test_pass_2(EditorSharedTest): + import EditorTest_That_PassesToo as test_module + is_batchable = False + """ + ) + # 2 Passes +1(parallel runner) + result.assert_outcomes(passed=3) + + def test_parallel_one_passing_one_failing_one_crashing(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass(EditorSharedTest): + import EditorTest_That_Passes as test_module + is_batchable = False + + class test_fail(EditorSharedTest): + import EditorTest_That_Fails as test_module + is_batchable = False + + class test_crash(EditorSharedTest): + import EditorTest_That_Crashes as test_module + is_batchable = False + """ + ) + # 2 Fail, 1 Passes + 1(parallel runner) + result.assert_outcomes(passed=2, failed=2) + + def test_parallel_batched_two_passing(self, request, workspace, launcher_platform, testdir): + result = self._run_shared_test(testdir, + """ + class test_pass_1(EditorSharedTest): + import 
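+
+    # Outcome accounting for the shared-test runs in this class: each batched
+    # and/or parallel run reports one extra pytest pass for the runner test
+    # itself, and a crashed editor test is reported as a failure, so one pass
+    # + one fail + one crash yields passed=2 and failed=2.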
+    def test_parallel_batched_two_passing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir, workspace,
+            """
+        class test_pass_1(EditorSharedTest):
+            import EditorTest_That_Passes as test_module
+
+        class test_pass_2(EditorSharedTest):
+            import EditorTest_That_PassesToo as test_module
+            """
+        )
+        # 2 passes + 1 (batched + parallel runner)
+        result.assert_outcomes(passed=3)
+
+    def test_parallel_batched_one_passing_one_failing_one_crashing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir, workspace,
+            """
+        class test_pass(EditorSharedTest):
+            import EditorTest_That_Passes as test_module
+
+        class test_fail(EditorSharedTest):
+            import EditorTest_That_Fails as test_module
+
+        class test_crash(EditorSharedTest):
+            import EditorTest_That_Crashes as test_module
+            """
+        )
+        # 1 fail + 1 crash = 2 failed; 1 pass + 1 (batched + parallel runner) = 2 passed
+        result.assert_outcomes(passed=2, failed=2)
+
+    def test_selection_2_deselected_1_selected(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir, workspace,
+            """
+        class test_pass(EditorSharedTest):
+            import EditorTest_That_Passes as test_module
+
+        class test_fail(EditorSharedTest):
+            import EditorTest_That_Fails as test_module
+
+        class test_crash(EditorSharedTest):
+            import EditorTest_That_Crashes as test_module
+            """, extra_cmd_line=["-k", "fail"]
+        )
+        # 1 failed (the selected test) + 1 passed (the parallel runner)
+        result.assert_outcomes(failed=1, passed=1)
+        outcomes = result.parseoutcomes()
+        deselected = outcomes.get("deselected")
+        assert deselected == 2
diff --git a/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteWindows_Main.py
similarity index 98%
rename from AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py
rename to AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteWindows_Main.py
index 7c7e063951..3643ab8145 100644
--- a/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py
+++ b/AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteWindows_Main.py
@@ -15,6 +15,7 @@ import sys
 import importlib
 import re
 
+import ly_test_tools
 from ly_test_tools import LAUNCHERS
 
 sys.path.append(os.path.dirname(os.path.abspath(__file__)))
@@ -25,7 +26,11 @@ import ly_test_tools.environment.process_utils as process_utils
 
 import argparse, sys
 
-@pytest.mark.SUITE_main
+if ly_test_tools.WINDOWS:
+    pytestmark = pytest.mark.SUITE_main
+else:
+    pytestmark = pytest.mark.skipif(not ly_test_tools.WINDOWS, reason="Only runs on Windows")
+
 @pytest.mark.parametrize("launcher_platform", ['windows_editor'])
 @pytest.mark.parametrize("project", ["AutomatedTesting"])
 class TestEditorTest:
diff --git a/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py b/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py
index d37623f352..e4552ec119 100755
--- a/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py
+++ b/Tools/LyTestTools/ly_test_tools/launchers/launcher_helper.py
@@ -51,7 +51,7 @@ def create_editor(workspace, launcher_platform=ly_test_tools.HOST_OS_EDITOR, arg
     Editor is only officially supported on the Windows Platform.
 
     :param workspace: lumberyard workspace to use
-    :param launcher_platform: the platform to target for a launcher (i.e. 'windows_dedicated' for DedicatedWinLauncher) 
+    :param launcher_platform: the platform to target for a launcher (e.g. 'windows_dedicated' for DedicatedWinLauncher)
     :param args: List of arguments to pass to the launcher's 'args' argument during construction
     :return: Editor instance
     """
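
These suites are registered with CTest by the ly_add_pytest() calls above, but
they can also be exercised directly with pytest. A minimal sketch, assuming an
existing profile build (the build path below is illustrative only, not
prescribed by this patch):

    python -m pytest AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuiteLinux_Main.py \
        --build-directory /path/to/o3de/build/bin/profile

The --build-directory flag is required: setup_class() reads it from sys.argv,
normalizes it to an absolute path, and forwards it to the generated child test
runs, exiting early when the flag is missing.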