Update screenshot tests to use editor_test.py

Signed-off-by: Scott Murray <scottmur@amazon.com>
Branch: monroegm-disable-blank-issue-2
Scott Murray 4 years ago
parent 27abad7564
commit b340877f7c

@@ -12,7 +12,7 @@ import pytest
 import editor_python_test_tools.hydra_test_utils as hydra
 import ly_test_tools.environment.file_system as file_system
 from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
-from ly_test_tools.o3de.editor_test import EditorSharedTest, EditorTestSuite
+from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorTestSuite
 from Atom.atom_utils.atom_component_helper import compare_screenshot_to_golden_image, golden_images_directory
 
 DEFAULT_SUBFOLDER_PATH = 'user/PythonTests/Automated/Screenshots'
@@ -23,63 +23,117 @@ logger = logging.getLogger(__name__)
 @pytest.mark.parametrize("launcher_platform", ['windows_editor'])
 class TestAutomation(EditorTestSuite):
     # Remove -autotest_mode from global_extra_cmdline_args since we need rendering for these tests.
-    global_extra_cmdline_args = ["-BatchMode"]  # Default is ["-BatchMode", "-autotest_mode"]
+    global_extra_cmdline_args = ["-autotest_mode"]  # Default is ["-BatchMode", "-autotest_mode"]
+    use_null_renderer = False  # Default is True
     enable_prefab_system = False
 
+    @staticmethod
+    def screenshot_setup(screenshot_directory, screenshot_names):
+        """
+        :param screenshot_names: list of screenshot file names with extensions
+        :return: tuple test_screenshots, golden_images each a list of full file paths
+        """
+        test_screenshots = []
+        golden_images = []
+        for screenshot in screenshot_names:
+            screenshot_path = os.path.join(screenshot_directory, screenshot)
+            test_screenshots.append(screenshot_path)
+        file_system.delete(test_screenshots, True, True)
+        for golden_image in screenshot_names:
+            golden_image_path = os.path.join(golden_images_directory(), golden_image)
+            golden_images.append(golden_image_path)
+        return test_screenshots, golden_images
+
     @pytest.mark.test_case_id("C34603773")
-    class AtomGPU_BasicLevelSetup_SetsUpLevel(EditorSharedTest):
-        use_null_renderer = False  # Default is True
-        screenshot_name = "AtomBasicLevelSetup.ppm"
-        test_screenshots = []  # Gets set by setup()
-        screenshot_directory = ""  # Gets set by setup()
-
-        # Clear existing test screenshots before starting test.
-        def setup(self, workspace):
-            screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
-            test_screenshots = [os.path.join(screenshot_directory, self.screenshot_name)]
-            file_system.delete(test_screenshots, True, True)
-
-        golden_images = [os.path.join(golden_images_directory(), screenshot_name)]
-
-        from Atom.tests import hydra_AtomGPU_BasicLevelSetup as test_module
-
-        assert compare_screenshot_to_golden_image(screenshot_directory, test_screenshots, golden_images, 0.99) is True
+    @pytest.mark.REQUIRES_gpu
+    class AtomGPU_BasicLevelSetup_SetsUpLevel_DX12(EditorSingleTest):
+        from Atom.tests import hydra_AtomGPU_BasicLevelSetup as test_module
+        extra_cmdline_args = ["-rhi=dx12"]
+
+        # Custom setup/teardown to remove old screenshots and establish paths to golden images
+        def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
+            self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
+            self.screenshot_names = ["AtomBasicLevelSetup.ppm"]
+            self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
+                screenshot_directory=self.screenshot_directory,
+                screenshot_names=self.screenshot_names)
+
+        def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+            yield
+            assert compare_screenshot_to_golden_image(self.screenshot_directory,
+                                                      self.test_screenshots,
+                                                      self.golden_images,
+                                                      similarity_threshold=0.96) is True
 
     @pytest.mark.test_case_id("C34525095")
-    class AtomGPU_LightComponent_AreaLightScreenshotsMatchGoldenImages(EditorSharedTest):
-        use_null_renderer = False  # Default is True
-        screenshot_names = [
+    @pytest.mark.REQUIRES_gpu
+    class AtomGPU_LightComponent_AreaLightScreenshotsMatchGoldenImages_DX12(EditorSingleTest):
+        from Atom.tests import hydra_AtomGPU_AreaLightScreenshotTest as test_module
+        extra_cmdline_args = ["-rhi=dx12"]
+
+        # Custom setup/teardown to remove old screenshots and establish paths to golden images
+        def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
+            self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
+            self.screenshot_names = [
                 "AreaLight_1.ppm",
                 "AreaLight_2.ppm",
                 "AreaLight_3.ppm",
                 "AreaLight_4.ppm",
                 "AreaLight_5.ppm",
             ]
-        test_screenshots = []  # Gets set by setup()
-        screenshot_directory = ""  # Gets set by setup()
-
-        # Clear existing test screenshots before starting test.
-        def setup(self, workspace):
-            screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
-            for screenshot in self.screenshot_names:
-                screenshot_path = os.path.join(screenshot_directory, screenshot)
-                self.test_screenshots.append(screenshot_path)
-            file_system.delete(self.test_screenshots, True, True)
-
-        golden_images = []
-        for golden_image in screenshot_names:
-            golden_image_path = os.path.join(golden_images_directory(), golden_image)
-            golden_images.append(golden_image_path)
-
+            self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
+                screenshot_directory=self.screenshot_directory,
+                screenshot_names=self.screenshot_names)
+
+        def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+            yield
+            assert compare_screenshot_to_golden_image(self.screenshot_directory,
+                                                      self.test_screenshots,
+                                                      self.golden_images,
+                                                      similarity_threshold=0.96) is True
+
+    @pytest.mark.test_case_id("C34525095")
+    @pytest.mark.REQUIRES_gpu
+    class AtomGPU_LightComponent_AreaLightScreenshotsMatchGoldenImages_Vulkan(EditorSingleTest):
         from Atom.tests import hydra_AtomGPU_AreaLightScreenshotTest as test_module
-
-        assert compare_screenshot_to_golden_image(screenshot_directory, test_screenshots, golden_images, 0.99) is True
+        extra_cmdline_args = ["-rhi=vulkan"]
+
+        # Custom setup/teardown to remove old screenshots and establish paths to golden images
+        def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
+            self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
+            self.screenshot_names = [
+                "AreaLight_1.ppm",
+                "AreaLight_2.ppm",
+                "AreaLight_3.ppm",
+                "AreaLight_4.ppm",
+                "AreaLight_5.ppm",
+            ]
+            self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
+                screenshot_directory=self.screenshot_directory,
+                screenshot_names=self.screenshot_names)
+
+        def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+            yield
+            assert compare_screenshot_to_golden_image(self.screenshot_directory,
+                                                      self.test_screenshots,
+                                                      self.golden_images,
+                                                      similarity_threshold=0.96) is True
 
     @pytest.mark.test_case_id("C34525110")
-    class AtomGPU_LightComponent_SpotLightScreenshotsMatchGoldenImages(EditorSharedTest):
-        use_null_renderer = False  # Default is True
-        screenshot_names = [
+    @pytest.mark.REQUIRES_gpu
+    class AtomGPU_LightComponent_SpotLightScreenshotsMatchGoldenImages_DX12(EditorSingleTest):
+        from Atom.tests import hydra_AtomGPU_SpotLightScreenshotTest as test_module
+        extra_cmdline_args = ["-rhi=dx12"]
+
+        # Custom setup/teardown to remove old screenshots and establish paths to golden images
+        def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
+            self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
+            self.screenshot_names = [
                 "SpotLight_1.ppm",
                 "SpotLight_2.ppm",
                 "SpotLight_3.ppm",
@@ -87,25 +141,47 @@ class TestAutomation(EditorTestSuite):
                 "SpotLight_5.ppm",
                 "SpotLight_6.ppm",
             ]
-        test_screenshots = []  # Gets set by setup()
-        screenshot_directory = ""  # Gets set by setup()
-
-        # Clear existing test screenshots before starting test.
-        def setup(self, workspace):
-            screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
-            for screenshot in self.screenshot_names:
-                screenshot_path = os.path.join(screenshot_directory, screenshot)
-                self.test_screenshots.append(screenshot_path)
-            file_system.delete(self.test_screenshots, True, True)
-
-        golden_images = []
-        for golden_image in screenshot_names:
-            golden_image_path = os.path.join(golden_images_directory(), golden_image)
-            golden_images.append(golden_image_path)
-
-        from Atom.tests import hydra_AtomGPU_SpotLightScreenshotTest as test_module
-
-        assert compare_screenshot_to_golden_image(screenshot_directory, test_screenshots, golden_images, 0.99) is True
+            print(self.screenshot_directory)
+            self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
+                screenshot_directory=self.screenshot_directory,
+                screenshot_names=self.screenshot_names)
+
+        def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+            yield
+            assert compare_screenshot_to_golden_image(self.screenshot_directory,
+                                                      self.test_screenshots,
+                                                      self.golden_images,
+                                                      similarity_threshold=0.96) is True
+
+    @pytest.mark.test_case_id("C34525110")
+    @pytest.mark.REQUIRES_gpu
+    class AtomGPU_LightComponent_SpotLightScreenshotsMatchGoldenImages_Vulkan(EditorSingleTest):
+        from Atom.tests import hydra_AtomGPU_SpotLightScreenshotTest as test_module
+        extra_cmdline_args = ["-rhi=vulkan"]
+
+        # Custom setup/teardown to remove old screenshots and establish paths to golden images
+        def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
+            self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
+            self.screenshot_names = [
+                "SpotLight_1.ppm",
+                "SpotLight_2.ppm",
+                "SpotLight_3.ppm",
+                "SpotLight_4.ppm",
+                "SpotLight_5.ppm",
+                "SpotLight_6.ppm",
+            ]
+            print(self.screenshot_directory)
+            self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
+                screenshot_directory=self.screenshot_directory,
+                screenshot_names=self.screenshot_names)
+
+        def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+            yield
+            assert compare_screenshot_to_golden_image(self.screenshot_directory,
+                                                      self.test_screenshots,
+                                                      self.golden_images,
+                                                      similarity_threshold=0.96) is True
 
 @pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
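Note: with the per-test bookkeeping moved into the shared `TestAutomation.screenshot_setup` helper, adding another RHI permutation is just one more `EditorSingleTest` subclass inside `TestAutomation`. A minimal sketch of a hypothetical Vulkan counterpart to the BasicLevelSetup test (this commit only adds the DX12 variant for that module; the sketch assumes the imports already shown in this file's diff):

```python
# Hypothetical class, mirroring the pattern this commit establishes for the light tests.
# It would live inside TestAutomation alongside the DX12 variant.
@pytest.mark.test_case_id("C34603773")
@pytest.mark.REQUIRES_gpu
class AtomGPU_BasicLevelSetup_SetsUpLevel_Vulkan(EditorSingleTest):
    from Atom.tests import hydra_AtomGPU_BasicLevelSetup as test_module
    extra_cmdline_args = ["-rhi=vulkan"]  # only the RHI flag differs from the DX12 variant

    def setup(self, request, workspace, editor, editor_test_results, launcher_platform):
        # Delete stale captures and resolve golden-image paths before the editor runs.
        self.screenshot_directory = os.path.join(workspace.paths.project(), DEFAULT_SUBFOLDER_PATH)
        self.screenshot_names = ["AtomBasicLevelSetup.ppm"]
        self.test_screenshots, self.golden_images = TestAutomation.screenshot_setup(
            screenshot_directory=self.screenshot_directory,
            screenshot_names=self.screenshot_names)

    def wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform):
        yield  # the editor test runs here; the assertion below acts as teardown validation
        assert compare_screenshot_to_golden_image(self.screenshot_directory,
                                                  self.test_screenshots,
                                                  self.golden_images,
                                                  similarity_threshold=0.96) is True
```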

@ -82,12 +82,13 @@ def compare_screenshot_similarity(
result = 'You must specify a screenshot_directory in order to create a zip archive.\n' result = 'You must specify a screenshot_directory in order to create a zip archive.\n'
mean_similarity = compare_screenshots(test_screenshot, golden_image) mean_similarity = compare_screenshots(test_screenshot, golden_image)
print(f"MEAN_SIMILARITY = {mean_similarity}")
if not mean_similarity > similarity_threshold: if not mean_similarity > similarity_threshold:
if create_zip_archive: if create_zip_archive:
create_screenshots_archive(screenshot_directory) create_screenshots_archive(screenshot_directory)
result = ( result = (
f"When comparing the test_screenshot: '{test_screenshot}' " f"When comparing the test_screenshot: '{test_screenshot}' "
f"to golden_image: '{golden_image}' the mean similarity of '{mean_similarity}' " f"to golden_image: '{golden_image}'.\nThe mean similarity of '{mean_similarity}' "
f"was lower than the similarity threshold of '{similarity_threshold}'. ") f"was lower than the similarity threshold of '{similarity_threshold}'. ")
return result return result
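The failure condition above, `not mean_similarity > similarity_threshold`, means a capture passes only when its mean similarity is strictly greater than the threshold; a value exactly equal to the threshold still fails. A standalone illustration of the rule (not the module's actual code):

```python
def screenshot_passes(mean_similarity: float, similarity_threshold: float = 0.96) -> bool:
    """Mirror of the pass/fail rule used in compare_screenshot_similarity."""
    return mean_similarity > similarity_threshold

assert screenshot_passes(0.97)       # above the threshold: pass
assert not screenshot_passes(0.96)   # equal to the threshold: still a failure
```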
@@ -123,7 +124,9 @@ def initial_viewport_setup(screen_width=1280, screen_height=720):
     import azlmbr.legacy.general as general
 
     general.set_viewport_size(screen_width, screen_height)
+    general.idle_wait_frames(1)
     general.update_viewport()
+    general.idle_wait_frames(1)
 
 
 def enter_exit_game_mode_take_screenshot(screenshot_name, enter_game_tuple, exit_game_tuple, timeout_in_seconds=4):
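Net result of the hunk above, reconstructed for reference: the two idle waits give the editor one frame to apply the resize and one more to settle before any capture.

```python
def initial_viewport_setup(screen_width=1280, screen_height=720):
    import azlmbr.legacy.general as general

    general.set_viewport_size(screen_width, screen_height)
    general.idle_wait_frames(1)  # let the resize take effect
    general.update_viewport()
    general.idle_wait_frames(1)  # settle one more frame before capturing
```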
@@ -137,13 +140,18 @@ def enter_exit_game_mode_take_screenshot(screenshot_name, enter_game_tuple, exit_game_tuple, timeout_in_seconds=4):
     """
     import azlmbr.legacy.general as general
-    from editor_python_test_tools.utils import TestHelper
+    from editor_python_test_tools.utils import TestHelper, Report
     from Atom.atom_utils.screenshot_utils import ScreenshotHelper
 
+    screenshot_helper = ScreenshotHelper(general.idle_wait_frames)
     TestHelper.enter_game_mode(enter_game_tuple)
     TestHelper.wait_for_condition(function=lambda: general.is_in_game_mode(), timeout_in_seconds=timeout_in_seconds)
-    ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(screenshot_name)
+    screenshot_helper.prepare_viewport_for_screenshot(1920, 1080)
+    success_screenshot = TestHelper.wait_for_condition(
+        function=lambda: screenshot_helper.capture_screenshot_blocking(screenshot_name),
+        timeout_in_seconds=timeout_in_seconds)
+    Report.result(("Screenshot taken", "Screenshot failed to be taken"), success_screenshot)
     TestHelper.exit_game_mode(exit_game_tuple)
     TestHelper.wait_for_condition(function=lambda: not general.is_in_game_mode(), timeout_in_seconds=timeout_in_seconds)
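For reference, a reconstruction of how the helper reads after this hunk, assuming only what the diff shows (the docstring body is elided):

```python
def enter_exit_game_mode_take_screenshot(screenshot_name, enter_game_tuple, exit_game_tuple, timeout_in_seconds=4):
    import azlmbr.legacy.general as general
    from editor_python_test_tools.utils import TestHelper, Report
    from Atom.atom_utils.screenshot_utils import ScreenshotHelper

    screenshot_helper = ScreenshotHelper(general.idle_wait_frames)
    TestHelper.enter_game_mode(enter_game_tuple)
    TestHelper.wait_for_condition(function=lambda: general.is_in_game_mode(), timeout_in_seconds=timeout_in_seconds)
    screenshot_helper.prepare_viewport_for_screenshot(1920, 1080)
    # Retry the blocking capture until it reports success or the timeout elapses.
    success_screenshot = TestHelper.wait_for_condition(
        function=lambda: screenshot_helper.capture_screenshot_blocking(screenshot_name),
        timeout_in_seconds=timeout_in_seconds)
    Report.result(("Screenshot taken", "Screenshot failed to be taken"), success_screenshot)
    TestHelper.exit_game_mode(exit_game_tuple)
    TestHelper.wait_for_condition(function=lambda: not general.is_in_game_mode(), timeout_in_seconds=timeout_in_seconds)
```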

@@ -107,10 +107,8 @@ def AtomGPU_BasicLevelSetup_SetsUpLevel():
     18. Add Mesh component to Sphere Entity and set the Mesh Asset property for the Mesh component.
     19. Create a Camera Entity as a child entity of the Default Level Entity then add a Camera component.
     20. Set the Camera Entity rotation value and set the Camera component Field of View value.
-    21. Enter game mode.
-    22. Take screenshot.
-    23. Exit game mode.
-    24. Look for errors.
+    21. Enter/Exit game mode taking screenshot.
+    22. Look for errors.
 
     :return: None
     """
@@ -127,7 +125,7 @@ def AtomGPU_BasicLevelSetup_SetsUpLevel():
     from Atom.atom_utils.atom_constants import AtomComponentProperties
     from Atom.atom_utils.atom_component_helper import initial_viewport_setup
-    from Atom.atom_utils.screenshot_utils import ScreenshotHelper
+    from Atom.atom_utils.atom_component_helper import enter_exit_game_mode_take_screenshot
 
     DEGREE_RADIAN_FACTOR = 0.0174533
     SCREENSHOT_NAME = "AtomBasicLevelSetup"
@@ -300,18 +298,10 @@ def AtomGPU_BasicLevelSetup_SetsUpLevel():
     Report.result(Tests.camera_fov_set, camera_component.get_component_property_value(
         AtomComponentProperties.camera('Field of view')) == camera_fov_value)
 
-    # 21. Enter game mode.
-    TestHelper.enter_game_mode(Tests.enter_game_mode)
-    TestHelper.wait_for_condition(function=lambda: general.is_in_game_mode(), timeout_in_seconds=4.0)
-
-    # 22. Take screenshot.
-    ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(f"{SCREENSHOT_NAME}.ppm")
-
-    # 23. Exit game mode.
-    TestHelper.exit_game_mode(Tests.exit_game_mode)
-    TestHelper.wait_for_condition(function=lambda: not general.is_in_game_mode(), timeout_in_seconds=4.0)
-
-    # 24. Look for errors.
+    # 21. Enter/Exit game mode taking screenshot.
+    enter_exit_game_mode_take_screenshot(f"{SCREENSHOT_NAME}.ppm", Tests.enter_game_mode, Tests.exit_game_mode)
+
+    # 22. Look for errors.
     TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
     for error_info in error_tracer.errors:
         Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
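For context, `Tests.enter_game_mode` and `Tests.exit_game_mode` follow the hydra-test convention of `(pass_message, fail_message)` tuples consumed by `TestHelper` and `Report.result`. A hedged sketch (the exact message strings in the test file may differ):

```python
# Assumed shape of the Tests tuples referenced above, per the hydra-test convention.
class Tests:
    enter_game_mode = ("P: Entered game mode", "F: Failed to enter game mode")
    exit_game_mode = ("P: Exited game mode", "F: Failed to exit game mode")
```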
