[ATOM-16016] Add initial level for AtomFeatureIntegrationBenchmark to AutomatedTesting. (#2405)

* [ATOM-16016] Add initial level for AtomFeatureIntegrationBenchmark to AutomatedTesting.

Signed-off-by: Cynthia Lin <cyntlin@amazon.com>

* [ATOM-16016] Add new AtomFeatureIntegrationBenchmark to AutomatedTesting.

Signed-off-by: Cynthia Lin <cyntlin@amazon.com>
parent db8f78890f
commit 567b4a7f28

hydra_GPUTest_AtomFeatureIntegrationBenchmark.py
@@ -0,0 +1,102 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Hydra script that is used to create a new level with a default rendering setup.
After the level is setup, screenshots are diffed against golden images are used to verify pass/fail results of the test.
See the run() function for more in-depth test info.
"""
import os
import sys

import azlmbr.legacy.general as general
import azlmbr.paths

sys.path.append(os.path.join(azlmbr.paths.devroot, "AutomatedTesting", "Gem", "PythonTests"))

import editor_python_test_tools.hydra_editor_utils as hydra
from editor_python_test_tools.editor_test_helper import EditorTestHelper
from atom_renderer.atom_utils.benchmark_utils import BenchmarkHelper

SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
DEGREE_RADIAN_FACTOR = 0.0174533

helper = EditorTestHelper(log_prefix="Test_Atom_BasicLevelSetup")
def run():
    """
    1. View -> Layouts -> Restore Default Layout, sets the viewport to ratio 16:9 @ 1280 x 720
    2. Runs console command r_DisplayInfo = 0
    3. Opens AtomFeatureIntegrationBenchmark level
    4. Initializes benchmark helper with benchmark name to capture benchmark metadata.
    5. Idles for 100 frames, then collects pass timings for 100 frames.

    :return: None
    """

    def initial_viewport_setup(screen_width, screen_height):
        general.set_viewport_size(screen_width, screen_height)
        general.update_viewport()
        helper.wait_for_condition(
            function=lambda: helper.isclose(a=general.get_viewport_size().x, b=SCREEN_WIDTH, rel_tol=0.1)
            and helper.isclose(a=general.get_viewport_size().y, b=SCREEN_HEIGHT, rel_tol=0.1),
            timeout_in_seconds=4.0
        )
        result = helper.isclose(a=general.get_viewport_size().x, b=SCREEN_WIDTH, rel_tol=0.1) and helper.isclose(
            a=general.get_viewport_size().y, b=SCREEN_HEIGHT, rel_tol=0.1)
        general.log(general.get_viewport_size().x)
        general.log(general.get_viewport_size().y)
        general.log(general.get_viewport_size().z)
        general.log(f"Viewport is set to the expected size: {result}")
        general.run_console("r_DisplayInfo = 0")

    def after_level_load():
        """Function to call after creating/opening a level to ensure it loads."""
        # Give everything a second to initialize.
        general.idle_enable(True)
        general.idle_wait(1.0)
        general.update_viewport()
        general.idle_wait(0.5)  # Half a second is more than enough for updating the viewport.

        # Close out problematic windows, FPS meters, and anti-aliasing.
        if general.is_helpers_shown():  # Turn off the helper gizmos if visible.
            general.toggle_helpers()
            general.idle_wait(1.0)
        if general.is_pane_visible("Error Report"):  # Close Error Report windows that block focus.
            general.close_pane("Error Report")
        if general.is_pane_visible("Error Log"):  # Close Error Log windows that block focus.
            general.close_pane("Error Log")
        general.idle_wait(1.0)
        general.run_console("r_displayInfo=0")
        general.run_console("r_antialiasingmode=0")
        general.idle_wait(1.0)
        return True

    # Wait for Editor idle loop before executing Python hydra scripts.
    general.idle_enable(True)
    general.open_level_no_prompt("AtomFeatureIntegrationBenchmark")

    # Basic setup after opening the level.
    after_level_load()
    initial_viewport_setup(SCREEN_WIDTH, SCREEN_HEIGHT)

    general.enter_game_mode()
    general.idle_wait(1.0)
    helper.wait_for_condition(function=lambda: general.is_in_game_mode(), timeout_in_seconds=2.0)

    benchmarker = BenchmarkHelper("AtomFeatureIntegrationBenchmark")
    benchmarker.capture_benchmark_metadata()
    general.idle_wait_frames(100)
    for i in range(1, 101):
        benchmarker.capture_pass_timestamp(i)

    general.exit_game_mode()
    helper.wait_for_condition(function=lambda: not general.is_in_game_mode(), timeout_in_seconds=2.0)
    general.log("Capturing complete.")


if __name__ == "__main__":
    run()
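For orientation, here is a sketch of the artifacts the capture calls above produce. The directory and file-name patterns come from benchmark_utils (the next file in this commit); the JSON contents themselves are produced by Atom's ProfilingCapture system, so only the paths are sketched here.

# Sketch: expected capture artifacts after run() completes. '@user@' is an
# O3DE path alias that the engine resolves at runtime.
OUTPUT_DIR = "@user@/Scripts/PerformanceBenchmarks/AtomFeatureIntegrationBenchmark"
expected_outputs = [f"{OUTPUT_DIR}/benchmark_metadata.json"] + [
    f"{OUTPUT_DIR}/frame{n}_timestamps.json" for n in range(1, 101)
]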

atom_renderer/atom_utils/benchmark_utils.py
@@ -0,0 +1,84 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import azlmbr.atom
import azlmbr.legacy.general as general
FOLDER_PATH = '@user@/Scripts/PerformanceBenchmarks'
METADATA_FILE = 'benchmark_metadata.json'
class BenchmarkHelper(object):
"""
A helper to capture benchmark data.
"""
def __init__(self, benchmark_name):
super().__init__()
self.benchmark_name = benchmark_name
self.output_path = f'{FOLDER_PATH}/{benchmark_name}'
self.done = False
self.capturedData = False
self.max_frames_to_wait = 200
def capture_benchmark_metadata(self):
"""
Capture benchmark metadata and block further execution until it has been written to the disk.
"""
self.handler = azlmbr.atom.ProfilingCaptureNotificationBusHandler()
self.handler.connect()
self.handler.add_callback('OnCaptureBenchmarkMetadataFinished', self.on_data_captured)
self.done = False
self.capturedData = False
success = azlmbr.atom.ProfilingCaptureRequestBus(
azlmbr.bus.Broadcast, "CaptureBenchmarkMetadata", self.benchmark_name, f'{self.output_path}/{METADATA_FILE}'
)
if success:
self.wait_until_data()
general.log('Benchmark metadata captured.')
else:
general.log('Failed to capture benchmark metadata.')
return self.capturedData
def capture_pass_timestamp(self, frame_number):
"""
Capture pass timestamps and block further execution until it has been written to the disk.
"""
self.handler = azlmbr.atom.ProfilingCaptureNotificationBusHandler()
self.handler.connect()
self.handler.add_callback('OnCaptureQueryTimestampFinished', self.on_data_captured)
self.done = False
self.capturedData = False
success = azlmbr.atom.ProfilingCaptureRequestBus(
azlmbr.bus.Broadcast, "CapturePassTimestamp", f'{self.output_path}/frame{frame_number}_timestamps.json')
if success:
self.wait_until_data()
general.log('Pass timestamps captured.')
else:
general.log('Failed to capture pass timestamps.')
return self.capturedData
def on_data_captured(self, parameters):
# the parameters come in as a tuple
if parameters[0]:
general.log('Captured data successfully.')
self.capturedData = True
else:
general.log('Failed to capture data.')
self.done = True
self.handler.disconnect()
def wait_until_data(self):
frames_waited = 0
while self.done == False:
general.idle_wait_frames(1)
if frames_waited > self.max_frames_to_wait:
general.log('Timed out while waiting for the data to be captured')
self.handler.disconnect()
break
else:
frames_waited = frames_waited + 1
general.log(f'(waited {frames_waited} frames)')
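As a usage reference, here is a minimal sketch of driving BenchmarkHelper the way the hydra script above does; it assumes the Editor is already in game mode so the render passes being timed are actually executing.

# Minimal BenchmarkHelper usage sketch, mirroring the hydra script above.
from atom_renderer.atom_utils.benchmark_utils import BenchmarkHelper

benchmarker = BenchmarkHelper("AtomFeatureIntegrationBenchmark")
benchmarker.capture_benchmark_metadata()       # blocks until benchmark_metadata.json is written
for frame in range(1, 101):
    benchmarker.capture_pass_timestamp(frame)  # blocks until frame<N>_timestamps.json is written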

@@ -14,6 +14,7 @@ import pytest
import ly_test_tools.environment.file_system as file_system
from ly_test_tools.image.screenshot_compare_qssim import qssim as compare_screenshots
from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
import editor_python_test_tools.hydra_test_utils as hydra
logger = logging.getLogger(__name__)
@@ -83,3 +84,43 @@ class TestAllComponentsIndepthTests(object):
        for test_screenshot, golden_screenshot in zip(test_screenshots, golden_images):
            compare_screenshots(test_screenshot, golden_screenshot)


@pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):

    def test_AtomFeatureIntegrationBenchmark(
            self, request, editor, workspace, rhi, project, launcher_platform, level):
"""
Please review the hydra script run by this test for more specific test info.
Tests the performance of the Simple level.
"""
        expected_lines = [
            "Benchmark metadata captured.",
            "Pass timestamps captured.",
            "Capturing complete.",
            "Captured data successfully."
        ]
        unexpected_lines = [
            "Failed to capture data.",
            "Failed to capture pass timestamps.",
            "Failed to capture benchmark metadata."
        ]
        hydra.launch_and_validate_results(
            request,
            TEST_DIRECTORY,
            editor,
            "hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
            timeout=EDITOR_TIMEOUT,
            expected_lines=expected_lines,
            unexpected_lines=unexpected_lines,
            halt_on_unexpected=True,
            cfg_args=[level],
            null_renderer=False,
        )
        aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
        aggregator.upload_metrics(rhi)
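For clarity, the expected_lines/unexpected_lines lists above amount to a log assertion of roughly the following shape. This is a simplified sketch, not the actual ly_test_tools implementation of launch_and_validate_results.

# Simplified sketch of the log validation that launch_and_validate_results performs:
# every expected line must appear in the Editor log, and no unexpected line may.
def validate_editor_log(log_text, expected_lines, unexpected_lines):
    missing = [line for line in expected_lines if line not in log_text]
    found_bad = [line for line in unexpected_lines if line in log_text]
    assert not missing, f"Expected lines not found in Editor log: {missing}"
    assert not found_bad, f"Unexpected lines found in Editor log: {found_bad}"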

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5232563c3ff322669808ac4daeda3d822e4ef8c9c87db0fa245f0f9c9c34aada
size 23379

@@ -0,0 +1,6 @@
<download name="AtomFeatureIntegrationBenchmark" type="Map">
<index src="filelist.xml" dest="filelist.xml"/>
<files>
<file src="level.pak" dest="level.pak" size="154A" md5="da50266269914f05d06d80f0025953fc"/>
</files>
</download>
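As context for the manifest above: each file entry pairs a source path with a destination, a size, and an md5 checksum. A hypothetical consumer might verify a downloaded entry like this (an illustrative sketch, not code from this commit):

# Illustrative sketch: verify downloaded files against the manifest's md5 values.
import hashlib
import xml.etree.ElementTree as ET

def verify_manifest(manifest_path, download_dir):
    root = ET.parse(manifest_path).getroot()
    for entry in root.find('files'):
        dest = f"{download_dir}/{entry.get('dest')}"
        with open(dest, 'rb') as f:
            digest = hashlib.md5(f.read()).hexdigest()
        assert digest == entry.get('md5'), f'Checksum mismatch for {dest}'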

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e075be2cb7cf5aa98e3503c1119b94c3098b35500c98c4db32d025c9e1afa52d
size 5450

@@ -0,0 +1,12 @@
495.045,510.96,35.8437,-0.166,0,-1.82124
4.79827,4.71364,64.7838,-1.41886,0,2.48964
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0