Merge pull request #7663 from aws-lumberyard-dev/Atom/jromnoa/move-benchmark-test-metrics-to-new-suite

Move benchmark test metrics to their own suite outside of the GPU suite.
jromnoa committed 4 years ago via GitHub
commit 8a1d63bab0

@@ -47,4 +47,18 @@ if(PAL_TRAIT_BUILD_HOST_TOOLS AND PAL_TRAIT_BUILD_TESTS_SUPPORTED)
        COMPONENT
            Atom
    )
    ly_add_pytest(
        NAME AutomatedTesting::Atom_TestSuite_Benchmark_GPU
        TEST_SUITE main
        TEST_REQUIRES gpu
        TEST_SERIAL
        TIMEOUT 700
        PATH ${CMAKE_CURRENT_LIST_DIR}/TestSuite_Benchmark_GPU.py
        RUNTIME_DEPENDENCIES
            AssetProcessor
            AutomatedTesting.Assets
            Editor
        COMPONENT
            Atom
    )
endif()

@@ -0,0 +1,59 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import editor_python_test_tools.hydra_test_utils as hydra
from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
logger = logging.getLogger(__name__)
@pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):
def test_AtomFeatureIntegrationBenchmarkTest_UploadMetrics(
self, request, editor, workspace, rhi, project, launcher_platform, level):
"""
Please review the hydra script run by this test for more specific test info.
Tests the performance of the Simple level.
"""
expected_lines = [
"Benchmark metadata captured.",
"Pass timestamps captured.",
"CPU frame time captured.",
"Captured data successfully.",
"Exited game mode"
]
unexpected_lines = [
"Failed to capture data.",
"Failed to capture pass timestamps.",
"Failed to capture CPU frame time.",
"Failed to capture benchmark metadata."
]
hydra.launch_and_validate_results(
request,
os.path.join(os.path.dirname(__file__), "tests"),
editor,
"hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
timeout=600,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
cfg_args=[level],
null_renderer=False,
enable_prefab_system=False,
)
aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
aggregator.upload_metrics(rhi)
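
Note: the aggregator call at the end of the new test is the piece that moved out of the GPU suite. A minimal standalone sketch of that upload step follows, assuming only the ly_test_tools API exactly as used above; the helper name and its wrapping are illustrative and not part of the PR, 'workspace' is the test workspace fixture, and 'rhi' is one of the parametrized values ('dx12' or 'vulkan').

    import logging

    from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator

    logger = logging.getLogger(__name__)

    def upload_benchmark_metrics(workspace, rhi):
        # The 'periodic' string mirrors the PR; it appears to label the suite/build
        # bucket that the uploaded metrics are grouped under.
        aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
        aggregator.upload_metrics(rhi)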

@@ -11,7 +11,6 @@ import pytest
import editor_python_test_tools.hydra_test_utils as hydra
import ly_test_tools.environment.file_system as file_system
from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorTestSuite
from Atom.atom_utils.atom_component_helper import compare_screenshot_to_golden_image, golden_images_directory
@@ -156,50 +155,6 @@ class TestAutomation(EditorTestSuite):
similarity_threshold=0.96) is True
@pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):

    def test_AtomFeatureIntegrationBenchmark(
            self, request, editor, workspace, rhi, project, launcher_platform, level):
        """
        Please review the hydra script run by this test for more specific test info.
        Tests the performance of the Simple level.
        """
        expected_lines = [
            "Benchmark metadata captured.",
            "Pass timestamps captured.",
            "CPU frame time captured.",
            "Captured data successfully.",
            "Exited game mode"
        ]
        unexpected_lines = [
            "Failed to capture data.",
            "Failed to capture pass timestamps.",
            "Failed to capture CPU frame time.",
            "Failed to capture benchmark metadata."
        ]
        hydra.launch_and_validate_results(
            request,
            os.path.join(os.path.dirname(__file__), "tests"),
            editor,
            "hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
            timeout=600,
            expected_lines=expected_lines,
            unexpected_lines=unexpected_lines,
            halt_on_unexpected=True,
            cfg_args=[level],
            null_renderer=False,
            enable_prefab_system=False,
        )
        aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
        aggregator.upload_metrics(rhi)


@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_generic'])
class TestMaterialEditor(object):
