Move benchmark tests into their own file so that timeout failures in other tests can no longer stop the benchmarks from running

Signed-off-by: jromnoa <80134229+jromnoa@users.noreply.github.com>
jromnoa 4 years ago
parent 932f0a4d84
commit 79e32ce4e5
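
This change registers the GPU benchmark suite as its own pytest target (the ly_add_pytest block in the CMake hunk below), so a timeout in the general Atom editor suite no longer takes the benchmarks down with it. As a rough, non-authoritative sketch of what the separation enables, the new file can be collected on its own with pytest; this assumes it is invoked from the directory containing TestSuite_Benchmark_GPU.py and that the o3de test packages (ly_test_tools, editor_python_test_tools) are importable, while real runs still go through the ly_add_pytest/CTest registration:

    # Minimal sketch (assumptions above): collect, without executing, the
    # separated benchmark suite to confirm it now stands alone.
    import sys
    import pytest

    if __name__ == "__main__":
        exit_code = pytest.main([
            "--collect-only",               # list the parametrized benchmark tests
            "-q",
            "TestSuite_Benchmark_GPU.py",   # the file added by this commit
        ])
        sys.exit(int(exit_code))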

@ -47,4 +47,18 @@ if(PAL_TRAIT_BUILD_HOST_TOOLS AND PAL_TRAIT_BUILD_TESTS_SUPPORTED)
        COMPONENT
            Atom
    )

    ly_add_pytest(
        NAME AutomatedTesting::Atom_TestSuite_Benchmark_GPU
        TEST_SUITE main
        TEST_REQUIRES gpu
        TEST_SERIAL
        TIMEOUT 700
        PATH ${CMAKE_CURRENT_LIST_DIR}/TestSuite_Benchmark_GPU.py
        RUNTIME_DEPENDENCIES
            AssetProcessor
            AutomatedTesting.Assets
            Editor
        COMPONENT
            Atom
    )
endif()

@ -0,0 +1,59 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import editor_python_test_tools.hydra_test_utils as hydra
from ly_test_tools.benchmark.data_aggregator import BenchmarkDataAggregator
logger = logging.getLogger(__name__)
@pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):
def test_AtomFeatureIntegrationBenchmarkTest_UploadMetrics(
self, request, editor, workspace, rhi, project, launcher_platform, level):
"""
Please review the hydra script run by this test for more specific test info.
Tests the performance of the Simple level.
"""
expected_lines = [
"Benchmark metadata captured.",
"Pass timestamps captured.",
"CPU frame time captured.",
"Captured data successfully.",
"Exited game mode"
]
unexpected_lines = [
"Failed to capture data.",
"Failed to capture pass timestamps.",
"Failed to capture CPU frame time.",
"Failed to capture benchmark metadata."
]
hydra.launch_and_validate_results(
request,
os.path.join(os.path.dirname(__file__), "tests"),
editor,
"hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
timeout=600,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
cfg_args=[level],
null_renderer=False,
enable_prefab_system=False,
)
aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
aggregator.upload_metrics(rhi)

@ -156,50 +156,6 @@ class TestAutomation(EditorTestSuite):
similarity_threshold=0.96) is True
@pytest.mark.parametrize('rhi', ['dx12', 'vulkan'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("level", ["AtomFeatureIntegrationBenchmark"])
class TestPerformanceBenchmarkSuite(object):

    def test_AtomFeatureIntegrationBenchmark(
            self, request, editor, workspace, rhi, project, launcher_platform, level):
        """
        Please review the hydra script run by this test for more specific test info.
        Tests the performance of the Simple level.
        """
        expected_lines = [
            "Benchmark metadata captured.",
            "Pass timestamps captured.",
            "CPU frame time captured.",
            "Captured data successfully.",
            "Exited game mode"
        ]
        unexpected_lines = [
            "Failed to capture data.",
            "Failed to capture pass timestamps.",
            "Failed to capture CPU frame time.",
            "Failed to capture benchmark metadata."
        ]

        hydra.launch_and_validate_results(
            request,
            os.path.join(os.path.dirname(__file__), "tests"),
            editor,
            "hydra_GPUTest_AtomFeatureIntegrationBenchmark.py",
            timeout=600,
            expected_lines=expected_lines,
            unexpected_lines=unexpected_lines,
            halt_on_unexpected=True,
            cfg_args=[level],
            null_renderer=False,
            enable_prefab_system=False,
        )

        aggregator = BenchmarkDataAggregator(workspace, logger, 'periodic')
        aggregator.upload_metrics(rhi)

@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_generic'])
class TestMaterialEditor(object):
