Merge branch 'main' into LYN-3969

main
clujames 5 years ago
commit 60da667e8b

@ -46,7 +46,7 @@ SortIncludes: true
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements

2
.gitignore vendored

@ -4,6 +4,7 @@ __pycache__
AssetProcessorTemp/**
[Bb]uild/**
[Cc]ache/
/install/
Editor/EditorEventLog.xml
Editor/EditorLayout.xml
**/*egg-info/**
@ -19,3 +20,4 @@ _savebackup/
TestResults/**
*.swatches
/imgui.ini
/scripts/project_manager/logs/

File diff suppressed because it is too large Load Diff

@ -1 +1 @@
/autooptimizefile=0 /preset=AlbedoWithGenericAlpha /reduce="es3:2,ios:2,osx_gl:0,pc:0,provo:0"
/autooptimizefile=0 /preset=AlbedoWithGenericAlpha /reduce="android:2,ios:2,mac:0,pc:0,provo:0"

@ -1 +1 @@
/autooptimizefile=0 /preset=Albedo /reduce="es3:3,ios:3,osx_gl:0,pc:0,provo:0"
/autooptimizefile=0 /preset=Albedo /reduce="android:3,ios:3,mac:0,pc:0,provo:0"

@ -20,31 +20,49 @@ if(json_error)
message(FATAL_ERROR "Unable to read key 'engine' from 'project.json', error: ${json_error}")
endif()
# Read the list of paths from ~.o3de/o3de_manifest.json
file(TO_CMAKE_PATH "$ENV{USERPROFILE}" home_directory) # Windows
if((NOT home_directory) OR (NOT EXISTS ${home_directory}))
file(TO_CMAKE_PATH "$ENV{HOME}" home_directory)# Unix
if(DEFINED ENV{USERPROFILE} AND EXISTS $ENV{USERPROFILE})
set(manifest_path $ENV{USERPROFILE}/.o3de/o3de_manifest.json) # Windows
else()
set(manifest_path $ENV{HOME}/.o3de/o3de_manifest.json) # Unix
endif()
if (NOT home_directory)
message(FATAL_ERROR "Cannot find user home directory, the o3de manifest cannot be found")
endif()
# Set manifest path to path in the user home directory
set(manifest_path ${home_directory}/.o3de/o3de_manifest.json)
# Read the ~/.o3de/o3de_manifest.json file and look through the 'engines_path' object.
# Find a key that matches LY_ENGINE_NAME_TO_USE and use that as the engine path.
if(EXISTS ${manifest_path})
file(READ ${manifest_path} manifest_json)
string(JSON engines_count ERROR_VARIABLE json_error LENGTH ${manifest_json} engines)
string(JSON engines_path_count ERROR_VARIABLE json_error LENGTH ${manifest_json} engines_path)
if(json_error)
message(FATAL_ERROR "Unable to read key 'engines' from '${manifest_path}', error: ${json_error}")
message(FATAL_ERROR "Unable to read key 'engines_path' from '${manifest_path}', error: ${json_error}")
endif()
string(JSON engines_path_type ERROR_VARIABLE json_error TYPE ${manifest_json} engines_path)
if(json_error OR NOT ${engines_path_type} STREQUAL "OBJECT")
message(FATAL_ERROR "Type of 'engines_path' in '${manifest_path}' is not a JSON Object, error: ${json_error}")
endif()
math(EXPR engines_count "${engines_count}-1")
foreach(engine_path_index RANGE ${engines_count})
string(JSON engine_path ERROR_VARIABLE json_error GET ${manifest_json} engines ${engine_path_index})
if(${json_error})
message(FATAL_ERROR "Unable to read engines[${engine_path_index}] '${manifest_path}', error: ${json_error}")
math(EXPR engines_path_count "${engines_path_count}-1")
foreach(engine_path_index RANGE ${engines_path_count})
string(JSON engine_name ERROR_VARIABLE json_error MEMBER ${manifest_json} engines_path ${engine_path_index})
if(json_error)
message(FATAL_ERROR "Unable to read 'engines_path/${engine_path_index}' from '${manifest_path}', error: ${json_error}")
endif()
if(LY_ENGINE_NAME_TO_USE STREQUAL engine_name)
string(JSON engine_path ERROR_VARIABLE json_error GET ${manifest_json} engines_path ${engine_name})
if(json_error)
message(FATAL_ERROR "Unable to read value from 'engines_path/${engine_name}', error: ${json_error}")
endif()
if(engine_path)
list(APPEND CMAKE_MODULE_PATH "${engine_path}/cmake")
break()
endif()
endif()
list(APPEND CMAKE_MODULE_PATH "${engine_path}/cmake")
endforeach()
else()
# If the user is passing CMAKE_MODULE_PATH we assume thats where we will find the engine
if(NOT CMAKE_MODULE_PATH)
message(FATAL_ERROR "Engine registration is required before configuring a project. Please register an engine by running 'scripts/o3de register --this-engine'")
endif()
endif()

@ -28,30 +28,41 @@ ly_add_target(
Gem::Atom_AtomBridge.Static
)
# if enabled, AutomatedTesting is used by all kinds of applications
ly_create_alias(NAME AutomatedTesting.Builders NAMESPACE Gem TARGETS Gem::AutomatedTesting)
ly_create_alias(NAME AutomatedTesting.Tools NAMESPACE Gem TARGETS Gem::AutomatedTesting)
ly_create_alias(NAME AutomatedTesting.Clients NAMESPACE Gem TARGETS Gem::AutomatedTesting)
ly_create_alias(NAME AutomatedTesting.Servers NAMESPACE Gem TARGETS Gem::AutomatedTesting)
################################################################################
# Gem dependencies
################################################################################
ly_add_project_dependencies(
PROJECT_NAME
AutomatedTesting
TARGETS
AutomatedTesting.GameLauncher
DEPENDENCIES_FILES
runtime_dependencies.cmake
${pal_dir}/runtime_dependencies.cmake
)
if(PAL_TRAIT_BUILD_HOST_TOOLS)
ly_add_project_dependencies(
PROJECT_NAME
AutomatedTesting
TARGETS
AssetBuilder
AssetProcessor
AssetProcessorBatch
Editor
DEPENDENCIES_FILES
tool_dependencies.cmake
${pal_dir}/tool_dependencies.cmake
)
# The GameLauncher uses "Clients" gem variants:
ly_enable_gems(PROJECT_NAME AutomatedTesting GEM_FILE enabled_gems.cmake
TARGETS AutomatedTesting.GameLauncher
VARIANTS Clients)
# If we build a server, then apply the gems to the server
if(PAL_TRAIT_BUILD_SERVER_SUPPORTED)
# if we're making a server, then add the "Server" gem variants to it:
ly_enable_gems(PROJECT_NAME AutomatedTesting GEM_FILE enabled_gems.cmake
TARGETS AutomatedTesting.ServerLauncher
VARIANTS Servers)
set_property(GLOBAL APPEND PROPERTY LY_LAUNCHER_SERVER_PROJECTS AutomatedTesting)
endif()
if (PAL_TRAIT_BUILD_HOST_TOOLS)
# The Editor uses "Tools" gem variants:
ly_enable_gems(
PROJECT_NAME AutomatedTesting GEM_FILE enabled_gems.cmake
TARGETS Editor
VARIANTS Tools)
# The pipeline tools use "Builders" gem variants:
ly_enable_gems(
PROJECT_NAME AutomatedTesting GEM_FILE enabled_gems.cmake
TARGETS AssetBuilder AssetProcessor AssetProcessorBatch
VARIANTS Builders)
endif()

@ -0,0 +1,58 @@
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
set(ENABLED_GEMS
ImGui
ScriptEvents
ExpressionEvaluation
Gestures
CertificateManager
DebugDraw
SceneProcessing
GraphCanvas
InAppPurchases
AutomatedTesting
EditorPythonBindings
QtForPython
PythonAssetBuilder
Metastream
AudioSystem
Camera
EMotionFX
PhysX
CameraFramework
StartingPointMovement
StartingPointCamera
ScriptCanvas
ScriptCanvasPhysics
ScriptCanvasTesting
LyShineExamples
StartingPointInput
PhysXDebug
WhiteBox
FastNoise
SurfaceData
GradientSignal
Vegetation
GraphModel
LandscapeCanvas
NvCloth
Blast
Maestro
TextureAtlas
LmbrCentral
LyShine
HttpRequestor
Atom_AtomBridge
AWSCore
AWSClientAuth
AWSMetrics
)

@ -1,48 +0,0 @@
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the License). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Extracted from Game
set(GEM_DEPENDENCIES
Gem::Maestro
Gem::TextureAtlas
Gem::LmbrCentral
Gem::LyShine
Gem::HttpRequestor
Gem::ScriptEvents
Gem::ExpressionEvaluation
Gem::Gestures
Gem::CertificateManager
Gem::DebugDraw
Gem::AudioSystem
Gem::InAppPurchases
Gem::AutomatedTesting
Gem::Metastream
Gem::Camera
Gem::EMotionFX
Gem::PhysX
Gem::CameraFramework
Gem::StartingPointMovement
Gem::StartingPointCamera
Gem::ScriptCanvas
Gem::ImGui
Gem::LyShineExamples
Gem::StartingPointInput
Gem::ScriptCanvasPhysics
Gem::PhysXDebug
Gem::WhiteBox
Gem::FastNoise
Gem::SurfaceData
Gem::GradientSignal
Gem::Vegetation
Gem::Atom_AtomBridge
Gem::NvCloth
Gem::Blast
)

@ -1,60 +0,0 @@
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the License). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Extracted from Editor.xml
set(GEM_DEPENDENCIES
Gem::Maestro.Editor
Gem::TextureAtlas.Editor
Gem::LmbrCentral.Editor
Gem::LyShine.Editor
Gem::HttpRequestor
Gem::ScriptEvents.Editor
Gem::ExpressionEvaluation
Gem::Gestures
Gem::CertificateManager
Gem::DebugDraw.Editor
Gem::SceneProcessing.Editor
Gem::GraphCanvas.Editor
Gem::InAppPurchases
Gem::AutomatedTesting
Gem::EditorPythonBindings.Editor
Gem::PythonAssetBuilder.Editor
Gem::Metastream
Gem::AudioSystem.Editor
Gem::Camera.Editor
Gem::EMotionFX.Editor
Gem::PhysX.Editor
Gem::CameraFramework
Gem::StartingPointMovement
Gem::StartingPointCamera
Gem::ScriptCanvas.Editor
Gem::ScriptEvents.Editor
Gem::ImGui.Editor
Gem::LyShineExamples
Gem::StartingPointInput.Editor
Gem::ScriptCanvasPhysics
Gem::ScriptCanvasTesting.Editor
Gem::PhysXDebug.Editor
Gem::WhiteBox.Editor
Gem::FastNoise.Editor
Gem::SurfaceData.Editor
Gem::GradientSignal.Editor
Gem::Vegetation.Editor
Gem::GraphModel.Editor
Gem::LandscapeCanvas.Editor
Gem::EMotionFX.Editor
Gem::ImGui.Editor
Gem::Atom_RHI.Private
Gem::Atom_Feature_Common.Editor
Gem::Atom_AtomBridge.Editor
Gem::NvCloth.Editor
Gem::Blast.Editor
)

@ -0,0 +1,10 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""

@ -0,0 +1,237 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
import os
import pytest
import time
import typing
from datetime import datetime
import ly_test_tools.log.log_monitor
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor as asset_processor
from AWS.common.aws_utils import aws_utils
from AWS.common.aws_credentials import aws_credentials
from AWS.Windows.resource_mappings.resource_mappings import resource_mappings
from AWS.Windows.cdk.cdk import cdk
from .aws_metrics_utils import aws_metrics_utils
AWS_METRICS_FEATURE_NAME = 'AWSMetrics'
GAME_LOG_NAME = 'Game.log'
logger = logging.getLogger(__name__)
def setup(launcher: ly_test_tools.launchers.Launcher,
cdk: cdk,
asset_processor: asset_processor,
resource_mappings: resource_mappings,
context_variable: str = '') -> typing.Tuple[ly_test_tools.log.log_monitor.LogMonitor, str, str]:
"""
Set up the CDK application and start the log monitor.
:param launcher: Client launcher for running the test level.
:param cdk: CDK application for deploying the AWS resources.
:param asset_processor: asset_processor fixture.
:param resource_mappings: resource_mappings fixture.
:param context_variable: context_variable for enable optional CDK feature.
:return log monitor object, metrics file path and the metrics stack name.
"""
logger.info(f'Cdk stack names:\n{cdk.list()}')
stacks = cdk.deploy(context_variable=context_variable)
resource_mappings.populate_output_keys(stacks)
asset_processor.start()
asset_processor.wait_for_idle()
metrics_file_path = os.path.join(launcher.workspace.paths.project(), 'user',
AWS_METRICS_FEATURE_NAME, 'metrics.json')
remove_file(metrics_file_path)
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), GAME_LOG_NAME)
remove_file(file_to_monitor)
# Initialize the log monitor.
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
return log_monitor, metrics_file_path, stacks[0]
def monitor_metrics_submission(log_monitor: ly_test_tools.log.log_monitor.LogMonitor) -> None:
"""
Monitor the messages and notifications for submitting metrics.
:param log_monitor: Log monitor to check the log messages.
"""
expected_lines = [
'(Script) - Submitted metrics without buffer.',
'(Script) - Submitted metrics with buffer.',
'(Script) - Metrics is sent successfully.'
]
unexpected_lines = [
'(Script) - Failed to submit metrics without buffer.',
'(Script) - Failed to submit metrics with buffer.',
'(Script) - Failed to send metrics.'
]
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True)
# Assert the log monitor detected expected lines and did not detect any unexpected lines.
assert result, (
f'Log monitoring failed. Used expected_lines values: {expected_lines} & '
f'unexpected_lines values: {unexpected_lines}')
def remove_file(file_path: str) -> None:
"""
Remove a local file and its directory.
:param file_path: Path to the local file.
"""
if os.path.exists(file_path):
os.remove(file_path)
file_dir = os.path.dirname(file_path)
if os.path.exists(file_dir) and len(os.listdir(file_dir)) == 0:
os.rmdir(file_dir)
@pytest.mark.SUITE_periodic
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['AWS/Metrics'])
@pytest.mark.parametrize('feature_name', [AWS_METRICS_FEATURE_NAME])
@pytest.mark.parametrize('resource_mappings_filename', ['aws_resource_mappings.json'])
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
@pytest.mark.parametrize('region_name', ['us-west-2'])
@pytest.mark.parametrize('assume_role_arn', ['arn:aws:iam::645075835648:role/o3de-automation-tests'])
@pytest.mark.parametrize('session_name', ['o3de-Automation-session'])
class TestAWSMetrics_Windows(object):
def test_AWSMetrics_RealTimeAnalytics_MetricsSentToCloudWatch(self,
level: str,
launcher: ly_test_tools.launchers.Launcher,
asset_processor: pytest.fixture,
workspace: pytest.fixture,
aws_utils: aws_utils,
aws_credentials: aws_credentials,
resource_mappings: resource_mappings,
cdk: cdk,
aws_metrics_utils: aws_metrics_utils,
):
"""
Tests that the submitted metrics are sent to CloudWatch for real-time analytics.
"""
log_monitor, metrics_file_path, stack_name = setup(launcher, cdk, asset_processor, resource_mappings)
# Start the Kinesis Data Analytics application for real-time analytics.
analytics_application_name = f'{stack_name}-AnalyticsApplication'
aws_metrics_utils.start_kinesis_data_analytics_application(analytics_application_name)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
start_time = datetime.utcnow()
monitor_metrics_submission(log_monitor)
# Verify that operational health metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': f'{stack_name}-AnalyticsProcessingLambda'}],
start_time)
logger.info('Operational health metrics sent to CloudWatch.')
aws_metrics_utils.verify_cloud_watch_delivery(
AWS_METRICS_FEATURE_NAME,
'TotalLogins',
[],
start_time)
logger.info('Real-time metrics sent to CloudWatch.')
# Stop the Kinesis Data Analytics application.
aws_metrics_utils.stop_kinesis_data_analytics_application(analytics_application_name)
def test_AWSMetrics_UnauthorizedUser_RequestRejected(self,
level: str,
launcher: ly_test_tools.launchers.Launcher,
cdk: cdk,
aws_credentials: aws_credentials,
asset_processor: pytest.fixture,
resource_mappings: resource_mappings,
workspace: pytest.fixture):
"""
Tests that unauthorized users cannot send metrics events to the AWS backed backend.
"""
log_monitor, metrics_file_path, stack_name = setup(launcher, cdk, asset_processor, resource_mappings)
# Set invalid AWS credentials.
launcher.args = ['+LoadLevel', level, '+cl_awsAccessKey', 'AKIAIOSFODNN7EXAMPLE',
'+cl_awsSecretKey', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Failed to send metrics.'],
unexpected_lines=['(Script) - Metrics is sent successfully.'],
halt_on_unexpected=True)
assert result, 'Metrics events are sent successfully by unauthorized user'
logger.info('Unauthorized user is rejected to send metrics.')
def test_AWSMetrics_BatchAnalytics_MetricsDeliveredToS3(self,
level: str,
launcher: ly_test_tools.launchers.Launcher,
cdk: cdk,
aws_credentials: aws_credentials,
asset_processor: pytest.fixture,
resource_mappings: resource_mappings,
aws_utils: aws_utils,
aws_metrics_utils: aws_metrics_utils,
workspace: pytest.fixture):
"""
Tests that the submitted metrics are sent to the data lake for batch analytics.
"""
log_monitor, metrics_file_path, stack_name = setup(launcher, cdk, asset_processor, resource_mappings,
context_variable='batch_processing=true')
analytics_bucket_name = aws_metrics_utils.get_analytics_bucket_name(stack_name)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
start_time = datetime.utcnow()
monitor_metrics_submission(log_monitor)
# Verify that operational health metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': f'{stack_name}-EventsProcessingLambda'}],
start_time)
logger.info('Operational health metrics sent to CloudWatch.')
aws_metrics_utils.verify_s3_delivery(analytics_bucket_name)
logger.info('Metrics sent to S3.')
# Run the glue crawler to populate the AWS Glue Data Catalog with tables.
aws_metrics_utils.run_glue_crawler(f'{stack_name}-EventsCrawler')
# Run named queries on the table to verify the batch analytics.
aws_metrics_utils.run_named_queries(f'{stack_name}-AthenaWorkGroup')
logger.info('Query metrics from S3 successfully.')
# Kinesis Data Firehose buffers incoming data before it delivers it to Amazon S3. Sleep for the
# default interval (60s) to make sure that all the metrics are sent to the bucket before cleanup.
time.sleep(60)
# Empty the S3 bucket. S3 buckets can only be deleted successfully when it doesn't contain any object.
aws_metrics_utils.empty_s3_bucket(analytics_bucket_name)

@ -0,0 +1,252 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
import pathlib
import pytest
import typing
from datetime import datetime
from botocore.exceptions import WaiterError
from AWS.common.aws_utils import AwsUtils
from .aws_metrics_waiters import KinesisAnalyticsApplicationUpdatedWaiter, \
CloudWatchMetricsDeliveredWaiter, DataLakeMetricsDeliveredWaiter, GlueCrawlerReadyWaiter
logging.getLogger('boto').setLevel(logging.CRITICAL)
# Expected directory and file extension for the S3 objects.
EXPECTED_S3_DIRECTORY = 'firehose_events/'
EXPECTED_S3_OBJECT_EXTENSION = '.parquet'
class AWSMetricsUtils:
"""
Provide utils functions for the AWSMetrics gem to interact with the deployed resources.
"""
def __init__(self, aws_utils: AwsUtils):
self._aws_util = aws_utils
def start_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
Start the Kenisis Data Analytics application for real-time analytics.
:param application_name: Name of the Kenisis Data Analytics application.
"""
input_id = self.get_kinesis_analytics_application_input_id(application_name)
assert input_id, 'invalid Kinesis Data Analytics application input.'
client = self._aws_util.client('kinesisanalytics')
try:
client.start_application(
ApplicationName=application_name,
InputConfigurations=[
{
'Id': input_id,
'InputStartingPositionConfiguration': {
'InputStartingPosition': 'NOW'
}
},
]
)
except client.exceptions.ResourceInUseException:
# The application has been started.
return
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'RUNNING').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to start the Kinesis Data Analytics application: {str(e)}.'
def get_kinesis_analytics_application_input_id(self, application_name: str) -> str:
"""
Get the input ID for the Kenisis Data Analytics application.
:param application_name: Name of the Kenisis Data Analytics application.
:return: Input ID for the Kenisis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
response = client.describe_application(
ApplicationName=application_name
)
if not response:
return ''
input_descriptions = response.get('ApplicationDetail', {}).get('InputDescriptions', [])
if len(input_descriptions) != 1:
return ''
return input_descriptions[0].get('InputId', '')
def stop_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
Stop the Kenisis Data Analytics application.
:param application_name: Name of the Kenisis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
client.stop_application(
ApplicationName=application_name
)
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'READY').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to stop the Kinesis Data Analytics application: {str(e)}.'
def verify_cloud_watch_delivery(self, namespace: str, metrics_name: str,
dimensions: typing.List[dict], start_time: datetime) -> None:
"""
Verify that the expected metrics is delivered to CloudWatch.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
client = self._aws_util.client('cloudwatch')
try:
CloudWatchMetricsDeliveredWaiter(client).wait(
namespace=namespace,
metrics_name=metrics_name,
dimensions=dimensions,
start_time=start_time
)
except WaiterError as e:
assert False, f'Failed to deliver metrics to CloudWatch: {str(e)}.'
def verify_s3_delivery(self, analytics_bucket_name: str) -> None:
"""
Verify that metrics are delivered to S3 for batch analytics successfully.
:param analytics_bucket_name: Name of the deployed S3 bucket.
"""
client = self._aws_util.client('s3')
bucket_name = analytics_bucket_name
try:
DataLakeMetricsDeliveredWaiter(client).wait(bucket_name=bucket_name, prefix=EXPECTED_S3_DIRECTORY)
except WaiterError as e:
assert False, f'Failed to find the S3 directory for storing metrics data: {str(e)}.'
# Check whether the data is converted to the expected data format.
response = client.list_objects_v2(
Bucket=bucket_name,
Prefix=EXPECTED_S3_DIRECTORY
)
assert response.get('KeyCount', 0) != 0, f'Failed to deliver metrics to the S3 bucket {bucket_name}.'
s3_objects = response.get('Contents', [])
for s3_object in s3_objects:
key = s3_object.get('Key', '')
assert pathlib.Path(key).suffix == EXPECTED_S3_OBJECT_EXTENSION, \
f'Invalid data format is found in the S3 bucket {bucket_name}'
def run_glue_crawler(self, crawler_name: str) -> None:
"""
Run the Glue crawler and wait for it to finish.
:param crawler_name: Name of the Glue crawler
"""
client = self._aws_util.client('glue')
try:
client.start_crawler(
Name=crawler_name
)
except client.exceptions.CrawlerRunningException:
# The crawler has already been started.
return
try:
GlueCrawlerReadyWaiter(client).wait(crawler_name=crawler_name)
except WaiterError as e:
assert False, f'Failed to run the Glue crawler: {str(e)}.'
def run_named_queries(self, work_group: str) -> None:
"""
Run the named queries under the specific Athena work group.
:param work_group: Name of the Athena work group.
"""
client = self._aws_util.client('athena')
# List all the named queries.
response = client.list_named_queries(
WorkGroup=work_group
)
named_query_ids = response.get('NamedQueryIds', [])
# Run each of the queries.
for named_query_id in named_query_ids:
get_named_query_response = client.get_named_query(
NamedQueryId=named_query_id
)
named_query = get_named_query_response.get('NamedQuery', {})
start_query_execution_response = client.start_query_execution(
QueryString=named_query.get('QueryString', ''),
QueryExecutionContext={
'Database': named_query.get('Database', '')
},
WorkGroup=work_group
)
# Wait for the query to finish.
state = 'RUNNING'
while state == 'QUEUED' or state == 'RUNNING':
get_query_execution_response = client.get_query_execution(
QueryExecutionId=start_query_execution_response.get('QueryExecutionId', '')
)
state = get_query_execution_response.get('QueryExecution', {}).get('Status', {}).get('State', '')
assert state == 'SUCCEEDED', f'Failed to run the named query {named_query.get("Name", {})}'
def empty_s3_bucket(self, bucket_name: str) -> None:
"""
Empty the S3 bucket following:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/migrations3.html
:param bucket_name: Name of the S3 bucket.
"""
s3 = self._aws_util.resource('s3')
bucket = s3.Bucket(bucket_name)
for key in bucket.objects.all():
key.delete()
def get_analytics_bucket_name(self, stack_name: str) -> str:
"""
Get the name of the deployed S3 bucket.
:param stack_name: Name of the CloudFormation stack.
:return: Name of the deployed S3 bucket.
"""
client = self._aws_util.client('cloudformation')
response = client.describe_stack_resources(
StackName=stack_name
)
resources = response.get('StackResources', [])
for resource in resources:
if resource.get('ResourceType') == 'AWS::S3::Bucket':
return resource.get('PhysicalResourceId', '')
return ''
@pytest.fixture(scope='function')
def aws_metrics_utils(
request: pytest.fixture,
aws_utils: pytest.fixture):
"""
Fixture for the AWS metrics util functions.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
"""
aws_utils_obj = AWSMetricsUtils(aws_utils)
return aws_utils_obj

@ -0,0 +1,142 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import botocore.client
import logging
from datetime import timedelta
from AWS.common.custom_waiter import CustomWaiter, WaitState
logging.getLogger('boto').setLevel(logging.CRITICAL)
class KinesisAnalyticsApplicationUpdatedWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the Kinesis analytics application being updated to a specific status.
"""
def __init__(self, client: botocore.client, status: str):
"""
Initialize the waiter.
:param client: Boto3 client to use.
:param status: Expected status.
"""
super().__init__(
'KinesisAnalyticsApplicationUpdated',
'DescribeApplication',
'ApplicationDetail.ApplicationStatus',
{status: WaitState.SUCCESS},
client)
def wait(self, application_name: str):
"""
Wait for the expected status.
:param application_name: Name of the Kinesis analytics application.
"""
self._wait(ApplicationName=application_name)
class GlueCrawlerReadyWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the Glue crawler to finish its processing.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'GlueCrawlerReady',
'GetCrawler',
'Crawler.State',
{'READY': WaitState.SUCCESS},
client)
def wait(self, crawler_name):
"""
Wait for the expected status.
:param crawler_name: Name of the Glue crawler.
"""
self._wait(Name=crawler_name)
class DataLakeMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the expected directory being created in the S3 bucket.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'DataLakeMetricsDelivered',
'ListObjectsV2',
'KeyCount > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, bucket_name, prefix):
"""
Wait for the expected directory being created.
:param bucket_name: Name of the S3 bucket.
:param prefix: Name of the expected directory prefix.
"""
self._wait(Bucket=bucket_name, Prefix=prefix)
class CloudWatchMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the expected metrics being delivered to CloudWatch.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'CloudWatchMetricsDelivered',
'GetMetricStatistics',
'length(Datapoints) > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, namespace, metrics_name, dimensions, start_time):
"""
Wait for the expected metrics being delivered.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
self._wait(
Namespace=namespace,
MetricName=metrics_name,
Dimensions=dimensions,
StartTime=start_time,
EndTime=start_time + timedelta(0, self.timeout),
Period=60,
Statistics=[
'SampleCount'
],
Unit='Count'
)

@ -16,12 +16,15 @@ import boto3
import ly_test_tools.environment.process_utils as process_utils
from typing import List
BOOTSTRAP_STACK_NAME = 'CDKToolkit'
BOOTSTRAP_STAGING_BUCKET_LOGIC_ID = 'StagingBucket'
class Cdk:
"""
Cdk class that provides methods to run cdk application commands.
Expects system to have NodeJS, AWS CLI and CDK installed globally and have their paths setup as env variables.
"""
def __init__(self, cdk_path: str, project: str, account_id: str,
workspace: pytest.fixture, session: boto3.session.Session):
"""
@ -49,12 +52,24 @@ class Cdk:
env=self._cdk_env,
shell=True)
def bootstrap(self) -> None:
"""
Deploy the bootstrap stack.
"""
bootstrap_cmd = ['cdk', 'bootstrap',
f'aws://{self._cdk_env["O3DE_AWS_DEPLOY_ACCOUNT"]}/{self._cdk_env["O3DE_AWS_DEPLOY_REGION"]}']
process_utils.check_call(
bootstrap_cmd,
cwd=self._cdk_path,
env=self._cdk_env,
shell=True)
def list(self) -> List[str]:
"""
lists cdk stack names
:return List of cdk stack names
"""
if not self._cdk_path:
return []
@ -126,6 +141,38 @@ class Cdk:
self._stacks = []
self._cdk_path = ''
@staticmethod
def remove_bootstrap_stack(aws_utils: pytest.fixture) -> None:
    """
    Remove the CDK bootstrap stack and the contents of its staging bucket.

    :param aws_utils: aws_utils fixture.
    """
    # Check if the bootstrap stack exists.
    # NOTE(review): describe_stacks raises a ClientError rather than returning
    # an empty 'Stacks' list when the stack does not exist -- confirm this is
    # only called after a successful bootstrap.
    response = aws_utils.client('cloudformation').describe_stacks(
        StackName=BOOTSTRAP_STACK_NAME
    )
    stacks = response.get('Stacks', [])
    if not stacks:
        return

    # Clear the bootstrap staging bucket before deleting the bootstrap stack.
    response = aws_utils.client('cloudformation').describe_stack_resource(
        StackName=BOOTSTRAP_STACK_NAME,
        LogicalResourceId=BOOTSTRAP_STAGING_BUCKET_LOGIC_ID
    )
    staging_bucket_name = response.get('StackResourceDetail', {}).get('PhysicalResourceId', '')
    if staging_bucket_name:
        # Delete every object so the bucket is empty when the stack is removed.
        s3 = aws_utils.resource('s3')
        bucket = s3.Bucket(staging_bucket_name)
        for key in bucket.objects.all():
            key.delete()

    # Delete the bootstrap stack.
    aws_utils.client('cloudformation').delete_stack(
        StackName=BOOTSTRAP_STACK_NAME
    )
@pytest.fixture(scope='function')
def cdk(
@ -134,6 +181,7 @@ def cdk(
feature_name: str,
workspace: pytest.fixture,
aws_utils: pytest.fixture,
bootstrap_required: bool = True,
destroy_stacks_on_teardown: bool = True) -> Cdk:
"""
Fixture for setting up a Cdk
@ -143,6 +191,8 @@ def cdk(
:param feature_name: Feature gem name to expect cdk folder in.
:param workspace: ly_test_tools workspace fixture.
:param aws_utils: aws_utils fixture.
:param bootstrap_required: Whether the bootstrap stack needs to be created to
provision resources the AWS CDK needs to perform the deployment.
:param destroy_stacks_on_teardown: option to control calling destroy at the end of the test.
:return Cdk class object.
"""
@ -150,9 +200,14 @@ def cdk(
cdk_path = f'{workspace.paths.engine_root()}/Gems/{feature_name}/cdk'
cdk_obj = Cdk(cdk_path, project, aws_utils.assume_account_id(), workspace, aws_utils.assume_session())
if bootstrap_required:
cdk_obj.bootstrap()
def teardown():
if destroy_stacks_on_teardown:
cdk_obj.destroy()
cdk_obj.remove_bootstrap_stack(aws_utils)
request.addfinalizer(teardown)
return cdk_obj

@ -0,0 +1,134 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import boto3
import configparser
import logging
import os
import pytest
import typing
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.CRITICAL)
class AwsCredentials:
    """
    Reads and writes the AWS credentials stored for a named profile in the
    shared AWS credentials file.

    The file path comes from the AWS_SHARED_CREDENTIALS_FILE environment
    variable when set, otherwise it defaults to <home>/.aws/credentials.
    """

    def __init__(self, profile_name: str):
        """
        :param profile_name: Name of the profile whose credentials are managed.
        """
        self._profile_name = profile_name

        self._credentials_path = os.environ.get('AWS_SHARED_CREDENTIALS_FILE')
        if not self._credentials_path:
            # Home directory location varies based on the operating system, but is referred to using the environment
            # variables %UserProfile% in Windows and $HOME or ~ (tilde) in Unix-based systems.
            self._credentials_path = os.path.join(os.environ.get('UserProfile', os.path.expanduser('~')),
                                                  '.aws', 'credentials')
        # Remember whether the file pre-existed so that clearing all
        # credentials later can restore the original (missing-file) state.
        self._credentials_file_exists = os.path.exists(self._credentials_path)

        self._credentials = configparser.ConfigParser()
        self._credentials.read(self._credentials_path)

    def get_aws_credentials(self) -> typing.Tuple[str, str, str]:
        """
        Get aws credentials stored in the specific named profile.

        :return: Tuple of (access key id, secret access key, session token);
                 each element is None when not present.
        """
        access_key_id = self._get_aws_credential_attribute_value('aws_access_key_id')
        secret_access_key = self._get_aws_credential_attribute_value('aws_secret_access_key')
        session_token = self._get_aws_credential_attribute_value('aws_session_token')

        return access_key_id, secret_access_key, session_token

    def set_aws_credentials_by_session(self, session: boto3.Session) -> None:
        """
        Set AWS credentials stored in the specific named profile using an assumed role session.

        :param session: assumed role session.
        """
        credentials = session.get_credentials().get_frozen_credentials()
        self.set_aws_credentials(credentials.access_key, credentials.secret_key, credentials.token)

    def set_aws_credentials(self, aws_access_key_id: str, aws_secret_access_key: str,
                            aws_session_token: str) -> None:
        """
        Set AWS credentials stored in the specific named profile.
        Passing None for a value removes that attribute from the profile.

        :param aws_access_key_id: AWS access key id.
        :param aws_secret_access_key: AWS secret access key.
        :param aws_session_token: AWS session token.
        """
        self._set_aws_credential_attribute_value('aws_access_key_id', aws_access_key_id)
        self._set_aws_credential_attribute_value('aws_secret_access_key', aws_secret_access_key)
        self._set_aws_credential_attribute_value('aws_session_token', aws_session_token)

        if (len(self._credentials.sections()) == 0) and (not self._credentials_file_exists):
            # Every profile was removed and the credentials file did not exist
            # before this object was created: restore that state. Guard the
            # removal so that a first call with all-None values (file never
            # written) does not raise FileNotFoundError.
            if os.path.exists(self._credentials_path):
                os.remove(self._credentials_path)
            return

        # Make sure the parent directory exists before writing; the default
        # <home>/.aws directory may be absent on a fresh machine.
        credentials_dir = os.path.dirname(self._credentials_path)
        if credentials_dir:
            os.makedirs(credentials_dir, exist_ok=True)
        with open(self._credentials_path, 'w+') as credential_file:
            self._credentials.write(credential_file)

    def _get_aws_credential_attribute_value(self, attribute_name: str) -> str:
        """
        Get the value of an AWS credential attribute stored in the specific named profile.

        :param attribute_name: Name of the AWS credential attribute.
        :return: Value of the AWS credential attribute, or None when the
                 profile or the attribute does not exist.
        """
        try:
            value = self._credentials.get(self._profile_name, attribute_name)
        except (configparser.NoSectionError, configparser.NoOptionError):
            # Named profile or the requested attribute doesn't exist.
            value = None

        return value

    def _set_aws_credential_attribute_value(self, attribute_name: str, attribute_value: str) -> None:
        """
        Set the value of an AWS credential attribute stored in the specific named profile.

        :param attribute_name: Name of the AWS credential attribute.
        :param attribute_value: Value of the AWS credential attribute. None
                                removes the attribute (and the profile section,
                                once it becomes empty).
        """
        if self._profile_name not in self._credentials:
            self._credentials[self._profile_name] = {}

        if attribute_value is None:
            self._credentials.remove_option(self._profile_name, attribute_name)
            # Remove the named profile if it doesn't have any AWS credential attribute.
            if len(self._credentials[self._profile_name]) == 0:
                self._credentials.remove_section(self._profile_name)
        else:
            self._credentials[self._profile_name][attribute_name] = attribute_value
@pytest.fixture(scope='function')
def aws_credentials(request: pytest.fixture, aws_utils: pytest.fixture, profile_name: str):
    """
    Fixture that temporarily replaces the named profile's AWS credentials with
    ones taken from the assumed-role session, restoring the originals on teardown.

    :param request: _pytest.fixtures.SubRequest class that handles getting
        a pytest fixture from a pytest function/fixture.
    :param aws_utils: aws_utils fixture.
    :param profile_name: Named AWS profile to store temporary credentials.
    """
    credentials = AwsCredentials(profile_name)
    # Snapshot whatever the profile held before the test.
    saved_credentials = credentials.get_aws_credentials()
    credentials.set_aws_credentials_by_session(aws_utils.assume_session())

    def restore_original_credentials():
        # Reset the named profile to the original AWS credentials.
        credentials.set_aws_credentials(*saved_credentials)

    request.addfinalizer(restore_original_credentials)

    return credentials

@ -1,82 +1,90 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import boto3
import pytest
import logging
logger = logging.getLogger(__name__)
class AwsUtils:
def __init__(self, arn: str, session_name: str, region_name: str):
local_session = boto3.Session(profile_name='default')
local_sts_client = local_session.client('sts')
self._local_account_id = local_sts_client.get_caller_identity()["Account"]
logger.info(f'Local Account Id: {self._local_account_id}')
response = local_sts_client.assume_role(RoleArn=arn, RoleSessionName=session_name)
self._assume_session = boto3.Session(aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'],
region_name=region_name)
assume_sts_client = self._assume_session.client('sts')
assume_account_id = assume_sts_client.get_caller_identity()["Account"]
logger.info(f'Assume Account Id: {assume_account_id}')
self._assume_account_id = assume_account_id
def client(self, service: str):
"""
Get the client for a specific AWS service from configured session
:return: Client for the AWS service.
"""
return self._assume_session.client(service)
def assume_session(self):
return self._assume_session
def local_account_id(self):
return self._local_account_id
def assume_account_id(self):
return self._assume_account_id
def destroy(self) -> None:
"""
clears stored session
"""
self._assume_session = None
@pytest.fixture(scope='function')
def aws_utils(
request: pytest.fixture,
assume_role_arn: str,
session_name: str,
region_name: str):
"""
Fixture for setting up a Cdk
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param assume_role_arn: Role used to fetch temporary aws credentials, configure service clients with obtained credentials.
:param session_name: Session name to set.
:param region_name: AWS account region to set for session.
:return AWSUtils class object.
"""
aws_utils_obj = AwsUtils(assume_role_arn, session_name, region_name)
def teardown():
aws_utils_obj.destroy()
request.addfinalizer(teardown)
return aws_utils_obj
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import boto3
import pytest
import logging
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.CRITICAL)
class AwsUtils:
    """
    Assumes the given IAM role and hands out boto3 clients and resources
    created from the assumed-role session.
    """

    def __init__(self, arn: str, session_name: str, region_name: str):
        default_session = boto3.Session(profile_name='default')
        local_sts = default_session.client('sts')

        self._local_account_id = local_sts.get_caller_identity()["Account"]
        logger.info(f'Local Account Id: {self._local_account_id}')

        # Exchange the local credentials for temporary assumed-role credentials.
        assumed_credentials = local_sts.assume_role(
            RoleArn=arn, RoleSessionName=session_name)['Credentials']
        self._assume_session = boto3.Session(aws_access_key_id=assumed_credentials['AccessKeyId'],
                                             aws_secret_access_key=assumed_credentials['SecretAccessKey'],
                                             aws_session_token=assumed_credentials['SessionToken'],
                                             region_name=region_name)

        assume_account_id = self._assume_session.client('sts').get_caller_identity()["Account"]
        logger.info(f'Assume Account Id: {assume_account_id}')
        self._assume_account_id = assume_account_id

    def client(self, service: str):
        """
        Get the client for a specific AWS service from the configured session.

        :return: Client for the AWS service.
        """
        return self._assume_session.client(service)

    def resource(self, service: str):
        """
        Get the resource for a specific AWS service from the configured session.

        :return: Resource for the AWS service.
        """
        return self._assume_session.resource(service)

    def assume_session(self):
        # The boto3 session built from the assumed-role credentials.
        return self._assume_session

    def local_account_id(self):
        # Account id resolved from the local 'default' profile.
        return self._local_account_id

    def assume_account_id(self):
        # Account id reached through the assumed role.
        return self._assume_account_id

    def destroy(self) -> None:
        """
        Clears the stored session.
        """
        self._assume_session = None
@pytest.fixture(scope='function')
def aws_utils(
        request: pytest.fixture,
        assume_role_arn: str,
        session_name: str,
        region_name: str):
    """
    Fixture providing AWS util functions backed by an assumed-role session.

    :param request: _pytest.fixtures.SubRequest class that handles getting
        a pytest fixture from a pytest function/fixture.
    :param assume_role_arn: Role used to fetch temporary aws credentials, configure service clients with obtained credentials.
    :param session_name: Session name to set.
    :param region_name: AWS account region to set for session.
    :return: AWSUtils class object.
    """
    utils = AwsUtils(assume_role_arn, session_name, region_name)

    def release_session():
        # Drop the assumed-role session at the end of the test.
        utils.destroy()

    request.addfinalizer(release_session)

    return utils

@ -0,0 +1,91 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from enum import Enum
import botocore.client
import botocore.waiter
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
class WaitState(Enum):
    """Terminal states an acceptor can signal to a custom waiter."""
    # Values are the 'state' strings placed into the botocore waiter model.
    SUCCESS = 'success'
    FAILURE = 'failure'
class CustomWaiter:
    """
    Base class for a custom waiter.
    Modified from:
    https://docs.aws.amazon.com/code-samples/latest/catalog/python-demo_tools-custom_waiter.py.html
    """

    def __init__(
            self, name: str, operation: str, argument: str,
            acceptors: dict, client: botocore.client, delay: int = 30, max_tries: int = 10,
            matcher='path'):
        """
        Subclasses should pass specific operations, arguments, and acceptors to
        their superclass.

        :param name: The name of the waiter. This can be any descriptive string.
        :param operation: The operation to wait for. This must match the casing of
                          the underlying operation model, which is typically in
                          CamelCase.
        :param argument: The dict keys used to access the result of the operation, in
                         dot notation. For example, 'Job.Status' will access
                         result['Job']['Status'].
        :param acceptors: The list of acceptors that indicate the wait is over. These
                          can indicate either success or failure. The acceptor values
                          are compared to the result of the operation after the
                          argument keys are applied.
        :param client: The Boto3 client.
        :param delay: The number of seconds to wait between each call to the operation. Default to 30 seconds.
        :param max_tries: The maximum number of tries before exiting. Default to 10.
        :param matcher: The kind of matcher to use. Default to 'path'.
        """
        self.name = name
        self.operation = operation
        self.argument = argument
        self.client = client
        # Build a botocore waiter model (schema version 2) with a single
        # waiter entry and one acceptor per (expected value, state) pair.
        self.waiter_model = botocore.waiter.WaiterModel({
            'version': 2,
            'waiters': {
                name: {
                    "delay": delay,
                    "operation": operation,
                    "maxAttempts": max_tries,
                    "acceptors": [{
                        "state": state.value,
                        "matcher": matcher,
                        "argument": argument,
                        "expected": expected
                    } for expected, state in acceptors.items()]
                }}})
        self.waiter = botocore.waiter.create_waiter_with_client(
            self.name, self.waiter_model, self.client)
        # Total polling budget in seconds: one delay per attempt.
        self._timeout = delay * max_tries

    def _wait(self, **kwargs):
        """
        Starts the botocore wait loop.

        :param kwargs: Keyword arguments that are passed to the operation being polled.
        """
        self.waiter.wait(**kwargs)

    @property
    def timeout(self):
        # Maximum number of seconds the waiter can poll before giving up.
        return self._timeout

@ -31,13 +31,13 @@ class TestPythonAssetProcessing(object):
unexpected_lines = []
expected_lines = [
'Mock asset exists',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_positive_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_negative_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_positive_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_negative_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_positive_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_negative_1.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_center_1.azmodel) found'
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_positive.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_negative.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_positive.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_negative.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_positive.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_negative.azmodel) found',
'Expected subId for asset (gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_center.azmodel) found'
]
timeout = 180
halt_on_unexpected = False

@ -38,16 +38,16 @@ def test_azmodel_product(generatedModelAssetPath, expectedSubId):
assetId = azlmbr.asset.AssetCatalogRequestBus(azlmbr.bus.Broadcast, 'GetAssetIdByPath', generatedModelAssetPath, azModelAssetType, False)
assetIdString = assetId.to_string()
if (assetIdString.endswith(':' + expectedSubId) is False):
raise_and_stop(f'Asset has unexpected asset ID ({assetIdString}) for ({generatedModelAssetPath})!')
raise_and_stop(f'Asset at path {generatedModelAssetPath} has unexpected asset ID ({assetIdString}) for ({generatedModelAssetPath}), expected {expectedSubId}!')
else:
print(f'Expected subId for asset ({generatedModelAssetPath}) found')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_positive_1.azmodel', '10315ae0')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_negative_1.azmodel', '10661093')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_positive_1.azmodel', '10af8810')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_negative_1.azmodel', '10f8c263')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_positive_1.azmodel', '100ac47f')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_negative_1.azmodel', '105d8e0c')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_center_1.azmodel', '1002d464')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_positive.azmodel', '1024be55')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_z_negative.azmodel', '1052c94e')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_positive.azmodel', '10130556')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_y_negative.azmodel', '1065724d')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_positive.azmodel', '10d16e68')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_x_negative.azmodel', '10a71973')
test_azmodel_product('gem/pythontests/pythonassetbuilder/geom_group_fbx_cube_100cm_center.azmodel', '10412075')
azlmbr.editor.EditorToolsApplicationRequestBus(azlmbr.bus.Broadcast, 'ExitNoPrompt')

@ -34,10 +34,10 @@ def ap_all_platforms_setup_fixture(request, workspace, ap_setup_fixture) -> Dict
# Specific platform cache locations
resources["pc_cache_location"] = os.path.join(cache_dir, "pc")
resources["es3_cache_location"] = os.path.join(cache_dir, "es3")
resources["android_cache_location"] = os.path.join(cache_dir, "android")
resources["ios_cache_location"] = os.path.join(cache_dir, "ios")
resources["osx_gl_cache_location"] = os.path.join(cache_dir, "osx_gl")
resources["mac_cache_location"] = os.path.join(cache_dir, "mac")
resources["provo_cache_location"] = os.path.join(cache_dir, "provo")
resources["all_platforms"] = ["pc", "es3", "ios", "osx_gl", "provo"]
resources["all_platforms"] = ["pc", "android", "ios", "mac", "provo"]
return resources

@ -54,7 +54,7 @@ def bundler_batch_setup_fixture(request, workspace, asset_processor, timeout) ->
platforms = [platform.strip() for platform in platforms.split(",")]
else:
# No commandline argument provided, default to mac and pc
platforms = ["pc", "osx_gl"]
platforms = ["pc", "mac"]
class BundlerBatchFixture:
"""
@ -162,7 +162,7 @@ def bundler_batch_setup_fixture(request, workspace, asset_processor, timeout) ->
else:
cmd.append(f"--{key}")
if append_defaults:
cmd.append(f"--project={workspace.project}")
cmd.append(f"--project-path={workspace.project}")
return cmd
# ******
@ -241,11 +241,11 @@ def bundler_batch_setup_fixture(request, workspace, asset_processor, timeout) ->
def get_platform_flag(self, platform_name: str) -> int:
if (platform_name == "pc"):
return 1
elif (platform_name == "es3"):
elif (platform_name == "android"):
return 2
elif (platform_name == "ios"):
return 4
elif (platform_name == "osx_gl"):
elif (platform_name == "mac"):
return 8
elif (platform_name == "server"):
return 128
@ -300,9 +300,9 @@ def bundler_batch_setup_fixture(request, workspace, asset_processor, timeout) ->
workspace.paths.engine_root(),
"Code",
"Framework",
"AzFramework",
"AzFramework",
"Platform",
"AzCore",
"AzCore",
"PlatformId",
"PlatformDefaults.h",
)
@ -318,7 +318,7 @@ def bundler_batch_setup_fixture(request, workspace, asset_processor, timeout) ->
if start_gathering:
result = get_platform.match(line) # Try the regex
if result:
platform_values[result.group(1).lower()] = counter
platform_values[result.group(1).replace("_ID", "").lower()] = counter
counter = counter << 1
elif "(Invalid, -1)" in line: # The line right before the first platform
start_gathering = True

@ -128,16 +128,5 @@ if(PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_HOST_TOOLS)
RUNTIME_DEPENDENCIES
AZ::AssetProcessorBatch
)
# Need performance improvements LYN-1218
# ly_add_pytest(
# NAME AssetPipelineTests.AssetRelocator
# PATH ${CMAKE_CURRENT_LIST_DIR}/asset_relocator_tests.py
# EXCLUDE_TEST_RUN_TARGET_FROM_IDE
# TEST_SUITE periodic
# TEST_SERIAL
# RUNTIME_DEPENDENCIES
# AZ::AssetProcessorBatch
# )
endif()

@ -64,6 +64,15 @@ class TestsAssetBuilder_WindowsAndMac(object):
):
"""
Verifying -debug parameter for AssetBuilder
Test Steps:
1. Create temporary workspace
2. Launch Asset Processor GUI
3. Add test assets to workspace
4. Run Asset Builder with debug on an intact slice
5. Check Asset Builder didn't fail to build
6. Run Asset Builder with debug on a corrupted slice
7. Verify corrupted slice produced an error
"""
env = ap_setup_fixture
intact_slice_failed = False

@ -80,6 +80,8 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
def test_WindowsAndMac_RunHelpCmd_ZeroExitCode(self, workspace, bundler_batch_helper):
"""
Simple calls to all AssetBundlerBatch --help commands to make sure zero exit codes are returned.
Test will call each Asset Bundler Batch sub-command with help and will error on a non-0 exit code
"""
bundler_batch_helper.call_bundlerbatch(help="")
bundler_batch_helper.call_seeds(help="")
@ -98,6 +100,12 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
r"""
Tests that an asset list created maps dependencies correctly.
testdependencieslevel\level.pak and lists of known dependencies are used for validation
Test Steps:
1. Create an asset list from the level.pak
2. Create Lists of expected assets in the level.pak
3. Add lists of expected assets to a single list
4. Compare list of expected assets to actual assets
"""
helper = bundler_batch_helper
@ -300,9 +308,18 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Validates destructive overwriting for asset lists and
that generating debug information does not affect asset list creation
1. Create an asset list from seed_list
2. Validate asset list was created
3. Read and store contents of asset list into memory
4. Attempt to create a new asset list in without using --allowOverwrites
5. Verify that Asset Bundler returns false
6. Verify that file contents of the originally created asset list did not change from what was stored in memory
7. Attempt to create a new asset list without debug while allowing overwrites
8. Verify that file contents of the originally created asset list changed from what was stored in memory
"""
helper = bundler_batch_helper
seed_list = os.path.join(workspace.paths.engine_root(), "Engine", "SeedAssetList.seed") # Engine seed list
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
asset = r"levels\testdependencieslevel\level.pak"
# Create Asset list
@ -375,9 +392,17 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Validates bundle creation both through the 'bundles' and 'bundlesettings'
subcommands.
Test Steps:
1. Create an asset list
2. Create a bundle with the asset list and without a bundle settings file
3. Create a bundle with the asset list and a bundle settings file
4. Validate calling bundle doesn't perform destructive overwrite without --allowOverwrites
5. Calling bundle again with --allowOverwrites performs destructive overwrite
6. Validate contents of original bundle and overwritten bundle
"""
helper = bundler_batch_helper
seed_list = os.path.join(workspace.paths.engine_root(), "Engine", "SeedAssetList.seed") # Engine seed list
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
asset = r"levels\testdependencieslevel\level.pak"
# Useful bundle locations / names (2 for comparing contents)
@ -457,15 +482,25 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Creates bundles using the same asset list and compares that they are created equally. Also
validates that platform bundles exclude/include an expected file. (excluded for WIN, included for MAC)
Test Steps:
1. Create an asset list
2. Create bundles for both PC & Mac
3. Validate that bundles were created
4. Verify that expected missing file is not in windows bundle
5. Verify that expected file is in the mac bundle
6. Create duplicate bundles with allowOverwrites
7. Verify that files were generated
8. Verify original bundle checksums are equal to new bundle checksums
"""
helper = bundler_batch_helper
# fmt:off
assert "pc" in helper["platforms"] and "osx_gl" in helper["platforms"], \
assert "pc" in helper["platforms"] and "mac" in helper["platforms"], \
"This test requires both PC and MAC platforms to be enabled. " \
"Please rerun with commandline option: '--bundle_platforms=pc,osx_gl'"
"Please rerun with commandline option: '--bundle_platforms=pc,mac'"
# fmt:on
seed_list = os.path.join(workspace.paths.engine_root(), "Engine", "SeedAssetList.seed") # Engine seed list
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
# Useful bundle / asset list locations
bundle_dir = os.path.dirname(helper["bundle_file"])
@ -502,21 +537,21 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
for bundle_file in bundle_files.values():
assert os.path.isfile(bundle_file)
# This asset is created on osx_gl platform but not on windows
file_to_check = b"engineassets/shading/defaultprobe_cm.dds.5" # [use byte str because file is in binary]
# This asset is created both on mac and windows platform
file_to_check = b"engineassets/shading/defaultprobe_cm_ibldiffuse.tif.streamingimage" # [use byte str because file is in binary]
# Extract the delta catalog file from pc archive. {file_to_check} SHOULD NOT be present for PC
file_contents = helper.extract_file_content(bundle_files["pc"], "DeltaCatalog.xml")
# fmt:off
assert file_to_check not in file_contents, \
assert file_to_check in file_contents, \
f"{file_to_check} was found in DeltaCatalog.xml in pc bundle file {bundle_files['pc']}"
# fmt:on
# Extract the delta catalog file from osx_gl archive. {file_to_check} SHOULD be present for MAC
file_contents = helper.extract_file_content(bundle_files["osx_gl"], "DeltaCatalog.xml")
# Extract the delta catalog file from mac archive. {file_to_check} SHOULD be present for MAC
file_contents = helper.extract_file_content(bundle_files["mac"], "DeltaCatalog.xml")
# fmt:off
assert file_to_check in file_contents, \
f"{file_to_check} was not found in DeltaCatalog.xml in darwin bundle file {bundle_files['osx_gl']}"
f"{file_to_check} was not found in DeltaCatalog.xml in darwin bundle file {bundle_files['mac']}"
# fmt:on
# Gather checksums for first set of bundles
@ -571,6 +606,24 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Validates that the 'seeds' subcommand can add and remove seeds and seed platforms properly.
Also checks that destructive overwrites require the --allowOverwrites flag
Test Steps:
1. Create a PC Seed List from a test asset
2. Validate that seed list was generated with proper platform flag
3. Add Mac & PC as platforms to the seed list
4. Verify that seed has both Mac & PC platform flags
5. Remove Mac as a platform from the seed list
6. Verify that seed only has PC as a platform flag
7. Attempt to add a platform without using the --platform argument
8. Verify that asset bundler returns False and file contents did not change
9. Add Mac platform via --addPlatformToSeeds
10. Validate that seed has both Mac & PC platform flags
11. Attempt to remove platform without specifying a platform
12. Validate that seed has both Mac & PC platform flags
13. Validate that seed list contents did not change
14. Remove seed
15. Validate that seed was removed from the seed list
"""
helper = bundler_batch_helper
@ -613,20 +666,20 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addSeed=test_asset,
platform="pc,osx_gl",
platform="pc,mac",
)
# Validate both mac and pc are activated for seed
# fmt:off
check_seed_platform(helper["seed_list_file"], test_asset,
helper["platform_values"]["pc"] + helper["platform_values"]["osx"])
helper["platform_values"]["pc"] + helper["platform_values"]["mac"])
# fmt:on
# Remove MAC platform
helper.call_seeds(
seedListFile=helper["seed_list_file"],
removePlatformFromSeeds="",
platform="osx_gl",
platform="mac",
)
# Validate only pc platform for seed. Save file contents to variable
all_lines = check_seed_platform(helper["seed_list_file"], test_asset, helper["platform_values"]["pc"])
@ -646,12 +699,12 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addPlatformToSeeds="",
platform="osx_gl",
platform="mac",
)
# Validate Mac platform was added back on. Save file contents
# fmt:off
all_lines = check_seed_platform(helper["seed_list_file"], test_asset,
helper["platform_values"]["pc"] + helper["platform_values"]["osx"])
helper["platform_values"]["pc"] + helper["platform_values"]["mac"])
# fmt:on
# Try to remove platform without specifying a platform to remove (should fail)
@ -670,7 +723,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
helper.call_seeds(
seedListFile=helper["seed_list_file"],
removeSeed=test_asset,
platform="pc,osx_gl",
platform="pc,mac",
)
# Validate seed was removed from file
@ -692,14 +745,20 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Tests asset list comparison, both by file and by comparison type. Uses a set
of controlled test assets to compare resulting output asset lists
1. Create comparison rules files
2. Create seed files for different sets of test assets
3. Create assetlist files for seed files
4. Validate assetlists were created properly
5. Compare using comparison rules files and just command line arguments
"""
helper = bundler_batch_helper
env = ap_setup_fixture
# fmt:off
assert "pc" in helper["platforms"] and "osx_gl" in helper["platforms"], \
assert "pc" in helper["platforms"] and "mac" in helper["platforms"], \
"This test requires both PC and MAC platforms to be enabled. " \
"Please rerun with commandline option: '--bundle_platforms=pc,osx_gl'"
"Please rerun with commandline option: '--bundle_platforms=pc,mac'"
# fmt:on
# Test assets arranged in common lists: six (0-5) .txt files and .dat files
@ -717,16 +776,16 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
file_platforms = {
"txtfile_0.txt": "pc",
"txtfile_1.txt": "pc",
"txtfile_2.txt": "pc,osx_gl",
"txtfile_3.txt": "pc,osx_gl",
"txtfile_4.txt": "osx_gl",
"txtfile_5.txt": "osx_gl",
"txtfile_2.txt": "pc,mac",
"txtfile_3.txt": "pc,mac",
"txtfile_4.txt": "mac",
"txtfile_5.txt": "mac",
"datfile_0.dat": "pc",
"datfile_1.dat": "pc",
"datfile_2.dat": "pc,osx_gl",
"datfile_3.dat": "pc,osx_gl",
"datfile_4.dat": "osx_gl",
"datfile_5.dat": "osx_gl",
"datfile_2.dat": "pc,mac",
"datfile_3.dat": "pc,mac",
"datfile_4.dat": "mac",
"datfile_5.dat": "mac",
}
# Comparison rules files and their associated 'comparisonType' flags
@ -741,7 +800,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
# Get our test assets ready and processed
utils.prepare_test_assets(env["tests_dir"], "C16877178", env["project_test_assets_dir"])
asset_processor.batch_process(timeout=timeout, fastscan=False, platforms="pc,osx_gl")
asset_processor.batch_process(timeout=timeout, fastscan=False, platforms="pc,mac")
# *** Some helper functions *** #
@ -759,7 +818,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
helper.call_assetLists(
assetListFile=os.path.join(helper["test_dir"], asset_list_file_name),
seedListFile=os.path.join(helper["test_dir"], seed_file_name),
platform="pc,osx_gl",
platform="pc,mac",
)
def get_platform_assets(asset_name_list: List[str]) -> Dict[str, List[str]]:
@ -769,7 +828,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
for asset_name in asset_name_list:
if "pc" in file_platforms[asset_name]:
win_assets.append(asset_name)
if "osx_gl" in file_platforms[asset_name]:
if "mac" in file_platforms[asset_name]:
mac_assets.append(asset_name)
return {"win": win_assets, "mac": mac_assets}
@ -798,7 +857,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
# Get platform result file names
win_asset_list_file = helper.platform_file_name(request_file, platforms["pc"])
mac_asset_list_file = helper.platform_file_name(request_file, platforms["osx_gl"])
mac_asset_list_file = helper.platform_file_name(request_file, platforms["mac"])
# Get expected platforms for each asset in asset_names
platform_files = get_platform_assets(asset_names)
@ -879,14 +938,14 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
# fmt:on
# End verify_asset_list_contents()
def run_compare_command_and_verify(platform_arg: str, expect_pc_output: bool, expect_osx_gl_output: bool) -> None:
def run_compare_command_and_verify(platform_arg: str, expect_pc_output: bool, expect_mac_output: bool) -> None:
# Expected asset list to equal result of comparison
expected_pc_asset_list = None
expected_osx_gl_asset_list = None
expected_mac_asset_list = None
# Last output file. Use this for comparison to 'expected'
output_pc_asset_list = None
output_osx_gl_asset_list = None
output_mac_asset_list = None
# Add the platform to the file name to match what the Bundler will create
last_output_arg = output_arg.split(",")[-1]
@ -895,10 +954,10 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
expected_pc_asset_list = os.path.join(helper["test_dir"], helper.platform_file_name(expected_asset_list, platform))
output_pc_asset_list = helper.platform_file_name(last_output_arg, platform)
if expect_osx_gl_output:
platform = platforms["osx_gl"]
expected_osx_gl_asset_list = os.path.join(helper["test_dir"], helper.platform_file_name(expected_asset_list, platform))
output_osx_gl_asset_list = helper.platform_file_name(last_output_arg, platform)
if expect_mac_output:
platform = platforms["mac"]
expected_mac_asset_list = os.path.join(helper["test_dir"], helper.platform_file_name(expected_asset_list, platform))
output_mac_asset_list = helper.platform_file_name(last_output_arg, platform)
# Build execution command
cmd = generate_compare_command(platform_arg)
@ -911,15 +970,15 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
verify_asset_list_contents(expected_pc_asset_list, output_pc_asset_list)
fs.delete([output_pc_asset_list], True, True)
if expect_osx_gl_output:
verify_asset_list_contents(expected_osx_gl_asset_list, output_osx_gl_asset_list)
fs.delete([output_osx_gl_asset_list], True, True)
if expect_mac_output:
verify_asset_list_contents(expected_mac_asset_list, output_mac_asset_list)
fs.delete([output_mac_asset_list], True, True)
# End run_compare_command_and_verify()
# Generate command, run and validate for each platform
run_compare_command_and_verify("pc", True, False)
run_compare_command_and_verify("osx_gl", False, True)
run_compare_command_and_verify("pc,osx_gl", True, True)
run_compare_command_and_verify("mac", False, True)
run_compare_command_and_verify("pc,mac", True, True)
#run_compare_command_and_verify(None, True, True)
# End compare_and_check()
@ -1021,6 +1080,16 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Tests that assetlists are created equivalent to the output while being created, and
makes sure overwriting an existing file without the --allowOverwrites fails
Test Steps:
1. Check that Asset List creation requires PC platform flag
2. Create a PC Asset List using asset info file and default seed lists using --print
3. Validate all assets output are present in the asset list
4. Create a seed file
5. Attempt to overwrite Asset List without using --allowOverwrites
6. Validate that command returned an error and file contents did not change
7. Specifying platform but not "add" or "remove" should fail
8. Verify file has changed
"""
helper = bundler_batch_helper
@ -1046,7 +1115,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
"--addDefaultSeedListFiles",
"--platform=pc",
"--print",
f"--project={workspace.project}"
f"--project-path={workspace.project}"
],
universal_newlines=True,
)
@ -1102,7 +1171,16 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
def test_WindowsAndMac_AP_BundleProcessing_BundleProcessedAtRuntime(self, workspace, bundler_batch_helper,
asset_processor, request):
# fmt:on
"""Test to make sure the AP GUI will process a newly created bundle file"""
"""
Test to make sure the AP GUI will process a newly created bundle file
Test Steps:
1. Make asset list file (used for bundle creation)
2. Start Asset Processor GUI
3. Make bundle in <project_folder>/Bundles
4. Validate file was created in Bundles folder
5. Make sure bundle now exists in cache
"""
# Set up helpers and variables
helper = bundler_batch_helper
@ -1115,7 +1193,7 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
bundle_result_path = os.path.join(bundles_folder,
helper.platform_file_name("bundle.pak", workspace.asset_processor_platform))
bundle_cache_path = os.path.join(workspace.paths.platform_cache(), workspace.project,
bundle_cache_path = os.path.join(workspace.paths.platform_cache(),
"Bundles",
helper.platform_file_name("bundle.pak", workspace.asset_processor_platform))
@ -1131,6 +1209,8 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
addSeed=level_pak,
assetListFile=helper["asset_info_file_request"],
)
# Run Asset Processor GUI
result, _ = asset_processor.gui_process()
assert result, "AP GUI failed"
@ -1155,14 +1235,22 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_FilesMarkedSkip_FilesAreSkipped(self, workspace, bundler_batch_helper):
"""
Test Steps:
1. Create an asset list with a file marked as skip
2. Verify file was created
3. Verify that only the expected assets are present in the created asset list
"""
expected_assets = [
"libs/particles/milestone2particles.xml",
"textures/milestone2/particles/fx_sparkstreak_01.dds"
"ui/canvases/lyshineexamples/animation/multiplesequences.uicanvas",
"ui/textures/prefab/button_normal.sprite"
]
bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
addSeed="libs/particles/milestone2particles.xml",
skip="textures/milestone2/particles/fx_launchermuzzlering_01.dds,textures/milestone2/particles/fx_launchermuzzlefront_01.dds"
addSeed="ui/canvases/lyshineexamples/animation/multiplesequences.uicanvas",
skip="ui/textures/prefab/button_disabled.sprite,ui/scripts/lyshineexamples/animation/multiplesequences.luac,"
"ui/textures/prefab/tooltip_sliced.sprite,ui/scripts/lyshineexamples/unloadthiscanvasbutton.luac,fonts/vera.fontfamily,fonts/vera-italic.font,"
"fonts/vera.font,fonts/vera-bold.font,fonts/vera-bold-italic.font,fonts/vera-italic.ttf,fonts/vera.ttf,fonts/vera-bold.ttf,fonts/vera-bold-italic.ttf"
)
assert os.path.isfile(bundler_batch_helper["asset_info_file_result"])
assets_in_list = []
@ -1176,6 +1264,12 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
# fmt:off
def test_WindowsAndMac_AssetListSkipOneOfTwoParents_SharedDependencyIsIncluded(self, workspace,
bundler_batch_helper):
"""
Test Steps:
1. Create Asset List with a parent asset that is skipped
2. Verify that Asset List was created
3. Verify that only the expected assets are present in the asset list
"""
expected_assets = [
"testassets/bundlerskiptest_grandparent.dynamicslice",
"testassets/bundlerskiptest_parenta.dynamicslice",
@ -1204,6 +1298,13 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_AssetLists_SkipRoot_ExcludesAll(self, workspace, bundler_batch_helper):
"""
Negative scenario test that skips the same file being used as the parent seed.
Test Steps:
1. Create an asset list that skips the root asset
2. Verify that asset list was not generated
"""
result, _ = bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
@ -1220,6 +1321,13 @@ class TestsAssetBundlerBatch_WindowsAndMac(object):
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_AssetLists_SkipUniversalWildcard_ExcludesAll(self, workspace, bundler_batch_helper):
"""
Negative scenario test that uses the all wildcard when generating an asset list.
Test Steps:
1. Create an Asset List while using the universal all wildcard "*"
2. Verify that asset list was not generated
"""
result, _ = bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],

@ -67,7 +67,19 @@ class TestsAssetProcessorBatch_DependenycyTests(object):
libs/materialeffects/surfacetypes.xml is listed as an entry in engine_dependencies.xml
libs/materialeffects/surfacetypes.xml is not listed as a missing dependency
in the 'assetprocessorbatch' console output
Test Steps:
1. Assets are pre-processed
2. Verify that engine_dependencies.xml exists
3. Verify engine_dependencies.xml has surfacetypes.xml present
4. Run Missing Dependency scanner against the engine_dependencies.xml
5. Verify that surfacetypes.xml is NOT in the missing dependencies output
6. Add the schema file which allows our xml parser to understand dependencies for our engine_dependencies file
7. Process assets
8. Run Missing Dependency scanner against the engine_dependencies.xml
9. Verify that surfacetypes.xml is in the missing dependencies output
"""
env = ap_setup_fixture
BATCH_LOG_PATH = env["ap_batch_log_file"]
asset_processor.create_temp_asset_root()
@ -137,6 +149,11 @@ class TestsAssetProcessorBatch_DependenycyTests(object):
def test_WindowsMacPlatforms_BatchCheckSchema_ValidateErrorChecking(self, workspace, asset_processor,
ap_setup_fixture, folder, schema):
# fmt:on
"""
Test Steps:
1. Run the Missing Dependency Scanner against everything
2. Verify that there are no missing dependencies.
"""
env = ap_setup_fixture
def missing_dependency_log_lines(log) -> [str]:

@ -60,6 +60,15 @@ class TestsAssetProcessorBatch_DependenycyTests(object):
Verify that Schemas can be loaded via Gems utilizing the fonts schema
:returns: None
Test Steps:
1. Run Missing Dependency Scanner against %fonts%.xml when no fonts are present
2. Verify fonts are scanned
3. Verify that missing dependencies are found for fonts
4. Add fonts to game project
5. Run Missing Dependency Scanner against %fonts%.xml when fonts are present
6. Verify that same amount of fonts are scanned
7. Verify that there are no missing dependencies.
"""
schema_name = "Font.xmlschema"
asset_processor.create_temp_asset_root()

@ -100,9 +100,17 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_RunAPBatch_TwoPlatforms_ExitCodeZero(self, asset_processor):
"""
Tests Process assets for PC & Mac and verifies that processing exited without error
Test Steps:
1. Add Mac and PC as enabled platforms
2. Process Assets
3. Validate that AP exited cleanly
"""
asset_processor.create_temp_asset_root()
asset_processor.enable_asset_processor_platform("pc")
asset_processor.enable_asset_processor_platform("osx_gl")
asset_processor.enable_asset_processor_platform("mac")
result, _ = asset_processor.batch_process()
assert result, "AP Batch failed"
@ -111,6 +119,14 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id('C1571826')
def test_RunAPBatch_OnlyIncludeInvalidAssets_NoAssetsAdded(self, asset_processor, ap_setup_fixture):
"""
Tests processing invalid assets and validating that no assets were moved to the cache
Test Steps:
1. Create a test environment with invalid assets
2. Run asset processor
3. Validate that no assets were found in the cache
"""
asset_processor.prepare_test_environment(ap_setup_fixture["tests_dir"], "test_ProcessAssets_OnlyIncludeInvalidAssets_NoAssetsAdded")
result, _ = asset_processor.batch_process()
@ -127,6 +143,16 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
"recognized as failing in the logs. There appears to be a window where the AutoFailJob doesn't complete"
"before the shutdown completes and the failure doesn't end up counting")
def test_ProcessAssets_IncludeTwoAssetsWithSameProduct_FailingOnSecondAsset(self, asset_processor, ap_setup_fixture):
"""
Tests processing two source assets with the same product file and validates that the second source will error
Test Steps:
1. Create a test environment that has two source files with the same product
2. Run asset processor
3. Validate that 1 asset failed to process
4. Validate that only one product file with the expected name is found in the cache
"""
asset_processor.prepare_test_environment(ap_setup_fixture["tests_dir"], "test_ProcessAssets_IncludeTwoAssetsWithSameProduct_FailingOnSecondAsset")
result, output = asset_processor.batch_process(capture_output = True, expect_failure = True)
@ -143,6 +169,17 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id('C1587615')
def test_ProcessAndDeleteCache_APBatchShouldReprocess(self, asset_processor, ap_setup_fixture):
"""
Tests processing once, deleting the generated cache, then processing again and validates the cache is created
Test Steps:
1. Run asset processor
2. Compare the cache with expected output
3. Delete Cache
4. Compare the cache with expected output to verify that cache is gone
5. Run asset processor with fastscan disabled
6. Compare the cache with expected output
"""
# Deleting assets from Cache will make them re-processed in AP (after start)
# Copying test assets to project folder and deleting them from cache to make sure APBatch will process them
@ -174,6 +211,18 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id('C1591564')
def test_ProcessAndChangeSource_APBatchShouldReprocess(self, asset_processor, ap_setup_fixture):
"""
Tests reprocessing of a modified asset and verifies that it was reprocessed
Test Steps:
1. Prepare test environment and copy test asset over
2. Run asset processor
3. Verify asset processed
4. Verify asset is in cache
5. Modify asset
6. Re-run asset processor
7. Verify asset was processed
"""
# AP Batch Processing changed files (after start)
# Copying test assets to project folder and deleting them from cache to make sure APBatch will process them
@ -208,6 +257,18 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_ProcessByBothApAndBatch_Md5ShouldMatch(self, asset_processor, ap_setup_fixture):
"""
Tests that a cache generated by AP GUI is the same as AP Batch
Test Steps:
1. Create test environment with test assets
2. Call asset processor batch
3. Get checksum for file cache
4. Clean up test environment
5. Call asset processor gui with quitonidle
6. Get checksum for file cache
7. Verify that checksums are equal
"""
# AP Batch and AP app processed assets MD5 sums should be the same
# Copying test assets to project folder and deleting them from cache to make sure APBatch will process them
@ -240,6 +301,16 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id('C1612446')
def test_AddSameAssetsDifferentNames_ShouldProcess(self, asset_processor, ap_setup_fixture):
"""
Tests Asset Processing of duplicate assets with different names and verifies that both assets are processed
Test Steps:
1. Create test environment with two identical source assets with different names
2. Run asset processor
3. Verify that assets didn't fail to process
4. Verify the correct number of jobs were performed
5. Verify that product files are in the cache
"""
# Feed two similar slices and texture with different names - should process without any issues
# Copying test assets to project folder and deleting them from cache to make sure APBatch will process them
@ -277,6 +348,19 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
"recognized as failing in the logs. There appears to be a window where the AutoFailJob doesn't complete"
"before the shutdown completes and the failure doesn't end up counting")
def test_AddTwoTexturesWithSameName_ShouldProcessAfterRename(self, asset_processor, ap_setup_fixture):
"""
Tests processing of two textures with the same name then verifies that AP will successfully process after
renaming one of the textures
Test Steps:
1. Create test environment with two textures that have the same name
2. Launch Asset Processor
3. Validate that Asset Processor generates an error
4. Rename texture files
5. Run asset processor
6. Verify that asset processor does not error
7. Verify that expected product files are in the cache
"""
# Feed two different textures with same name (but different extensions) - ap will fail
# Rename one of textures and failure should go away
@ -312,6 +396,15 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_InvalidServerAddress_Warning_Logs(self, asset_processor):
"""
Tests running Asset Processor with an invalid server address and verifies that AP returns a warning about
an invalid server address
Test Steps:
1. Launch asset processor while providing an invalid server address
2. Verify asset processor does not fail
3. Verify that asset processor generated a warning informing the user about an invalid server address
"""
asset_processor.create_temp_asset_root()
# Launching AP and making sure that the warning exists
@ -327,6 +420,12 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
def test_AllSupportedPlatforms_IncludeValidAssets_AssetsProcessed(self, asset_processor, ap_setup_fixture):
"""
AssetProcessorBatch is successfully processing newly added assets
Test Steps:
1. Create a test environment with test assets
2. Launch Asset Processor
3. Verify that asset processor does not fail to process
4. Verify assets are not missing from the cache
"""
env = ap_setup_fixture
@ -350,6 +449,14 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
def test_AllSupportedPlatforms_DeletedAssets_DeletedFromCache(self, asset_processor, ap_setup_fixture):
"""
AssetProcessor successfully deletes cached items when removed from project
Test Steps:
1. Create a test environment with test assets
2. Run asset processor
3. Verify expected assets are in the cache
4. Delete test assets
5. Run asset processor
6. Verify expected assets are in the cache
"""
env = ap_setup_fixture
@ -385,6 +492,10 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
"""
Tests that when cache is deleted (no cache) and AssetProcessorBatch runs,
it successfully starts and processes assets.
Test Steps:
1. Run asset processor
2. Verify asset processor exits cleanly
"""
asset_processor.create_temp_asset_root()
@ -402,6 +513,14 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
# fmt:on
"""
AssetProcessor successfully recovers assets from cache when deleted.
Test Steps:
1. Create test environment with test assets
2. Run Asset Processor and verify it exits cleanly
3. Make sure cache folder was generated
4. Delete temp cache assets but leave database behind
5. Run asset processor and verify it exits cleanly
6. Verify expected files were generated in the cache
"""
env = ap_setup_fixture
@ -434,6 +553,14 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.assetpipeline
# fmt:off
def test_AllSupportedPlatforms_RunFastScanOnEmptyCache_FullScanRuns(self, ap_setup_fixture, asset_processor):
"""
Tests fast scan processing on an empty cache and verifies that a full analysis will be performed
Test Steps:
1. Create a test environment
2. Execute asset processor batch with fast scan enabled
3. Verify that a full analysis is performed
"""
# fmt:on
env = ap_setup_fixture
asset_processor.create_temp_asset_root()
@ -455,6 +582,11 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
"""
After running the APBatch and AP GUI, Logs directory should exist (C1564055),
JobLogs, Batch log, and GUI log should exist in the logs directory (C1564056)
Test Steps:
1. Run asset processor batch
2. Run asset processor gui with quit on idle
3. Verify that logs exist for both AP Batch & AP GUI
"""
asset_processor.create_temp_asset_root()
LOG_PATH = {
@ -536,6 +668,11 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
"""
Utilizing corrupted test assets, run the batch process to verify the
AP logs the failure to process the corrupted file.
Test Steps:
1. Create test environment with corrupted slice
2. Launch Asset Processor
3. Verify that asset processor fails to process corrupted slice
"""
env = ap_setup_fixture
error_line_found = False
@ -552,6 +689,15 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_validateDirectPreloadDependency_Found(self, asset_processor, ap_setup_fixture, workspace):
"""
Tests processing an asset with a circular dependency and verifies that Asset Processor will return an error
notifying the user about a circular dependency.
Test Steps:
1. Create test environment with an asset that has a circular dependency
2. Launch asset processor
3. Verify that error is returned informing the user that the asset has a circular dependency
"""
env = ap_setup_fixture
error_line_found = False
@ -567,6 +713,15 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_validateNestedPreloadDependency_Found(self, asset_processor, ap_setup_fixture, workspace):
"""
Tests processing of a nested circular dependency and verifies that Asset Processor will return an error
notifying the user about a circular dependency
Test Steps:
1. Create test environment with an asset that has a nested circular dependency
2. Launch asset processor
3. Verify that error is returned informing the user that the asset has a circular dependency
"""
env = ap_setup_fixture
error_line_found = False

@ -80,6 +80,15 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
# fmt:on
"""
Tests that fast scan mode can be used and is faster than full scan mode.
Test Steps:
1. Ensure all assets are processed
2. Run Asset Processor without fast scan and measure the time it takes to run
3. Capture Full Analysis was performed and number of assets processed
4. Run Asset Processor with full scan and measure the time it takes to run
5. Capture Full Analysis wasn't performed and number of assets processed
6. Verify that fast scan was faster than full scan
7. Verify that full scan scanned more assets
"""
asset_processor.create_temp_asset_root()
@ -111,76 +120,23 @@ class TestsAssetProcessorBatch_AllPlatforms(object):
assert full_scan_time > fast_scan_time, "Fast scan was slower that full scan"
assert full_scan_analysis[0] > fast_scan_analysis[0], "Full scan did not process more assets than fast scan"
@pytest.mark.test_case_id("C18787404")
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.skip(reason="External project is currently broken.") # LY-119863
def test_AllSupportedPlatforms_ExternalProject_APRuns(self, workspace, ap_external_project_setup_fixture):
external_resources = ap_external_project_setup_fixture
logger.info(f"Running external project test at path {external_resources['project_dir']}")
# Delete existing "external project" build if it exists
if os.path.exists(external_resources["project_dir"]):
fs.delete([external_resources["project_dir"]], True, True)
# fmt:off
assert not os.path.exists(external_resources["project_dir"]), \
f'{external_resources["project_dir"]} was not deleted'
# fmt:on
lmbr_cmd = [
workspace.paths.lmbr(),
"projects",
"create",
external_resources["project_name"],
"--template",
"EmptyTemplate",
"--app-root",
external_resources["project_dir"],
]
logger.info(f"Running lmbr projects create command '{lmbr_cmd}'")
try:
subprocess.check_call(lmbr_cmd)
except subprocess.CalledProcessError as e:
assert False, f"lmbr projects create failed\n{e.stderr}"
logger.info("...lmbr finished")
assert os.path.exists(external_resources["project_dir"]), "Project folder was not created"
# AssetProcessor for new External project. Uses mock workspace to emulate external project workspace
external_ap = AssetProcessor(external_resources["external_workspace"])
# fmt:off
assert external_ap.batch_process(fastscan=False), \
"Asset Processor Batch failed on external project"
# fmt:on
# Parse log looking for errors or failures
log = APLogParser(workspace.paths.ap_batch_log())
failures, errors = log.runs[-1]["Failures"], log.runs[-1]["Errors"]
assert failures == 0, f"There were {failures} asset processing failures"
assert errors == 0, f"There were {errors} asset processing errors"
# Check that project cache was created (DNE until AP makes it)
project_cache = os.path.join(external_resources["project_dir"], "Cache")
assert os.path.exists(project_cache), f"{project_cache} was not created by AP"
# Clean up external project
fs.delete([external_resources["project_dir"]], True, True)
# fmt:off
assert not os.path.exists(external_resources["project_dir"]), \
f"{external_resources['project_dir']} was not deleted"
# fmt:on
@pytest.mark.test_case_id("C4874121")
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.parametrize("clear_type", ["rewrite", "delete_asset", "delete_dir"])
def test_AllSupportedPlatforms_DeleteBadAssets_BatchFailedJobsCleared(
self, workspace, request, ap_setup_fixture, asset_processor, clear_type):
"""
Tests the ability of Asset Processor to recover from processing of bad assets by removing them from scan folder
Test Steps:
1. Create testing environment with good and multiple bad assets
2. Run Asset Processor
3. Verify that bad assets fail to process
4. Fix a bad asset & delete the others
5. Run Asset Processor
6. Verify Asset Processor does not have any asset failures
"""
env = ap_setup_fixture
error_search_terms = ["WWWWWWWWWWWW"]
@ -250,6 +206,14 @@ class TestsAssetProcessorBatch_Windows(object):
Verify the AP batch and Gui can run and process assets independent of the Editor
We do not want or need to kill running Editors here as they can be involved in other tests
or simply being run locally in this branch or another
Test Steps:
1. Create temporary testing environment
2. Run asset processor GUI
3. Verify AP GUI doesn't error
4. Stop AP GUI
5. Run Asset Processor Batch with Fast Scan
6. Verify Asset Processor Batch exits cleanly
"""
asset_processor.create_temp_asset_root()
@ -272,6 +236,11 @@ class TestsAssetProcessorBatch_Windows(object):
"""
Request a run for an invalid platform
"AssetProcessor: Error: Platform in config file or command line 'notaplatform'" should be present in the logs
Test Steps:
1. Create temporary testing environment
2. Run Asset Processor with an invalid platform
3. Check that asset processor returns an Error notifying the user that the invalid platform is not supported
"""
asset_processor.create_temp_asset_root()
error_search_terms = 'AssetProcessor: Error: The list of enabled platforms in the settings registry does not contain platform ' \

@ -77,6 +77,13 @@ class TestsAssetProcessorGUI_Windows(object):
def test_SendInputOnControlChannel_ReceivedAndResponded(self, asset_processor):
"""
Test that the control channel connects and that communication works both directions
Test Steps:
1. Start Asset Processor
2. Send a Ping message to Asset Processor
3. Listen for Asset Processor response
4. Verify Asset Processor responds
5. Stop asset Processor
"""
asset_processor.create_temp_asset_root()
@ -129,7 +136,15 @@ class TestsAssetProcessorGUI_Windows(object):
# fmt:on
"""
Asset Processor Deletes processed assets when source is removed from project folder (while running)
Test Steps:
1. Create a temporary test environment
2. Run Asset Processor GUI set to stay open on idle and verify that it does not fail
3. Verify that assets were copied to the cache
4. Delete the source test asset directory
5. Verify assets are deleted from the cache
"""
env = ap_setup_fixture
# Copy test assets to project folder and verify test assets folder exists
@ -170,7 +185,18 @@ class TestsAssetProcessorGUI_Windows(object):
# fmt:on
"""
Processing changed files (while running)
Test Steps:
1. Create temporary test environment with test assets
2. Open Asset Processor GUI with set to stay open after idle and verify it does not fail
3. Verify contents of source asset for later comparison
4. Verify contents of product asset for later comparison
5. Modify contents of source asset
6. Wait for Asset Processor to go back to idle state
7. Verify contents of source asset are the modified version
8. Verify contents of product asset are the modified version
"""
env = ap_setup_fixture
# Copy test assets to project folder and verify test assets folder exists
@ -184,7 +210,7 @@ class TestsAssetProcessorGUI_Windows(object):
result, _ = asset_processor.gui_process(quitonidle=False)
assert result, "AP GUI failed"
# Verify contents of test asset in project folder before modication
# Verify contents of test asset in project folder before modification
with open(project_asset_path, "r") as project_asset_file:
assert project_asset_file.read() == "before_state"
@ -217,7 +243,14 @@ class TestsAssetProcessorGUI_Windows(object):
def test_WindowsPlatforms_RunAP_ProcessesIdle(self, asset_processor):
"""
Asset Processor goes idle
Test Steps:
1. Create a temporary testing environment
2. Run Asset Processor GUI without quitonidle
3. Verify AP Goes Idle
4. Verify AP goes below 1% CPU usage
"""
CPU_USAGE_THRESHOLD = 1.0 # CPU usage percentage delimiting idle from active
CPU_USAGE_WIND_DOWN = 10 # Time allowed in seconds for idle processes to stop using CPU
@ -245,7 +278,16 @@ class TestsAssetProcessorGUI_Windows(object):
):
"""
Processing newly added files to project folder (while running)
Test Steps:
1. Create a temporary testing environment with test assets
2. Create a secondary set of testing assets that have not been copied into the testing environment
3. Start Asset Processor without quitonidle
4. While Asset Processor is running add secondary set of testing assets to the testing environment
5. Wait for Asset Processor to go idle
6. Verify that all assets are in the cache
"""
env = ap_setup_fixture
level_name = "C1564064_level"
new_asset = "C1564064.scriptcanvas"
@ -316,7 +358,14 @@ class TestsAssetProcessorGUI_Windows(object):
def test_WindowsPlatforms_LaunchAP_LogReportsIdle(self, asset_processor, workspace, ap_idle):
"""
Asset Processor creates a log entry when it goes idle
Test Steps:
1. Create temporary testing environment
2. Run Asset Processor batch to pre-process assets
3. Run Asset Processor GUI
4. Check if Asset Processor GUI reports that it has gone idle
"""
asset_processor.create_temp_asset_root()
# Run batch process to ensure project assets are processed
assert asset_processor.batch_process(), "AP Batch failed"
@ -331,6 +380,17 @@ class TestsAssetProcessorGUI_Windows(object):
@pytest.mark.assetpipeline
def test_APStopTimesOut_ExceptionThrown(self, ap_setup_fixture, asset_processor):
"""
Tests whether or not Asset Processor will Time Out
Test Steps:
1. Create a temporary testing environment
2. Start the Asset Processor
3. Copy in assets to the test environment
4. Try to stop the Asset Processor with a timeout of 1 second (This cannot be done manually).
5. Verify that Asset Processor times out and returns the expected error
"""
asset_processor.create_temp_asset_root()
asset_processor.start()
@ -347,9 +407,20 @@ class TestsAssetProcessorGUI_Windows(object):
@pytest.mark.assetpipeline
def test_APStopDefaultTimeout_NoException(self, asset_processor):
# If this test fails, it means other tests using the default timeout may have issues.
# In that case, either the default timeout should either be raised, or the performance
# of AP launching should be improved.
"""
Tests the default timeout of the Asset Processor
If this test fails, it means other tests using the default timeout may have issues.
In that case, either the default timeout should be raised, or the performance
of AP launching should be improved.
Test Steps:
1. Create a temporary testing environment
2. Start the Asset Processor
3. Stop the asset Processor without sending a timeout to it
4. Verify that the asset processor times out and returns the expected error
"""
asset_processor.create_temp_asset_root()
asset_processor.start()
ap_quit_timed_out = False

@ -75,10 +75,17 @@ class TestsAssetProcessorGUI_WindowsAndMac(object):
@pytest.mark.test_case_id("C3540434")
@pytest.mark.BAT
@pytest.mark.assetpipeline
def test_WindowsAndMacPlatforms_AP_GUI_FastScanSettingCreated(self, asset_processor, fast_scan_backup):
def test_WindowsAndMacPlatforms_GUIFastScanNoSettingSet_FastScanSettingCreated(self, asset_processor, fast_scan_backup):
"""
Tests that a fast scan settings entry gets created for the AP if it does not exist
and ensures that the entry is defaulted to fast-scan enabled
Test Steps:
1. Create temporary testing environment
2. Delete existing fast scan setting if exists
3. Run Asset Processor GUI without setting FastScan setting (default:true) and without quitonidle
4. Wait and check to see if Windows Registry fast scan setting is created
5. Verify that Fast Scan setting is set to true
"""
asset_processor.create_temp_asset_root()
@ -119,6 +126,14 @@ class TestsAssetProcessorGUI_WindowsAndMac(object):
Make sure game launcher working with Asset Processor set to turbo mode
Validate that no fatal errors (crashes) are reported within a certain
time frame for the AP and the GameLauncher
Test Steps:
1. Create temporary testing environment
2. Set fast scan to true
3. Verify fast scan is set to true
4. Launch game launcher
5. Verify launcher has launched without error
6. Verify that asset processor has launched
"""
CHECK_ALIVE_SECONDS = 15
@ -166,6 +181,14 @@ class TestsAssetProcessorGUI_AllPlatforms(object):
# fmt:on
"""
Deleting slices and uicanvases while AP is running
Test Steps:
1. Create temporary testing environment with test assets
2. Launch Asset Processor and wait for it to go idle
3. Verify product assets were created in the cache
4. Delete test assets from the cache
5. Wait for Asset Processor to go idle
6. Verify product assets were regenerated in the cache
"""
env = ap_setup_fixture
@ -201,6 +224,15 @@ class TestsAssetProcessorGUI_AllPlatforms(object):
):
"""
Process slice files and uicanvas files from the additional scanfolder
Test Steps:
1. Create temporary testing environment
2. Run asset processor batch
3. Validate that product assets were generated in the cache
4. Create an additional scan folder with assets
5. Create additional scan folder params to pass to Asset Processor
6. Run Asset Processor GUI with QuitOnIdle and pass in params for the additional scan folder settings
7. Verify additional product assets from additional scan folder are present in the cache
"""
env = ap_setup_fixture
# Copy test assets to new folder in dev folder
@ -250,6 +282,12 @@ class TestsAssetProcessorGUI_AllPlatforms(object):
"""
Launch AP with invalid address in bootstrap.cfg
Assets should process regardless of the new address
Test Steps:
1. Create a temporary testing environment
2. Set an invalid ip address in Asset Processor settings file
3. Launch Asset Processor GUI
4. Verify that it processes assets and exits cleanly even though it has an invalid IP.
"""
test_ip_address = "1.1.1.1" # an IP address without Asset Processor
@ -269,6 +307,14 @@ class TestsAssetProcessorGUI_AllPlatforms(object):
def test_AllSupportedPlatforms_ModifyAssetInfo_AssetsReprocessed(self, ap_setup_fixture, asset_processor):
"""
Modifying assetinfo files triggers file reprocessing
Test Steps:
1. Create temporary testing environment with test assets
2. Run Asset Processor GUI
3. Verify that Asset Processor exited cleanly and product assets are in the cache
4. Modify the .assetinfo file by adding a newline
5. Wait for Asset Processor to go idle
6. Verify that product files were regenerated (Time Stamp compare)
"""
env = ap_setup_fixture

@ -85,6 +85,18 @@ class TestsAssetRelocator_WindowsAndMac(object):
def test_WindowsMacPlatforms_RelocatorMoveFileWithConfirm_MoveSuccess(self, request, workspace, asset_processor,
ap_setup_fixture, testId, readonly, confirm,
success):
"""
Tests whether moving a file with the Confirm flag is successful
Test Steps:
1. Create temporary testing environment
2. Set move location
3. Determine if confirm flag is set
4. Attempt to move the files
5. If confirm flag set:
* Validate Move was successful
* Else: Validate move was not successful
"""
env = ap_setup_fixture
copied_asset = ''
@ -141,6 +153,11 @@ class TestsAssetRelocator_WindowsAndMac(object):
User should be warned that LeaveEmptyFolders needs to be used with the move or delete command
:return: None
Test Steps:
1. Create temporary testing environment
2. Attempt to move with --LeaveEmptyFolders set
3. Verify user is given a message that command requires to be used with --move or --delete
"""
env = ap_setup_fixture
expected_message = "Command --leaveEmptyFolders must be used with command --move or --delete"
@ -162,6 +179,11 @@ class TestsAssetRelocator_WindowsAndMac(object):
Asset with UUID/AssetId reference in non-standard format is
successfully scanned and relocated to the MoveOutput folder.
This test uses a pre-corrupted .slice file.
Test Steps:
1. Create temporary testing environment with a corrupted slice
2. Attempt to move the corrupted slice
3. Verify that corrupted slice was moved successfully
"""
env = ap_setup_fixture
@ -194,6 +216,11 @@ class TestsAssetRelocator_WindowsAndMac(object):
def test_WindowsMacPlatforms_UpdateReferences_MoveCommandMessage(self, ap_setup_fixture, asset_processor):
"""
UpdateReferences without move or delete
Test Steps:
1. Create temporary testing environment
2. Attempt to move with UpdateReferences but without move or delete flags
3. Verify that message is returned to the user that additional flags are required
"""
env = ap_setup_fixture
expected_message = "Command --updateReferences must be used with command --move"
@ -215,6 +242,11 @@ class TestsAssetRelocator_WindowsAndMac(object):
"""
When running the relocator command --AllowBrokenDependencies without the move or delete flags, the user should
be warned that the flags are necessary for the functionality to be used
Test Steps:
1. Create temporary testing environment
2. Attempt to move with AllowBrokenDependencies without the move or delete flag
3. Verify that message is returned to the user that additional flags are required
"""
env = ap_setup_fixture
@ -302,10 +334,19 @@ class TestsAssetRelocator_WindowsAndMac(object):
project
):
"""
Dynamic data test for deleting a file with Asset Relocator:
C21968355 Delete a file with confirm
C21968356 Delete a file without confirm
C21968359 Delete a file that is marked as ReadOnly
C21968360 Delete a file that is not marked as ReadOnly
Test Steps:
1. Create temporary testing environment
2. Set the read-only status of the file based on the test case
3. Run asset relocator with --delete and the confirm status based on the test case
4. Assert file existence or nonexistence based on the test case
5. Validate the relocation report based on expected and unexpected messages
"""
env = ap_setup_fixture
test_file = "testFile.txt"
@ -430,6 +471,15 @@ class TestsAssetRelocator_WindowsAndMac(object):
Test the LeaveEmptyFolders flag in various configurations
:returns: None
Test Steps:
1. Create temporary testing environment
2. Build the various move/delete commands here based on test data
3. Run the move command with the various triggers based on test data
4. Verify the original assets folder still exists based on test data
5. Verify the files successfully moved to new location based on test data
6. Verify that the files were removed from original location based on test data
7. Verify the files have not been deleted or moved from original location based on test data
"""
# # Start test setup # #
env = ap_setup_fixture
@ -517,6 +567,12 @@ class TestsAssetRelocator_WindowsAndMac(object):
"""
The test will attempt to move test assets that are not tracked under P4 source control using the EnableSCM flag
Because the files are not tracked by source control, the relocation should fail
Test Steps:
1. Create temporary testing environment
2. Set ReadOnly or Not-ReadOnly for the test files based on test data
3. Generate and run the enableSCM command
4. Verify the move failed and expected messages are present
"""
# Move the test assets into the project folder
env = ap_setup_fixture
@ -1037,6 +1093,13 @@ class TestsAssetRelocator_WindowsAndMac(object):
C21968370 AllowBrokenDependencies with move and confirm
C21968371 AllowBrokenDependencies with move and without confirm
C21968375 AllowBrokenDependencies with delete
Test Steps:
1. Create temporary testing environment
2. Run Asset Processor to Process Assets
3. Build primary AP Batch parameter value and destination paths
4. Validate resulting file paths in source and output directories
5. Validate the log based on expected and unexpected messages
"""
env = ap_setup_fixture
all_test_asset_rel_paths = [
@ -1254,6 +1317,18 @@ class TestsAssetRelocator_WindowsAndMac(object):
@pytest.mark.parametrize("test", tests)
def test_WindowsAndMac_MoveMetadataFiles_PathExistenceAndMessage(self, workspace, request, ap_setup_fixture,
asset_processor, test):
"""
Tests whether metadata files can be moved
Test Steps:
1. Create temporary testing environment
2. Determine if using wildcards on paths or not
3. Determine if excludeMetaDataFiles is set or not
4. Build primary AP Batch parameter value and destination paths
5. Build and run the AP Batch command with parameters
6. Validate resulting file paths in source and output directories
7. Validate the log based on expected and unexpected messages
"""
env = ap_setup_fixture
def teardown():
@ -1342,7 +1417,7 @@ class TestsAssetRelocator_WindowsAndMac(object):
@dataclass
class MoveTest:
description: str # test case title directly copied from Testrail
description: str # test case title
asset_folder: str # which folder in ./assets will be used for this test
encoded_command: str # the command to execute
encoded_output_dir: str # the destination directory to validate
@ -1350,7 +1425,7 @@ class MoveTest:
name_change_map: dict = None
files_that_stay: List[str] = field(default_factory=lambda: [])
output_messages: List[str] = field(default_factory=lambda: [])
step: str = None # the step of the test from Testrail
step: str = None # the step of the test from test repository
prefix_commands: List[str] = field(default_factory=lambda: ["AssetProcessorBatch", "--zeroAnalysisMode"])
suffix_commands: List[str] = field(default_factory=lambda: ["--confirm"])
env: dict = field(init=False, default=None) # inject the ap_setup_fixture at runtime
@ -3718,7 +3793,18 @@ class TestsAssetProcessorMove_WindowsAndMac:
# -k C19462747
@pytest.mark.parametrize("test", move_a_file_tests + move_a_folder_tests)
def test_WindowsMacPlatforms_MoveCommand(self, asset_processor, ap_setup_fixture, test: MoveTest, project):
def test_WindowsMacPlatforms_MoveCommand_CommandResult(self, asset_processor, ap_setup_fixture, test: MoveTest, project):
"""
Test Steps:
1. Create temporary testing environment based on test data
2. Validate that temporary testing environment was created successfully
3. Execute the move command based upon the test data
4. Validate that files are where they're expected according to the test data
5. Validate unexpected files are not found according to the test data
6. Validate output messages according to the test data
7. Validate move status according to the test data
"""
source_folder, _ = asset_processor.prepare_test_environment(ap_setup_fixture["tests_dir"], test.asset_folder)
test.map_env(ap_setup_fixture, source_folder)

@ -75,6 +75,15 @@ class TestsMissingDependencies_WindowsAndMac(object):
def do_missing_dependency_test(self, source_product, expected_dependencies,
dsp_param,
platforms=None, max_iterations=0):
"""
Test Steps:
1. Determine what platforms to run against
2. Process assets for that platform
3. Determine the missing dependency params to set
4. Set the max iteration param
5. Run missing dependency scanner against target platforms and search params based on test data
6. Validate missing dependencies against test data
"""
platforms = platforms or ASSET_PROCESSOR_PLATFORM_MAP[self._workspace.asset_processor_platform]
if not isinstance(platforms, list):
@ -104,7 +113,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_ValidUUIDNotDependency_ReportsMissingDependency(self):
"""Tests that a valid UUID referenced in a file will report any missing dependencies"""
"""
Tests that a valid UUID referenced in a file will report any missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with missing dependencies
expected_product = f"testassets\\validuuidsnotdependency.txt"
@ -141,7 +157,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_InvalidUUIDsNotDependencies_NoReportedMessage(self):
"""Tests that invalid UUIDs do not count as missing dependencies"""
"""
Tests that invalid UUIDs do not count as missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with invalid UUIDs
expected_product = f"testassets\\invaliduuidnoreport.txt"
expected_dependencies = [] # No expected missing dependencies
@ -153,7 +176,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_ValidAssetIdsNotDependencies_ReportsMissingDependency(self):
"""Tests that valid asset IDs but not dependencies, show missing dependencies"""
"""
Tests that valid asset IDs but not dependencies, show missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with valid asset ids but not dependencies
expected_product = f"testassets\\validassetidnotdependency.txt"
@ -173,7 +203,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_InvalidAssetsIDNotDependencies_NoReportedMessage(self):
"""Tests that invalid asset IDs do not count as missing dependencies"""
"""
Tests that invalid asset IDs do not count as missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with invalid asset IDs
expected_product = f"testassets\\invalidassetidnoreport.txt"
@ -188,7 +225,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
# fmt:off
def test_WindowsAndMac_ValidSourcePathsNotDependencies_ReportsMissingDependencies(self):
# fmt:on
"""Tests that valid source paths can translate to missing dependencies"""
"""
Tests that valid source paths can translate to missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with missing dependencies as source paths
expected_product = f"testassets\\relativesourcepathsnotdependencies.txt"
@ -212,7 +256,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_InvalidARelativePathsNotDependencies_NoReportedMessage(self):
"""Tests that invalid relative paths do not resolve to missing dependencies"""
"""
Tests that invalid relative paths do not resolve to missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to the txt file with invalid relative paths
expected_product = f"testassets\\invalidrelativepathsnoreport.txt"
@ -227,7 +278,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
# fmt:off
def test_WindowsAndMac_ValidProductPathsNotDependencies_ReportsMissingDependencies(self):
# fmt:on
"""Tests that valid product paths can resolve to missing dependencies"""
"""
Tests that valid product paths can resolve to missing dependencies
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
self._asset_processor.add_source_folder_assets(f"Gems\\LyShineExamples\\Assets\\UI\\Fonts\\LyShineExamples")
self._asset_processor.add_scan_folder(f"Gems\\LyShineExamples\\Assets")
@ -260,7 +318,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_WildcardScan_FindsAllExpectedFiles(self):
"""Tests that the wildcard scanning will pick up multiple files"""
"""
Tests that the wildcard scanning will pick up multiple files
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
helper = self._missing_dep_helper
@ -291,6 +356,11 @@ class TestsMissingDependencies_WindowsAndMac(object):
For these references that are valid, all but one have available, matching dependencies. This test is
primarily meant to verify that the missing dependency reporter checks the product dependency table before
emitting missing dependencies.
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to target test file
expected_product = f"testassets\\reportonemissingdependency.txt"
@ -305,7 +375,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_ReferencesSelfPath_NoReportedMessage(self):
"""Tests that a file that references itself via relative path does not report itself as a missing dependency"""
"""
Tests that a file that references itself via relative path does not report itself as a missing dependency
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to file that references itself via relative path
expected_product = f"testassets\\selfreferencepath.txt"
expected_dependencies = []
@ -317,7 +394,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_ReferencesSelfUUID_NoReportedMessage(self):
"""Tests that a file that references itself via its UUID does not report itself as a missing dependency"""
"""
Tests that a file that references itself via its UUID does not report itself as a missing dependency
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to file that references itself via its UUID
expected_product = f"testassets\\selfreferenceuuid.txt"
@ -330,7 +414,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C17226567")
def test_WindowsAndMac_ReferencesSelfAssetID_NoReportedMessage(self):
"""Tests that a file that references itself via its Asset ID does not report itself as a missing dependency"""
"""
Tests that a file that references itself via its Asset ID does not report itself as a missing dependency
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to file that references itself via its Asset ID
expected_product = f"testassets\\selfreferenceassetid.txt"
@ -347,6 +438,11 @@ class TestsMissingDependencies_WindowsAndMac(object):
Tests that the scan limit fails to find a missing dependency that is out of reach.
The max iteration count is set to just under where a valid missing dependency is on a line in the file,
so this will not report any missing dependencies.
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to file that has a missing dependency at 31 iterations deep
@ -364,7 +460,13 @@ class TestsMissingDependencies_WindowsAndMac(object):
Tests that the scan limit succeeds in finding a missing dependency that is barely in reach.
In the previous test, the scanner was set to stop recursion just before a missing dependency was found.
This test runs with the recursion limit set deep enough to actually find the missing dependency.
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to file that has a missing dependency at 31 iterations deep
expected_product = f"testassets\\maxiteration31deep.txt"
@ -383,7 +485,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
# fmt:off
def test_WindowsAndMac_PotentialMatchesLongerThanUUIDString_OnlyReportsCorrectLengthUUIDs(self):
# fmt:on
"""Tests that dependency references that are longer than expected are ignored"""
"""
Tests that dependency references that are longer than expected are ignored
Test Steps:
1. Set the expected product
2. Set the expected missing dependencies
3. Execute test
"""
# Relative path to text file with varying length UUID references
expected_product = f"testassets\\onlymatchescorrectlengthuuids.txt"
@ -408,7 +517,14 @@ class TestsMissingDependencies_WindowsAndMac(object):
def test_WindowsAndMac_MissingDependencyScanner_GradImageSuccess(
self, ap_setup_fixture
):
"""Tests the Missing Dependency Scanner can scan gradimage files"""
"""
Tests the Missing Dependency Scanner can scan gradimage files
Test Steps:
1. Create temporary testing environment
2. Run the missing dependency scanner against the gradimage
3. Validate that the expected product files and expected dependencies match
"""
env = ap_setup_fixture
helper = self._missing_dep_helper

@ -51,6 +51,11 @@ class TestAuxiliaryContent:
def test_CreateAuxiliaryContent_DontSkipLevelPaks(self, workspace, level):
"""
This test ensures that the Auxiliary Content contains level.pak files
Test Steps:
1. Run auxiliary content against project under test
2. Validate auxiliary content exists
3. Verifies that level.pak exists
"""
path_to_dev = workspace.paths.engine_root()
@ -70,6 +75,11 @@ class TestAuxiliaryContent:
def test_CreateAuxiliaryContent_SkipLevelPaks(self, workspace, level):
"""
This test ensures that the Auxiliary Content contains no level.pak files
Test Steps:
1. Run auxiliary content against project under test with skiplevelPaks flag
2. Validate auxiliary content exists
3. Validate level.pak was not added to auxiliary content
"""
path_to_dev = workspace.paths.engine_root()

@ -533,6 +533,14 @@ class TestsFBX_AllPlatforms(object):
def test_FBXBlackboxTest_SourceFiles_Processed_ResultInExpectedProducts(self, workspace,
ap_setup_fixture, asset_processor, project,
blackbox_param):
"""
Please see run_fbx_test(...) for details
Test Steps:
1. Determine if blackbox is set to none
2. Run FBX Test
"""
if blackbox_param == None:
return
self.run_fbx_test(workspace, ap_setup_fixture,
@ -544,6 +552,15 @@ class TestsFBX_AllPlatforms(object):
workspace, ap_setup_fixture,
asset_processor, project,
blackbox_param):
"""
Please see run_fbx_test(...) for details
Test Steps:
1. Determine if blackbox is set to none
2. Run FBX Test
3. Re-run FBX test and validate the information in override assets
"""
if blackbox_param == None:
return
self.run_fbx_test(workspace, ap_setup_fixture,
@ -567,6 +584,19 @@ class TestsFBX_AllPlatforms(object):
def run_fbx_test(self, workspace, ap_setup_fixture, asset_processor,
project, blackbox_params: BlackboxAssetTest, overrideAsset = False):
"""
These tests work by having the test case ingest the test data and determine the run pattern.
Tests will process scene settings files and will additionally do a verification against a provided debug file
Additionally, if an override is passed, the output is checked against the override.
Test Steps:
1. Create temporary test environment
2. Process Assets
3. Determine what assets to validate based upon test data
4. Validate assets were created in cache
5. If debug file provided, verify scene files were generated correctly
6. Verify that each given source asset resulted in the expected jobs and products
"""
test_assets_folder = blackbox_params.override_asset_folder if overrideAsset else blackbox_params.asset_folder
logger.info(f"{blackbox_params.test_name}: Processing assets in folder '"

@ -26,6 +26,18 @@ def soundbank_metadata_generator_setup_fixture(workspace):
def success_case_test(test_folder, expected_dependencies_dict, bank_info, expected_result_code=0):
"""
Test Steps:
1. Make sure the return code is what was expected, and that the expected number of banks were returned.
2. Validate bank is in the expected dependencies dictionary.
3. Validate the path to output the metadata file to was assembled correctly.
4. Validate metadata object for this bank is set, and that it has an object assigned to its dependencies field
and its includedEvents field
5. Validate metadata object has the correct number of dependencies, and validated that every expected dependency
exists in the dependencies list of the metadata object.
6. Validate metadata object has the correct number of events, and validate that every expected event exists in the
events of the metadata object.
"""
expected_bank_count = len(expected_dependencies_dict)
banks, result_code = bank_info.generate_metadata(
@ -80,8 +92,17 @@ class TestSoundBankMetadataGenerator:
def test_NoMetadataTooFewBanks_ReturnCodeIsError(self, workspace, soundbank_metadata_generator_setup_fixture):
# Trying to generate metadata for banks in a folder with one or fewer banks and no metadata is not possible
# and should fail.
"""
Trying to generate metadata for banks in a folder with one or fewer banks and no metadata is not possible
and should fail.
Test Steps:
1. Setup testing environment with only 1 bank file
2. Get Sound Bank Info
3. Attempt to generate sound bank metadata
4. Verify that proper error code is returned
"""
#
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_NoMetadataTooFewBanks_ReturnCodeIsError')
if not os.path.isdir(test_assets_folder):
@ -97,15 +118,30 @@ class TestSoundBankMetadataGenerator:
assert error_code is 2, 'Metadata was generated when there were fewer than two banks in the target directory.'
def test_NoMetadataNoContentBank_NoMetadataGenerated(self, workspace, soundbank_metadata_generator_setup_fixture):
"""
Test Steps:
1. Setup testing environment
2. No expected dependencies
3. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_NoMetadataNoContentBank_NoMetadataGenerated')
expected_dependencies = dict()
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_NoMetadataOneContentBank_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture):
# When no Wwise metadata is present, and there is only one content bank in the target directory with no wem
# files, then only the content bank should have metadata associated with it. The generated metadata should
# only describe a dependency on the init bank.
"""
When no Wwise metadata is present, and there is only one content bank in the target directory with no wem
files, then only the content bank should have metadata associated with it. The generated metadata should
only describe a dependency on the init bank.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_NoMetadataOneContentBank_NoStreamedFiles_OneDependency')
@ -116,9 +152,18 @@ class TestSoundBankMetadataGenerator:
def test_NoMetadataOneContentBank_StreamedFiles_MultipleDependencies(self, workspace,
soundbank_metadata_generator_setup_fixture):
# When no Wwise metadata is present, and there is only one content bank in the target directory with wem files
# present, then only the content bank should have metadata associated with it. The generated metadata should
# describe a dependency on the init bank and all wem files in the folder.
"""
When no Wwise metadata is present, and there is only one content bank in the target directory with wem files
present, then only the content bank should have metadata associated with it. The generated metadata should
describe a dependency on the init bank and all wem files in the folder.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_NoMetadataOneContentBank_StreamedFiles_MultipleDependencies')
@ -136,10 +181,19 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_NoMetadataMultipleBanks_OneDependency_ReturnCodeIsWarning(self, workspace, soundbank_metadata_generator_setup_fixture):
# When no Wwise metadata is present, and there are multiple content banks in the target directory with wem files
# present, there is no way to tell which bank requires which wem files. A warning should be emitted,
# stating that the full dependency graph could not be created, and only dependencies on the init bank are
# described in the generated metadata files.
"""
When no Wwise metadata is present, and there are multiple content banks in the target directory with wem files
present, there is no way to tell which bank requires which wem files. A warning should be emitted,
stating that the full dependency graph could not be created, and only dependencies on the init bank are
described in the generated metadata files.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_NoMetadataMultipleBanks_OneDependency_ReturnCodeIsWarning')
bank_info = get_bank_info(workspace)
@ -150,8 +204,17 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace), expected_result_code=1)
def test_OneContentBank_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes one content bank that contains all media needed by its events. Generated metadata
# describes a dependency only on the init bank.
"""
Wwise metadata describes one content bank that contains all media needed by its events. Generated metadata
describes a dependency only on the init bank.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_OneContentBank_NoStreamedFiles_OneDependency')
@ -165,8 +228,17 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_OneContentBank_StreamedFiles_MultipleDependencies(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes one content bank that references streamed media files needed by its events. Generated
# metadata describes dependencies on the init bank and wems named by the IDs of referenced streamed media.
"""
Wwise metadata describes one content bank that references streamed media files needed by its events. Generated
metadata describes dependencies on the init bank and wems named by the IDs of referenced streamed media.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_OneContentBank_StreamedFiles_MultipleDependencies')
@ -187,8 +259,17 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Each bank contains all media needed by its events. Generated
# metadata describes each bank having a dependency only on the init bank.
"""
Wwise metadata describes multiple content banks. Each bank contains all media needed by its events. Generated
metadata describes each bank having a dependency only on the init bank.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_NoStreamedFiles_OneDependency')
@ -206,8 +287,17 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_Bank1StreamedFiles(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Bank 1 references streamed media files needed by its events,
# while bank 2 contains all media need by its events.
"""
Wwise metadata describes multiple content banks. Bank 1 references streamed media files needed by its events,
while bank 2 contains all media needed by its events.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_Bank1StreamedFiles')
@ -228,9 +318,18 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_SplitBanks_OnlyBankDependenices(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Bank 3 events require media that is contained in bank 4.
# Generated metadata describes each bank having a dependency on the init bank, while bank 3 has an additional
# dependency on bank 4.
"""
Wwise metadata describes multiple content banks. Bank 3 events require media that is contained in bank 4.
Generated metadata describes each bank having a dependency on the init bank, while bank 3 has an additional
dependency on bank 4.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_SplitBanks_OnlyBankDependenices')
@ -248,9 +347,18 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_ReferencedEvent_MediaEmbeddedInBank(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Bank 1 contains all media required by its events, while bank
# 5 contains a reference to an event in bank 1, but no media for that event. Generated metadata describes both
# banks having a dependency on the init bank, while bank 5 has an additional dependency on bank 1.
"""
Wwise metadata describes multiple content banks. Bank 1 contains all media required by its events, while bank
5 contains a reference to an event in bank 1, but no media for that event. Generated metadata describes both
banks having a dependency on the init bank, while bank 5 has an additional dependency on bank 1.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_ReferencedEvent_MediaEmbeddedInBank')
@ -271,10 +379,19 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_ReferencedEvent_MediaStreamed(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Bank 1 references streamed media files needed by its events,
# while bank 5 contains a reference to an event in bank 1. This causes bank 5 to also describe a reference to
# the streamed media file referenced by the event from bank 1. Generated metadata describes both banks having
# dependencies on the init bank, as well as the wem named by the ID of referenced streamed media.
"""
Wwise metadata describes multiple content banks. Bank 1 references streamed media files needed by its events,
while bank 5 contains a reference to an event in bank 1. This causes bank 5 to also describe a reference to
the streamed media file referenced by the event from bank 1. Generated metadata describes both banks having
dependencies on the init bank, as well as the wem named by the ID of referenced streamed media.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_ReferencedEvent_MediaStreamed')
@ -298,11 +415,20 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_ReferencedEvent_MixedSources(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks. Bank 1 references a streamed media files needed by one of its
# events, and contains all media needed for its other events, while bank 5 contains a reference to two events
# in bank 1: one that requires streamed media, and one that requires media embedded in bank 1. Generated
# metadata describes both banks having dependencies on the init bank and the wem named by the ID of referenced
# streamed media, while bank 5 has an additional dependency on bank 1.
"""
Wwise metadata describes multiple content banks. Bank 1 references a streamed media file needed by one of its
events, and contains all media needed for its other events, while bank 5 contains a reference to two events
in bank 1: one that requires streamed media, and one that requires media embedded in bank 1. Generated
metadata describes both banks having dependencies on the init bank and the wem named by the ID of referenced
streamed media, while bank 5 has an additional dependency on bank 1.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_ReferencedEvent_MixedSources')
@ -332,8 +458,17 @@ class TestSoundBankMetadataGenerator:
success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace))
def test_MultipleContentBanks_VaryingDependencies_MixedSources(self, workspace, soundbank_metadata_generator_setup_fixture):
# Wwise metadata describes multiple content banks that have varying dependencies on each other, and dependencies
# on streamed media files.
"""
Wwise metadata describes multiple content banks that have varying dependencies on each other, and dependencies
on streamed media files.
Test Steps:
1. Setup testing environment
2. Get current bank info
3. Build expected dependencies
4. Call success case test
"""
test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets',
'test_MultipleContentBanks_VaryingDependencies_MixedSources')

@ -39,4 +39,19 @@ if(PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_HOST_TOOLS AND PAL_TRAIT_
COMPONENT
Editor
)
ly_add_pytest(
NAME AutomatedTesting::EditorTests_Sandbox
TEST_SUITE sandbox
TEST_SERIAL
PATH ${CMAKE_CURRENT_LIST_DIR}
PYTEST_MARKS "SUITE_sandbox"
TIMEOUT 1500
RUNTIME_DEPENDENCIES
Legacy::Editor
AZ::AssetProcessor
AutomatedTesting.Assets
COMPONENT
Editor
)
endif()

@ -39,7 +39,7 @@ class TestDocking(object):
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.test_case_id("C6376081")
@pytest.mark.SUITE_periodic
@pytest.mark.SUITE_sandbox
def test_Docking_BasicDockedTools(self, request, editor, level, launcher_platform):
expected_lines = [
"The tools are all docked together in a tabbed widget",

@ -39,7 +39,7 @@ class TestMenus(object):
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.test_case_id("C16780783", "C2174438")
@pytest.mark.SUITE_periodic
@pytest.mark.SUITE_sandbox
def test_Menus_EditMenuOptions_Work(self, request, editor, level, launcher_platform):
expected_lines = [
"Undo Action triggered",
@ -113,7 +113,7 @@ class TestMenus(object):
)
@pytest.mark.test_case_id("C16780778")
@pytest.mark.SUITE_periodic
@pytest.mark.SUITE_sandbox
def test_Menus_FileMenuOptions_Work(self, request, editor, level, launcher_platform):
expected_lines = [
"New Level Action triggered",

@ -23,6 +23,25 @@ class TestSurfaceMaskFilter_BasicSurfaceTagCreation(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="TestSurfaceMaskFilter_BasicSurfaceTagCreation", args=["level"])
def run_test(self):
"""
Summary:
Verifies basic surface tag value equality
Expected Behavior:
Surface tags of the same name are equal, and different names aren't.
Test Steps:
1) Open level
2) Create 2 new surface tags of identical names and verify they resolve as equal.
3) Create another new tag of a different name and verify they resolve as different.
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
self.log("SurfaceTag test started")
# Create a level

@ -33,6 +33,25 @@ class TestVegetationInstances_DespawnWhenOutOfRange(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix='VegetationInstances_DespawnWhenOutOfRange', args=['level'])
def run_test(self):
"""
Summary:
Verifies that vegetation instances properly spawn/despawn based on camera range.
Expected Behavior:
Vegetation instances despawn when out of camera range.
Test Steps:
1) Create a new level
2) Create a simple vegetation area, and set the view position near the spawner. Verify instances plant.
3) Move the view position away from the spawner. Verify instances despawn.
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new level
self.test_success = self.create_level(

@ -28,8 +28,21 @@ class TestGradientGeneratorIncompatibilities(EditorTestHelper):
def run_test(self):
"""
Summary:
Verify that Entities are not active when a Gradient Generator and incompatible component are both present
on the same Entity.
This test verifies that components are disabled when conflicting components are present on the same entity.
Expected Behavior:
Gradient Generator components are incompatible with Vegetation area components.
Test Steps:
1) Create a new level
2) Create a new entity in the level
3) Add each Gradient Generator component to an entity, and add a Vegetation Area component to the same entity
4) Verify that components are only enabled when entity is free of a conflicting component
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -28,8 +28,21 @@ class TestGradientModifiersIncompatibilities(EditorTestHelper):
def run_test(self):
"""
Summary:
Verify that Entities are not active when a Gradient Modifier and incompatible component are both present
on the same Entity.
This test verifies that components are disabled when conflicting components are present on the same entity.
Expected Behavior:
Gradient Modifier components are incompatible with Vegetation area components.
Test Steps:
1) Create a new level
2) Create a new entity in the level
3) Add each Gradient Modifier component to an entity, and add a Vegetation Area component to the same entity
4) Verify that components are only enabled when entity is free of a conflicting component
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -9,19 +9,6 @@ remove or modify any license notices. This file is distributed on an "AS IS" BAS
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
"""
The below cases are combined in this script
C2676829
C3961326
C3980659
C3980664
C3980669
C3416548
C2676823
C3961321
C2676826
"""
import os
import sys

@ -44,7 +44,21 @@ class TestGradientPreviewSettings(EditorTestHelper):
def run_test(self):
"""
Summary:
Verify if the current entity is set to the pin preview to shape entity by default for several components.
This test verifies default values for the pinned entity for Gradient Preview settings.
Expected Behavior:
Pinned entity is self for all gradient generator/modifiers.
Test Steps:
1) Create a new level
2) Create a new entity in the level
3) Add each Gradient Generator component to an entity, and verify the Pin Preview to Shape property is set to
self
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -31,11 +31,21 @@ class TestGradientSurfaceTagEmitterDependencies(EditorTestHelper):
def run_test(self):
"""
Summary:
Component has a dependency on a Gradient component
This test verifies that the Gradient Surface Tag Emitter component is dependent on a gradient component.
Expected Result:
Component is disabled until a Gradient Generator, Modifier or Gradient Reference component
(and any sub-dependencies) is added to the entity.
Gradient Surface Tag Emitter component is disabled until a Gradient Generator, Modifier or Gradient Reference
component (and any sub-dependencies) is added to the entity.
Test Steps:
1) Open level
2) Create a new entity with a Gradient Surface Tag Emitter component
3) Verify the component is disabled until a dependent component is also added to the entity
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -28,8 +28,20 @@ class TestGradientTransformRequiresShape(EditorTestHelper):
def run_test(self):
"""
Summary:
Verify that Gradient Transform Modifier component requires a
Shape component before the Entity can become active.
This test verifies that the Gradient Transform Modifier component is dependent on a shape component.
Expected Result:
Gradient Transform Modifier component is disabled until a shape component is added to the entity.
Test Steps:
1) Open level
2) Create a new entity with a Gradient Transform Modifier component
3) Verify the component is disabled until a shape component is also added to the entity
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -28,8 +28,20 @@ class TestImageGradientRequiresShape(EditorTestHelper):
def run_test(self):
"""
Summary:
Verify that Image Gradient component requires a
Shape component before the Entity can become active.
This test verifies that the Image Gradient component is dependent on a shape component.
Expected Result:
Image Gradient component is disabled until a shape component is added to the entity.
Test Steps:
1) Open level
2) Create a new entity with an Image Gradient component
3) Verify the component is disabled until a shape component is also added to the entity
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -33,6 +33,26 @@ class TestAreaNodeComponentDependency(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="AreaNodeComponentDependency", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities with
proper dependent components.
Expected Behavior:
All expected component dependencies are met when adding an area node to a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the area nodes to the graph area, and ensure the proper dependent components are added
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -33,7 +33,25 @@ class TestGradientNodeEntityCreate(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="AreaNodeEntityCreate", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities.
Expected Behavior:
New entities are created when dragging area nodes to graph area.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the area nodes to the graph area, and ensure a new entity is created
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId
newEntityId = parameters[0]

@ -34,7 +34,26 @@ class TestAreaNodeEntityDelete(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="AreaNodeEntityDelete", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas node deletion properly cleans up entities in the Editor.
Expected Behavior:
Entities are removed when area nodes are deleted from a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the area nodes to the graph area, and ensure a new entity is created
4) Delete the nodes, and ensure the newly created entities are removed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global createdEntityId
createdEntityId = parameters[0]

@ -9,24 +9,6 @@ remove or modify any license notices. This file is distributed on an "AS IS" BAS
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
"""
C22602072 - Graph is updated when underlying components are added/removed
1. Open Level.
2. Find LandscapeCanvas named entity.
3. Ensure Vegetation Distribution Component is present on the BushSpawner entity.
4. Open graph and ensure Distribution Filter wrapped node is present.
5. Delete the Vegetation Distribution Filter component from the BushSpawner entity via Entity Inspector.
6. Ensure the Vegetation Distribution Filter component was deleted from the BushSpawner entity and node is no longer
present in the graph.
7. Add Vegetation Altitude Filter to the BushSpawner entity through Entity Inspector.
8. Ensure Altitude Filter was added to the BushSpawner node in the open graph.
9. Add a new entity with unique name as a child of the Landscape Canvas entity.
10. Add a Box Shape component to the new child entity.
11. Ensure Box Shape node is present on the open graph.
"""
import os
import sys
@ -50,6 +32,36 @@ class TestComponentUpdatesUpdateGraph(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="ComponentUpdatesUpdateGraph", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas graphs update properly when components are added/removed outside of
Landscape Canvas.
Expected Behavior:
Graphs properly reflect component changes made to entities outside of Landscape Canvas.
Test Steps:
1. Open Level
2. Find LandscapeCanvas named entity
3. Ensure Vegetation Distribution Component is present on the BushSpawner entity
4. Open graph and ensure Distribution Filter wrapped node is present
5. Delete the Vegetation Distribution Filter component from the BushSpawner entity via Entity Inspector
6. Ensure the Vegetation Distribution Filter component was deleted from the BushSpawner entity and node is
no longer present in the graph
7. Add Vegetation Altitude Filter to the BushSpawner entity through Entity Inspector
8. Ensure Altitude Filter was added to the BushSpawner node in the open graph
9. Add a new entity with unique name as a child of the Landscape Canvas entity
10. Add a Box Shape component to the new child entity
11. Ensure Box Shape node is present on the open graph
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level and instantiate LC_BushFlowerBlender.slice
self.test_success = self.create_level(
self.args["level"],

@ -37,6 +37,25 @@ class TestCreateNewGraph(EditorTestHelper):
print("New root entity created")
def run_test(self):
"""
Summary:
This test verifies that new graphs can be created in Landscape Canvas.
Expected Behavior:
New graphs can be created, and proper entity is created to hold graph data with a Landscape Canvas component.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Ensures the root entity created contains a Landscape Canvas component
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
self.test_success = self.create_level(
self.args["level"],
heightmap_resolution=128,

@ -33,7 +33,25 @@ class TestDisabledNodeDuplication(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="DisabledNodeDuplication", args=["level"])
def run_test(self):
"""
Summary:
This test verifies Editor stability after duplicating disabled Landscape Canvas nodes.
Expected Behavior:
Editor remains stable and free of crashes.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Create several new nodes, disable the nodes via disabling/deleting components, and duplicate the nodes
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId
newEntityId = parameters[0]

@ -9,17 +9,6 @@ remove or modify any license notices. This file is distributed on an "AS IS" BAS
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
"""
C30813586 - Editor remains stable after Undoing deletion of a node on a slice entity
1. Open level with instantiated slice.
2. Open the graph.
3. Find the BushSpawner's Vegetation Layer Spawner node.
4. Delete the node.
5. Undo to restore the node.
"""
import os
import sys
@ -44,7 +33,26 @@ class TestUndoNodeDeleteSlice(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="UndoNodeDeleteSlice", args=["level"])
def run_test(self):
"""
Summary:
This test verifies Editor stability after undoing the deletion of nodes on a slice entity.
Expected Behavior:
Editor remains stable and free of crashes.
Test Steps:
1) Create a new level
2) Instantiate a slice with a Landscape Canvas setup
3) Find a specific node on the graph, and delete it
4) Restore the node with Undo
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level and instantiate LC_BushFlowerBlender.slice
self.test_success = self.create_level(
self.args["level"],

@ -34,6 +34,27 @@ class TestGradientMixerNodeConstruction(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientMixerNodeConstruction", args=["level"])
def run_test(self):
"""
Summary:
This test verifies a Gradient Mixer vegetation setup can be constructed through Landscape Canvas.
Expected Behavior:
Entities contain all required components and component references after creating nodes and setting connections
on a Landscape Canvas graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Add all necessary nodes to the graph and set connections to form a Gradient Mixer setup
4) Verify all components and component references were properly set during graph construction
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -33,6 +33,25 @@ class TestGradientModifierNodeEntityCreate(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientModifierNodeEntityCreate", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities.
Expected Behavior:
New entities are created when dragging Gradient Modifier nodes to graph area.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the Gradient Modifier nodes to the graph area, and ensure a new entity is created
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -34,7 +34,26 @@ class TestGradientModifierNodeEntityDelete(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientModifierNodeEntityDelete", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas node deletion properly cleans up entities in the Editor.
Expected Behavior:
Entities are removed when Gradient Modifier nodes are deleted from a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the Gradient Modifier nodes to the graph area, and ensure a new entity is created
4) Delete the nodes, and ensure the newly created entities are removed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global createdEntityId
createdEntityId = parameters[0]

@ -33,6 +33,27 @@ class TestGradientNodeComponentDependency(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientNodeComponentDependency", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities with
proper dependent components.
Expected Behavior:
All expected component dependencies are met when adding a Gradient Modifier node to a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the Gradient Modifier nodes to the graph area, and ensure the proper dependent components are
added
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -32,6 +32,25 @@ class TestGradientNodeEntityCreate(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientNodeEntityCreate", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities.
Expected Behavior:
New entities are created when dragging Gradient nodes to graph area.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the Gradient nodes to the graph area, and ensure a new entity is created
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -34,6 +34,26 @@ class TestGradientNodeEntityDelete(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GradientNodeEntityDelete", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas node deletion properly cleans up entities in the Editor.
Expected Behavior:
Entities are removed when Gradient nodes are deleted from a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the Gradient nodes to the graph area, and ensure a new entity is created
4) Delete the nodes, and ensure the newly created entities are removed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global createdEntityId

@ -31,6 +31,26 @@ class TestGraphClosedOnEntityDelete(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GraphClosedOnEntityDelete", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that Landscape Canvas graphs are auto-closed when the corresponding entity is deleted.
Expected Behavior:
When a Landscape Canvas root entity is deleted, the corresponding graph automatically closes.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Delete the automatically created entity
4) Verify the open graph is closed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newRootEntityId

@ -29,7 +29,26 @@ class TestGraphClosedOnLevelChange(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GraphClosedOnLevelChange", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that Landscape Canvas graphs are auto-closed when the currently open level changes.
Expected Behavior:
When a new level is loaded in the Editor, open Landscape Canvas graphs are automatically closed.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Open a different level
4) Verify the open graph is closed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level
self.test_success = self.create_level(
self.args["level"],

@ -29,6 +29,26 @@ class TestGraphClosedTabbedGraph(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GraphClosedTabbedGraph", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that Landscape Canvas tabbed graphs can be independently closed.
Expected Behavior:
Closing a tabbed graph only closes the appropriate graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create several new graphs
3) Close one of the open graphs
4) Ensure the graph properly closed, and other open graphs remain open
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level
self.test_success = self.create_level(

@ -9,21 +9,6 @@ remove or modify any license notices. This file is distributed on an "AS IS" BAS
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
"""
C22715182 - Components are updated when nodes are added/removed/updated
1. Open Level.
2. Open the graph on LC_BushFlowerBlender.slice
3. Find the Rotation Modifier node on the BushSpawner entity
4. Delete the Rotation Modifier node
5. Ensure the Vegetation Rotation Modifier component is removed from the BushSpawner entity
6. Delete the Vegetation Layer Spawner node from the graph
7. Ensure BushSpawner entity is deleted
8. Change connection from second Rotation Modifier node to a different Gradient
9. Ensure Gradient reference on component is updated
"""
import os
import sys
@ -50,6 +35,31 @@ class TestGraphUpdatesUpdateComponents(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="GraphUpdatesUpdateComponents", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that components are properly updated as nodes are added/removed/updated.
Expected Behavior:
Landscape Canvas node CRUD properly updates component entities.
Test Steps:
1. Open Level.
2. Open the graph on LC_BushFlowerBlender.slice
3. Find the Rotation Modifier node on the BushSpawner entity
4. Delete the Rotation Modifier node
5. Ensure the Vegetation Rotation Modifier component is removed from the BushSpawner entity
6. Delete the Vegetation Layer Spawner node from the graph
7. Ensure BushSpawner entity is deleted
8. Change connection from second Rotation Modifier node to a different Gradient
9. Ensure Gradient reference on component is updated
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level and instantiate LC_BushFlowerBlender.slice
self.test_success = self.create_level(
self.args["level"],

@ -30,6 +30,26 @@ class TestLandscapeCanvasComponentAddedRemoved(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="LandscapeCanvasComponentAddedRemoved", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas component can be added to/removed from an entity.
Expected Behavior:
Closing a tabbed graph only closes the appropriate graph.
Test Steps:
1) Create a new level
2) Create a new entity
3) Add a Landscape Canvas component to the entity
4) Remove the Landscape Canvas component from the entity
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Create a new empty level
self.test_success = self.create_level(

@ -30,12 +30,21 @@ class TestLandscapeCanvasSliceCreateInstantiate(EditorTestHelper):
def run_test(self):
"""
Summary:
C22602016 A slice containing the LandscapeCanvas component can be created/instantiated.
A slice containing the LandscapeCanvas component can be created/instantiated.
Expected Result:
Slice is created and processed successfully and free of errors/warnings.
Another copy of the slice is instantiated.
Slice is created/processed/instantiated successfully and free of errors/warnings.
Test Steps:
1) Create a new level
2) Create a new entity with a Landscape Canvas component
3) Create a slice of the new entity
4) Instantiate a new copy of the slice
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""

@ -34,6 +34,27 @@ class TestLayerBlenderNodeConstruction(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="LayerBlenderNodeConstruction", args=["level"])
def run_test(self):
"""
Summary:
This test verifies a Layer Blender vegetation setup can be constructed through Landscape Canvas.
Expected Behavior:
Entities contain all required components and component references after creating nodes and setting connections
on a Landscape Canvas graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Add all necessary nodes to the graph and set connections to form a Layer Blender setup
4) Verify all components and component references were properly set during graph construction
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -34,6 +34,25 @@ class TestLayerExtenderNodeComponentEntitySync(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="LayerExtenderNodeComponentEntitySync", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that all wrapped nodes can be successfully added to/removed from parent nodes.
Expected Behavior:
All wrapped extender nodes can be added to/removed from appropriate parent nodes.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Add Area Blender and Layer Spawner nodes to the graph, and add/remove each extender node to/from each
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -33,6 +33,25 @@ class TestShapeNodeEntityCreate(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="ShapeNodeEntityCreate", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas nodes can be added to a graph, and correctly create entities.
Expected Behavior:
New entities are created when dragging shape nodes to graph area.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the shape nodes to the graph area, and ensure a new entity is created
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global newEntityId

@ -34,7 +34,27 @@ class TestShapeNodeEntityDelete(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="ShapeNodeEntityDelete", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas node deletion properly cleans up entities in the Editor.
Expected Behavior:
Entities are removed when shape nodes are deleted from a graph.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Drag each of the shape nodes to the graph area, and ensure a new entity is created
4) Delete the nodes, and ensure the newly created entities are removed
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def onEntityCreated(parameters):
global createdEntityId
createdEntityId = parameters[0]

@ -33,6 +33,27 @@ class TestSlotConnectionsUpdateComponents(EditorTestHelper):
EditorTestHelper.__init__(self, log_prefix="SlotConnectionsUpdateComponents", args=["level"])
def run_test(self):
"""
Summary:
This test verifies that the Landscape Canvas slot connections properly update component references.
Expected Behavior:
A reference created through slot connections in Landscape Canvas is reflected in the Entity Inspector.
Test Steps:
1) Create a new level
2) Open Landscape Canvas and create a new graph
3) Several nodes are added to a graph, and connections are set between the nodes
4) Component references are verified via Entity Inspector
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Retrieve the proper component TypeIds per component name
componentNames = [
'Random Noise Gradient',

@ -42,6 +42,7 @@ class TestAutomation(TestAutomationBase):
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
@fm.file_override('physxsystemconfiguration.setreg','C4044459_Material_DynamicFriction.setreg_override', 'AutomatedTesting/Registry')
def test_C4044459_Material_DynamicFriction(self, request, workspace, editor, launcher_platform):
from . import C4044459_Material_DynamicFriction as test_module
self._run_test(request, workspace, editor, test_module)

@ -0,0 +1,131 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
# fmt: off
class Tests():
new_event_created = ("Successfully created a new event", "Failed to create a new event")
child_event_created = ("Successfully created Child Event", "Failed to create Child Event")
file_saved = ("Successfully saved event asset", "Failed to save event asset")
parameter_created = ("Successfully added parameter", "Failed to add parameter")
parameter_removed = ("Successfully removed parameter", "Failed to remove parameter")
# fmt: on
def ScriptEvent_AddRemoveParameter_ActionsSuccessful():
"""
Summary:
Parameter can be removed from a Script Event method
Expected Behavior:
Upon saving the updated .scriptevents asset the removed paramenter should no longer be present on the Script Event
Test Steps:
1) Open Asset Editor
2) Get Asset Editor Qt object
3) Create new Script Event Asset
4) Add Parameter to Event
5) Remove Parameter from Event
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
import os
from PySide2 import QtWidgets
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import editor_python_test_tools.pyside_utils as pyside_utils
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.legacy.general as general
GENERAL_WAIT = 1.0 # seconds
FILE_PATH = os.path.join("AutomatedTesting", "ScriptCanvas", "test_file.scriptevent")
QtObject = object
def create_script_event(asset_editor: QtObject, file_path: str) -> None:
action = pyside_utils.find_child_by_pattern(menu_bar, {"type": QtWidgets.QAction, "text": "Script Events"})
action.trigger()
result = helper.wait_for_condition(
lambda: container.findChild(QtWidgets.QFrame, "Events") is not None, 3 * GENERAL_WAIT
)
Report.result(Tests.new_event_created, result)
# Add new child event
add_event = container.findChild(QtWidgets.QFrame, "Events").findChild(QtWidgets.QToolButton, "")
add_event.click()
result = helper.wait_for_condition(
lambda: asset_editor.findChild(QtWidgets.QFrame, "EventName") is not None, GENERAL_WAIT
)
Report.result(Tests.child_event_created, result)
# Save the Script Event file
editor.AssetEditorWidgetRequestsBus(bus.Broadcast, "SaveAssetAs", file_path)
# Verify if file is created
result = helper.wait_for_condition(lambda: os.path.exists(file_path), 3 * GENERAL_WAIT)
Report.result(Tests.file_saved, result)
def create_parameter(file_path: str) -> None:
add_param = container.findChild(QtWidgets.QFrame, "Parameters").findChild(QtWidgets.QToolButton, "")
add_param.click()
result = helper.wait_for_condition(
lambda: asset_editor_widget.findChild(QtWidgets.QFrame, "[0]") is not None, GENERAL_WAIT
)
Report.result(Tests.parameter_created, result)
editor.AssetEditorWidgetRequestsBus(bus.Broadcast, "SaveAssetAs", file_path)
def remove_parameter(file_path: str) -> None:
remove_param = container.findChild(QtWidgets.QFrame, "[0]").findChild(QtWidgets.QToolButton, "")
remove_param.click()
result = helper.wait_for_condition(
lambda: asset_editor_widget.findChild(QtWidgets.QFrame, "[0]") is None, GENERAL_WAIT
)
Report.result(Tests.parameter_removed, result)
editor.AssetEditorWidgetRequestsBus(bus.Broadcast, "SaveAssetAs", file_path)
# 1) Open Asset Editor
general.idle_enable(True)
# Initially close the Asset Editor and then reopen to ensure we don't have any existing assets open
general.close_pane("Asset Editor")
general.open_pane("Asset Editor")
helper.wait_for_condition(lambda: general.is_pane_visible("Asset Editor"), 5.0)
# 2) Get Asset Editor Qt object
editor_window = pyside_utils.get_editor_main_window()
asset_editor_widget = editor_window.findChild(QtWidgets.QDockWidget, "Asset Editor").findChild(
QtWidgets.QWidget, "AssetEditorWindowClass"
)
container = asset_editor_widget.findChild(QtWidgets.QWidget, "ContainerForRows")
menu_bar = asset_editor_widget.findChild(QtWidgets.QMenuBar)
# 3) Create new Script Event Asset
create_script_event(asset_editor_widget, FILE_PATH)
# 4) Add Parameter to Event
create_parameter(FILE_PATH)
# 5) Remove Parameter from Event
remove_parameter(FILE_PATH)
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
Report.start_test(ScriptEvent_AddRemoveParameter_ActionsSuccessful)

@ -0,0 +1,210 @@
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
# fmt: off
class Tests():
new_event_created = ("New Script Event created", "New Script Event not created")
child_event_created = ("Child Event created", "Child Event not created")
params_added = ("New parameters added", "New parameters are not added")
file_saved = ("Script event file saved", "Script event file did not save")
node_found = ("Node found in Script Canvas", "Node not found in Script Canvas")
# fmt: on
def ScriptEvents_AllParamDatatypes_CreationSuccess():
"""
Summary:
Parameters of all types can be created.
Expected Behavior:
The Method handles the large number of Parameters gracefully.
Parameters of all data types can be successfully created.
Updated ScriptEvent toast appears in Script Canvas.
Test Steps:
1) Open Asset Editor
2) Initially create new Script Event file with one method
3) Add new method and set name to it
4) Add new parameters of each type
5) Verify if parameters are added
6) Expand the parameter rows
7) Set different names and datatypes for each parameter
8) Save file and verify node in SC Node Palette
9) Close Asset Editor
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
import os
from utils import TestHelper as helper
import pyside_utils
# Open 3D Engine imports
import azlmbr.legacy.general as general
import azlmbr.editor as editor
import azlmbr.bus as bus
# Pyside imports
from PySide2 import QtWidgets, QtTest, QtCore
GENERAL_WAIT = 1.0 # seconds
FILE_PATH = os.path.join("AutomatedTesting", "TestAssets", "test_file.scriptevents")
N_VAR_TYPES = 10 # Top 10 variable types
TEST_METHOD_NAME = "test_method_name"
editor_window = pyside_utils.get_editor_main_window()
asset_editor = asset_editor_widget = container = menu_bar = None
sc = node_palette = tree = search_frame = search_box = None
def initialize_asset_editor_qt_objects():
nonlocal asset_editor, asset_editor_widget, container, menu_bar
asset_editor = editor_window.findChild(QtWidgets.QDockWidget, "Asset Editor")
asset_editor_widget = asset_editor.findChild(QtWidgets.QWidget, "AssetEditorWindowClass")
container = asset_editor_widget.findChild(QtWidgets.QWidget, "ContainerForRows")
menu_bar = asset_editor_widget.findChild(QtWidgets.QMenuBar)
def initialize_sc_qt_objects():
nonlocal sc, node_palette, tree, search_frame, search_box
sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas")
if sc.findChild(QtWidgets.QDockWidget, "NodePalette") is None:
action = pyside_utils.find_child_by_pattern(sc, {"text": "Node Palette", "type": QtWidgets.QAction})
action.trigger()
node_palette = sc.findChild(QtWidgets.QDockWidget, "NodePalette")
tree = node_palette.findChild(QtWidgets.QTreeView, "treeView")
search_frame = node_palette.findChild(QtWidgets.QFrame, "searchFrame")
search_box = search_frame.findChild(QtWidgets.QLineEdit, "searchFilter")
def save_file():
editor.AssetEditorWidgetRequestsBus(bus.Broadcast, "SaveAssetAs", FILE_PATH)
action = pyside_utils.find_child_by_pattern(menu_bar, {"type": QtWidgets.QAction, "iconText": "Save"})
action.trigger()
# wait till file is saved, to validate that check the text of QLabel at the bottom of the AssetEditor,
# if there are no unsaved changes we will not have any * in the text
label = asset_editor.findChild(QtWidgets.QLabel, "textEdit")
return helper.wait_for_condition(lambda: "*" not in label.text(), 3.0)
def expand_container_rows(object_name):
children = container.findChildren(QtWidgets.QFrame, object_name)
for child in children:
check_box = child.findChild(QtWidgets.QCheckBox)
if check_box and not check_box.isChecked():
QtTest.QTest.mouseClick(check_box, QtCore.Qt.LeftButton, QtCore.Qt.NoModifier)
def node_palette_search(node_name):
search_box.setText(node_name)
helper.wait_for_condition(lambda: search_box.text() == node_name, 1.0)
# Try clicking ENTER in search box multiple times
for _ in range(20):
QtTest.QTest.keyClick(search_box, QtCore.Qt.Key_Enter, QtCore.Qt.NoModifier)
if pyside_utils.find_child_by_pattern(tree, {"text": node_name}) is not None:
break
def verify_added_params():
for index in range(N_VAR_TYPES):
if container.findChild(QtWidgets.QFrame, f"[{index}]") is None:
return False
return True
# 1) Open Asset Editor
general.idle_enable(True)
# Initially close the Asset Editor and then reopen to ensure we don't have any existing assets open
general.close_pane("Asset Editor")
general.open_pane("Asset Editor")
helper.wait_for_condition(lambda: general.is_pane_visible("Asset Editor"), 5.0)
# 2) Initially create new Script Event file with one method
initialize_asset_editor_qt_objects()
action = pyside_utils.find_child_by_pattern(menu_bar, {"type": QtWidgets.QAction, "text": "Script Events"})
action.trigger()
result = helper.wait_for_condition(
lambda: container.findChild(QtWidgets.QFrame, "Events") is not None
and container.findChild(QtWidgets.QFrame, "Events").findChild(QtWidgets.QToolButton, "") is not None,
3 * GENERAL_WAIT,
)
Report.result(Tests.new_event_created, result)
# 3) Add new method and set name to it
add_event = container.findChild(QtWidgets.QFrame, "Events").findChild(QtWidgets.QToolButton, "")
add_event.click()
result = helper.wait_for_condition(
lambda: asset_editor_widget.findChild(QtWidgets.QFrame, "EventName") is not None, GENERAL_WAIT
)
Report.result(Tests.child_event_created, result)
expand_container_rows("EventName")
expand_container_rows("Name")
initialize_asset_editor_qt_objects()
children = container.findChildren(QtWidgets.QFrame, "Name")
for child in children:
line_edit = child.findChild(QtWidgets.QLineEdit)
if line_edit is not None and line_edit.text() == "MethodName":
line_edit.setText(TEST_METHOD_NAME)
# 4) Add new parameters of each type
helper.wait_for_condition(lambda: container.findChild(QtWidgets.QFrame, "Parameters") is not None, 2.0)
parameters = container.findChild(QtWidgets.QFrame, "Parameters")
add_param = parameters.findChild(QtWidgets.QToolButton, "")
for _ in range(N_VAR_TYPES):
add_param.click()
# 5) Verify if parameters are added
result = helper.wait_for_condition(verify_added_params, 3.0)
Report.result(Tests.params_added, result)
# 6) Expand the parameter rows (to render QFrame 'Type' for each param)
for index in range(N_VAR_TYPES):
expand_container_rows(f"[{index}]")
# 7) Set different names and datatypes for each parameter
expand_container_rows("Name")
children = container.findChildren(QtWidgets.QFrame, "Name")
index = 0
for child in children:
line_edit = child.findChild(QtWidgets.QLineEdit)
if line_edit is not None and line_edit.text() == "ParameterName":
line_edit.setText(f"param_{index}")
index += 1
children = container.findChildren(QtWidgets.QFrame, "Type")
index = 0
for child in children:
combo_box = child.findChild(QtWidgets.QComboBox)
if combo_box is not None and index < N_VAR_TYPES:
combo_box.setCurrentIndex(index)
index += 1
# 8) Save file and verify node in SC Node Palette
Report.result(Tests.file_saved, save_file())
general.open_pane("Script Canvas")
helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 5.0)
initialize_sc_qt_objects()
node_palette_search(TEST_METHOD_NAME)
get_node_index = lambda: pyside_utils.find_child_by_pattern(tree, {"text": TEST_METHOD_NAME}) is not None
result = helper.wait_for_condition(get_node_index, 2.0)
Report.result(Tests.node_found, result)
# 9) Close Asset Editor
general.close_pane("Asset Editor")
general.close_pane("Script Canvas")
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from utils import Report
Report.start_test(ScriptEvents_AllParamDatatypes_CreationSuccess)

@ -113,10 +113,6 @@ class TestAutomation(TestAutomationBase):
from . import Debugger_HappyPath_TargetMultipleGraphs as test_module
self._run_test(request, workspace, editor, test_module)
def test_Debugging_TargetMultipleGraphs(self, request, workspace, editor, launcher_platform, project):
from . import Debugging_TargetMultipleGraphs as test_module
self._run_test(request, workspace, editor, test_module)
@pytest.mark.parametrize("level", ["tmp_level"])
def test_Debugger_HappyPath_TargetMultipleEntities(self, request, workspace, editor, launcher_platform, project, level):
def teardown():
@ -190,6 +186,18 @@ class TestAutomation(TestAutomationBase):
from . import Node_HappyPath_DuplicateNode as test_module
self._run_test(request, workspace, editor, test_module)
def test_ScriptEvent_AddRemoveParameter_ActionsSuccessful(self, request, workspace, editor, launcher_platform):
def teardown():
file_system.delete(
[os.path.join(workspace.paths.project(), "ScriptCanvas", "test_file.scriptevent")], True, True
)
request.addfinalizer(teardown)
file_system.delete(
[os.path.join(workspace.paths.project(), "ScriptCanvas", "test_file.scriptevent")], True, True
)
from . import ScriptEvent_AddRemoveParameter_ActionsSuccessful as test_module
self._run_test(request, workspace, editor, test_module)
# NOTE: We had to use hydra_test_utils.py, as TestAutomationBase run_test method
# fails because of pyside_utils import
@pytest.mark.SUITE_periodic
@ -317,4 +325,30 @@ class TestScriptCanvasTests(object):
auto_test_mode=False,
timeout=60,
)
def test_ScriptEvents_AllParamDatatypes_CreationSuccess(self, request, workspace, editor, launcher_platform):
def teardown():
file_system.delete(
[os.path.join(workspace.paths.project(), "TestAssets", "test_file.scriptevents")], True, True
)
request.addfinalizer(teardown)
file_system.delete(
[os.path.join(workspace.paths.project(), "TestAssets", "test_file.scriptevents")], True, True
)
expected_lines = [
"Success: New Script Event created",
"Success: Child Event created",
"Success: New parameters added",
"Success: Script event file saved",
"Success: Node found in Script Canvas",
]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
editor,
"ScriptEvents_AllParamDatatypes_CreationSuccess.py",
expected_lines,
auto_test_mode=False,
timeout=60,
)

@ -14,6 +14,24 @@ if(PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_HOST_TOOLS)
TEST_SUITE smoke
TEST_SERIAL
PATH ${CMAKE_CURRENT_LIST_DIR}
PYTEST_MARKS "SUITE_smoke"
TIMEOUT 1500
RUNTIME_DEPENDENCIES
AZ::AssetProcessor
AZ::PythonBindingsExample
Legacy::Editor
AutomatedTesting.GameLauncher
AutomatedTesting.Assets
COMPONENT
Smoke
)
ly_add_pytest(
NAME AutomatedTesting::SandboxTest
TEST_SUITE sandbox
TEST_SERIAL
PATH ${CMAKE_CURRENT_LIST_DIR}
PYTEST_MARKS "SUITE_sandbox"
TIMEOUT 1500
RUNTIME_DEPENDENCIES
AZ::AssetProcessor

@ -15,7 +15,7 @@ from automatedtesting_shared.base import TestAutomationBase
import ly_test_tools.environment.file_system as file_system
@pytest.mark.SUITE_smoke
@pytest.mark.SUITE_sandbox
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("level", ["temp_level"])

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:804193a2afd68cd1e6bec8155ea11400566f2941fbd6eb0c324839ebcd10192d
size 8492
oid sha256:302d6172156e8ed665e44e206d81f54f1b0f1008d73327300ea92f8c1159780b
size 11820

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d49aceca5ad4e0b9f46c8127afb5c53b68aa30272950b1abd66fba310977ff0c
size 15032

@ -1,6 +0,0 @@
<download name="blank2" type="Map">
<index src="filelist.xml" dest="filelist.xml"/>
<files>
<file src="level.pak" dest="level.pak" size="39815" md5="3874c8411da272b96974e24a9fff06e8"/>
</files>
</download>

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f221acd847ec8a15e1333a5163d6d0fd886b8eda46fa7b133f76ddbf1d11216
size 41472

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8e5dcfbe65fd2fd8ea29a38a96e703683c544fd42b9424857b1df3718c7775a
size 41472

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0378911c27933302042550d5a031a5f9104296162edc2b21e44893f1b8cff969
size 44124

@ -1,14 +0,0 @@
<Environment>
<Fog ViewDistance="8000" ViewDistanceLowSpec="1000" LDRGlobalDensMult="1.0"/>
<Terrain DetailLayersViewDistRatio="1.0" HeightMapAO="0"/>
<EnvState WindVector="1,0,0" BreezeGeneration="0" BreezeStrength="1.f" BreezeMovementSpeed="8.f" BreezeVariation="1.f" BreezeLifeTime="15.f" BreezeCount="4" BreezeSpawnRadius="25.f" BreezeSpread="0.f" BreezeRadius="5.f" ConsoleMergedMeshesPool="2750" ShowTerrainSurface="1" SunShadowsMinSpec="1" SunShadowsAdditionalCascadeMinSpec="0" SunShadowsClipPlaneRange="256.0f" SunShadowsClipPlaneRangeShift="0.0f" UseLayersActivation="0" SunLinkedToTOD="1"/>
<VolFogShadows Enable="0" EnableForClouds="0"/>
<CloudShadows CloudShadowTexture="" CloudShadowSpeed="0,0,0" CloudShadowTiling="1.0" CloudShadowBrightness="1.0" CloudShadowInvert="0"/>
<ParticleLighting AmbientMul="1.0" LightsMul="1.0"/>
<SkyBox Material="EngineAssets/Materials/Sky/Sky" MaterialLowSpec="EngineAssets/Materials/Sky/Sky" Angle="0" Stretching="0.5"/>
<Ocean Material="EngineAssets/Materials/Water/Ocean_default" CausticsDistanceAtten="100.0" CausticDepth="8.0" CausticIntensity="1.0" CausticsTilling="1.0"/>
<OceanAnimation WindDirection="1.0" WindSpeed="4.0" WavesAmount="1.5" WavesSize="0.4" WavesSpeed="1.0"/>
<Moon Latitude="240.0" Longitude="45.0" Size="0.5" Texture="Textures/Skys/Night/half_moon.dds"/>
<DynTexSource Width="256" Height="256"/>
<Total_Illumination_v2 Active="0" IntegrationMode="0" InjectionMultiplier="1.0" SkyColorMultiplier="1.0" UseTODSkyColor="0.5" PortalsDeform="0" PortalsInject="0" DiffuseAmplifier="1.0" SpecularAmplifier="0" NumberOfBounces="1" Saturation="0.8" PropagationBooster="1.5" DiffuseBias="0.05" DiffuseConeWidth="24" ConeMaxLength="12.0" UpdateLighting="0" UpdateGeometry="0" MinNodeSize="8.0" SkipNonGILights="0" LowSpecMode="0" HalfresKernel="0" UseLightProbes="0" VoxelizaionLODRatio="1.8" VoxelPoolResolution="128" SSAOAmount="0.7" ObjectsMaxViewDistance="64" SunRSMInject="0" SSDepthTrace="0"/>
</Environment>

@ -1,7 +0,0 @@
<TerrainTexture TileCountX="1" TileCountY="1" TileResolution="512">
<RGBLayer>
<Tiles>
<tile />
</Tiles>
</RGBLayer>
</TerrainTexture>

@ -1,356 +0,0 @@
<TimeOfDay Time="12" TimeStart="12" TimeEnd="12" TimeAnimSpeed="0">
<Variable Name="Sun color" Color="0.94730699,0.74540401,0.57758099">
<Spline Keys="-0.000628322:(0.783538:0.89627:0.930341):36,0:(0.783538:0.887923:0.921582):36,0.229167:(0.783538:0.879623:0.921582):36,0.25:(0.947307:0.745404:0.577581):36,0.5:(0.947307:0.745404:0.577581):262180,0.75:(0.947307:0.745404:0.577581):36,0.770833:(0.783538:0.879623:0.921582):36,1:(0.783538:0.89627:0.930556):36,"/>
</Variable>
<Variable Name="Sun intensity" Value="120000">
<Spline Keys="0:1000:36,0.229167:1000:36,0.5:120000:36,0.770833:1000:65572,0.999306:1000:36,"/>
</Variable>
<Variable Name="Sun specular multiplier" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:36,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Fog color" Color="0.27049801,0.47353199,0.83077002">
<Spline Keys="0:(0.00651209:0.00972122:0.0137021):36,0.229167:(0.00604883:0.00972122:0.0137021):36,0.25:(0.270498:0.473532:0.83077):36,0.5:(0.270498:0.473532:0.83077):458788,0.75:(0.270498:0.473532:0.83077):36,0.770833:(0.00604883:0.00972122:0.0137021):36,1:(0.00651209:0.00972122:0.0137021):36,"/>
</Variable>
<Variable Name="Fog color multiplier" Value="1">
<Spline Keys="0:0.5:36,0.229167:0.5:36,0.25:1:36,0.5:1:36,0.75:1:36,0.770833:0.5:36,1:0.5:65572,"/>
</Variable>
<Variable Name="Fog height (bottom)" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:36,0.75:0:36,1:0:36,"/>
</Variable>
<Variable Name="Fog layer density (bottom)" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:36,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Fog color (top)" Color="0.597202,0.72305501,0.91309899">
<Spline Keys="0:(0.00699541:0.00972122:0.0122865):36,0.229167:(0.00699541:0.00972122:0.0122865):36,0.25:(0.597202:0.723055:0.913099):36,0.5:(0.597202:0.723055:0.913099):458788,0.75:(0.597202:0.723055:0.913099):36,0.770833:(0.00699541:0.00972122:0.0122865):36,1:(0.00699541:0.00972122:0.0122865):36,"/>
</Variable>
<Variable Name="Fog color (top) multiplier" Value="1">
<Spline Keys="-4.40702e-06:0.5:36,0.0297507:0.499195:36,0.229167:0.5:36,0.5:1:36,0.770833:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Fog height (top)" Value="100">
<Spline Keys="0:100:36,0.25:100:36,0.5:100:36,0.75:100:65572,1:100:36,"/>
</Variable>
<Variable Name="Fog layer density (top)" Value="9.9999997e-05">
<Spline Keys="0:0.0001:36,0.25:0.0001:36,0.5:0.0001:65572,0.75:0.0001:36,1:0.0001:36,"/>
</Variable>
<Variable Name="Fog color height offset" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:36,0.75:0:36,1:0:65572,"/>
</Variable>
<Variable Name="Fog color (radial)" Color="0.76815099,0.51491803,0.16826899">
<Spline Keys="0:(0:0:0):36,0.229167:(0.00439144:0.00367651:0.00334654):36,0.25:(0.838799:0.564712:0.184475):36,0.5:(0.768151:0.514918:0.168269):458788,0.75:(0.838799:0.564712:0.184475):36,0.770833:(0.00402472:0.00334654:0.00303527):36,1:(0:0:0):36,"/>
</Variable>
<Variable Name="Fog color (radial) multiplier" Value="6">
<Spline Keys="0:0:36,0.25:6:36,0.5:6:36,0.75:6:36,1:0:36,"/>
</Variable>
<Variable Name="Fog radial size" Value="0.85000002">
<Spline Keys="0:0:36,0.25:0.85:65572,0.5:0.85:36,0.75:0.85:36,1:0:36,"/>
</Variable>
<Variable Name="Fog radial lobe" Value="0.75">
<Spline Keys="0:0:36,0.25:0.75:36,0.5:0.75:36,0.75:0.75:65572,1:0:36,"/>
</Variable>
<Variable Name="Volumetric fog: Final density clamp" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Volumetric fog: Global density" Value="1.5">
<Spline Keys="0:1.5:36,0.25:1.5:36,0.5:1.5:65572,0.75:1.5:36,1:1.5:36,"/>
</Variable>
<Variable Name="Volumetric fog: Ramp start" Value="25">
<Spline Keys="0:25:36,0.25:25:36,0.5:25:65572,0.75:25:36,1:25:36,"/>
</Variable>
<Variable Name="Volumetric fog: Ramp end" Value="1000">
<Spline Keys="0:1000:36,0.25:1000:36,0.5:1000:65572,0.75:1000:36,1:1000:36,"/>
</Variable>
<Variable Name="Volumetric fog: Ramp influence" Value="0.69999999">
<Spline Keys="0:0.7:36,0.25:0.7:36,0.5:0.7:65572,0.75:0.7:36,1:0.7:36,"/>
</Variable>
<Variable Name="Volumetric fog: Shadow darkening" Value="0.2">
<Spline Keys="0:0.2:36,0.25:0.2:36,0.5:0.2:65572,0.75:0.2:36,1:0.2:36,"/>
</Variable>
<Variable Name="Volumetric fog: Shadow darkening sun" Value="0.5">
<Spline Keys="0:0.5:36,0.25:0.5:36,0.5:0.5:65572,0.75:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Volumetric fog: Shadow darkening ambient" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Volumetric fog: Shadow range" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog height (bottom)" Value="0">
<Spline Keys="0:0:0,1:0:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog layer density (bottom)" Value="1">
<Spline Keys="0:1:0,1:1:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog height (top)" Value="4000">
<Spline Keys="0:4000:0,1:4000:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog layer density (top)" Value="9.9999997e-05">
<Spline Keys="0:0.0001:0,1:0.0001:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Global fog density" Value="0.10000001">
<Spline Keys="0:0.1:0,1:0.1:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Ramp start" Value="0">
<Spline Keys="0:0:0,1:0:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Ramp end" Value="0">
<Spline Keys="0:0:0,1:0:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog albedo color (atmosphere)" Color="1,1,1">
<Spline Keys="0:(1:1:1):0,1:(1:1:1):0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Anisotropy factor (atmosphere)" Value="0.60000002">
<Spline Keys="0:0.6:0,1:0.6:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog albedo color (sun radial)" Color="1,1,1">
<Spline Keys="0:(1:1:1):0,1:(1:1:1):0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Anisotropy factor (sun radial)" Value="0.94999993">
<Spline Keys="0:0.95:0,1:0.95:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Blend factor for sun scattering" Value="1">
<Spline Keys="0:1:0,1:1:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Blend mode for sun scattering" Value="0">
<Spline Keys="0:0:0,1:0:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Fog albedo color (entities)" Color="1,1,1">
<Spline Keys="0:(1:1:1):0,1:(1:1:1):0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Anisotropy factor (entities)" Value="0.60000002">
<Spline Keys="0:0.6:0,1:0.6:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Maximum range of ray-marching" Value="64">
<Spline Keys="0:64:0,1:64:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: In-scattering factor" Value="1">
<Spline Keys="0:1:0,1:1:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Extinction factor" Value="0.30000001">
<Spline Keys="0:0.3:0,1:0.3:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Analytical volumetric fog visibility" Value="0.5">
<Spline Keys="0:0.5:0,1:0.5:0,"/>
</Variable>
<Variable Name="Volumetric fog 2: Final density clamp" Value="1">
<Spline Keys="0:1:0,0.5:1:36,1:1:0,"/>
</Variable>
<Variable Name="Sky light: Sun intensity" Color="1,1,1">
<Spline Keys="0:(1:1:1):36,0.25:(1:1:1):36,0.494381:(1:1:1):65572,0.5:(1:1:1):36,0.75:(1:1:1):36,1:(1:1:1):36,"/>
</Variable>
<Variable Name="Sky light: Sun intensity multiplier" Value="200">
<Spline Keys="0:200:36,0.25:200:36,0.5:200:36,0.75:200:36,1:200:36,"/>
</Variable>
<Variable Name="Sky light: Mie scattering" Value="2">
<Spline Keys="0:40:36,0.5:2:36,1:40:36,"/>
</Variable>
<Variable Name="Sky light: Rayleigh scattering" Value="0.2">
<Spline Keys="0:0.2:36,0.229167:0.2:36,0.25:1:36,0.291667:0.2:36,0.5:0.2:36,0.729167:0.2:36,0.75:1:36,0.770833:0.2:36,1:0.2:36,"/>
</Variable>
<Variable Name="Sky light: Sun anisotropy factor" Value="-0.99989998">
<Spline Keys="0:-0.9999:36,0.25:-0.9999:36,0.5:-0.9999:65572,0.75:-0.9999:36,1:-0.9999:36,"/>
</Variable>
<Variable Name="Sky light: Wavelength (R)" Value="694">
<Spline Keys="0:694:36,0.25:694:36,0.5:694:65572,0.75:694:36,1:694:36,"/>
</Variable>
<Variable Name="Sky light: Wavelength (G)" Value="597">
<Spline Keys="0:597:36,0.25:597:36,0.5:597:36,0.75:597:36,1:597:36,"/>
</Variable>
<Variable Name="Sky light: Wavelength (B)" Value="488">
<Spline Keys="0:488:36,0.25:488:36,0.5:488:65572,0.75:488:36,1:488:36,"/>
</Variable>
<Variable Name="Night sky: Horizon color" Color="0.27049801,0.39157301,0.52711499">
<Spline Keys="0:(0.270498:0.391573:0.520996):36,0.25:(0.270498:0.391573:0.527115):36,0.5:(0.270498:0.391573:0.527115):262180,0.75:(0.270498:0.391573:0.527115):36,1:(0.270498:0.391573:0.520996):36,"/>
</Variable>
<Variable Name="Night sky: Horizon color multiplier" Value="0">
<Spline Keys="0:0.1:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.1:36,"/>
</Variable>
<Variable Name="Night sky: Zenith color" Color="0.361307,0.434154,0.46778399">
<Spline Keys="0:(0.361307:0.434154:0.467784):36,0.25:(0.361307:0.434154:0.467784):36,0.5:(0.361307:0.434154:0.467784):262180,0.75:(0.361307:0.434154:0.467784):36,1:(0.361307:0.434154:0.467784):36,"/>
</Variable>
<Variable Name="Night sky: Zenith color multiplier" Value="0">
<Spline Keys="0:0.02:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.02:36,"/>
</Variable>
<Variable Name="Night sky: Zenith shift" Value="0.5">
<Spline Keys="0:0.5:36,0.25:0.5:36,0.5:0.5:65572,0.75:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Night sky: Star intensity" Value="0">
<Spline Keys="0:3:36,0.25:0:36,0.5:0:65572,0.75:0:36,0.836647:1.03977:36,1:3:36,"/>
</Variable>
<Variable Name="Night sky: Moon color" Color="1,1,1">
<Spline Keys="0:(1:1:1):36,0.25:(1:1:1):36,0.5:(1:1:1):458788,0.75:(1:1:1):36,1:(1:1:1):36,"/>
</Variable>
<Variable Name="Night sky: Moon color multiplier" Value="0">
<Spline Keys="0:0.4:36,0.25:0:36,0.5:0:36,0.75:0:65572,1:0.4:36,"/>
</Variable>
<Variable Name="Night sky: Moon inner corona color" Color="0.904661,1,1">
<Spline Keys="0:(0.89627:1:1):36,0.25:(0.904661:1:1):36,0.5:(0.904661:1:1):393252,0.75:(0.904661:1:1):36,0.836647:(0.89627:1:1):36,1:(0.89627:1:1):36,"/>
</Variable>
<Variable Name="Night sky: Moon inner corona color multiplier" Value="0">
<Spline Keys="0:0.1:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.1:36,"/>
</Variable>
<Variable Name="Night sky: Moon inner corona scale" Value="0">
<Spline Keys="0:2:36,0.25:0:36,0.5:0:65572,0.75:0:36,0.836647:0.693178:36,1:2:36,"/>
</Variable>
<Variable Name="Night sky: Moon outer corona color" Color="0.201556,0.22696599,0.254152">
<Spline Keys="0:(0.198069:0.226966:0.250158):36,0.25:(0.201556:0.226966:0.254152):36,0.5:(0.201556:0.226966:0.254152):36,0.75:(0.201556:0.226966:0.254152):36,1:(0.198069:0.226966:0.250158):36,"/>
</Variable>
<Variable Name="Night sky: Moon outer corona color multiplier" Value="0">
<Spline Keys="0:0.1:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.1:36,"/>
</Variable>
<Variable Name="Night sky: Moon outer corona scale" Value="0">
<Spline Keys="0:0.01:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.01:36,"/>
</Variable>
<Variable Name="Cloud shading: Sun light multiplier" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Cloud shading: Sun custom color" Color="0.83077002,0.76815099,0.65837502">
<Spline Keys="0:(0.737911:0.737911:0.737911):36,0.25:(0.83077:0.768151:0.658375):36,0.5:(0.83077:0.768151:0.658375):458788,0.75:(0.83077:0.768151:0.658375):36,1:(0.737911:0.737911:0.737911):36,"/>
</Variable>
<Variable Name="Cloud shading: Sun custom color multiplier" Value="1">
<Spline Keys="0:0.1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cloud shading: Sun custom color influence" Value="0">
<Spline Keys="0:0.5:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0.5:36,"/>
</Variable>
<Variable Name="Sun shafts visibility" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0:36,"/>
</Variable>
<Variable Name="Sun rays visibility" Value="1.5">
<Spline Keys="0:1:36,0.25:1.5:36,0.5:1.5:65572,0.75:1.5:36,1:1:36,"/>
</Variable>
<Variable Name="Sun rays attenuation" Value="1.5">
<Spline Keys="0:0.1:36,0.25:1.5:36,0.5:1.5:65572,0.75:1.5:36,1:0.1:36,"/>
</Variable>
<Variable Name="Sun rays suncolor influence" Value="0.5">
<Spline Keys="0:0.5:36,0.25:0.5:36,0.5:0.5:65572,0.75:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Sun rays custom color" Color="0.66538697,0.838799,0.94730699">
<Spline Keys="0:(0.665387:0.838799:0.947307):36,0.25:(0.665387:0.838799:0.947307):36,0.5:(0.665387:0.838799:0.947307):458788,0.75:(0.665387:0.838799:0.947307):36,1:(0.665387:0.838799:0.947307):36,"/>
</Variable>
<Variable Name="Ocean fog color" Color="0.0012141099,0.0091340598,0.017642001">
<Spline Keys="0:(0.00121411:0.00913406:0.017642):36,0.25:(0.00121411:0.00913406:0.017642):36,0.5:(0.00121411:0.00913406:0.017642):458788,0.75:(0.00121411:0.00913406:0.017642):36,1:(0.00121411:0.00913406:0.017642):36,"/>
</Variable>
<Variable Name="Ocean fog color multiplier" Value="0.5">
<Spline Keys="0:0.5:36,0.25:0.5:36,0.5:0.5:65572,0.75:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Ocean fog density" Value="0.5">
<Spline Keys="0:0.5:36,0.25:0.5:36,0.5:0.5:65572,0.75:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Skybox multiplier" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Film curve shoulder scale" Value="2">
<Spline Keys="0:3:36,0.229167:3:36,0.5:2:36,0.770833:3:36,1:3:36,"/>
</Variable>
<Variable Name="Film curve midtones scale" Value="1">
<Spline Keys="0:0.5:36,0.229167:0.5:36,0.5:1:36,0.770833:0.5:36,1:0.5:36,"/>
</Variable>
<Variable Name="Film curve toe scale" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Film curve whitepoint" Value="4">
<Spline Keys="0:4:36,0.25:4:36,0.5:4:65572,0.75:4:36,1:4:36,"/>
</Variable>
<Variable Name="Saturation" Value="1">
<Spline Keys="0:0.8:36,0.229167:0.8:36,0.5:1:36,0.751391:1:65572,0.770833:0.8:36,1:0.8:36,"/>
</Variable>
<Variable Name="Color balance" Color="1,1,1">
<Spline Keys="0:(1:1:1):36,0.25:(1:1:1):36,0.5:(1:1:1):36,0.75:(1:1:1):36,1:(1:1:1):36,"/>
</Variable>
<Variable Name="Scene key" Value="0.18000001">
<Spline Keys="0:0.18:36,0.25:0.18:36,0.5:0.18:65572,0.75:0.18:36,1:0.18:36,"/>
</Variable>
<Variable Name="Min exposure" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Max exposure" Value="2.8">
<Spline Keys="0:2:36,0.229167:2:36,0.5:2.8:36,0.770833:2:36,1:2:36,"/>
</Variable>
<Variable Name="EV Min" Value="4.5">
<Spline Keys="0:4.5:0,1:4.5:0,"/>
</Variable>
<Variable Name="EV Max" Value="17">
<Spline Keys="0:17:0,1:17:0,"/>
</Variable>
<Variable Name="EV Auto compensation" Value="1.5">
<Spline Keys="0:1.5:0,1:1.5:0,"/>
</Variable>
<Variable Name="Bloom amount" Value="0.1">
<Spline Keys="0:1:36,0.229167:1:36,0.5:0.1:36,0.770833:1:36,1:1:36,"/>
</Variable>
<Variable Name="Filters: grain" Value="0">
<Spline Keys="0:0.3:65572,0.229167:0.3:36,0.25:0:36,0.5:0:36,0.75:0:36,1:0.3:36,"/>
</Variable>
<Variable Name="Filters: photofilter color" Color="0,0,0">
<Spline Keys="0:(0:0:0):36,0.25:(0:0:0):36,0.5:(0:0:0):458788,0.75:(0:0:0):36,1:(0:0:0):36,"/>
</Variable>
<Variable Name="Filters: photofilter density" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:36,0.75:0:36,1:0:36,"/>
</Variable>
<Variable Name="Dof: focus range" Value="500">
<Spline Keys="0:500:36,0.25:500:36,0.5:500:65572,0.75:500:36,1:500:36,"/>
</Variable>
<Variable Name="Dof: blur amount" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 0: Bias" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 0: Slope Bias" Value="64">
<Spline Keys="0:64:36,0.25:64:36,0.5:64:65572,0.75:64:36,1:64:36,"/>
</Variable>
<Variable Name="Cascade 1: Bias" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 1: Slope Bias" Value="8">
<Spline Keys="0:8:36,0.25:8:36,0.5:8:65572,0.75:8:36,1:8:36,"/>
</Variable>
<Variable Name="Cascade 2: Bias" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 2: Slope Bias" Value="4">
<Spline Keys="0:4:36,0.25:4:36,0.5:4:65572,0.75:4:36,1:4:36,"/>
</Variable>
<Variable Name="Cascade 3: Bias" Value="0.1">
<Spline Keys="0:0.1:36,0.25:0.1:36,0.5:0.1:36,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 3: Slope Bias" Value="1">
<Spline Keys="0:1:36,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Cascade 4: Bias" Value="0.1">
<Spline Keys="0:0.1:0,0.25:0.1:36,0.5:0.1:65572,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 4: Slope Bias" Value="1">
<Spline Keys="0:1:0,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Cascade 5: Bias" Value="0.0099999998">
<Spline Keys="0:0.01:0,0.25:0.01:36,0.5:0.01:65572,0.75:0.01:36,1:0.01:36,"/>
</Variable>
<Variable Name="Cascade 5: Slope Bias" Value="1">
<Spline Keys="0:1:0,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Cascade 6: Bias" Value="0.1">
<Spline Keys="0:0.1:0,0.25:0.1:36,0.5:0.1:36,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 6: Slope Bias" Value="1">
<Spline Keys="0:1:0,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Cascade 7: Bias" Value="0.1">
<Spline Keys="0:0.1:0,0.25:0.1:36,0.5:0.1:36,0.75:0.1:36,1:0.1:36,"/>
</Variable>
<Variable Name="Cascade 7: Slope Bias" Value="1">
<Spline Keys="0:1:0,0.25:1:36,0.5:1:65572,0.75:1:36,1:1:36,"/>
</Variable>
<Variable Name="Shadow jittering" Value="2.5">
<Spline Keys="0:5:36,0.25:2.5:36,0.5:2.5:65572,0.75:2.5:36,1:5:0,"/>
</Variable>
<Variable Name="HDR dynamic power factor" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:65572,0.75:0:36,1:0:36,"/>
</Variable>
<Variable Name="Sky brightening (terrain occlusion)" Value="0">
<Spline Keys="0:0:36,0.25:0:36,0.5:0:36,0.75:0:36,1:0:36,"/>
</Variable>
<Variable Name="Sun color multiplier" Value="10">
<Spline Keys="0:0.1:36,0.25:10:36,0.5:10:36,0.75:10:36,1:0.1:36,"/>
</Variable>
</TimeOfDay>

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e6a5435c928079b27796f6b202bbc2623e7e454244ddc099a3cadf33b7cb9e9
size 63

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12ca8f1942331abde4d58724aea22609c8d7951cc415afa6e5f1c550a14e67b0
size 363624

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f5b525a410730d84c0b3e97396d392e1e72f4b894742ddef3de4ede5542b0f8e
size 86148

@ -1,12 +0,0 @@
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0
0,0,0,0,0,0

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8739c76e681f900923b900c9df0ef75cf421d39cabb54650c4b9ad19b6a76d85
size 22

@ -1,7 +0,0 @@
<Material MtlFlags="2623488" Shader="Watervolume" GenMask="80000013" StringGenMask="" SurfaceType="mat_water" Diffuse="1,1,1,1" Specular="0.27583286,0.27583286,0.27583286,1" Opacity="1" Shininess="255" vertModifType="0" LayerAct="1">
<Textures>
<Texture Map="Specular" File="engineassets/textures/water_gloss.dds" Filter="7"/>
<Texture Map="Environment" File="nearest_cubemap" TexType="7"/>
</Textures>
<PublicParams NormalsScale="0.5" GlossMapTilling="1" SoftIntersectionFactor="1" Tilling="0.1" DetailNormalsScale="0.5" GlossMapBias="0" EnvCubeReflMul="16" VertexWaveScale="0.125" DetailTilling="2.5" EnvCubeScale="16" GlossMapScale="1.5" RealtimeReflMul="1" RainTilling="1" WaterFlowSpeed="0.5"/>
</Material>

@ -1 +1 @@
/autooptimizefile=0 /M=50,50,0,50,50,50 /preset=NormalsWithSmoothness /reduce="es3:1,ios:1,osx_gl:0,pc:0,provo:0"
/autooptimizefile=0 /M=50,50,0,50,50,50 /preset=NormalsWithSmoothness /reduce="android:1,ios:1,mac:0,pc:0,provo:0"

@ -119,6 +119,9 @@
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{3A055A3F-8CB7-5FEE-B437-EB365FACD0D4}"

@ -0,0 +1,118 @@
{
"Amazon": {
"Gems": {
"PhysX": {
"PhysXSystemConfiguration": {
"CollisionConfig": {
"Layers": {
"LayerNames": [
"Default",
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
{},
"TouchBend"
]
},
"Groups": {
"GroupPresets": [
{
"Name": "All",
"ReadOnly": true
},
{
"Id": {
"GroupId": "{CDB6B8D8-5CD0-40A8-874D-839B00A92EBB}"
},
"Name": "None",
"Group": {
"Mask": 0
},
"ReadOnly": true
},
{
"Id": {
"GroupId": "{22769429-5D46-429B-829A-0115239D9AAA}"
},
"Name": "All_NoTouchBend",
"Group": {
"Mask": 9223372036854775807
},
"ReadOnly": true
}
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{6AA79EE4-7EC3-5717-87AE-EDD7D886FD7F}"
},
"loadBehavior": "QueueLoad",
"assetHint": "levels/physics/c4044459_material_dynamicfriction/dynamic_friction.physmaterial"
}
}
}
}
}
}

@ -107,6 +107,9 @@
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{3A055A3F-8CB7-5FEE-B437-EB365FACD0D4}"

@ -119,6 +119,9 @@
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{3A055A3F-8CB7-5FEE-B437-EB365FACD0D4}"

@ -119,6 +119,9 @@
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{3A055A3F-8CB7-5FEE-B437-EB365FACD0D4}"

@ -119,6 +119,9 @@
]
}
},
"DefaultMaterial": {
"SurfaceType": "Default_1"
},
"MaterialLibrary": {
"assetId": {
"guid": "{3A055A3F-8CB7-5FEE-B437-EB365FACD0D4}"

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save