merging latest dev

Signed-off-by: antonmic <56370189+antonmic@users.noreply.github.com>
monroegm-disable-blank-issue-2
antonmic 4 years ago
commit 9f0b6d65f4

.gitignore

@@ -2,18 +2,14 @@
.vs/
.vscode/
__pycache__
AssetProcessorTemp/**
[Bb]uild/
[Oo]ut/**
CMakeUserPresets.json
[Cc]ache/
/[Ii]nstall/
Editor/EditorEventLog.xml
Editor/EditorLayout.xml
**/*egg-info/**
**/*egg-link
**/[Rr]estricted
UserSettings.xml
[Uu]ser/
FrameCapture/**
.DS_Store
@@ -22,9 +18,6 @@ client*.cfg
server*.cfg
.mayaSwatches/
_savebackup/
#Output folder for test results when running Automated Tests
TestResults/**
*.swatches
/imgui.ini
/scripts/project_manager/logs/
/AutomatedTesting/Gem/PythonTests/scripting/TestResults

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 24 24" style="enable-background:new 0 0 24 24;" xml:space="preserve">
<path d="M3.145,8.433c0-1.47,1.196-2.666,2.666-2.666h9.544c-0.158-0.819-0.88-1.443-1.744-1.443H3.487
c-0.978,0-1.778,0.8-1.778,1.778v5.356c0,0.861,0.62,1.582,1.436,1.743V8.433z" fill="#FFFFFF"/>
<g>
<path d="M6.833,11.654c0-1.47,1.196-2.666,2.666-2.666h9.069c-0.158-0.819-0.88-1.443-1.744-1.443H6.7
c-0.978,0-1.778,0.8-1.778,1.778v5.356c0,0.978,0.8,1.778,1.778,1.778h0.133V11.654z" fill="#FFFFFF"/>
</g>
<path d="M20.513,10.765H10.388c-0.978,0-1.778,0.8-1.778,1.777v5.356c0,0.978,0.8,1.778,1.778,1.778h10.125
c0.978,0,1.778-0.8,1.778-1.778v-5.356C22.29,11.565,21.49,10.765,20.513,10.765z M19.332,15.967h-7.763
c-0.264,0-0.478-0.355-0.478-0.793c0-0.438,0.214-0.793,0.478-0.793h7.763c0.264,0,0.478,0.355,0.478,0.793
C19.81,15.612,19.597,15.967,19.332,15.967z" fill="#FFFFFF"/>
</svg>


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bfc2ef8c6dbf2fba078e27e4e94384099e090468e679327dd826a5cbf22b04ed
size 1019

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:708b12d41229afab78e0f7d59097ae3de855fea8525a920c5c214fc0ce79f1bd
size 1209

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fab63af9b50790dca25330058e70517987ea8bf11c00f9353dd951ebdbd1dbe5
size 5008
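The three entries above (and the added pointer further down) are Git LFS pointer files: short text stubs that stand in for a binary and record the spec version, a SHA-256 object ID, and the real file's size in bytes. A minimal sketch of parsing one, assuming only the three-field format from the spec URL shown above:

```python
# Sketch: split a Git LFS pointer (https://git-lfs.github.com/spec/v1)
# into its version, hash algorithm, digest, and size fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, _, digest = fields["oid"].partition(":")
    return {
        "version": fields["version"],
        "algo": algo,                 # e.g. "sha256"
        "digest": digest,             # hex digest of the stored binary
        "size": int(fields["size"]),  # size of the binary in bytes
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:fab63af9b50790dca25330058e70517987ea8bf11c00f9353dd951ebdbd1dbe5
size 5008"""
print(parse_lfs_pointer(pointer)["size"])  # 5008
```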

File diff suppressed because it is too large

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f94f2634eacb4d7bee20dacc45edef96e4d268f1adb7960b8aa8f3b6e2906ed
size 6867609

@@ -0,0 +1,129 @@
{
"ContainerEntity": {
"Id": "ContainerEntity",
"Name": "Bush",
"Components": {
"Component_[1140272189295067758]": {
"$type": "EditorInspectorComponent",
"Id": 1140272189295067758
},
"Component_[13437832196484687256]": {
"$type": "EditorOnlyEntityComponent",
"Id": 13437832196484687256
},
"Component_[1553903646452669645]": {
"$type": "EditorDisabledCompositionComponent",
"Id": 1553903646452669645
},
"Component_[15914009348632444632]": {
"$type": "EditorEntitySortComponent",
"Id": 15914009348632444632,
"Child Entity Order": [
"Entity_[7511491868318]"
]
},
"Component_[18046340308818780248]": {
"$type": "EditorPrefabComponent",
"Id": 18046340308818780248
},
"Component_[1948833233489872938]": {
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
"Id": 1948833233489872938,
"Parent Entity": ""
},
"Component_[2903632350157981339]": {
"$type": "SelectionComponent",
"Id": 2903632350157981339
},
"Component_[48827510535192710]": {
"$type": "EditorPendingCompositionComponent",
"Id": 48827510535192710
},
"Component_[5609536793322429681]": {
"$type": "EditorLockComponent",
"Id": 5609536793322429681
},
"Component_[5859168386298620990]": {
"$type": "EditorEntityIconComponent",
"Id": 5859168386298620990
},
"Component_[6604616929271524505]": {
"$type": "EditorVisibilityComponent",
"Id": 6604616929271524505
}
}
},
"Entities": {
"Entity_[7511491868318]": {
"Id": "Entity_[7511491868318]",
"Name": "Bush",
"Components": {
"Component_[10227459330338484901]": {
"$type": "EditorInspectorComponent",
"Id": 10227459330338484901,
"ComponentOrderEntryArray": [
{
"ComponentId": 4998941225335869157
},
{
"ComponentId": 9922994635792843826,
"SortIndex": 1
}
]
},
"Component_[10972351222359420947]": {
"$type": "EditorOnlyEntityComponent",
"Id": 10972351222359420947
},
"Component_[12101122374155214392]": {
"$type": "EditorPendingCompositionComponent",
"Id": 12101122374155214392
},
"Component_[1535264614652988260]": {
"$type": "SelectionComponent",
"Id": 1535264614652988260
},
"Component_[16367811417907891218]": {
"$type": "EditorVisibilityComponent",
"Id": 16367811417907891218
},
"Component_[17044216787716682880]": {
"$type": "EditorEntitySortComponent",
"Id": 17044216787716682880
},
"Component_[2129822594969629430]": {
"$type": "EditorEntityIconComponent",
"Id": 2129822594969629430
},
"Component_[2838015156782745450]": {
"$type": "EditorLockComponent",
"Id": 2838015156782745450
},
"Component_[4998941225335869157]": {
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
"Id": 4998941225335869157,
"Parent Entity": "ContainerEntity"
},
"Component_[8773358049076362578]": {
"$type": "EditorDisabledCompositionComponent",
"Id": 8773358049076362578
},
"Component_[9922994635792843826]": {
"$type": "AZ::Render::EditorMeshComponent",
"Id": 9922994635792843826,
"Controller": {
"Configuration": {
"ModelAsset": {
"assetId": {
"guid": "{1201406D-FB20-5B5F-B9B5-6A6E8DE00A14}",
"subId": 276506120
},
"assetHint": "assets/objects/foliage/bush_privet_01.azmodel"
}
}
}
}
}
}
}
}
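The prefab above is plain JSON: a ContainerEntity plus an Entities map, where every component carries a "$type" and a numeric "Id", and the TransformComponent's "Parent Entity" field wires the hierarchy together. A hedged sketch (standard-library Python only, no O3DE APIs; the file name is hypothetical) that walks such a document and reports each entity's mesh asset:

```python
# Sketch: inspect an O3DE .prefab JSON file and list mesh assets per entity.
import json

with open("Bush.prefab") as f:  # hypothetical path
    prefab = json.load(f)

entities = {"ContainerEntity": prefab["ContainerEntity"], **prefab.get("Entities", {})}
for entity in entities.values():
    for component in entity["Components"].values():
        if component["$type"] == "AZ::Render::EditorMeshComponent":
            asset = component["Controller"]["Configuration"]["ModelAsset"]
            print(f'{entity["Name"]}: {asset["assetHint"]} '
                  f'(guid {asset["assetId"]["guid"]}, subId {asset["assetId"]["subId"]})')
```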

File diff suppressed because it is too large

@@ -0,0 +1,129 @@
{
"ContainerEntity": {
"Id": "ContainerEntity",
"Name": "PurpleFlower",
"Components": {
"Component_[10519928302743666073]": {
"$type": "EditorPrefabComponent",
"Id": 10519928302743666073
},
"Component_[13894087802180240181]": {
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
"Id": 13894087802180240181,
"Parent Entity": ""
},
"Component_[15788541052719571801]": {
"$type": "EditorEntityIconComponent",
"Id": 15788541052719571801
},
"Component_[15842981265136092481]": {
"$type": "SelectionComponent",
"Id": 15842981265136092481
},
"Component_[16360384897559021149]": {
"$type": "EditorInspectorComponent",
"Id": 16360384897559021149
},
"Component_[16713545675046303279]": {
"$type": "EditorVisibilityComponent",
"Id": 16713545675046303279
},
"Component_[1806734194268113785]": {
"$type": "EditorPendingCompositionComponent",
"Id": 1806734194268113785
},
"Component_[5392020700593853313]": {
"$type": "EditorEntitySortComponent",
"Id": 5392020700593853313,
"Child Entity Order": [
"Entity_[14335611090324]"
]
},
"Component_[5995854518752659458]": {
"$type": "EditorLockComponent",
"Id": 5995854518752659458
},
"Component_[6963022284400845376]": {
"$type": "EditorDisabledCompositionComponent",
"Id": 6963022284400845376
},
"Component_[8055275578170091546]": {
"$type": "EditorOnlyEntityComponent",
"Id": 8055275578170091546
}
}
},
"Entities": {
"Entity_[14335611090324]": {
"Id": "Entity_[14335611090324]",
"Name": "PurpleFlower",
"Components": {
"Component_[10887353073528055802]": {
"$type": "EditorPendingCompositionComponent",
"Id": 10887353073528055802
},
"Component_[12641127425852859189]": {
"$type": "AZ::Render::EditorMeshComponent",
"Id": 12641127425852859189,
"Controller": {
"Configuration": {
"ModelAsset": {
"assetId": {
"guid": "{D493A670-6D82-5AE9-A2C8-A2EB02684F71}",
"subId": 284799939
},
"assetHint": "assets/objects/foliage/grass_flower_purple.azmodel"
}
}
}
},
"Component_[14406733303466080015]": {
"$type": "EditorInspectorComponent",
"Id": 14406733303466080015,
"ComponentOrderEntryArray": [
{
"ComponentId": 9231452352781000222
},
{
"ComponentId": 12641127425852859189,
"SortIndex": 1
}
]
},
"Component_[1452384341905923012]": {
"$type": "EditorLockComponent",
"Id": 1452384341905923012
},
"Component_[2215454016415585892]": {
"$type": "EditorDisabledCompositionComponent",
"Id": 2215454016415585892
},
"Component_[4104108067383423623]": {
"$type": "EditorVisibilityComponent",
"Id": 4104108067383423623
},
"Component_[4197335450471807917]": {
"$type": "SelectionComponent",
"Id": 4197335450471807917
},
"Component_[6877680739064997650]": {
"$type": "EditorOnlyEntityComponent",
"Id": 6877680739064997650
},
"Component_[7372550507186490390]": {
"$type": "EditorEntityIconComponent",
"Id": 7372550507186490390
},
"Component_[7673532337364366244]": {
"$type": "EditorEntitySortComponent",
"Id": 7673532337364366244
},
"Component_[9231452352781000222]": {
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
"Id": 9231452352781000222,
"Parent Entity": "ContainerEntity"
}
}
}
}
}

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3789abdf439a6d70438fd4bb1e06881ae6686a4699209c6bc371d22d161e5347
size 26476

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c987d7d79685fda83efcffb7e1afbcd356c37fc68ec5c663a89b02d4df10caea
size 46412

@@ -2,11 +2,11 @@
<Class name="DescriptorListAsset" type="{60961B36-E3CA-4877-B197-1462C1363F6E}">
<Class name="AZStd::vector" field="Descriptors" type="{FC36C5E0-6152-5B06-AF30-3FC494B85FAB}">
<Class name="Descriptor" field="element" version="8" type="{A5A5E7F7-FC36-4BD1-8A93-21362574B9DA}">
<Class name="AZ::Uuid" field="SpawnerType" value="{BBA5CC1E-B4CA-4792-89F7-93711E98FBD1}" type="{E152C105-A133-4D03-BBF8-3D4B2FBA3E2A}"/>
<Class name="AZ::Uuid" field="SpawnerType" value="{74BEEDB5-81CF-409F-B375-0D93D81EF2E3}" type="{E152C105-A133-4D03-BBF8-3D4B2FBA3E2A}"/>
<Class name="AZStd::shared_ptr" field="InstanceSpawner" type="{7C7046DE-F8B1-529D-AD8C-829C6C0E2FCD}">
<Class name="DynamicSliceInstanceSpawner" field="element" type="{BBA5CC1E-B4CA-4792-89F7-93711E98FBD1}">
<Class name="PrefabInstanceSpawner" field="element" type="{74BEEDB5-81CF-409F-B375-0D93D81EF2E3}">
<Class name="InstanceSpawner" field="BaseClass1" type="{01AD0758-B04A-4B43-BC2B-BDCD77F4EF6A}"/>
<Class name="Asset" field="SliceAsset" value="id={EBFE34EB-D0A8-5E51-8234-3BE38082B28C}:2,type={78802ABF-9595-463A-8D2B-D022F906F9B1},hint={slices/pinkflower.dynamicslice}" version="1" type="{77A19D40-8731-4D3C-9041-1B43047366A4}"/>
<Class name="Asset" field="SpawnableAsset" value="id={80C0CF4E-9A5E-544B-B89E-BC980175A259}:e6f903d2,type={855E3021-D305-4845-B284-20C3F7FDF16B},hint={Assets/Prefabs/PinkFlower.prefab},loadBehavior=1" version="2" type="{77A19D40-8731-4D3C-9041-1B43047366A4}"/>
</Class>
</Class>
<Class name="float" field="Weight" value="1.0000000" type="{EA2C3E90-AFBE-44D4-A90D-FAAF79BAF93D}"/>

@@ -27,5 +27,4 @@ else()
set_property(GLOBAL APPEND PROPERTY LY_PROJECTS_TARGET_NAME ${project_target_name})
add_subdirectory(Gem)
endif()

@@ -6,6 +6,9 @@
#
#
set(gem_path ${CMAKE_CURRENT_LIST_DIR})
set(gem_json ${gem_path}/gem.json)
o3de_restricted_path(${gem_json} gem_restricted_path gem_parent_relative_path)
add_subdirectory(Code)
add_subdirectory(PythonTests)
add_subdirectory(PythonCoverage)
add_subdirectory(PythonTests)

@@ -6,7 +6,7 @@
#
#
ly_get_list_relative_pal_filename(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME})
o3de_pal_dir(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${gem_restricted_path} ${gem_path} ${gem_parent_relative_path})
ly_add_target(
NAME AutomatedTesting ${PAL_TRAIT_MONOLITHIC_DRIVEN_MODULE_TYPE}

@@ -0,0 +1,15 @@
<?xml version="1.0"?>
<Component
Name="NetworkTestLevelEntityComponent"
Namespace="AutomatedTesting"
OverrideComponent="false"
OverrideController="false"
OverrideInclude=""
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ComponentRelation Constraint="Required" HasController="true" Name="NetworkTransformComponent" Namespace="Multiplayer" Include="Multiplayer/Components/NetworkTransformComponent.h" />
<RemoteProcedure Name="AuthorityToClientNoParams_PlayFx" InvokeFrom="Authority" HandleOn="Client" IsPublic="false" IsReliable="true" GenerateEventBindings="true" Description="" />
</Component>
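The AutoComponent XML above is input to O3DE's multiplayer code generation: each RemoteProcedure declares the endpoint it is invoked from (InvokeFrom), where it is handled (HandleOn), its reliability, and whether event bindings are generated. A hedged sketch that enumerates those declarations with the standard-library XML parser (file name assumed):

```python
# Sketch: list the RemoteProcedure declarations in an AutoComponent XML file.
import xml.etree.ElementTree as ET

root = ET.parse("NetworkTestLevelEntityComponent.AutoComponent.xml").getroot()
for rpc in root.findall("RemoteProcedure"):
    params = [f'{p.get("Type")} {p.get("Name")}' for p in rpc.findall("Param")]
    print(f'{rpc.get("Name")}: {rpc.get("InvokeFrom")} -> {rpc.get("HandleOn")} '
          f'(reliable={rpc.get("IsReliable")}, params=[{", ".join(params)}])')
```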

@@ -19,8 +19,8 @@
<RemoteProcedure Name="AutonomousToAuthorityNoParams" InvokeFrom="Autonomous" HandleOn="Authority" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" />
<RemoteProcedure Name="AuthorityToAutonomous" InvokeFrom="Authority" HandleOn="Autonomous" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" >
<Param Type="float" Name="SomeFloat" />
<RemoteProcedure Name="AuthorityToAutonomous_PlayerNumber" InvokeFrom="Authority" HandleOn="Autonomous" IsPublic="false" IsReliable="true" GenerateEventBindings="true" Description="" >
<Param Type="int" Name="player_number" />
</RemoteProcedure>
<RemoteProcedure Name="AuthorityToAutonomousNoParams" InvokeFrom="Authority" HandleOn="Autonomous" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" />
@@ -29,10 +29,8 @@
<Param Type="float" Name="SomeFloat" />
</RemoteProcedure>
<RemoteProcedure Name="AuthorityToClientNoParams" InvokeFrom="Authority" HandleOn="Client" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" />
<RemoteProcedure Name="ServerToAuthority" InvokeFrom="Server" HandleOn="Authority" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" >
<Param Type="float" Name="SomeFloat" />
<RemoteProcedure Name="ServerToAuthority_DealDamage" InvokeFrom="Server" HandleOn="Authority" IsPublic="false" IsReliable="true" GenerateEventBindings="true" Description="" >
<Param Type="float" Name="damage" />
</RemoteProcedure>
<RemoteProcedure Name="ServerToAuthorityNoParam" InvokeFrom="Server" HandleOn="Authority" IsPublic="false" IsReliable="false" GenerateEventBindings="true" Description="" />

@@ -12,4 +12,5 @@ set(FILES
Source/AutomatedTestingSystemComponent.cpp
Source/AutomatedTestingSystemComponent.h
Source/AutoGen/NetworkTestPlayerComponent.AutoComponent.xml
Source/AutoGen/NetworkTestLevelEntityComponent.AutoComponent.xml
)

@@ -6,4 +6,8 @@
#
#
set(gem_path ${CMAKE_CURRENT_LIST_DIR})
set(gem_json ${gem_path}/gem.json)
o3de_restricted_path(${gem_json} gem_restricted_path gem_parent_relative_path)
add_subdirectory(Code)

@@ -6,7 +6,7 @@
#
#
ly_get_list_relative_pal_filename(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${o3de_gem_restricted_path} ${o3de_gem_path} ${o3de_gem_name})
o3de_pal_dir(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${gem_restricted_path} ${gem_path} ${gem_parent_relative_path})
include(${pal_dir}/PAL_${PAL_PLATFORM_NAME_LOWERCASE}.cmake)
if(PAL_TRAIT_PYTHONCOVERAGE_SUPPORTED)

@@ -4,6 +4,7 @@
"license": "Apache-2.0 Or MIT",
"license_url": "https://github.com/o3de/o3de/blob/development/LICENSE.txt",
"origin": "Open 3D Engine - o3de.org",
"origin_url": "https://github.com/o3de/o3de",
"type": "Tool",
"summary": "A tool for generating gem coverage for Python tests.",
"canonical_tags": [
@@ -13,5 +14,8 @@
"PythonCoverage"
],
"icon_path": "preview.png",
"requirements": ""
"requirements": "",
"documentation_url": "",
"dependencies": [
]
}

@@ -12,18 +12,12 @@
################################################################################
if(PAL_TRAIT_BUILD_TESTS_SUPPORTED AND PAL_TRAIT_BUILD_HOST_TOOLS)
# Only enable AWS automated tests on Windows
if(NOT "${PAL_PLATFORM_NAME}" STREQUAL "Windows")
return()
endif()
ly_add_pytest(
NAME AutomatedTesting::AWSTests
TEST_SUITE awsi
TEST_SERIAL
PATH ${CMAKE_CURRENT_LIST_DIR}/${PAL_PLATFORM_NAME}/
PATH ${CMAKE_CURRENT_LIST_DIR}/
RUNTIME_DEPENDENCIES
Legacy::Editor
AZ::AssetProcessor
AutomatedTesting.GameLauncher
AutomatedTesting.Assets

@@ -2,30 +2,61 @@
## Prerequisites
1. Build the O3DE Editor and AutomatedTesting.GameLauncher in Profile.
2. AWS CLI is installed and configured following [Configuration and Credential File Settings](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
3. [AWS Cloud Development Kit (CDK)](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html#getting_started_install) is installed.
2. Install the latest version of Node.js.
3. AWS CLI is installed and AWS credentials are configured via [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) or a [default profile](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
4. [AWS Cloud Development Kit (CDK)](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html#getting_started_install) is installed.
## Deploy CDK Applications
1. Go to the AWS IAM console and create an IAM role called o3de-automation-tests which adds your own account as a trusted entity and uses the "AdministratorAccess" permissions policy.
2. Copy {engine_root}\scripts\build\Platform\Windows\deploy_cdk_applications.cmd to your engine root folder.
3. Open a new Command Prompt window at the engine root and set the following environment variables:
```
Set O3DE_AWS_PROJECT_NAME=AWSAUTO
Set O3DE_AWS_DEPLOY_REGION=us-east-1
Set O3DE_AWS_DEPLOY_ACCOUNT={your_aws_account_id}
Set ASSUME_ROLE_ARN=arn:aws:iam::{your_aws_account_id}:role/o3de-automation-tests
Set COMMIT_ID=HEAD
```
4. In the same Command Prompt window, Deploy the CDK applications for AWS gems by running deploy_cdk_applications.cmd.
2. Copy the following deployment script to your engine root folder:
* Windows (Command Prompt)
```
{engine_root}\scripts\build\Platform\Windows\deploy_cdk_applications.cmd
```
* Linux
```
{engine_root}/scripts/build/Platform/Linux/deploy_cdk_applications.sh
```
3. Open a new CLI window at the engine root and set the following environment variables:
* Windows
```
Set O3DE_AWS_PROJECT_NAME=AWSAUTO
Set O3DE_AWS_DEPLOY_REGION=us-east-1
Set ASSUME_ROLE_ARN=arn:aws:iam::{your_aws_account_id}:role/o3de-automation-tests
Set COMMIT_ID=HEAD
```
* Linux
```
export O3DE_AWS_PROJECT_NAME=AWSAUTO
export O3DE_AWS_DEPLOY_REGION=us-east-1
export ASSUME_ROLE_ARN=arn:aws:iam::{your_aws_account_id}:role/o3de-automation-tests
export COMMIT_ID=HEAD
```
4. In the same CLI window, deploy the CDK applications for the AWS gems by running the deployment script you copied in step 2.
## Run Automation Tests
### CLI
In the same Command Prompt window, run the following CLI command:
python\python.cmd -m pytest {path_to_the_test_file} --build-directory {directory_to_the_profile_build}
1. In the same CLI window, run the following CLI command:
* Windows
```
python\python.cmd -m pytest {path_to_the_test_file} --build-directory {directory_to_the_profile_build}
```
* Linux
```
python/python.sh -m pytest {path_to_the_test_file} --build-directory {directory_to_the_profile_build}
```
### PyCharm
You can also run any specific automation test directly from PyCharm by providing the "--build-directory" argument in the Run Configuration.
## Destroy CDK Applications
1. Copy {engine_root}\scripts\build\Platform\Windows\destroy_cdk_applications.cmd to your engine root folder.
2. In the same Command Prompt window, destroy the CDK applications for AWS gems by running destroy_cdk_applications.cmd.
1. Copy the following destruction script to your engine root folder:
* Windows
```
{engine_root}\scripts\build\Platform\Windows\destroy_cdk_applications.cmd
```
* Linux
```
{engine_root}/scripts/build/Platform/Linux/destroy_cdk_applications.sh
```
2. In the same CLI window, destroy the CDK applications for the AWS gems by running the destruction script you copied in step 1.
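The deployment and destruction scripts both read the environment variables from step 3, so a quick pre-flight check can save a failed run. A minimal sketch (hypothetical helper, not part of the repository):

```python
# Sketch: fail fast if the environment variables from step 3 are missing.
import os
import sys

REQUIRED = ["O3DE_AWS_PROJECT_NAME", "O3DE_AWS_DEPLOY_REGION",
            "ASSUME_ROLE_ARN", "COMMIT_ID"]
missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    sys.exit(f"Missing environment variables: {', '.join(missing)}")
print("Environment is ready for the CDK scripts.")
```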

@@ -1,6 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""

@@ -1,289 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import typing
from datetime import datetime
import ly_test_tools.log.log_monitor
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
from .aws_metrics_custom_thread import AWSMetricsThread
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
from .aws_metrics_utils import aws_metrics_utils
AWS_METRICS_FEATURE_NAME = 'AWSMetrics'
logger = logging.getLogger(__name__)
def setup(launcher: pytest.fixture,
asset_processor: pytest.fixture) -> pytest.fixture:
"""
Set up the resource mapping configuration and start the log monitor.
:param launcher: Client launcher for running the test level.
:param asset_processor: asset_processor fixture.
:return log monitor object.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
# Initialize the log monitor.
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
return log_monitor
def monitor_metrics_submission(log_monitor: pytest.fixture) -> None:
"""
Monitor the messages and notifications for submitting metrics.
:param log_monitor: Log monitor to check the log messages.
"""
expected_lines = [
'(Script) - Submitted metrics without buffer.',
'(Script) - Submitted metrics with buffer.',
'(Script) - Flushed the buffered metrics.',
'(Script) - Metrics is sent successfully.'
]
unexpected_lines = [
'(Script) - Failed to submit metrics without buffer.',
'(Script) - Failed to submit metrics with buffer.',
'(Script) - Failed to send metrics.'
]
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True)
# Assert the log monitor detected expected lines and did not detect any unexpected lines.
assert result, (
f'Log monitoring failed. Used expected_lines values: {expected_lines} & '
f'unexpected_lines values: {unexpected_lines}')
def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
"""
Verify that the metrics events are delivered to the S3 bucket and can be queried.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
"""
aws_metrics_utils.verify_s3_delivery(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName')
)
logger.info('Metrics are sent to S3.')
aws_metrics_utils.run_glue_crawler(
resource_mappings.get_resource_name_id('AWSMetrics.EventsCrawlerName'))
# Remove the events_json table if exists so that the sample query can create a table with the same name.
aws_metrics_utils.delete_table(resource_mappings.get_resource_name_id('AWSMetrics.EventDatabaseName'), 'events_json')
aws_metrics_utils.run_named_queries(resource_mappings.get_resource_name_id('AWSMetrics.AthenaWorkGroupName'))
logger.info('Query metrics from S3 successfully.')
def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
resource_mappings: pytest.fixture, start_time: datetime) -> None:
"""
Verify that operational health metrics are delivered to CloudWatch.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
:param start_time: Time when the game launcher starts.
"""
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsProcessingLambdaName')}],
start_time)
logger.info('AnalyticsProcessingLambda metrics are sent to CloudWatch.')
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': resource_mappings.get_resource_name_id('AWSMetrics.EventProcessingLambdaName')}],
start_time)
logger.info('EventsProcessingLambda metrics are sent to CloudWatch.')
def update_kinesis_analytics_application_status(aws_metrics_utils: pytest.fixture,
resource_mappings: pytest.fixture, start_application: bool) -> None:
"""
Update the Kinesis analytics application to start or stop it.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
:param start_application: whether to start or stop the application.
"""
if start_application:
aws_metrics_utils.start_kinesis_data_analytics_application(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
else:
aws_metrics_utils.stop_kinesis_data_analytics_application(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('aws_credentials')
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('feature_name', [AWS_METRICS_FEATURE_NAME])
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_METRICS_FEATURE_NAME}-{constants.AWS_REGION}']])
class TestAWSMetricsWindows(object):
"""
Test class to verify the real-time and batch analytics for metrics.
"""
@pytest.mark.parametrize('level', ['AWS/Metrics'])
def test_realtime_and_batch_analytics(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
"""
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, True))
kinesis_analytics_application_thread.start()
log_monitor = setup(launcher, asset_processor)
# Kinesis analytics application needs to be in the running state before we start the game launcher.
kinesis_analytics_application_thread.join()
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
start_time = datetime.utcnow()
with launcher.start(launch_ap=False):
monitor_metrics_submission(log_monitor)
# Verify that real-time analytics metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
AWS_METRICS_FEATURE_NAME,
'TotalLogins',
[],
start_time)
logger.info('Real-time metrics are sent to CloudWatch.')
# Run time-consuming operations on separate threads to avoid blocking the test.
operational_threads = list()
operational_threads.append(
AWSMetricsThread(target=query_metrics_from_s3,
args=(aws_metrics_utils, resource_mappings)))
operational_threads.append(
AWSMetricsThread(target=verify_operational_metrics,
args=(aws_metrics_utils, resource_mappings, start_time)))
operational_threads.append(
AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, False)))
for thread in operational_threads:
thread.start()
for thread in operational_threads:
thread.join()
@pytest.mark.parametrize('level', ['AWS/Metrics'])
def test_realtime_and_batch_analytics_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
"""
# Remove top-level account ID from resource mappings
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, True))
kinesis_analytics_application_thread.start()
log_monitor = setup(launcher, asset_processor)
# Kinesis analytics application needs to be in the running state before we start the game launcher.
kinesis_analytics_application_thread.join()
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
start_time = datetime.utcnow()
with launcher.start(launch_ap=False):
monitor_metrics_submission(log_monitor)
# Verify that real-time analytics metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
AWS_METRICS_FEATURE_NAME,
'TotalLogins',
[],
start_time)
logger.info('Real-time metrics are sent to CloudWatch.')
# Run time-consuming operations on separate threads to avoid blocking the test.
operational_threads = list()
operational_threads.append(
AWSMetricsThread(target=query_metrics_from_s3,
args=(aws_metrics_utils, resource_mappings)))
operational_threads.append(
AWSMetricsThread(target=verify_operational_metrics,
args=(aws_metrics_utils, resource_mappings, start_time)))
operational_threads.append(
AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, False)))
for thread in operational_threads:
thread.start()
for thread in operational_threads:
thread.join()
@pytest.mark.parametrize('level', ['AWS/Metrics'])
def test_unauthorized_user_request_rejected(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture):
"""
Verify that unauthorized users cannot send metrics events to the AWS backend.
"""
log_monitor = setup(launcher, asset_processor)
# Set invalid AWS credentials.
launcher.args = ['+LoadLevel', level, '+cl_awsAccessKey', 'AKIAIOSFODNN7EXAMPLE',
'+cl_awsSecretKey', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Failed to send metrics.'],
unexpected_lines=['(Script) - Metrics is sent successfully.'],
halt_on_unexpected=True)
assert result, 'Metrics events are sent successfully by unauthorized user'
logger.info('Unauthorized user was rejected when trying to send metrics.')
def test_clean_up_s3_bucket(self,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
Clear the analytics bucket objects so that the S3 bucket can be destroyed during tear down.
"""
aws_metrics_utils.empty_bucket(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName'))

@@ -1,29 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
from threading import Thread
class AWSMetricsThread(Thread):
"""
Custom thread for raising assertion errors on the main thread.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._error = None
def run(self) -> None:
try:
super().run()
except AssertionError as e:
self._error = e
def join(self, **kwargs) -> None:
super().join(**kwargs)
if self._error:
raise AssertionError(self._error)
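A plain threading.Thread silently swallows an AssertionError raised in its target; the subclass above captures the error in run() and re-raises it from join(), so a verification failure on a worker thread still fails the test on the main thread. A short usage sketch against the class above:

```python
# Usage sketch: the assertion raised on the worker thread surfaces at join().
def verify():
    assert False, 'verification failed on the worker thread'

thread = AWSMetricsThread(target=verify)
thread.start()
try:
    thread.join()
except AssertionError as error:
    print(f'Propagated to the main thread: {error}')
```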

@@ -1,239 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import pathlib
import pytest
import typing
from datetime import datetime
from botocore.exceptions import WaiterError
from .aws_metrics_waiters import KinesisAnalyticsApplicationUpdatedWaiter, \
CloudWatchMetricsDeliveredWaiter, DataLakeMetricsDeliveredWaiter, GlueCrawlerReadyWaiter
logging.getLogger('boto').setLevel(logging.CRITICAL)
# Expected directory and file extension for the S3 objects.
EXPECTED_S3_DIRECTORY = 'firehose_events/'
EXPECTED_S3_OBJECT_EXTENSION = '.parquet'
class AWSMetricsUtils:
"""
Provide utils functions for the AWSMetrics gem to interact with the deployed resources.
"""
def __init__(self, aws_utils: pytest.fixture):
self._aws_util = aws_utils
def start_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
Start the Kinesis Data Analytics application for real-time analytics.
:param application_name: Name of the Kinesis Data Analytics application.
"""
input_id = self.get_kinesis_analytics_application_input_id(application_name)
assert input_id, 'invalid Kinesis Data Analytics application input.'
client = self._aws_util.client('kinesisanalytics')
try:
client.start_application(
ApplicationName=application_name,
InputConfigurations=[
{
'Id': input_id,
'InputStartingPositionConfiguration': {
'InputStartingPosition': 'NOW'
}
},
]
)
except client.exceptions.ResourceInUseException:
# The application has been started.
return
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'RUNNING').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to start the Kinesis Data Analytics application: {str(e)}.'
def get_kinesis_analytics_application_input_id(self, application_name: str) -> str:
"""
Get the input ID for the Kinesis Data Analytics application.
:param application_name: Name of the Kinesis Data Analytics application.
:return: Input ID for the Kinesis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
response = client.describe_application(
ApplicationName=application_name
)
if not response:
return ''
input_descriptions = response.get('ApplicationDetail', {}).get('InputDescriptions', [])
if len(input_descriptions) != 1:
return ''
return input_descriptions[0].get('InputId', '')
def stop_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
Stop the Kinesis Data Analytics application.
:param application_name: Name of the Kinesis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
client.stop_application(
ApplicationName=application_name
)
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'READY').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to stop the Kinesis Data Analytics application: {str(e)}.'
def verify_cloud_watch_delivery(self, namespace: str, metrics_name: str,
dimensions: typing.List[dict], start_time: datetime) -> None:
"""
Verify that the expected metrics is delivered to CloudWatch.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
client = self._aws_util.client('cloudwatch')
try:
CloudWatchMetricsDeliveredWaiter(client).wait(
namespace=namespace,
metrics_name=metrics_name,
dimensions=dimensions,
start_time=start_time
)
except WaiterError as e:
assert False, f'Failed to deliver metrics to CloudWatch: {str(e)}.'
def verify_s3_delivery(self, analytics_bucket_name: str) -> None:
"""
Verify that metrics are delivered to S3 for batch analytics successfully.
:param analytics_bucket_name: Name of the deployed S3 bucket.
"""
client = self._aws_util.client('s3')
bucket_name = analytics_bucket_name
try:
DataLakeMetricsDeliveredWaiter(client).wait(bucket_name=bucket_name, prefix=EXPECTED_S3_DIRECTORY)
except WaiterError as e:
assert False, f'Failed to find the S3 directory for storing metrics data: {str(e)}.'
# Check whether the data is converted to the expected data format.
response = client.list_objects_v2(
Bucket=bucket_name,
Prefix=EXPECTED_S3_DIRECTORY
)
assert response.get('KeyCount', 0) != 0, f'Failed to deliver metrics to the S3 bucket {bucket_name}.'
s3_objects = response.get('Contents', [])
for s3_object in s3_objects:
key = s3_object.get('Key', '')
assert pathlib.Path(key).suffix == EXPECTED_S3_OBJECT_EXTENSION, \
f'Invalid data format is found in the S3 bucket {bucket_name}'
def run_glue_crawler(self, crawler_name: str) -> None:
"""
Run the Glue crawler and wait for it to finish.
:param crawler_name: Name of the Glue crawler
"""
client = self._aws_util.client('glue')
try:
client.start_crawler(
Name=crawler_name
)
except client.exceptions.CrawlerRunningException:
# The crawler has already been started.
return
try:
GlueCrawlerReadyWaiter(client).wait(crawler_name=crawler_name)
except WaiterError as e:
assert False, f'Failed to run the Glue crawler: {str(e)}.'
def run_named_queries(self, work_group: str) -> None:
"""
Run the named queries under the specific Athena work group.
:param work_group: Name of the Athena work group.
"""
client = self._aws_util.client('athena')
# List all the named queries.
response = client.list_named_queries(
WorkGroup=work_group
)
named_query_ids = response.get('NamedQueryIds', [])
# Run each of the queries.
for named_query_id in named_query_ids:
get_named_query_response = client.get_named_query(
NamedQueryId=named_query_id
)
named_query = get_named_query_response.get('NamedQuery', {})
start_query_execution_response = client.start_query_execution(
QueryString=named_query.get('QueryString', ''),
QueryExecutionContext={
'Database': named_query.get('Database', '')
},
WorkGroup=work_group
)
# Wait for the query to finish.
state = 'RUNNING'
while state == 'QUEUED' or state == 'RUNNING':
get_query_execution_response = client.get_query_execution(
QueryExecutionId=start_query_execution_response.get('QueryExecutionId', '')
)
state = get_query_execution_response.get('QueryExecution', {}).get('Status', {}).get('State', '')
assert state == 'SUCCEEDED', f'Failed to run the named query {named_query.get("Name", {})}'
def empty_bucket(self, bucket_name: str) -> None:
"""
Empty the S3 bucket following:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/migrations3.html
:param bucket_name: Name of the S3 bucket.
"""
s3 = self._aws_util.resource('s3')
bucket = s3.Bucket(bucket_name)
for key in bucket.objects.all():
key.delete()
def delete_table(self, database_name: str, table_name: str) -> None:
"""
Delete an existing Glue table.
:param database_name: Name of the Glue database.
:param table_name: Name of the table to delete.
"""
client = self._aws_util.client('glue')
client.delete_table(
DatabaseName=database_name,
Name=table_name
)
@pytest.fixture(scope='function')
def aws_metrics_utils(
request: pytest.fixture,
aws_utils: pytest.fixture):
"""
Fixture for the AWS metrics util functions.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
"""
aws_utils_obj = AWSMetricsUtils(aws_utils)
return aws_utils_obj
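One detail worth noting in run_named_queries above: the QUEUED/RUNNING loop polls get_query_execution with no delay between calls. A standalone sketch of the same Athena polling pattern with a sleep added (boto3 credentials and region assumed; the execution ID is whatever start_query_execution returned):

```python
# Sketch: poll an Athena query to completion, sleeping between polls.
import time
import boto3

athena = boto3.client('athena', region_name='us-east-1')  # region assumed

def wait_for_query(execution_id: str, delay_seconds: float = 2.0) -> str:
    while True:
        response = athena.get_query_execution(QueryExecutionId=execution_id)
        state = response['QueryExecution']['Status']['State']
        if state not in ('QUEUED', 'RUNNING'):
            return state  # SUCCEEDED, FAILED, or CANCELLED
        time.sleep(delay_seconds)
```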

@@ -1,139 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import botocore.client
import logging
from datetime import timedelta
from AWS.common.custom_waiter import CustomWaiter, WaitState
logging.getLogger('boto').setLevel(logging.CRITICAL)
class KinesisAnalyticsApplicationUpdatedWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the Kinesis analytics application being updated to a specific status.
"""
def __init__(self, client: botocore.client, status: str):
"""
Initialize the waiter.
:param client: Boto3 client to use.
:param status: Expected status.
"""
super().__init__(
'KinesisAnalyticsApplicationUpdated',
'DescribeApplication',
'ApplicationDetail.ApplicationStatus',
{status: WaitState.SUCCESS},
client)
def wait(self, application_name: str):
"""
Wait for the expected status.
:param application_name: Name of the Kinesis analytics application.
"""
self._wait(ApplicationName=application_name)
class GlueCrawlerReadyWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the Glue crawler to finish its processing. Return when the crawler is in the "Stopping" status
to avoid wasting too much time in the automation tests on its shutdown process.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'GlueCrawlerReady',
'GetCrawler',
'Crawler.State',
{'STOPPING': WaitState.SUCCESS},
client)
def wait(self, crawler_name):
"""
Wait for the expected status.
:param crawler_name: Name of the Glue crawler.
"""
self._wait(Name=crawler_name)
class DataLakeMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the expected directory being created in the S3 bucket.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'DataLakeMetricsDelivered',
'ListObjectsV2',
'KeyCount > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, bucket_name, prefix):
"""
Wait for the expected directory being created.
:param bucket_name: Name of the S3 bucket.
:param prefix: Name of the expected directory prefix.
"""
self._wait(Bucket=bucket_name, Prefix=prefix)
class CloudWatchMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
Wait for the expected metrics being delivered to CloudWatch.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'CloudWatchMetricsDelivered',
'GetMetricStatistics',
'length(Datapoints) > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, namespace, metrics_name, dimensions, start_time):
"""
Wait for the expected metrics being delivered.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
self._wait(
Namespace=namespace,
MetricName=metrics_name,
Dimensions=dimensions,
StartTime=start_time,
EndTime=start_time + timedelta(0, self.timeout),
Period=60,
Statistics=[
'SampleCount'
],
Unit='Count'
)
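Each waiter above binds a boto3 operation name, a JMESPath expression over its response, and the value that counts as success (for example, 'KeyCount > `0`' evaluating True); wait() forwards its keyword arguments to the underlying API call. A usage sketch, assuming the CustomWaiter base from AWS.common.custom_waiter behaves as described and using a hypothetical bucket name:

```python
# Usage sketch: block until the Firehose output directory exists in the
# analytics bucket. Polls ListObjectsV2 until 'KeyCount > `0`' is True.
import boto3

s3_client = boto3.client('s3')
waiter = DataLakeMetricsDeliveredWaiter(s3_client)
waiter.wait(bucket_name='my-analytics-bucket', prefix='firehose_events/')
```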

@@ -1,7 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""

@@ -1,170 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import ly_test_tools.log.log_monitor
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
AWS_CLIENT_AUTH_FEATURE_NAME = 'AWSClientAuth'
logger = logging.getLogger(__name__)
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('aws_utils')
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('feature_name', [AWS_CLIENT_AUTH_FEATURE_NAME])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CLIENT_AUTH_FEATURE_NAME}-Stack-{constants.AWS_REGION}']])
class TestAWSClientAuthWindows(object):
"""
Test class to verify AWS Client Auth gem features on Windows.
"""
@pytest.mark.parametrize('level', ['AWS/ClientAuth'])
def test_anonymous_credentials(self,
level: str,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture
):
"""
Test to verify AWS Cognito Identity pool anonymous authorization.
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Getting credentials when no credentials are configured
Verification: Log monitor looks for success credentials log.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Success anonymous credentials'],
unexpected_lines=['(Script) - Fail anonymous credentials'],
halt_on_unexpected=True,
)
assert result, 'Anonymous credentials fetched successfully.'
@pytest.mark.parametrize('level', ['AWS/ClientAuth'])
def test_anonymous_credentials_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture
):
"""
Test to verify AWS Cognito Identity pool anonymous authorization.
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Getting credentials when no credentials are configured
Verification: Log monitor looks for success credentials log.
"""
# Remove top-level account ID from resource mappings
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Success anonymous credentials'],
unexpected_lines=['(Script) - Fail anonymous credentials'],
halt_on_unexpected=True,
)
assert result, 'Anonymous credentials fetched successfully.'
def test_password_signin_credentials(self,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
aws_utils: pytest.fixture
):
"""
Test to verify AWS Cognito IDP Password sign in and Cognito Identity pool authenticated authorization.
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Sign up new test user, admin confirm the user, sign in and get aws credentials.
Verification: Log monitor looks for success credentials log.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
cognito_idp = aws_utils.client('cognito-idp')
user_pool_id = resource_mappings.get_resource_name_id(f'{AWS_CLIENT_AUTH_FEATURE_NAME}.CognitoUserPoolId')
logger.info(f'UserPoolId:{user_pool_id}')
# Remove the user if already exists
try:
cognito_idp.admin_delete_user(
UserPoolId=user_pool_id,
Username='test1'
)
except cognito_idp.exceptions.UserNotFoundException:
pass
launcher.args = ['+LoadLevel', 'AWS/ClientAuthPasswordSignUp']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Signup Success'],
unexpected_lines=['(Script) - Signup Fail'],
halt_on_unexpected=True,
)
assert result, 'Sign Up Success.'
launcher.stop()
cognito_idp.admin_confirm_sign_up(
UserPoolId=user_pool_id,
Username='test1'
)
launcher.args = ['+LoadLevel', 'AWS/ClientAuthPasswordSignIn']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - SignIn Success', '(Script) - Success credentials'],
unexpected_lines=['(Script) - SignIn Fail', '(Script) - Fail credentials'],
halt_on_unexpected=True,
)
assert result, 'Sign in Success, fetched authenticated AWS temp credentials.'

@@ -1,192 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import shutil
import typing
from botocore.exceptions import ClientError
import pytest
import ly_test_tools
import ly_test_tools.log.log_monitor
import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.o3de.asset_processor_utils as asset_processor_utils
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
AWS_CORE_FEATURE_NAME = 'AWSCore'
process_utils.kill_processes_named("o3de", ignore_extensions=True) # Kill ProjectManager windows
logger = logging.getLogger(__name__)
def setup(launcher: pytest.fixture, asset_processor: pytest.fixture) -> typing.Tuple[pytest.fixture, str]:
"""
Set up the resource mapping configuration and start the log monitor.
:param launcher: Client launcher for running the test level.
:param asset_processor: asset_processor fixture.
:return log monitor object and the S3 download directory for the test file.
"""
# Create the temporary directory for downloading test file from S3.
user_dir = os.path.join(launcher.workspace.paths.project(), 'user')
s3_download_dir = os.path.join(user_dir, 's3_download')
if not os.path.exists(s3_download_dir):
os.makedirs(s3_download_dir)
asset_processor_utils.kill_asset_processor()
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
return log_monitor, s3_download_dir
def write_test_data_to_dynamodb_table(resource_mappings: pytest.fixture, aws_utils: pytest.fixture) -> None:
"""
Write test data to the DynamoDB table created by the CDK application.
:param resource_mappings: resource_mappings fixture.
:param aws_utils: aws_utils fixture.
"""
table_name = resource_mappings.get_resource_name_id(f'{AWS_CORE_FEATURE_NAME}.ExampleDynamoTableOutput')
try:
aws_utils.client('dynamodb').put_item(
TableName=table_name,
Item={
'id': {
'S': 'Item1'
}
}
)
logger.info(f'Loaded data into table {table_name}')
except ClientError:
logger.exception(f'Failed to load data into table {table_name}')
raise
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.parametrize('feature_name', [AWS_CORE_FEATURE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['AWS/Core'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}',
f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}-Example-{constants.AWS_REGION}']])
@pytest.mark.usefixtures('aws_credentials')
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
class TestAWSCoreAWSResourceInteraction(object):
"""
Test class to verify the scripting behavior for the AWSCore gem.
"""
@pytest.mark.parametrize('expected_lines', [
['(Script) - [S3] Head object request is done',
'(Script) - [S3] Head object success: Object example.txt is found.',
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
'(Script) - [Lambda] Completed Invoke',
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
'(Script) - [DynamoDB] Results finished']])
@pytest.mark.parametrize('unexpected_lines', [
['(Script) - [S3] Head object error: No response body.',
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
'(Script) - Request validation failed, output file miss full path.',
'(Script) - ']])
def test_scripting_behavior(self,
level: str,
launcher: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
resource_mappings: pytest.fixture,
aws_utils: pytest.fixture,
expected_lines: typing.List[str],
unexpected_lines: typing.List[str]):
"""
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Interact with AWS S3, DynamoDB and Lambda services.
Verification: Script canvas nodes can communicate with AWS services successfully.
"""
log_monitor, s3_download_dir = setup(launcher, asset_processor)
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True
)
assert result, "Expected lines weren't found."
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
'The expected file wasn\'t successfully downloaded.'
# clean up the file directories.
shutil.rmtree(s3_download_dir)
@pytest.mark.parametrize('expected_lines', [
['(Script) - [S3] Head object request is done',
'(Script) - [S3] Head object success: Object example.txt is found.',
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
'(Script) - [Lambda] Completed Invoke',
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
'(Script) - [DynamoDB] Results finished']])
@pytest.mark.parametrize('unexpected_lines', [
['(Script) - [S3] Head object error: No response body.',
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
'(Script) - Request validation failed, output file miss full path.',
'(Script) - ']])
def test_scripting_behavior_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
resource_mappings: pytest.fixture,
aws_utils: pytest.fixture,
expected_lines: typing.List[str],
unexpected_lines: typing.List[str]):
"""
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Interact with AWS S3, DynamoDB and Lambda services.
Verification: Script canvas nodes can communicate with AWS services successfully.
"""
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
log_monitor, s3_download_dir = setup(launcher, asset_processor)
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True
)
assert result, "Expected lines weren't found."
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
'The expected file wasn\'t successfully downloaded.'
# clean up the file directories.
shutil.rmtree(s3_download_dir)

@@ -0,0 +1,289 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import typing
from datetime import datetime
import ly_test_tools.log.log_monitor
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
from .aws_metrics_custom_thread import AWSMetricsThread
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
from .aws_metrics_utils import aws_metrics_utils
AWS_METRICS_FEATURE_NAME = 'AWSMetrics'
logger = logging.getLogger(__name__)
def setup(launcher: pytest.fixture,
asset_processor: pytest.fixture) -> pytest.fixture:
"""
Set up the resource mapping configuration and start the log monitor.
:param launcher: Client launcher for running the test level.
:param asset_processor: asset_processor fixture.
:return log monitor object.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
# Initialize the log monitor.
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
return log_monitor
def monitor_metrics_submission(log_monitor: pytest.fixture) -> None:
"""
Monitor the messages and notifications for submitting metrics.
:param log_monitor: Log monitor to check the log messages.
"""
expected_lines = [
'(Script) - Submitted metrics without buffer.',
'(Script) - Submitted metrics with buffer.',
'(Script) - Flushed the buffered metrics.',
'(Script) - Metrics is sent successfully.'
]
unexpected_lines = [
'(Script) - Failed to submit metrics without buffer.',
'(Script) - Failed to submit metrics with buffer.',
'(Script) - Failed to send metrics.'
]
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True)
# Assert the log monitor detected expected lines and did not detect any unexpected lines.
assert result, (
f'Log monitoring failed. Used expected_lines values: {expected_lines} & '
f'unexpected_lines values: {unexpected_lines}')
def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
"""
Verify that the metrics events are delivered to the S3 bucket and can be queried.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
"""
aws_metrics_utils.verify_s3_delivery(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName')
)
logger.info('Metrics are sent to S3.')
aws_metrics_utils.run_glue_crawler(
resource_mappings.get_resource_name_id('AWSMetrics.EventsCrawlerName'))
    # Remove the events_json table if it exists so that the sample query can create a table with the same name.
aws_metrics_utils.delete_table(resource_mappings.get_resource_name_id('AWSMetrics.EventDatabaseName'), 'events_json')
aws_metrics_utils.run_named_queries(resource_mappings.get_resource_name_id('AWSMetrics.AthenaWorkGroupName'))
logger.info('Query metrics from S3 successfully.')
def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
resource_mappings: pytest.fixture, start_time: datetime) -> None:
"""
Verify that operational health metrics are delivered to CloudWatch.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
:param start_time: Time when the game launcher starts.
"""
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsProcessingLambdaName')}],
start_time)
logger.info('AnalyticsProcessingLambda metrics are sent to CloudWatch.')
aws_metrics_utils.verify_cloud_watch_delivery(
'AWS/Lambda',
'Invocations',
[{'Name': 'FunctionName',
'Value': resource_mappings.get_resource_name_id('AWSMetrics.EventProcessingLambdaName')}],
start_time)
logger.info('EventsProcessingLambda metrics are sent to CloudWatch.')
def update_kinesis_analytics_application_status(aws_metrics_utils: pytest.fixture,
resource_mappings: pytest.fixture, start_application: bool) -> None:
"""
Update the Kinesis analytics application to start or stop it.
:param aws_metrics_utils: aws_metrics_utils fixture.
:param resource_mappings: resource_mappings fixture.
:param start_application: whether to start or stop the application.
"""
if start_application:
aws_metrics_utils.start_kinesis_data_analytics_application(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
else:
aws_metrics_utils.stop_kinesis_data_analytics_application(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('aws_credentials')
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('feature_name', [AWS_METRICS_FEATURE_NAME])
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_METRICS_FEATURE_NAME}-{constants.AWS_REGION}']])
class TestAWSMetricsWindows(object):
"""
Test class to verify the real-time and batch analytics for metrics.
"""
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
def test_realtime_and_batch_analytics(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
"""
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, True))
kinesis_analytics_application_thread.start()
log_monitor = setup(launcher, asset_processor)
# Kinesis analytics application needs to be in the running state before we start the game launcher.
kinesis_analytics_application_thread.join()
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
start_time = datetime.utcnow()
with launcher.start(launch_ap=False):
monitor_metrics_submission(log_monitor)
# Verify that real-time analytics metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
AWS_METRICS_FEATURE_NAME,
'TotalLogins',
[],
start_time)
logger.info('Real-time metrics are sent to CloudWatch.')
# Run time-consuming operations on separate threads to avoid blocking the test.
operational_threads = list()
operational_threads.append(
AWSMetricsThread(target=query_metrics_from_s3,
args=(aws_metrics_utils, resource_mappings)))
operational_threads.append(
AWSMetricsThread(target=verify_operational_metrics,
args=(aws_metrics_utils, resource_mappings, start_time)))
operational_threads.append(
AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, False)))
for thread in operational_threads:
thread.start()
for thread in operational_threads:
thread.join()
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
def test_realtime_and_batch_analytics_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
        Verify that the metrics events are sent to CloudWatch and S3 for analytics when the global account ID is removed from the resource mappings.
"""
# Remove top-level account ID from resource mappings
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, True))
kinesis_analytics_application_thread.start()
log_monitor = setup(launcher, asset_processor)
# Kinesis analytics application needs to be in the running state before we start the game launcher.
kinesis_analytics_application_thread.join()
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
start_time = datetime.utcnow()
with launcher.start(launch_ap=False):
monitor_metrics_submission(log_monitor)
# Verify that real-time analytics metrics are delivered to CloudWatch.
aws_metrics_utils.verify_cloud_watch_delivery(
AWS_METRICS_FEATURE_NAME,
'TotalLogins',
[],
start_time)
logger.info('Real-time metrics are sent to CloudWatch.')
# Run time-consuming operations on separate threads to avoid blocking the test.
operational_threads = list()
operational_threads.append(
AWSMetricsThread(target=query_metrics_from_s3,
args=(aws_metrics_utils, resource_mappings)))
operational_threads.append(
AWSMetricsThread(target=verify_operational_metrics,
args=(aws_metrics_utils, resource_mappings, start_time)))
operational_threads.append(
AWSMetricsThread(target=update_kinesis_analytics_application_status,
args=(aws_metrics_utils, resource_mappings, False)))
for thread in operational_threads:
thread.start()
for thread in operational_threads:
thread.join()
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
def test_unauthorized_user_request_rejected(self,
level: str,
launcher: pytest.fixture,
asset_processor: pytest.fixture,
workspace: pytest.fixture):
"""
        Verify that unauthorized users cannot send metrics events to the AWS backend.
"""
log_monitor = setup(launcher, asset_processor)
# Set invalid AWS credentials.
launcher.args = ['+LoadLevel', level, '+cl_awsAccessKey', 'AKIAIOSFODNN7EXAMPLE',
'+cl_awsSecretKey', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Failed to send metrics.'],
unexpected_lines=['(Script) - Metrics is sent successfully.'],
halt_on_unexpected=True)
        assert result, 'Unauthorized user was able to send metrics events.'
        logger.info('Unauthorized user was rejected from sending metrics.')
def test_clean_up_s3_bucket(self,
aws_utils: pytest.fixture,
resource_mappings: pytest.fixture,
aws_metrics_utils: pytest.fixture):
"""
Clear the analytics bucket objects so that the S3 bucket can be destroyed during tear down.
"""
aws_metrics_utils.empty_bucket(
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName'))

@ -0,0 +1,29 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
from threading import Thread
class AWSMetricsThread(Thread):
"""
Custom thread for raising assertion errors on the main thread.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._error = None
def run(self) -> None:
try:
super().run()
except AssertionError as e:
self._error = e
def join(self, **kwargs) -> None:
super().join(**kwargs)
if self._error:
raise AssertionError(self._error)
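# Illustrative usage sketch (the target function name below is hypothetical,
# not part of the gem): the pattern the tests rely on is running a
# verification helper off the main thread and re-raising its AssertionError
# on the main thread when the test calls join().
def _example_usage():  # pragma: no cover - documentation sketch only
    def verify_something():
        assert 1 + 1 == 2, 'verification failed'

    thread = AWSMetricsThread(target=verify_something)
    thread.start()
    thread.join()  # re-raises the AssertionError captured in run(), if any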

@ -0,0 +1,239 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import pathlib
import pytest
import time
import typing
from datetime import datetime
from botocore.exceptions import WaiterError
from .aws_metrics_waiters import KinesisAnalyticsApplicationUpdatedWaiter, \
CloudWatchMetricsDeliveredWaiter, DataLakeMetricsDeliveredWaiter, GlueCrawlerReadyWaiter
logging.getLogger('boto').setLevel(logging.CRITICAL)
# Expected directory and file extension for the S3 objects.
EXPECTED_S3_DIRECTORY = 'firehose_events/'
EXPECTED_S3_OBJECT_EXTENSION = '.parquet'
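# The pipeline is expected to convert incoming JSON metrics events to Parquet
# (e.g., via Kinesis Data Firehose record format conversion) before landing
# them under this prefix; the assertions below only check the directory prefix
# and file extension, not the Parquet contents.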
class AWSMetricsUtils:
"""
Provide utils functions for the AWSMetrics gem to interact with the deployed resources.
"""
def __init__(self, aws_utils: pytest.fixture):
self._aws_util = aws_utils
def start_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
        Start the Kinesis Data Analytics application for real-time analytics.
        :param application_name: Name of the Kinesis Data Analytics application.
"""
input_id = self.get_kinesis_analytics_application_input_id(application_name)
assert input_id, 'invalid Kinesis Data Analytics application input.'
client = self._aws_util.client('kinesisanalytics')
try:
client.start_application(
ApplicationName=application_name,
InputConfigurations=[
{
'Id': input_id,
'InputStartingPositionConfiguration': {
'InputStartingPosition': 'NOW'
}
},
]
)
except client.exceptions.ResourceInUseException:
# The application has been started.
return
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'RUNNING').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to start the Kinesis Data Analytics application: {str(e)}.'
def get_kinesis_analytics_application_input_id(self, application_name: str) -> str:
"""
        Get the input ID for the Kinesis Data Analytics application.
        :param application_name: Name of the Kinesis Data Analytics application.
        :return: Input ID for the Kinesis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
response = client.describe_application(
ApplicationName=application_name
)
if not response:
return ''
input_descriptions = response.get('ApplicationDetail', {}).get('InputDescriptions', [])
if len(input_descriptions) != 1:
return ''
return input_descriptions[0].get('InputId', '')
def stop_kinesis_data_analytics_application(self, application_name: str) -> None:
"""
        Stop the Kinesis Data Analytics application.
        :param application_name: Name of the Kinesis Data Analytics application.
"""
client = self._aws_util.client('kinesisanalytics')
client.stop_application(
ApplicationName=application_name
)
try:
KinesisAnalyticsApplicationUpdatedWaiter(client, 'READY').wait(application_name=application_name)
except WaiterError as e:
assert False, f'Failed to stop the Kinesis Data Analytics application: {str(e)}.'
def verify_cloud_watch_delivery(self, namespace: str, metrics_name: str,
dimensions: typing.List[dict], start_time: datetime) -> None:
"""
        Verify that the expected metrics are delivered to CloudWatch.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
client = self._aws_util.client('cloudwatch')
try:
CloudWatchMetricsDeliveredWaiter(client).wait(
namespace=namespace,
metrics_name=metrics_name,
dimensions=dimensions,
start_time=start_time
)
except WaiterError as e:
assert False, f'Failed to deliver metrics to CloudWatch: {str(e)}.'
def verify_s3_delivery(self, analytics_bucket_name: str) -> None:
"""
Verify that metrics are delivered to S3 for batch analytics successfully.
:param analytics_bucket_name: Name of the deployed S3 bucket.
"""
client = self._aws_util.client('s3')
bucket_name = analytics_bucket_name
try:
DataLakeMetricsDeliveredWaiter(client).wait(bucket_name=bucket_name, prefix=EXPECTED_S3_DIRECTORY)
except WaiterError as e:
assert False, f'Failed to find the S3 directory for storing metrics data: {str(e)}.'
# Check whether the data is converted to the expected data format.
response = client.list_objects_v2(
Bucket=bucket_name,
Prefix=EXPECTED_S3_DIRECTORY
)
assert response.get('KeyCount', 0) != 0, f'Failed to deliver metrics to the S3 bucket {bucket_name}.'
s3_objects = response.get('Contents', [])
for s3_object in s3_objects:
key = s3_object.get('Key', '')
assert pathlib.Path(key).suffix == EXPECTED_S3_OBJECT_EXTENSION, \
f'Invalid data format is found in the S3 bucket {bucket_name}'
def run_glue_crawler(self, crawler_name: str) -> None:
"""
Run the Glue crawler and wait for it to finish.
:param crawler_name: Name of the Glue crawler
"""
client = self._aws_util.client('glue')
try:
client.start_crawler(
Name=crawler_name
)
except client.exceptions.CrawlerRunningException:
# The crawler has already been started.
return
try:
GlueCrawlerReadyWaiter(client).wait(crawler_name=crawler_name)
except WaiterError as e:
assert False, f'Failed to run the Glue crawler: {str(e)}.'
def run_named_queries(self, work_group: str) -> None:
"""
Run the named queries under the specific Athena work group.
:param work_group: Name of the Athena work group.
"""
client = self._aws_util.client('athena')
# List all the named queries.
response = client.list_named_queries(
WorkGroup=work_group
)
named_query_ids = response.get('NamedQueryIds', [])
# Run each of the queries.
for named_query_id in named_query_ids:
get_named_query_response = client.get_named_query(
NamedQueryId=named_query_id
)
named_query = get_named_query_response.get('NamedQuery', {})
start_query_execution_response = client.start_query_execution(
QueryString=named_query.get('QueryString', ''),
QueryExecutionContext={
'Database': named_query.get('Database', '')
},
WorkGroup=work_group
)
            # Poll until the query finishes, sleeping between checks to avoid
            # hammering the Athena API.
            state = 'RUNNING'
            while state in ('QUEUED', 'RUNNING'):
                time.sleep(1)
                get_query_execution_response = client.get_query_execution(
                    QueryExecutionId=start_query_execution_response.get('QueryExecutionId', '')
                )
                state = get_query_execution_response.get('QueryExecution', {}).get('Status', {}).get('State', '')
            assert state == 'SUCCEEDED', f'Failed to run the named query {named_query.get("Name", "")}'
def empty_bucket(self, bucket_name: str) -> None:
"""
Empty the S3 bucket following:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/migrations3.html
:param bucket_name: Name of the S3 bucket.
"""
s3 = self._aws_util.resource('s3')
bucket = s3.Bucket(bucket_name)
for key in bucket.objects.all():
key.delete()
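        # Note: this removes current object versions only. If the bucket had
        # versioning enabled, a batch delete of object versions (e.g.
        # s3.Bucket(bucket_name).object_versions.delete()) would also be
        # required before the bucket could be destroyed; the analytics bucket
        # here is assumed to be unversioned.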
def delete_table(self, database_name: str, table_name: str) -> None:
"""
Delete an existing Glue table.
:param database_name: Name of the Glue database.
:param table_name: Name of the table to delete.
"""
client = self._aws_util.client('glue')
client.delete_table(
DatabaseName=database_name,
Name=table_name
)
@pytest.fixture(scope='function')
def aws_metrics_utils(
request: pytest.fixture,
aws_utils: pytest.fixture):
"""
Fixture for the AWS metrics util functions.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
"""
aws_utils_obj = AWSMetricsUtils(aws_utils)
return aws_utils_obj

@ -0,0 +1,139 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import botocore.client
import logging
from datetime import timedelta
from AWS.common.custom_waiter import CustomWaiter, WaitState
logging.getLogger('boto').setLevel(logging.CRITICAL)
class KinesisAnalyticsApplicationUpdatedWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
    Wait for the Kinesis analytics application to reach a specific status.
"""
def __init__(self, client: botocore.client, status: str):
"""
Initialize the waiter.
:param client: Boto3 client to use.
:param status: Expected status.
"""
super().__init__(
'KinesisAnalyticsApplicationUpdated',
'DescribeApplication',
'ApplicationDetail.ApplicationStatus',
{status: WaitState.SUCCESS},
client)
def wait(self, application_name: str):
"""
Wait for the expected status.
:param application_name: Name of the Kinesis analytics application.
"""
self._wait(ApplicationName=application_name)
class GlueCrawlerReadyWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
    Wait for the Glue crawler to finish processing. Returns once the crawler reaches the
    'STOPPING' state so the automation tests don't wait out its full shutdown.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'GlueCrawlerReady',
'GetCrawler',
'Crawler.State',
{'STOPPING': WaitState.SUCCESS},
client)
def wait(self, crawler_name):
"""
Wait for the expected status.
:param crawler_name: Name of the Glue crawler.
"""
self._wait(Name=crawler_name)
class DataLakeMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
    Wait for the expected directory to be created in the S3 bucket.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'DataLakeMetricsDelivered',
'ListObjectsV2',
'KeyCount > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, bucket_name, prefix):
"""
        Wait for the expected directory to be created.
:param bucket_name: Name of the S3 bucket.
:param prefix: Name of the expected directory prefix.
"""
self._wait(Bucket=bucket_name, Prefix=prefix)
class CloudWatchMetricsDeliveredWaiter(CustomWaiter):
"""
Subclass of the base custom waiter class.
    Wait for the expected metrics to be delivered to CloudWatch.
"""
def __init__(self, client: botocore.client):
"""
Initialize the waiter.
:param client: Boto3 client to use.
"""
super().__init__(
'CloudWatchMetricsDelivered',
'GetMetricStatistics',
'length(Datapoints) > `0`',
{True: WaitState.SUCCESS},
client)
def wait(self, namespace, metrics_name, dimensions, start_time):
"""
        Wait for the expected metrics to be delivered.
:param namespace: Namespace of the metrics.
:param metrics_name: Name of the metrics.
:param dimensions: Dimensions of the metrics.
:param start_time: Start time for generating the metrics.
"""
self._wait(
Namespace=namespace,
MetricName=metrics_name,
Dimensions=dimensions,
StartTime=start_time,
EndTime=start_time + timedelta(0, self.timeout),
Period=60,
Statistics=[
'SampleCount'
],
Unit='Count'
)
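# Illustrative sketch only: assuming CustomWaiter accepts any boto3 operation
# name plus a JMESPath expression over its response (as the waiters above do),
# a hypothetical waiter for an AWS Lambda function reaching the 'Active' state
# would follow the same pattern. 'GetFunction' and 'Configuration.State' are
# real Lambda API fields; this waiter is not used by these tests.
class LambdaFunctionActiveWaiter(CustomWaiter):
    """
    Subclass of the base custom waiter class.
    Wait for a Lambda function to reach the 'Active' state.
    """
    def __init__(self, client: botocore.client):
        super().__init__(
            'LambdaFunctionActive',
            'GetFunction',
            'Configuration.State',
            {'Active': WaitState.SUCCESS},
            client)

    def wait(self, function_name):
        self._wait(FunctionName=function_name)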

@ -0,0 +1,170 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import ly_test_tools.log.log_monitor
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
AWS_CLIENT_AUTH_FEATURE_NAME = 'AWSClientAuth'
logger = logging.getLogger(__name__)
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('aws_utils')
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('feature_name', [AWS_CLIENT_AUTH_FEATURE_NAME])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CLIENT_AUTH_FEATURE_NAME}-Stack-{constants.AWS_REGION}']])
class TestAWSClientAuthWindows(object):
"""
Test class to verify AWS Client Auth gem features on Windows.
"""
@pytest.mark.parametrize('level', ['levels/aws/clientauth/clientauth.spawnable'])
def test_anonymous_credentials(self,
level: str,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture
):
"""
Test to verify AWS Cognito Identity pool anonymous authorization.
Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Getting credentials when no credentials are configured.
Verification: Log monitor looks for success credentials log.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Success anonymous credentials'],
unexpected_lines=['(Script) - Fail anonymous credentials'],
halt_on_unexpected=True,
)
        assert result, 'Failed to fetch anonymous credentials.'
@pytest.mark.parametrize('level', ['levels/aws/clientauth/clientauth.spawnable'])
def test_anonymous_credentials_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture
):
"""
        Test to verify AWS Cognito Identity pool anonymous authorization without a global account ID in the resource mappings.
        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Getting credentials when no credentials are configured.
Verification: Log monitor looks for success credentials log.
"""
# Remove top-level account ID from resource mappings
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Success anonymous credentials'],
unexpected_lines=['(Script) - Fail anonymous credentials'],
halt_on_unexpected=True,
)
        assert result, 'Failed to fetch anonymous credentials.'
def test_password_signin_credentials(self,
launcher: pytest.fixture,
resource_mappings: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
aws_utils: pytest.fixture
):
"""
Test to verify AWS Cognito IDP Password sign in and Cognito Identity pool authenticated authorization.
Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Sign up a new test user, admin-confirm the user, sign in, and get AWS credentials.
Verification: Log monitor looks for success credentials log.
"""
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
cognito_idp = aws_utils.client('cognito-idp')
user_pool_id = resource_mappings.get_resource_name_id(f'{AWS_CLIENT_AUTH_FEATURE_NAME}.CognitoUserPoolId')
logger.info(f'UserPoolId:{user_pool_id}')
        # Remove the user if it already exists
try:
cognito_idp.admin_delete_user(
UserPoolId=user_pool_id,
Username='test1'
)
except cognito_idp.exceptions.UserNotFoundException:
pass
launcher.args = ['+LoadLevel', 'levels/aws/clientauthpasswordsignup/clientauthpasswordsignup.spawnable']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - Signup Success'],
unexpected_lines=['(Script) - Signup Fail'],
halt_on_unexpected=True,
)
        assert result, 'Sign up failed.'
launcher.stop()
cognito_idp.admin_confirm_sign_up(
UserPoolId=user_pool_id,
Username='test1'
)
launcher.args = ['+LoadLevel', 'levels/aws/clientauthpasswordsignin/clientauthpasswordsignin.spawnable']
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=['(Script) - SignIn Success', '(Script) - Success credentials'],
unexpected_lines=['(Script) - SignIn Fail', '(Script) - Fail credentials'],
halt_on_unexpected=True,
)
        assert result, 'Failed to sign in and fetch authenticated AWS temporary credentials.'

@ -68,6 +68,10 @@ class AwsCredentials:
if (len(self._credentials.sections()) == 0) and (not self._credentials_file_exists):
os.remove(self._credentials_path)
return
credentials_file_dir = os.path.dirname(self._credentials_path)
if not os.path.isdir(credentials_file_dir):
os.makedirs(credentials_file_dir)
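        # Equivalent, race-free alternative: os.makedirs(credentials_file_dir, exist_ok=True)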
with open(self._credentials_path, 'w+') as credential_file:
self._credentials.write(credential_file)

@ -16,7 +16,7 @@ logging.getLogger('nose').setLevel(logging.WARNING)
class AwsUtils:
def __init__(self, arn: str, session_name: str, region_name: str):
local_session = boto3.Session(profile_name='default')
local_session = boto3.Session()
local_sts_client = local_session.client('sts')
self._local_account_id = local_sts_client.get_caller_identity()["Account"]
logger.info(f'Local Account Id: {self._local_account_id}')

@ -6,11 +6,13 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import platform
# ARN of the IAM role to assume for retrieving temporary AWS credentials
ASSUME_ROLE_ARN = os.environ.get('ASSUME_ROLE_ARN', 'arn:aws:iam::645075835648:role/o3de-automation-tests')
# Name of the AWS project deployed by the CDK applications
AWS_PROJECT_NAME = os.environ.get('O3DE_AWS_PROJECT_NAME', 'AWSAUTO')
AWS_PROJECT_NAME = os.environ.get('O3DE_AWS_PROJECT_NAME').upper() if os.environ.get('O3DE_AWS_PROJECT_NAME') else \
(os.environ.get('BRANCH_NAME', '') + '-' + os.environ.get('PIPELINE_NAME', '') + '-' + platform.system()).upper()
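# e.g., with O3DE_AWS_PROJECT_NAME unset, BRANCH_NAME='dev' and PIPELINE_NAME='nightly'
# on a Windows host, AWS_PROJECT_NAME resolves to 'DEV-NIGHTLY-WINDOWS'
# (hypothetical values, shown for illustration).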
# Region for the existing CloudFormation stacks used by the automation tests
AWS_REGION = os.environ.get('O3DE_AWS_DEPLOY_REGION', 'us-east-1')
# Name of the default resource mapping config file used by the automation tests

@ -0,0 +1,6 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""

@ -0,0 +1,192 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import shutil
import typing
from botocore.exceptions import ClientError
import pytest
import ly_test_tools
import ly_test_tools.log.log_monitor
import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.o3de.asset_processor_utils as asset_processor_utils
from AWS.common import constants
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
# fixture imports
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
AWS_CORE_FEATURE_NAME = 'AWSCore'
process_utils.kill_processes_named("o3de", ignore_extensions=True) # Kill ProjectManager windows
logger = logging.getLogger(__name__)
def setup(launcher: pytest.fixture, asset_processor: pytest.fixture) -> typing.Tuple[pytest.fixture, str]:
"""
    Create the S3 download directory, start the asset processor, and set up the log monitor.
    :param launcher: Client launcher for running the test level.
    :param asset_processor: asset_processor fixture.
    :return: Log monitor object and the S3 download directory path.
"""
# Create the temporary directory for downloading test file from S3.
user_dir = os.path.join(launcher.workspace.paths.project(), 'user')
s3_download_dir = os.path.join(user_dir, 's3_download')
if not os.path.exists(s3_download_dir):
os.makedirs(s3_download_dir)
asset_processor_utils.kill_asset_processor()
asset_processor.start()
asset_processor.wait_for_idle()
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
return log_monitor, s3_download_dir
def write_test_data_to_dynamodb_table(resource_mappings: pytest.fixture, aws_utils: pytest.fixture) -> None:
"""
Write test data to the DynamoDB table created by the CDK application.
:param resource_mappings: resource_mappings fixture.
:param aws_utils: aws_utils fixture.
"""
table_name = resource_mappings.get_resource_name_id(f'{AWS_CORE_FEATURE_NAME}.ExampleDynamoTableOutput')
try:
aws_utils.client('dynamodb').put_item(
TableName=table_name,
Item={
'id': {
'S': 'Item1'
}
}
)
logger.info(f'Loaded data into table {table_name}')
except ClientError:
logger.exception(f'Failed to load data into table {table_name}')
raise
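# Illustrative read-back sketch (not performed by this test): the item written
# above could be verified with the low-level DynamoDB client, e.g.
#     response = aws_utils.client('dynamodb').get_item(
#         TableName=table_name, Key={'id': {'S': 'Item1'}})
#     assert 'Item' in response, f'Item1 not found in table {table_name}'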
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.parametrize('feature_name', [AWS_CORE_FEATURE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['levels/aws/core/core.spawnable'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}',
f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}-Example-{constants.AWS_REGION}']])
@pytest.mark.usefixtures('aws_credentials')
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
class TestAWSCoreAWSResourceInteraction(object):
"""
Test class to verify the scripting behavior for the AWSCore gem.
"""
@pytest.mark.parametrize('expected_lines', [
['(Script) - [S3] Head object request is done',
'(Script) - [S3] Head object success: Object example.txt is found.',
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
'(Script) - [Lambda] Completed Invoke',
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
'(Script) - [DynamoDB] Results finished']])
@pytest.mark.parametrize('unexpected_lines', [
['(Script) - [S3] Head object error: No response body.',
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
'(Script) - Request validation failed, output file miss full path.',
'(Script) - ']])
def test_scripting_behavior(self,
level: str,
launcher: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
resource_mappings: pytest.fixture,
aws_utils: pytest.fixture,
expected_lines: typing.List[str],
unexpected_lines: typing.List[str]):
"""
Setup: Updates resource mapping file using existing CloudFormation stacks.
Tests: Interact with AWS S3, DynamoDB and Lambda services.
Verification: Script canvas nodes can communicate with AWS services successfully.
"""
log_monitor, s3_download_dir = setup(launcher, asset_processor)
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True
)
assert result, "Expected lines weren't found."
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
'The expected file wasn\'t successfully downloaded.'
# clean up the file directories.
shutil.rmtree(s3_download_dir)
@pytest.mark.parametrize('expected_lines', [
['(Script) - [S3] Head object request is done',
'(Script) - [S3] Head object success: Object example.txt is found.',
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
'(Script) - [Lambda] Completed Invoke',
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
'(Script) - [DynamoDB] Results finished']])
@pytest.mark.parametrize('unexpected_lines', [
['(Script) - [S3] Head object error: No response body.',
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
'(Script) - Request validation failed, output file miss full path.',
'(Script) - ']])
def test_scripting_behavior_no_global_accountid(self,
level: str,
launcher: pytest.fixture,
workspace: pytest.fixture,
asset_processor: pytest.fixture,
resource_mappings: pytest.fixture,
aws_utils: pytest.fixture,
expected_lines: typing.List[str],
unexpected_lines: typing.List[str]):
"""
Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Interact with AWS S3, DynamoDB and Lambda services with the global account ID removed from the resource mappings.
Verification: Script canvas nodes can communicate with AWS services successfully.
"""
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
log_monitor, s3_download_dir = setup(launcher, asset_processor)
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
launcher.args = ['+LoadLevel', level]
launcher.args.extend(['-rhi=null'])
with launcher.start(launch_ap=False):
result = log_monitor.monitor_log_for_lines(
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True
)
assert result, "Expected lines weren't found."
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
'The expected file wasn\'t successfully downloaded.'
# clean up the file directories.
shutil.rmtree(s3_download_dir)

@ -4,16 +4,28 @@ For complete copyright and license terms please see the LICENSE at the root of t
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
import ly_test_tools.environment.file_system as file_system
import editor_python_test_tools.hydra_test_utils as hydra
from ly_test_tools.o3de.editor_test import EditorSharedTest, EditorTestSuite
logger = logging.getLogger(__name__)
TEST_DIRECTORY = os.path.join(os.path.dirname(__file__), "tests")
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestAutomation(EditorTestSuite):
enable_prefab_system = False
enable_prefab_system = True
@pytest.mark.test_case_id("C36529679")
class AtomLevelLoadTest_Editor(EditorSharedTest):
from Atom.tests import hydra_Atom_LevelLoadTest as test_module
@pytest.mark.test_case_id("C36525657")
class AtomEditorComponents_BloomAdded(EditorSharedTest):
@ -39,10 +51,6 @@ class TestAutomation(EditorTestSuite):
class AtomEditorComponents_DirectionalLightAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_DirectionalLightAdded as test_module
@pytest.mark.test_case_id("C36525660")
class AtomEditorComponents_DisplayMapperAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_DisplayMapperAdded as test_module
@pytest.mark.test_case_id("C36525661")
class AtomEditorComponents_EntityReferenceAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_EntityReferenceAdded as test_module
@ -116,5 +124,85 @@ class TestAutomation(EditorTestSuite):
class AtomEditorComponents_SSAOAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_SSAOAdded as test_module
@pytest.mark.test_case_id("C36529666")
class AtomEditorComponentsLevel_DiffuseGlobalIlluminationAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponentsLevel_DiffuseGlobalIlluminationAdded as test_module
@pytest.mark.test_case_id("C36525660")
class AtomEditorComponentsLevel_DisplayMapperAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponentsLevel_DisplayMapperAdded as test_module
class ShaderAssetBuilder_RecompilesShaderAsChainOfDependenciesChanges(EditorSharedTest):
from Atom.tests import hydra_ShaderAssetBuilder_RecompilesShaderAsChainOfDependenciesChanges as test_module
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_generic'])
class TestMaterialEditorBasicTests(object):
@pytest.fixture(autouse=True)
def setup_teardown(self, request, workspace, project):
def delete_files():
file_system.delete(
[
os.path.join(workspace.paths.project(), "Materials", "test_material.material"),
os.path.join(workspace.paths.project(), "Materials", "test_material_1.material"),
os.path.join(workspace.paths.project(), "Materials", "test_material_2.material"),
],
True,
True,
)
# Cleanup our newly created materials
delete_files()
def teardown():
# Cleanup our newly created materials
delete_files()
request.addfinalizer(teardown)
@pytest.mark.parametrize("exe_file_name", ["MaterialEditor"])
@pytest.mark.test_case_id("C34448113") # Creating a New Asset.
@pytest.mark.test_case_id("C34448114") # Opening an Existing Asset.
@pytest.mark.test_case_id("C34448115") # Closing Selected Material.
@pytest.mark.test_case_id("C34448116") # Closing All Materials.
@pytest.mark.test_case_id("C34448117") # Closing all but Selected Material.
@pytest.mark.test_case_id("C34448118") # Saving Material.
@pytest.mark.test_case_id("C34448119") # Saving as a New Material.
@pytest.mark.test_case_id("C34448120") # Saving as a Child Material.
@pytest.mark.test_case_id("C34448121") # Saving all Open Materials.
def test_MaterialEditorBasicTests(
self, request, workspace, project, launcher_platform, generic_launcher, exe_file_name):
expected_lines = [
"Material opened: True",
"Test asset doesn't exist initially: True",
"New asset created: True",
"New Material opened: True",
"Material closed: True",
"All documents closed: True",
"Close All Except Selected worked as expected: True",
"Actual Document saved with changes: True",
"Document saved as copy is saved with changes: True",
"Document saved as child is saved with changes: True",
"Save All worked as expected: True",
"P1: Asset Browser visibility working as expected: True",
"P1: Inspector visibility working as expected: True",
]
unexpected_lines = [
# Including any lines in unexpected_lines will cause the test to run for the duration of the timeout
]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
generic_launcher,
"hydra_AtomMaterialEditor_BasicTests.py",
run_python="--runpython",
timeout=43,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
null_renderer=True,
log_file_name="MaterialEditor.log",
enable_prefab_system=False,
)

@ -9,168 +9,20 @@ import os
import pytest
import ly_test_tools.environment.file_system as file_system
import editor_python_test_tools.hydra_test_utils as hydra
from ly_test_tools.o3de.editor_test import EditorSharedTest, EditorTestSuite
from Atom.atom_utils.atom_constants import LIGHT_TYPES
logger = logging.getLogger(__name__)
TEST_DIRECTORY = os.path.join(os.path.dirname(__file__), "tests")
class TestAtomEditorComponentsSandbox(object):
    # A test class requires at least one test to be collected.
def test_Dummy(self, request, editor, level, workspace, project, launcher_platform):
pass
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("level", ["auto_test"])
class TestAtomEditorComponentsMain(object):
"""Holds tests for Atom components."""
@pytest.mark.test_case_id("C34525095")
def test_AtomEditorComponents_LightComponent(
self, request, editor, workspace, project, launcher_platform, level):
"""
Please review the hydra script run by this test for more specific test info.
Tests that the Light component has the expected property options available to it.
"""
cfg_args = [level]
expected_lines = [
"light_entity Entity successfully created",
"Entity has a Light component",
"light_entity_test: Component added to the entity: True",
f"light_entity_test: Property value is {LIGHT_TYPES['sphere']} which matches {LIGHT_TYPES['sphere']}",
"Controller|Configuration|Shadows|Enable shadow set to True",
"light_entity Controller|Configuration|Shadows|Shadowmap size: SUCCESS",
"Controller|Configuration|Shadows|Shadow filter method set to 1", # PCF
"Controller|Configuration|Shadows|Filtering sample count set to 4",
"Controller|Configuration|Shadows|Filtering sample count set to 64",
"Controller|Configuration|Shadows|Shadow filter method set to 2", # ESM
"Controller|Configuration|Shadows|ESM exponent set to 50.0",
"Controller|Configuration|Shadows|ESM exponent set to 5000.0",
"Controller|Configuration|Shadows|Shadow filter method set to 3", # ESM+PCF
f"light_entity_test: Property value is {LIGHT_TYPES['spot_disk']} which matches {LIGHT_TYPES['spot_disk']}",
f"light_entity_test: Property value is {LIGHT_TYPES['capsule']} which matches {LIGHT_TYPES['capsule']}",
f"light_entity_test: Property value is {LIGHT_TYPES['quad']} which matches {LIGHT_TYPES['quad']}",
"light_entity Controller|Configuration|Fast approximation: SUCCESS",
"light_entity Controller|Configuration|Both directions: SUCCESS",
f"light_entity_test: Property value is {LIGHT_TYPES['polygon']} which matches {LIGHT_TYPES['polygon']}",
f"light_entity_test: Property value is {LIGHT_TYPES['simple_point']} "
f"which matches {LIGHT_TYPES['simple_point']}",
"Controller|Configuration|Attenuation radius|Mode set to 0",
"Controller|Configuration|Attenuation radius|Radius set to 100.0",
f"light_entity_test: Property value is {LIGHT_TYPES['simple_spot']} "
f"which matches {LIGHT_TYPES['simple_spot']}",
"Controller|Configuration|Shutters|Outer angle set to 45.0",
"Controller|Configuration|Shutters|Outer angle set to 90.0",
"light_entity_test: Component added to the entity: True",
"Light component test (non-GPU) completed.",
]
unexpected_lines = ["Traceback (most recent call last):"]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
editor,
"hydra_AtomEditorComponents_LightComponent.py",
timeout=120,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
null_renderer=True,
cfg_args=cfg_args,
enable_prefab_system=False,
)
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_generic'])
@pytest.mark.system
class TestMaterialEditorBasicTests(object):
@pytest.fixture(autouse=True)
def setup_teardown(self, request, workspace, project):
def delete_files():
file_system.delete(
[
os.path.join(workspace.paths.project(), "Materials", "test_material.material"),
os.path.join(workspace.paths.project(), "Materials", "test_material_1.material"),
os.path.join(workspace.paths.project(), "Materials", "test_material_2.material"),
],
True,
True,
)
# Cleanup our newly created materials
delete_files()
def teardown():
# Cleanup our newly created materials
delete_files()
request.addfinalizer(teardown)
@pytest.mark.parametrize("exe_file_name", ["MaterialEditor"])
@pytest.mark.test_case_id("C34448113") # Creating a New Asset.
@pytest.mark.test_case_id("C34448114") # Opening an Existing Asset.
@pytest.mark.test_case_id("C34448115") # Closing Selected Material.
@pytest.mark.test_case_id("C34448116") # Closing All Materials.
@pytest.mark.test_case_id("C34448117") # Closing all but Selected Material.
@pytest.mark.test_case_id("C34448118") # Saving Material.
@pytest.mark.test_case_id("C34448119") # Saving as a New Material.
@pytest.mark.test_case_id("C34448120") # Saving as a Child Material.
@pytest.mark.test_case_id("C34448121") # Saving all Open Materials.
def test_MaterialEditorBasicTests(
self, request, workspace, project, launcher_platform, generic_launcher, exe_file_name):
expected_lines = [
"Material opened: True",
"Test asset doesn't exist initially: True",
"New asset created: True",
"New Material opened: True",
"Material closed: True",
"All documents closed: True",
"Close All Except Selected worked as expected: True",
"Actual Document saved with changes: True",
"Document saved as copy is saved with changes: True",
"Document saved as child is saved with changes: True",
"Save All worked as expected: True",
]
unexpected_lines = [
"Traceback (most recent call last):"
]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
generic_launcher,
"hydra_AtomMaterialEditor_BasicTests.py",
run_python="--runpython",
timeout=120,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
null_renderer=True,
log_file_name="MaterialEditor.log",
enable_prefab_system=False,
)
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestAutomation(EditorTestSuite):
enable_prefab_system = False
@pytest.mark.test_case_id("C36529666")
class AtomEditorComponentsLevel_DiffuseGlobalIlluminationAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponentsLevel_DiffuseGlobalIlluminationAdded as test_module
    # This test is intermittently timing out without ever having executed. Sandboxing it while we investigate the cause.
@pytest.mark.test_case_id("C36525660")
class AtomEditorComponentsLevel_DisplayMapperAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponentsLevel_DisplayMapperAdded as test_module
class AtomEditorComponents_DisplayMapperAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_DisplayMapperAdded as test_module

@ -32,6 +32,8 @@ GLOBAL_ILLUMINATION_QUALITY = {
'High': 2,
}
# Level list used in Editor Level Load Test
LEVEL_LIST = ["hermanubis", "hermanubis_high", "macbeth_shaderballs", "PbrMaterialChart", "ShadowTest", "Sponza"]
class AtomComponentProperties:
"""
@ -195,11 +197,13 @@ class AtomComponentProperties:
def entity_reference(property: str = 'name') -> str:
"""
Entity Reference component properties.
        - 'EntityIdReferences' container property holding EntityId references. Initially empty.
:param property: From the last element of the property tree path. Default 'name' for component name string.
:return: Full property path OR component name if no property specified.
"""
properties = {
'name': 'Entity Reference',
'EntityIdReferences': 'Controller|Configuration|EntityIdReferences',
}
return properties[property]
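    # For example (values taken from the dictionary above):
    #     AtomComponentProperties.entity_reference() -> 'Entity Reference'
    #     AtomComponentProperties.entity_reference('EntityIdReferences')
    #         -> 'Controller|Configuration|EntityIdReferences'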

@ -125,11 +125,11 @@ def is_pane_visible(pane_name):
"""
:return: bool
"""
return atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "IsDockWidgetVisible", pane_name)
return atomtools.AtomToolsMainWindowRequestBus(bus.Broadcast, "IsDockWidgetVisible", pane_name)
def set_pane_visibility(pane_name, value):
atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "SetDockWidgetVisible", pane_name, value)
atomtools.AtomToolsMainWindowRequestBus(bus.Broadcast, "SetDockWidgetVisible", pane_name, value)
def select_lighting_config(config_name):
@ -162,6 +162,13 @@ def select_model_config(configname):
azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SelectModelPresetByName", configname)
def destroy_main_window():
"""
Closes the Material Editor window
"""
azlmbr.atomtools.AtomToolsMainWindowFactoryRequestBus(azlmbr.bus.Broadcast, "DestroyMainWindow")
def wait_for_condition(function, timeout_in_seconds=1.0):
# type: (function, float) -> bool
"""

@ -60,7 +60,7 @@ def AtomEditorComponentsLevel_DiffuseGlobalIllumination_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Add Diffuse Global Illumination level component to the level entity.
@ -86,10 +86,10 @@ def AtomEditorComponentsLevel_DiffuseGlobalIllumination_AddedToEntity():
# 4. Set Quality Level property to Low
diffuse_global_illumination_component.set_component_property_value(
AtomComponentProperties.diffuse_global_illumination('Quality Level', GLOBAL_ILLUMINATION_QUALITY['Low']))
AtomComponentProperties.diffuse_global_illumination('Quality Level'), GLOBAL_ILLUMINATION_QUALITY['Low'])
quality = diffuse_global_illumination_component.get_component_property_value(
AtomComponentProperties.diffuse_global_illumination('Quality Level'))
Report.result(diffuse_global_illumination_quality, quality == GLOBAL_ILLUMINATION_QUALITY['Low'])
Report.result(Tests.diffuse_global_illumination_quality, quality == GLOBAL_ILLUMINATION_QUALITY['Low'])
# 5. Enter/Exit game mode.
TestHelper.enter_game_mode(Tests.enter_game_mode)

@ -67,7 +67,7 @@ def AtomEditorComponentsLevel_DisplayMapper_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Add Display Mapper level component to the level entity.
@ -102,7 +102,7 @@ def AtomEditorComponentsLevel_DisplayMapper_AddedToEntity():
display_mapper_component.set_component_property_value(
AtomComponentProperties.display_mapper('Enable LDR color grading LUT'), True)
Report.result(
Test.enable_ldr_color_grading_lut,
Tests.enable_ldr_color_grading_lut,
display_mapper_component.get_component_property_value(
AtomComponentProperties.display_mapper('Enable LDR color grading LUT')) is True)

@ -97,7 +97,7 @@ def AtomEditorComponents_Bloom_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an Bloom entity with no components.
@ -170,10 +170,12 @@ def AtomEditorComponents_Bloom_AddedToEntity():
# 13. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, bloom_entity.exists())
# 14. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not bloom_entity.exists())
# 15. Look for errors and asserts.

@ -95,7 +95,7 @@ def AtomEditorComponents_Decal_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Decal entity with no components.
@ -162,6 +162,7 @@ def AtomEditorComponents_Decal_AddedToEntity():
# 11. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not decal_entity.exists())
# 12. Look for errors and asserts.

@ -97,7 +97,7 @@ def AtomEditorComponents_DeferredFog_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an Deferred Fog entity with no components.
@ -174,10 +174,12 @@ def AtomEditorComponents_DeferredFog_AddedToEntity():
# 13. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, deferred_fog_entity.exists())
# 14. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not deferred_fog_entity.exists())
# 15. Look for errors and asserts.

@ -107,7 +107,7 @@ def AtomEditorComponents_DepthOfField_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a DepthOfField entity with no components.
@ -189,10 +189,12 @@ def AtomEditorComponents_DepthOfField_AddedToEntity():
# 15. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, depth_of_field_entity.exists())
# 16. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not depth_of_field_entity.exists())
# 17. Look for errors and asserts.

@ -90,7 +90,7 @@ def AtomEditorComponents_DiffuseProbeGrid_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Diffuse Probe Grid entity with no components.
@ -168,10 +168,12 @@ def AtomEditorComponents_DiffuseProbeGrid_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, diffuse_probe_grid_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not diffuse_probe_grid_entity.exists())
# 14. Look for errors or asserts.

@ -95,7 +95,7 @@ def AtomEditorComponents_DirectionalLight_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Directional Light entity with no components.
@ -168,10 +168,12 @@ def AtomEditorComponents_DirectionalLight_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, directional_light_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not directional_light_entity.exists())
# 14. Look for errors and asserts.

@ -91,7 +91,7 @@ def AtomEditorComponents_DisplayMapper_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Display Mapper entity with no components.
@ -166,10 +166,12 @@ def AtomEditorComponents_DisplayMapper_AddedToEntity():
# 11. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, display_mapper_entity.exists())
# 12. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not display_mapper_entity.exists())
# 13. Look for errors and asserts.

@ -9,37 +9,58 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
class Tests:
creation_undo = (
"UNDO Entity creation success",
"UNDO Entity creation failed")
"P0: UNDO Entity creation failed")
creation_redo = (
"REDO Entity creation success",
"REDO Entity creation failed")
"P0: REDO Entity creation failed")
entity_reference_creation = (
"Entity Reference Entity successfully created",
"Entity Reference Entity failed to be created")
"P0: Entity Reference Entity failed to be created")
entity_reference_component = (
"Entity has an Entity Reference component",
"Entity failed to find Entity Reference component")
"P0: Entity failed to find Entity Reference component")
enter_game_mode = (
"Entered game mode",
"Failed to enter game mode")
"P0: Failed to enter game mode")
exit_game_mode = (
"Exited game mode",
"Couldn't exit game mode")
"P0: Couldn't exit game mode")
is_visible = (
"Entity is visible",
"Entity was not visible")
"P0: Entity was not visible")
is_hidden = (
"Entity is hidden",
"Entity was not hidden")
"P0: Entity was not hidden")
entity_deleted = (
"Entity deleted",
"Entity was not deleted")
"P0: Entity was not deleted")
deletion_undo = (
"UNDO deletion success",
"UNDO deletion failed")
"P0: UNDO deletion failed")
deletion_redo = (
"REDO deletion success",
"REDO deletion failed")
"P0: REDO deletion failed")
entity_id_references_is_container = (
"EntityIdReferences is a container property",
"P1: EntityIdReferences is NOT a container property")
container_append = (
"EntityIdReferences append succeeded",
"P1: EntityIdReferences append did not succeed")
container_add = (
"EntityIdReferences add succeeded",
"P1: EntityIdReferences add did not succeed")
container_update = (
"EntityIdReferences update succeeded",
"P1: EntityIdReferences update did not succeed")
container_remove = (
"EntityIdReferences remove succeeded",
"P1: EntityIdReferences remove did not succeed")
container_reset = (
"EntityIdReferences reset succeeded",
"P1: EntityIdReferences reset did not succeed")
entity_reference_component_removed = (
"Entity Reference component removed from entity",
"P1: Entity Reference component NOT removed from entity")
def AtomEditorComponents_EntityReference_AddedToEntity():
@ -60,13 +81,21 @@ def AtomEditorComponents_EntityReference_AddedToEntity():
2) Add Entity Reference component to Entity Reference entity.
3) UNDO the entity creation and component addition.
4) REDO the entity creation and component addition.
5) Enter/Exit game mode.
6) Test IsHidden.
7) Test IsVisible.
8) Delete Entity Reference entity.
9) UNDO deletion.
10) REDO deletion.
11) Look for errors.
5) 'EntityIdReferences' is a container property
6) Append item to 'EntityIdReferences'
7) Add item to 'EntityIdReferences'
8) Update item in 'EntityIdReferences'
9) Remove item from 'EntityIdReferences'
10) Reset the container property then put one entity reference back for further tests
11) Remove component
12) UNDO component remove
13) Enter/Exit game mode.
14) Test IsHidden.
15) Test IsVisible.
16) Delete Entity Reference entity.
17) UNDO deletion.
18) REDO deletion.
19) Look for errors.
:return: None
"""
@ -81,7 +110,7 @@ def AtomEditorComponents_EntityReference_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an Entity Reference entity with no components.
@ -119,33 +148,107 @@ def AtomEditorComponents_EntityReference_AddedToEntity():
general.idle_wait_frames(1)
Report.result(Tests.creation_redo, entity_reference_entity.exists())
# 5. Enter/Exit game mode.
# Entities for EntityIdReferences tests
test_1 = EditorEntity.create_editor_entity('test_1')
test_2 = EditorEntity.create_editor_entity('test_2')
test_3 = EditorEntity.create_editor_entity('test_3')
# 5. 'EntityIdReferences' is a container property
Report.result(
Tests.entity_id_references_is_container,
entity_reference_component.is_property_container(
AtomComponentProperties.entity_reference('EntityIdReferences')))
# 6. Append item to 'EntityIdReferences'
entity_reference_component.append_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), test_1.id)
Report.result(
Tests.container_append,
entity_reference_component.get_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 0) == test_1.id)
# 7. Add item to 'EntityIdReferences'
entity_reference_component.add_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 1, test_1.id)
Report.result(
Tests.container_add,
entity_reference_component.get_container_count(
AtomComponentProperties.entity_reference('EntityIdReferences')) == 2)
# 8. Update item in 'EntityIdReferences'
entity_reference_component.update_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 1, test_2.id)
Report.result(
Tests.container_update,
entity_reference_component.get_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 1) == test_2.id)
# 9. Remove item from 'EntityIdReferences'
entity_reference_component.append_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), test_3.id)
count_before = entity_reference_component.get_container_count(
AtomComponentProperties.entity_reference('EntityIdReferences'))
entity_reference_component.remove_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 1)
count_after = entity_reference_component.get_container_count(
AtomComponentProperties.entity_reference('EntityIdReferences'))
Report.result(
Tests.container_remove,
((count_before == 3) and (count_after == 2) and
(entity_reference_component.get_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), 1) == test_3.id)))
# 10. Reset the container property then put one entity reference back for further tests
entity_reference_component.reset_container(AtomComponentProperties.entity_reference('EntityIdReferences'))
general.idle_wait_frames(1)
Report.result(
Tests.container_reset,
entity_reference_component.get_container_count(
AtomComponentProperties.entity_reference('EntityIdReferences')) == 0)
entity_reference_component.append_container_item(
AtomComponentProperties.entity_reference('EntityIdReferences'), test_1.id)
# 11. Remove component
entity_reference_entity.remove_component(AtomComponentProperties.entity_reference())
general.idle_wait_frames(1)
Report.result(Tests.entity_reference_component_removed, not entity_reference_entity.has_component(
AtomComponentProperties.entity_reference()))
# 12. UNDO component remove
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.entity_reference_component, entity_reference_entity.has_component(
AtomComponentProperties.entity_reference()))
# 13. Enter/Exit game mode.
TestHelper.enter_game_mode(Tests.enter_game_mode)
general.idle_wait_frames(1)
TestHelper.exit_game_mode(Tests.exit_game_mode)
# 6. Test IsHidden.
# 14. Test IsHidden.
entity_reference_entity.set_visibility_state(False)
Report.result(Tests.is_hidden, entity_reference_entity.is_hidden() is True)
# 7. Test IsVisible.
# 15. Test IsVisible.
entity_reference_entity.set_visibility_state(True)
general.idle_wait_frames(1)
Report.result(Tests.is_visible, entity_reference_entity.is_visible() is True)
# 8. Delete Entity Reference entity.
# 16. Delete Entity Reference entity.
entity_reference_entity.delete()
Report.result(Tests.entity_deleted, not entity_reference_entity.exists())
# 9. UNDO deletion.
# 17. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, entity_reference_entity.exists())
# 10. REDO deletion.
# 18. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not entity_reference_entity.exists())
# 11. Look for errors and asserts.
# 19. Look for errors and asserts.
TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
for error_info in error_tracer.errors:
Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")

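The container steps above all go through the same handful of EditorComponent helpers (added to editor_entity_utils later in this change). A minimal standalone sketch of the pattern, assuming an open level and the same imports as the test above; the 'ContainerDemo' entity and single-item flow are illustrative only:

    demo_entity = EditorEntity.create_editor_entity('ContainerDemo')
    component = demo_entity.add_component(AtomComponentProperties.entity_reference())
    path = AtomComponentProperties.entity_reference('EntityIdReferences')
    if component.is_property_container(path):
        component.append_container_item(path, demo_entity.id)   # lands at index 0
        component.update_container_item(path, 0, demo_entity.id)
        assert component.get_container_count(path) == 1
        component.remove_container_item(path, 0)
        component.reset_container(path)                         # empty again
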
@ -101,7 +101,7 @@ def AtomEditorComponents_ExposureControl_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Creation of Exposure Control entity with no components.
@ -169,10 +169,12 @@ def AtomEditorComponents_ExposureControl_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, exposure_control_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not exposure_control_entity.exists())
# 14. Look for errors and asserts.

@ -99,7 +99,7 @@ def AtomEditorComponents_GlobalSkylightIBL_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Global Skylight (IBL) entity with no components.
@ -176,10 +176,12 @@ def AtomEditorComponents_GlobalSkylightIBL_AddedToEntity():
# 11. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, global_skylight_entity.exists())
# 12. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not global_skylight_entity.exists())
# 13. Look for errors and asserts.

@ -82,7 +82,7 @@ def AtomEditorComponents_Grid_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Grid entity with no components.
@ -139,10 +139,12 @@ def AtomEditorComponents_Grid_AddedToEntity():
# 9. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, grid_entity.exists())
# 10. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not grid_entity.exists())
# 11. Look for errors or asserts.

@ -96,7 +96,7 @@ def AtomEditorComponents_HDRColorGrading_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an HDR Color Grading entity with no components.
@ -173,10 +173,12 @@ def AtomEditorComponents_HDRColorGrading_AddedToEntity():
# 13. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, hdr_color_grading_entity.exists())
# 14. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not hdr_color_grading_entity.exists())
# 15. Look for errors and asserts.

@ -87,7 +87,7 @@ def AtomEditorComponents_HDRiSkybox_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an HDRi Skybox with no components.
@ -158,10 +158,12 @@ def AtomEditorComponents_HDRiSkybox_AddedToEntity():
# 10. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, hdri_skybox_entity.exists())
# 11. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not hdri_skybox_entity.exists())
# 12. Look for errors or asserts.

@ -89,7 +89,7 @@ def AtomEditorComponents_Light_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Light entity with no components.
@ -144,10 +144,12 @@ def AtomEditorComponents_Light_AddedToEntity():
# 9. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, light_entity.exists())
# 10. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not light_entity.exists())
# 11. Look for errors and asserts.

@ -1,213 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import sys
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.math as math
import azlmbr.paths
import azlmbr.legacy.general as general
sys.path.append(os.path.join(azlmbr.paths.projectroot, "Gem", "PythonTests"))
import editor_python_test_tools.hydra_editor_utils as hydra
from Atom.atom_utils.atom_constants import LIGHT_TYPES
LIGHT_TYPE_PROPERTY = 'Controller|Configuration|Light type'
SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES = [
("Controller|Configuration|Shadows|Enable shadow", True),
("Controller|Configuration|Shadows|Shadowmap size", 0), # 256
("Controller|Configuration|Shadows|Shadowmap size", 1), # 512
("Controller|Configuration|Shadows|Shadowmap size", 2), # 1024
("Controller|Configuration|Shadows|Shadowmap size", 3), # 2048
("Controller|Configuration|Shadows|Shadow filter method", 1), # PCF
("Controller|Configuration|Shadows|Filtering sample count", 4.0),
("Controller|Configuration|Shadows|Filtering sample count", 64.0),
("Controller|Configuration|Shadows|Shadow filter method", 2), # ECM
("Controller|Configuration|Shadows|ESM exponent", 50),
("Controller|Configuration|Shadows|ESM exponent", 5000),
("Controller|Configuration|Shadows|Shadow filter method", 3), # ESM+PCF
]
QUAD_LIGHT_PROPERTIES = [
("Controller|Configuration|Both directions", True),
("Controller|Configuration|Fast approximation", True),
]
SIMPLE_POINT_LIGHT_PROPERTIES = [
("Controller|Configuration|Attenuation radius|Mode", 0),
("Controller|Configuration|Attenuation radius|Radius", 100.0),
]
SIMPLE_SPOT_LIGHT_PROPERTIES = [
("Controller|Configuration|Shutters|Inner angle", 45.0),
("Controller|Configuration|Shutters|Outer angle", 90.0),
]
def verify_required_component_property_value(entity_name, component, property_path, expected_property_value):
"""
Compares the property value of component against the expected_property_value.
:param entity_name: name of the entity to use (for test verification purposes).
:param component: component to check on a given entity for its current property value.
:param property_path: the path to the property inside the component.
:param expected_property_value: The value expected from the value inside property_path.
:return: None, but prints to general.log() which the test uses to verify against.
"""
property_value = editor.EditorComponentAPIBus(
bus.Broadcast, "GetComponentProperty", component, property_path).GetValue()
general.log(f"{entity_name}_test: Property value is {property_value} "
f"which matches {expected_property_value}")
def run():
"""
Test Case - Light Component
1. Creates a "light_entity" Entity and attaches a "Light" component to it.
2. Updates the Light component to each light type option from the LIGHT_TYPES constant.
3. The test will check the Editor log to ensure each light type was selected.
4. Prints the string "Light component test (non-GPU) completed" after completion.
Tests will fail immediately if any of these log lines are found:
1. Trace::Assert
2. Trace::Error
3. Traceback (most recent call last):
:return: None
"""
# Create a "light_entity" entity with "Light" component.
light_entity_name = "light_entity"
light_component = "Light"
light_entity = hydra.Entity(light_entity_name)
light_entity.create_entity(math.Vector3(-1.0, -2.0, 3.0), [light_component])
general.log(
f"{light_entity_name}_test: Component added to the entity: "
f"{hydra.has_components(light_entity.id, [light_component])}")
# Populate the light_component_id_pair value so that it can be used to select all Light component options.
light_component_id_pair = None
component_type_id_list = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', [light_component], 0)
if len(component_type_id_list) < 1:
general.log(f"ERROR: A component class with name {light_component} doesn't exist")
light_component_id_pair = None
elif len(component_type_id_list) > 1:
general.log(f"ERROR: Found more than one component classes with same name: {light_component}")
light_component_id_pair = None
entity_component_id_pair = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', light_entity.id, component_type_id_list[0])
if entity_component_id_pair.IsSuccess():
light_component_id_pair = entity_component_id_pair.GetValue()
# Test that each Light component option can be selected and its properties updated.
# Point (sphere) light type checks.
light_type_property_test(
light_type=LIGHT_TYPES['sphere'],
light_properties=SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES,
light_component_id_pair=light_component_id_pair,
light_entity_name=light_entity_name,
light_entity=light_entity
)
# Spot (disk) light type checks.
light_type_property_test(
light_type=LIGHT_TYPES['spot_disk'],
light_properties=SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES,
light_component_id_pair=light_component_id_pair,
light_entity_name=light_entity_name,
light_entity=light_entity
)
# Capsule light type checks.
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
LIGHT_TYPES['capsule']
)
verify_required_component_property_value(
entity_name=light_entity_name,
component=light_entity.components[0],
property_path=LIGHT_TYPE_PROPERTY,
expected_property_value=LIGHT_TYPES['capsule']
)
# Quad light type checks.
light_type_property_test(
light_type=LIGHT_TYPES['quad'],
light_properties=QUAD_LIGHT_PROPERTIES,
light_component_id_pair=light_component_id_pair,
light_entity_name=light_entity_name,
light_entity=light_entity
)
# Polygon light type checks.
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
LIGHT_TYPES['polygon']
)
verify_required_component_property_value(
entity_name=light_entity_name,
component=light_entity.components[0],
property_path=LIGHT_TYPE_PROPERTY,
expected_property_value=LIGHT_TYPES['polygon']
)
# Point (simple punctual) light type checks.
light_type_property_test(
light_type=LIGHT_TYPES['simple_point'],
light_properties=SIMPLE_POINT_LIGHT_PROPERTIES,
light_component_id_pair=light_component_id_pair,
light_entity_name=light_entity_name,
light_entity=light_entity
)
# Spot (simple punctual) light type checks.
light_type_property_test(
light_type=LIGHT_TYPES['simple_spot'],
light_properties=SIMPLE_SPOT_LIGHT_PROPERTIES,
light_component_id_pair=light_component_id_pair,
light_entity_name=light_entity_name,
light_entity=light_entity
)
general.log("Light component test (non-GPU) completed.")
def light_type_property_test(light_type, light_properties, light_component_id_pair, light_entity_name, light_entity):
"""
Updates the current light type and modifies its properties, then verifies they are accurate to what was set.
:param light_type: The type of light to update, must match a value in LIGHT_TYPES
:param light_properties: List of tuples detailing properties to modify with update values.
:param light_component_id_pair: Entity + component ID pair for updating the light component on a given entity.
:param light_entity_name: the name of the Entity holding the light component.
:param light_entity: the Entity object containing the light component.
:return: None
"""
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
light_type
)
verify_required_component_property_value(
entity_name=light_entity_name,
component=light_entity.components[0],
property_path=LIGHT_TYPE_PROPERTY,
expected_property_value=light_type
)
for light_property in light_properties:
light_entity.get_set_test(0, light_property[0], light_property[1])
if __name__ == "__main__":
run()

@ -104,7 +104,7 @@ def AtomEditorComponents_LookModification_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Look Modification entity with no components.
@ -192,10 +192,12 @@ def AtomEditorComponents_LookModification_AddedToEntity():
# 14. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, look_modification_entity.exists())
# 15. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not look_modification_entity.exists())
# 16. Look for errors and asserts.

@ -102,7 +102,7 @@ def AtomEditorComponents_Material_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Material entity with no components.
@ -184,10 +184,12 @@ def AtomEditorComponents_Material_AddedToEntity():
# 16. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, material_entity.exists())
# 17. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not material_entity.exists())
# 18. Look for errors or asserts.

@ -87,7 +87,7 @@ def AtomEditorComponents_Mesh_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Mesh entity with no components.
@ -151,10 +151,12 @@ def AtomEditorComponents_Mesh_AddedToEntity():
# 10. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, mesh_entity.exists())
# 11. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not mesh_entity.exists())
# 12. Look for errors or asserts.

@ -80,7 +80,7 @@ def AtomEditorComponents_OcclusionCullingPlane_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an occlusion culling plane entity with no components.
@ -140,10 +140,12 @@ def AtomEditorComponents_OcclusionCullingPlane_AddedToEntity():
# 9. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, occlusion_culling_plane_entity.exists())
# 10. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not occlusion_culling_plane_entity.exists())
# 11. Look for errors or asserts.

@ -89,7 +89,7 @@ def AtomEditorComponents_PhysicalSky_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Physical Sky entity with no components.
@ -146,10 +146,12 @@ def AtomEditorComponents_PhysicalSky_AddedToEntity():
# 9. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, physical_sky_entity.exists())
# 10. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not physical_sky_entity.exists())
# 11. Look for errors and asserts.

@ -92,7 +92,7 @@ def AtomEditorComponents_PostFXGradientWeightModifier_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a PostFX Gradient Weight Modifier entity with no components.
@ -162,10 +162,12 @@ def AtomEditorComponents_PostFXGradientWeightModifier_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, postfx_gradient_weight_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not postfx_gradient_weight_entity.exists())
# 14. Look for errors or asserts.

@ -80,7 +80,7 @@ def AtomEditorComponents_postfx_layer_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a PostFX Layer entity with no components.
@ -137,10 +137,12 @@ def AtomEditorComponents_postfx_layer_AddedToEntity():
# 9. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, postfx_layer_entity.exists())
# 10. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not postfx_layer_entity.exists())
# 11. Look for errors or asserts.

@ -92,7 +92,7 @@ def AtomEditorComponents_PostFXRadiusWeightModifier_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Post FX Radius Weight Modifier entity with no components.
@ -161,10 +161,12 @@ def AtomEditorComponents_PostFXRadiusWeightModifier_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, postfx_radius_weight_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not postfx_radius_weight_entity.exists())
# 14. Look for errors and asserts.

@ -98,7 +98,7 @@ def AtomEditorComponents_postfx_shape_weight_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a PostFx Shape Weight Modifier entity with no components.
@ -188,10 +188,12 @@ def AtomEditorComponents_postfx_shape_weight_AddedToEntity():
# 15. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, postfx_shape_weight_entity.exists())
# 16. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not postfx_shape_weight_entity.exists())
# 17. Look for errors or asserts.

@ -97,7 +97,7 @@ def AtomEditorComponents_ReflectionProbe_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Reflection Probe entity with no components.
@ -183,10 +183,12 @@ def AtomEditorComponents_ReflectionProbe_AddedToEntity():
# 13. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, reflection_probe_entity.exists())
# 14. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not reflection_probe_entity.exists())
# 15. Look for errors or asserts.

@ -94,7 +94,7 @@ def AtomEditorComponents_SSAO_AddedToEntity():
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("", "Base")
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create an SSAO entity with no components.
@ -163,10 +163,12 @@ def AtomEditorComponents_SSAO_AddedToEntity():
# 12. UNDO deletion.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_undo, ssao_entity.exists())
# 13. REDO deletion.
general.redo()
general.idle_wait_frames(1)
Report.result(Tests.deletion_redo, not ssao_entity.exists())
# 14. Look for errors and asserts.

@ -36,6 +36,19 @@ MATERIAL_TYPE_PATH = os.path.join(
CACHE_FILE_EXTENSION = ".azmaterial"
def verify_pane_visibility(pane_name: str):
"""
Prints log lines indicating whether toggling the Material Editor pane's visibility works as expected.
:param pane_name: Name of the pane to be tested
"""
initial_value = material_editor.is_pane_visible(pane_name)
material_editor.set_pane_visibility(pane_name, not initial_value)
result = (material_editor.is_pane_visible(pane_name) is not initial_value)
material_editor.set_pane_visibility(pane_name, initial_value)
result = result and (initial_value is material_editor.is_pane_visible(pane_name))
print(f"P1: {pane_name} visibility working as expected: {result}")
def run():
"""
Summary:
@ -49,9 +62,12 @@ def run():
7. Saving as a New Material
8. Saving as a Child Material
9. Saving all Open Materials
10. Verify Asset Browser pane visibility
11. Verify Material Inspector pane visibility
Expected Result:
All the above functions work as expected in Material Editor.
Pane visibility functions as expected
:return: None
"""
@ -187,6 +203,19 @@ def run():
material_editor.save_all()
material_editor.close_all_documents()
# 10) Verify Asset Browser pane visibility
verify_pane_visibility("Asset Browser")
# 11) Verify Material Inspector pane visibility
verify_pane_visibility("Inspector")
# Confirm documents closed and exit Material Editor
material_editor.wait_for_condition(lambda:
(not material_editor.is_open(document1_id)) and
(not material_editor.is_open(document2_id)) and
(not material_editor.is_open(document3_id)), 2.0)
material_editor.destroy_main_window()
if __name__ == "__main__":
run()

@ -0,0 +1,72 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
def Atom_LevelLoadTest():
"""
Summary:
Loads all graphics levels within the AutomatedTesting project in the Editor. For each level, this script verifies that
the level loads and can enter/exit gameplay without crashing the Editor.
Test setup:
- Store all available levels in a list.
- Set up a for loop to run all checks for each level.
Expected Behavior:
Test verifies that each level loads, enters/exits game mode, and reports success for all test actions.
Test Steps for each level:
1) Create tuple with level load success and failure messages
2) Open the level using the python test tools command
3) Verify level is loaded using a separate command, and report success/failure
4) Enter gameplay and report result using a tuple
5) Exit Gameplay and report result using a tuple
6) Look for errors or asserts.
:return: None
"""
import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report, Tracer, TestHelper
from Atom.atom_utils.atom_constants import LEVEL_LIST
with Tracer() as error_tracer:
for level in LEVEL_LIST:
# 1. Create tuple with level load success and failure messages
level_check_tuple = (f"loaded {level}", f"failed to load {level}")
# 2. Open the level using the python test tools command
TestHelper.init_idle()
TestHelper.open_level("Graphics", level)
# 3. Verify level is loaded using a separate command, and report success/failure
Report.result(level_check_tuple, level == general.get_current_level_name())
# 4. Enter gameplay and report result using a tuple
enter_game_mode_tuple = (f"{level} entered gameplay successfully ", f"{level} failed to enter gameplay")
TestHelper.enter_game_mode(enter_game_mode_tuple)
general.idle_wait_frames(1)
# 5. Exit gameplay and report result using a tuple
exit_game_mode_tuple = (f"{level} exited gameplay successfully ", f"{level} failed to exit gameplay")
TestHelper.exit_game_mode(exit_game_mode_tuple)
# 6. Look for errors or asserts.
TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
for error_info in error_tracer.errors:
Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
for assert_info in error_tracer.asserts:
Report.info(f"Assert: {assert_info.filename} {assert_info.function} | {assert_info.message}")
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(Atom_LevelLoadTest)

@ -6,7 +6,7 @@
#
#
ly_get_list_relative_pal_filename(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME})
o3de_pal_dir(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${gem_restricted_path} ${gem_path} ${gem_parent_relative_path})
include(${pal_dir}/PAL_${PAL_PLATFORM_NAME_LOWERCASE}.cmake) # for PAL_TRAIT_BLAST Traits

@ -10,7 +10,7 @@
# Automated Tests
################################################################################
ly_get_list_relative_pal_filename(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME})
o3de_pal_dir(pal_dir ${CMAKE_CURRENT_LIST_DIR}/Platform/${PAL_PLATFORM_NAME} ${gem_restricted_path} ${gem_path} ${gem_parent_relative_path})
include(${pal_dir}/PAL_traits_${PAL_PLATFORM_NAME_LOWERCASE}.cmake)

@ -8,7 +8,8 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
# Built-in Imports
from __future__ import annotations
from typing import List, Tuple, Union
from enum import Enum
import warnings
# Open 3D Engine Imports
import azlmbr
@ -21,14 +22,25 @@ import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report
class EditorEntityType(Enum):
GAME = azlmbr.entity.EntityType().Game
LEVEL = azlmbr.entity.EntityType().Level
class EditorComponent:
"""
EditorComponent class used to set and get the component property value using path
EditorComponent object is returned from either of
EditorEntity.add_component() or Entity.add_components() or EditorEntity.get_components_of_type()
which also assigns self.id and self.type_id to the EditorComponent object.
self.type_id is the UUID for the component type as provided by an ebus call.
"""
def __init__(self, type_id: uuid):
self.type_id = type_id
self.id = None
self.property_tree_editor = None
def get_component_name(self) -> str:
"""
Used to get name of component
@ -38,9 +50,9 @@ class EditorComponent:
assert len(type_names) != 0, "Component object does not have type id"
return type_names[0]
def get_property_tree(self):
def get_property_tree(self, force_get: bool = False):
"""
Used to get the property tree object of component that has following functions associated with it:
Used to get and cache the property tree editor of the component, which has the following functions associated with it:
1. prop_tree.is_container(path)
2. prop_tree.get_container_count(path)
3. prop_tree.reset_container(path)
@ -48,17 +60,161 @@ class EditorComponent:
5. prop_tree.remove_container_item(path, key)
6. prop_tree.update_container_item(path, key, value)
7. prop_tree.get_container_item(path, key)
:return: Property tree object of a component
:param force_get: Force a fresh property tree editor rather than the cached self.property_tree_editor
:return: Property tree editor of the component
"""
if (not force_get) and (self.property_tree_editor is not None):
return self.property_tree_editor
build_prop_tree_outcome = editor.EditorComponentAPIBus(
bus.Broadcast, "BuildComponentPropertyTreeEditor", self.id
)
assert (
build_prop_tree_outcome.IsSuccess()
), f"Failure: Could not build property tree of component: '{self.get_component_name()}'"
), f"Failure: Could not build property tree editor of component: '{self.get_component_name()}'"
prop_tree = build_prop_tree_outcome.GetValue()
Report.info(prop_tree.build_paths_list())
return prop_tree
self.property_tree_editor = prop_tree
return self.property_tree_editor
def is_property_container(self, component_property_path: str) -> bool:
"""
Used to determine if a component property is a container.
Containers are collections of same-typed values that can expand or shrink to hold more or fewer items.
There are two types of containers: indexed and associative.
Indexed containers use an integer key and behave similarly to a list.
Associative containers use keys of a single type, which can be any supported type.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:return: Boolean True if the property is a container, False if it is not.
"""
if self.property_tree_editor is None:
self.get_property_tree()
result = self.property_tree_editor.is_container(component_property_path)
if not result:
Report.info(f"{self.get_component_name()}: '{component_property_path}' is not a container")
return result
def get_container_count(self, component_property_path: str) -> int:
"""
Used to get the count of items in the container.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:return: Count of items in the container as an unsigned integer
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
container_count_outcome = self.property_tree_editor.get_container_count(component_property_path)
assert (
container_count_outcome.IsSuccess()
), f"Failure: get_container_count did not return success for '{component_property_path}'"
return container_count_outcome.GetValue()
def reset_container(self, component_property_path: str):
"""
Used to reset a container to empty
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:return: None
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
reset_outcome = self.property_tree_editor.reset_container(component_property_path)
assert (
reset_outcome.IsSuccess()
), f"Failure: could not reset_container on '{component_property_path}'"
def append_container_item(self, component_property_path: str, value: any):
"""
Used to append a value to an indexed container without providing an index key.
Append will fail on an associative container.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:param value: Value to be set
:return: None
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
append_outcome = self.property_tree_editor.append_container_item(component_property_path, value)
assert (
append_outcome.IsSuccess()
), f"Failure: could not append_container_item to '{component_property_path}'"
def add_container_item(self, component_property_path: str, key: any, value: any):
"""
Used to add a container item at a specified key.
There are two types of containers: indexed and associative.
Indexed containers use an integer key.
Associative containers use keys of a single type, which can be any supported type.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:param key: Zero-based integer key, or any supported key type for an associative container
:param value: Value to be set
:return: None
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
add_outcome = self.property_tree_editor.add_container_item(component_property_path, key, value)
assert (
add_outcome.IsSuccess()
), f"Failure: could not add_container_item '{key}' to '{component_property_path}'"
def get_container_item(self, component_property_path: str, key: any) -> any:
"""
Used to retrieve a container item value at the specified key.
There are two types of containers: indexed and associative.
Indexed containers use an integer key.
Associative containers use keys of a single type, which can be any supported type.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:param key: Zero-based integer key, or any supported key type for an associative container
:return: Value stored at the key specified
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
get_outcome = self.property_tree_editor.get_container_item(component_property_path, key)
assert (
get_outcome.IsSuccess()
), (
f"Failure: could not get a value for {self.get_component_name()}: '{component_property_path}' [{key}]. "
f"Error returned by get_container_item: {get_outcome.GetError()}")
return get_outcome.GetValue()
def remove_container_item(self, component_property_path: str, key: any):
"""
Used to remove a container item value at the specified key.
There are two types of containers: indexed and associative.
Indexed containers use an integer key.
Associative containers use keys of a single type, which can be any supported type.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:param key: Zero-based integer key, or any supported key type for an associative container
:return: None
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
remove_outcome = self.property_tree_editor.remove_container_item(component_property_path, key)
assert (
remove_outcome.IsSuccess()
), f"Failure: could not remove_container_item '{key}' from '{component_property_path}'"
def update_container_item(self, component_property_path: str, key: any, value: any):
"""
Used to update a container item at a specified key.
There are two types of containers: indexed and associative.
Indexed containers use an integer key.
Associative containers use keys of a single type, which can be any supported type.
:param component_property_path: String of component property. (e.g. 'Settings|Visible')
:param key: Zero-based integer key, or any supported key type for an associative container
:param value: Value to be set
:return: None
"""
assert (
self.is_property_container(component_property_path)
), f"Failure: '{component_property_path}' is not a property container"
update_outcome = self.property_tree_editor.update_container_item(component_property_path, key, value)
assert (
update_outcome.IsSuccess()
), f"Failure: could not update '{key}' in '{component_property_path}'"
def get_component_property_value(self, component_property_path: str):
"""
@ -94,23 +250,36 @@ class EditorComponent:
"""
return editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", self.id)
def set_enabled(self, new_state: bool):
"""
Used to set the component enabled state
:param new_state: Boolean enabled True, disabled False
:return: None
"""
if new_state:
editor.EditorComponentAPIBus(bus.Broadcast, "EnableComponents", [self.id])
else:
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [self.id])
def disable_component(self):
"""
Used to disable the component using its id value.
Deprecation warning! Use set_enabled(False) instead, as this method is deprecated.
:return: None
"""
warnings.warn("disable_component is deprecated, use set_enabled(False) instead.", DeprecationWarning)
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [self.id])
@staticmethod
def get_type_ids(component_names: list) -> list:
def get_type_ids(component_names: list, entity_type: EditorEntityType = EditorEntityType.GAME) -> list:
"""
Used to get type ids of given components list
:param: component_names: List of components to get type ids
:return: List of type ids of given components.
:param component_names: List of components to get type ids
:param entity_type: EditorEntityType enum value; EditorEntityType.GAME is the default
:return: List of type ids of given components. Type id is a UUID as provided by the ebus call
"""
type_ids = editor.EditorComponentAPIBus(
bus.Broadcast, "FindComponentTypeIdsByEntityType", component_names, azlmbr.entity.EntityType().Game
)
bus.Broadcast, "FindComponentTypeIdsByEntityType", component_names, entity_type.value)
return type_ids
@ -131,7 +300,7 @@ class EditorEntity:
"""
Entity class is used to create and interact with Editor Entities.
Example: To create Editor Entity, Use the code:
test_entity = Entity.create_editor_entity("TestEntity")
test_entity = EditorEntity.create_editor_entity("TestEntity")
# This creates a python object with 'test_entity' linked to entity name "TestEntity" in Editor.
# To add component, use:
test_entity.add_component(<COMPONENT_NAME>)
@ -276,10 +445,9 @@ class EditorEntity:
:return: List of newly added components to the entity
"""
components = []
type_ids = EditorComponent.get_type_ids(component_names)
type_ids = EditorComponent.get_type_ids(component_names, EditorEntityType.GAME)
for type_id in type_ids:
new_comp = EditorComponent()
new_comp.type_id = type_id
new_comp = EditorComponent(type_id)
add_component_outcome = editor.EditorComponentAPIBus(
bus.Broadcast, "AddComponentsOfType", self.id, [type_id]
)
@ -291,6 +459,26 @@ class EditorEntity:
self.components.append(new_comp)
return components
def remove_component(self, component_name: str) -> None:
"""
Used to remove a component from Entity
:param component_name: String of component name to remove
:return: None
"""
self.remove_components([component_name])
def remove_components(self, component_names: list):
"""
Used to remove a list of components from Entity
:param component_names: List of component names to remove
:return: None
"""
component_ids = [component.id for component in self.get_components_of_type(component_names)]
remove_success = editor.EditorComponentAPIBus(bus.Broadcast, "RemoveComponents", component_ids)
assert (
remove_success
), f"Failure: could not remove component from entity '{self.get_name()}'"
def get_components_of_type(self, component_names: list) -> List[EditorComponent]:
"""
Used to get components of type component_name that already exists on Entity
@ -298,10 +486,9 @@ class EditorEntity:
:return: List of Entity Component objects of given component name
"""
component_list = []
type_ids = EditorComponent.get_type_ids(component_names)
type_ids = EditorComponent.get_type_ids(component_names, EditorEntityType.GAME)
for type_id in type_ids:
component = EditorComponent()
component.type_id = type_id
component = EditorComponent(type_id)
get_component_of_type_outcome = editor.EditorComponentAPIBus(
bus.Broadcast, "GetComponentOfType", self.id, type_id
)
@ -319,7 +506,7 @@ class EditorEntity:
:param component_name: Name of component to check for
:return: True, if entity has specified component. Else, False
"""
type_ids = EditorComponent.get_type_ids([component_name])
type_ids = EditorComponent.get_type_ids([component_name], EditorEntityType.GAME)
return editor.EditorComponentAPIBus(bus.Broadcast, "HasComponentOfType", self.id, type_ids[0])
def get_start_status(self) -> int:
@ -359,6 +546,21 @@ class EditorEntity:
set_status = self.get_start_status()
assert set_status == status_to_set, f"Failed to set start status of {self.get_name()} to {desired_start_status}"
def is_locked(self) -> bool:
"""
Used to get the locked status of the entity
:return: Boolean True if locked, False if not locked
"""
return editor.EditorEntityInfoRequestBus(bus.Event, "IsLocked", self.id)
def set_lock_state(self, is_locked: bool) -> None:
"""
Sets the lock state on the object to locked or not locked.
:param is_locked: True for locking, False to unlock.
:return: None
"""
editor.EditorEntityAPIBus(bus.Event, "SetLockState", self.id, is_locked)
def delete(self) -> None:
"""
Used to delete the Entity.
@ -488,18 +690,6 @@ class EditorLevelEntity:
EditorLevelComponentAPIBus requests.
"""
@staticmethod
def get_type_ids(component_names: list) -> list:
"""
Used to get type ids of given components list for EntityType Level
:param: component_names: List of components to get type ids
:return: List of type ids of given components.
"""
type_ids = editor.EditorComponentAPIBus(
bus.Broadcast, "FindComponentTypeIdsByEntityType", component_names, azlmbr.entity.EntityType().Level
)
return type_ids
@staticmethod
def add_component(component_name: str) -> EditorComponent:
"""
@ -518,10 +708,9 @@ class EditorLevelEntity:
:return: List of newly added components to the level
"""
components = []
type_ids = EditorLevelEntity.get_type_ids(component_names)
type_ids = EditorComponent.get_type_ids(component_names, EditorEntityType.LEVEL)
for type_id in type_ids:
new_comp = EditorComponent()
new_comp.type_id = type_id
new_comp = EditorComponent(type_id)
add_component_outcome = editor.EditorLevelComponentAPIBus(
bus.Broadcast, "AddComponentsOfType", [type_id]
)
@ -540,10 +729,9 @@ class EditorLevelEntity:
:return: List of Level Component objects of given component name
"""
component_list = []
type_ids = EditorLevelEntity.get_type_ids(component_names)
type_ids = EditorComponent.get_type_ids(component_names, EditorEntityType.LEVEL)
for type_id in type_ids:
component = EditorComponent()
component.type_id = type_id
component = EditorComponent(type_id)
get_component_of_type_outcome = editor.EditorLevelComponentAPIBus(
bus.Broadcast, "GetComponentOfType", type_id
)
@ -562,7 +750,7 @@ class EditorLevelEntity:
:param component_name: Name of component to check for
:return: True, if level has specified component. Else, False
"""
type_ids = EditorLevelEntity.get_type_ids([component_name])
type_ids = EditorComponent.get_type_ids([component_name], EditorEntityType.LEVEL)
return editor.EditorLevelComponentAPIBus(bus.Broadcast, "HasComponentOfType", type_ids[0])
@staticmethod
@ -572,5 +760,5 @@ class EditorLevelEntity:
:param component_name: Name of component to check for
:return: integer count of occurrences of level component attached to level or zero if none are present
"""
type_ids = EditorLevelEntity.get_type_ids([component_name])
type_ids = EditorComponent.get_type_ids([component_name], EditorEntityType.LEVEL)
return editor.EditorLevelComponentAPIBus(bus.Broadcast, "CountComponentsOfType", type_ids[0])

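Taken together, these editor_entity_utils changes funnel every component type-id lookup through EditorComponent.get_type_ids with an explicit EditorEntityType, and construct EditorComponent with its type id rather than patching the attribute afterwards. A short sketch of how the consolidated API reads inside the Editor's Python environment (component names are illustrative):

    # One lookup helper now serves both game entities and the level entity.
    mesh_ids = EditorComponent.get_type_ids(["Mesh"], EditorEntityType.GAME)
    grid_ids = EditorComponent.get_type_ids(["Grid"], EditorEntityType.LEVEL)

    # Type id is supplied at construction time.
    mesh_component = EditorComponent(mesh_ids[0])

    # Component removal is now symmetric with addition.
    holder = EditorEntity.create_editor_entity("MeshHolder")
    holder.add_component("Mesh")
    holder.remove_component("Mesh")
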
@ -5,15 +5,22 @@ For complete copyright and license terms please see the LICENSE at the root of t
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import collections.abc
from typing import List
from math import isclose
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.entity as entity
import azlmbr.legacy.general as general
import azlmbr.object
from typing import List
from math import isclose
import collections.abc
from editor_python_test_tools.utils import TestHelper as helper
def open_base_level():
helper.init_idle()
helper.open_level("Prefab", "Base")
def find_entity_by_name(entity_name):

@ -48,8 +48,8 @@ def wait_for_propagation():
# This is a helper class which contains some of the useful information about a prefab instance.
class PrefabInstance:
def __init__(self, prefab_file_name: str = None, container_entity: EditorEntity = None):
self.prefab_file_name: str = prefab_file_name
def __init__(self, prefab_file_path: str = None, container_entity: EditorEntity = None):
self.prefab_file_path: str = prefab_file_path
self.container_entity: EditorEntity = container_entity
def __eq__(self, other):
@ -66,7 +66,7 @@ class PrefabInstance:
See if this instance is valid to be used with other prefab operations.
:return: Whether the target instance is valid or not.
"""
return self.container_entity.id.IsValid() and self.prefab_file_name in Prefab.existing_prefabs
return self.container_entity.id.IsValid() and self.prefab_file_path in Prefab.existing_prefabs
def has_editor_prefab_component(self) -> bool:
"""
@ -131,7 +131,7 @@ class PrefabInstance:
has_correct_parent = reparented_container_entity_parent_id.ToString() == parent_entity_id.ToString()
assert has_correct_parent, "Prefab Instance reparented is *not* under the expected parent entity"
current_instance_prefab = Prefab.get_prefab(self.prefab_file_name)
current_instance_prefab = Prefab.get_prefab(self.prefab_file_path)
current_instance_prefab.instances.remove(self)
self.container_entity = reparented_container_entity
@ -161,46 +161,48 @@ class Prefab:
:param file_path: A unique file path of the target prefab.
:return: Whether the target prefab is loaded or not.
"""
return file_path in Prefab.existing_prefabs
for entry in Prefab.existing_prefabs:
Report.info(f"PrefabPath: '{entry}'")
return get_prefab_file_path(file_path) in Prefab.existing_prefabs
@classmethod
def prefab_exists(cls, file_path: str) -> bool:
"""
Check if a prefab exists in the directory for files of prefab tests.
:param file_name: A unique file name of the target prefab.
:param file_path: A unique file path of the target prefab.
:return: Whether the target prefab exists or not.
"""
return path.exists(get_prefab_file_path(file_path))
@classmethod
def get_prefab(cls, file_name: str) -> Prefab:
def get_prefab(cls, file_path: str) -> Prefab:
"""
Return a prefab which can be used immediately.
:param file_name: A unique file name of the target prefab.
:param file_path: A unique file path of the target prefab.
:return: The prefab with the given file path.
"""
assert file_name, "Received an empty file_name"
if Prefab.is_prefab_loaded(file_name):
return Prefab.existing_prefabs[file_name]
assert file_path, "Received an empty file_path"
if Prefab.is_prefab_loaded(file_path):
return Prefab.existing_prefabs[get_prefab_file_path(file_path)]
else:
assert Prefab.prefab_exists(file_name), f"Attempted to get a prefab \"{file_name}\" that doesn't exist"
new_prefab = Prefab(file_name)
Prefab.existing_prefabs[file_name] = Prefab(file_name)
assert Prefab.prefab_exists(file_path), f"Attempted to get a prefab \"{file_path}\" that doesn't exist"
new_prefab = Prefab(file_path)
Prefab.existing_prefabs[new_prefab.file_path] = new_prefab
return new_prefab
@classmethod
def create_prefab(cls, entities: list[EditorEntity], file_name: str, prefab_instance_name: str=None) -> tuple(Prefab, PrefabInstance):
def create_prefab(cls, entities: list[EditorEntity], file_path: str, prefab_instance_name: str=None) -> tuple(Prefab, PrefabInstance):
"""
Create a prefab in memory and return it. The very first instance of this prefab will also be created.
:param entities: The entities that should form the new prefab (along with their descendants).
:param file_name: A unique file name of new prefab.
:param prefab_instance_name: A name for the very first instance generated while prefab creation. The default instance name is the same as file_name.
:param file_path: A unique file path for new prefab.
:param prefab_instance_name: A name for the very first instance generated during prefab creation. The default instance name is the same as the file name in file_path.
:return: Created Prefab object and the very first PrefabInstance object owned by the prefab.
"""
assert not Prefab.is_prefab_loaded(file_name), f"Can't create Prefab '{file_name}' since the prefab already exists"
assert not Prefab.is_prefab_loaded(file_path), f"Can't create Prefab '{file_path}' since the prefab already exists"
new_prefab = Prefab(file_name)
new_prefab = Prefab(file_path)
entity_ids = [entity.id for entity in entities]
create_prefab_result = prefab.PrefabPublicRequestBus(bus.Broadcast, 'CreatePrefabInMemory', entity_ids, new_prefab.file_path)
assert create_prefab_result.IsSuccess(), f"Prefab operation 'CreatePrefab' failed. Error: {create_prefab_result.GetError()}"
@ -210,15 +212,14 @@ class Prefab:
children_entity_ids = container_entity.get_children_ids()
assert len(children_entity_ids) == len(entities), f"Entity count of created prefab instance does *not* match the count of given entities."
if prefab_instance_name:
container_entity.set_name(prefab_instance_name)
wait_for_propagation()
new_prefab_instance = PrefabInstance(file_name, EditorEntity(container_entity_id))
new_prefab_instance = PrefabInstance(new_prefab.file_path, EditorEntity(container_entity_id))
if prefab_instance_name:
new_prefab_instance.container_entity.set_name(prefab_instance_name)
new_prefab.instances.add(new_prefab_instance)
Prefab.existing_prefabs[file_name] = new_prefab
Prefab.existing_prefabs[new_prefab.file_path] = new_prefab
return new_prefab, new_prefab_instance
@classmethod
@ -250,7 +251,7 @@ class Prefab:
assert False, "Not all entities and descendants in target prefabs are deleted."
for instance in prefab_instances:
instance_deleted_prefab = Prefab.get_prefab(instance.prefab_file_name)
instance_deleted_prefab = Prefab.get_prefab(instance.prefab_file_path)
instance_deleted_prefab.instances.remove(instance)
instance = PrefabInstance()
@ -290,8 +291,7 @@ class Prefab:
prefab_file_path = prefab.PrefabPublicRequestBus(bus.Broadcast, 'GetOwningInstancePrefabPath', duplicate_container_entity_id)
assert prefab_file_path, "Returned file path should *not* be empty."
prefab_file_name = Path(prefab_file_path).stem
duplicate_instance_prefab = Prefab.get_prefab(prefab_file_name)
duplicate_instance_prefab = Prefab.get_prefab(prefab_file_path)
duplicate_instance = PrefabInstance(prefab_file_path, EditorEntity(duplicate_container_entity_id))
duplicate_instance_prefab.instances.add(duplicate_instance)
duplicate_instances.append(duplicate_instance)
@ -324,7 +324,7 @@ class Prefab:
wait_for_propagation()
instance_owner_prefab = Prefab.get_prefab(prefab_instance.prefab_file_name)
instance_owner_prefab = Prefab.get_prefab(prefab_instance.prefab_file_path)
instance_owner_prefab.instances.remove(prefab_instance)
prefab_instance = PrefabInstance()
@ -346,13 +346,13 @@ class Prefab:
container_entity_id = instantiate_prefab_result.GetValue()
container_entity = EditorEntity(container_entity_id)
if name:
container_entity.set_name(name)
wait_for_propagation()
new_prefab_instance = PrefabInstance(self.file_path, EditorEntity(container_entity_id))
assert not new_prefab_instance in self.instances, "This prefab instance is already existed before this instantiation."
assert not new_prefab_instance in self.instances, "This prefab instance already existed before this instantiation."
if name:
new_prefab_instance.container_entity.set_name(name)
self.instances.add(new_prefab_instance)
assert new_prefab_instance.is_at_position(prefab_position), "This prefab instance is *not* at expected position."
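A hedged sketch of exercising this instantiate path; the keyword names mirror the variables visible in the hunk above, and wheel_prefab is assumed to come from an earlier create_prefab call:
# Sketch only (assumed signature, based on the names used above):
import azlmbr.math as math
spawn_position = math.Vector3(0.0, 0.0, 32.0)
wheel_instance = wheel_prefab.instantiate(name="Wheel_01", prefab_position=spawn_position)
assert wheel_instance.is_at_position(spawn_position)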

@ -101,30 +101,56 @@ class TestHelper:
Report.critical_result(msgtuple_success_fail, general.is_in_game_mode())
@staticmethod
def multiplayer_enter_game_mode(msgtuple_success_fail: Tuple[str, str], sv_default_player_spawn_asset: str) -> None:
def find_line(window, line, print_infos):
"""
:param msgtuple_success_fail: The tuple with the expected/unexpected messages for entering game mode.
:param sv_default_player_spawn_asset: The path to the network player prefab that will be automatically spawned upon entering gamemode. The engine default is "prefabs/player.network.spawnable"
Looks for an expected line in a list of tracer log lines.
:param window: The log's window name. For example, logs printed via script-canvas use the "Script" window.
:param line: The log message to search for.
:param print_infos: A list of PrintInfos collected by Tracer to search. Example options: your_tracer.warnings, your_tracer.errors, your_tracer.asserts, or your_tracer.prints
:return: None
:return: True if the line is found, otherwise False.
"""
for printInfo in print_infos:
if printInfo.window == window.strip() and printInfo.message.strip() == line:
return True
return False
# looks for an expected line in a list of tracers lines
# lines: the tracer list of lines to search. options are section_tracer.warnings, section_tracer.errors, section_tracer.asserts, section_tracer.prints
# return: true if the line is found, otherwise false
def find_expected_line(expected_line, lines):
found_lines = [printInfo.message.strip() for printInfo in lines]
return expected_line in found_lines
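For context, a small sketch of calling the new find_line helper directly; the window name and message strings here are illustrative:
# Illustrative only: search a Tracer's captured prints for a specific script log line.
from editor_python_test_tools.utils import Tracer, TestHelper
with Tracer() as section_tracer:
    # ... run editor steps that are expected to log via script canvas ...
    found = TestHelper.find_line("Script", "AutoComponent_RPC: I'm Player #1", section_tracer.prints)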
@staticmethod
def succeed_if_log_line_found(window, line, print_infos, time_out):
"""
Looks for a line in a list of tracer log lines and reports success if found.
:param window: The log's window name. For example, logs printed via script-canvas use the "Script" window.
:param line: The log message we're hoping to find.
:param print_infos: A list of PrintInfos collected by Tracer to search. Example options: your_tracer.warnings, your_tracer.errors, your_tracer.asserts, or your_tracer.prints
:param time_out: The total amount of time to wait before giving up looking for the expected line.
def wait_for_critical_expected_line(expected_line, lines, time_out):
TestHelper.wait_for_condition(lambda : find_expected_line(expected_line, lines), time_out)
Report.critical_result(("Found expected line: " + expected_line, "Failed to find expected line: " + expected_line), find_expected_line(expected_line, lines))
:return: No return value, but if the message is found, a successful critical result is reported; otherwise failure.
"""
TestHelper.wait_for_condition(lambda : TestHelper.find_line(window, line, print_infos), time_out)
Report.critical_result(("Found expected line: " + line, "Failed to find expected line: " + line), TestHelper.find_line(window, line, print_infos))
def wait_for_critical_unexpected_line(unexpected_line, lines, time_out):
TestHelper.wait_for_condition(lambda : find_expected_line(unexpected_line, lines), time_out)
Report.critical_result(("Unexpected line not found: " + unexpected_line, "Unexpected line found: " + unexpected_line), not find_expected_line(unexpected_line, lines))
@staticmethod
def fail_if_log_line_found(window, line, print_infos, time_out):
"""
Reports a failure if a log line in a list of tracer log lines is found.
:param window: The log's window name. For example, logs printed via script-canvas use the "Script" window.
:param line: The log message we're hoping to not find.
:param print_infos: A list of PrintInfos collected by Tracer to search. Example options: your_tracer.warnings, your_tracer.errors, your_tracer.asserts, or your_tracer.prints
:param time_out: The total amount of time to wait before giving up looking for the unexpected line. If time runs out without the unexpected line appearing, a success is reported.
:return: No return value, but if the line is found, a failed critical result is reported; otherwise success.
"""
TestHelper.wait_for_condition(lambda : TestHelper.find_line(window, line, print_infos), time_out)
Report.critical_result(("Unexpected line not found: " + line, "Unexpected line found: " + line), not TestHelper.find_line(window, line, print_infos))
@staticmethod
def multiplayer_enter_game_mode(msgtuple_success_fail: Tuple[str, str], sv_default_player_spawn_asset: str) -> None:
"""
:param msgtuple_success_fail: The tuple with the expected/unexpected messages for entering game mode.
:param sv_default_player_spawn_asset: The path to the network player prefab that will be automatically spawned upon entering game mode. The engine default is "prefabs/player.network.spawnable"
:return: None
"""
Report.info("Entering game mode")
if sv_default_player_spawn_asset:
general.set_cvar("sv_defaultPlayerSpawnAsset", sv_default_player_spawn_asset)
@ -135,20 +161,20 @@ class TestHelper:
multiplayer.PythonEditorFuncs_enter_game_mode()
# make sure the server launcher binary exists
wait_for_critical_unexpected_line("LaunchEditorServer failed! The ServerLauncher binary is missing!", section_tracer.errors, 0.5)
TestHelper.fail_if_log_line_found("MultiplayerEditor", "LaunchEditorServer failed! The ServerLauncher binary is missing!", section_tracer.errors, 0.5)
# make sure the server launcher is running
waiter.wait_for(lambda: process_utils.process_exists("AutomatedTesting.ServerLauncher", ignore_extensions=True), timeout=5.0, exc=AssertionError("AutomatedTesting.ServerLauncher has NOT launched!"), interval=1.0)
wait_for_critical_expected_line("MultiplayerEditorConnection: Editor-server activation has found and connected to the editor.", section_tracer.prints, 15.0)
TestHelper.succeed_if_log_line_found("EditorServer", "MultiplayerEditorConnection: Editor-server activation has found and connected to the editor.", section_tracer.prints, 15.0)
wait_for_critical_expected_line("Editor is sending the editor-server the level data packet.", section_tracer.prints, 5.0)
TestHelper.succeed_if_log_line_found("MultiplayerEditor", "Editor is sending the editor-server the level data packet.", section_tracer.prints, 5.0)
wait_for_critical_expected_line("Logger: Editor Server completed receiving the editor's level assets, responding to Editor...", section_tracer.prints, 5.0)
TestHelper.succeed_if_log_line_found("EditorServer", "Logger: Editor Server completed receiving the editor's level assets, responding to Editor...", section_tracer.prints, 5.0)
wait_for_critical_expected_line("Editor-server ready. Editor has successfully connected to the editor-server's network simulation.", section_tracer.prints, 5.0)
TestHelper.succeed_if_log_line_found("MultiplayerEditorConnection", "Editor-server ready. Editor has successfully connected to the editor-server's network simulation.", section_tracer.prints, 5.0)
wait_for_critical_unexpected_line(f"MultiplayerSystemComponent: SpawnDefaultPlayerPrefab failed. Missing sv_defaultPlayerSpawnAsset at path '{sv_default_player_spawn_asset.lower()}'.", section_tracer.prints, 0.5)
TestHelper.fail_if_log_line_found("EditorServer", f"MultiplayerSystemComponent: SpawnDefaultPlayerPrefab failed. Missing sv_defaultPlayerSpawnAsset at path '{sv_default_player_spawn_asset.lower()}'.", section_tracer.prints, 0.5)
TestHelper.wait_for_condition(lambda : multiplayer.PythonEditorFuncs_is_in_game_mode(), 5.0)
Report.critical_result(msgtuple_success_fail, multiplayer.PythonEditorFuncs_is_in_game_mode())
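A caller-side sketch mirroring the tests later in this diff; the level and spawnable path are hypothetical:
# Sketch: enter multiplayer game mode with a specific network player prefab.
helper.multiplayer_enter_game_mode(
    ("Entered game mode", "Failed to enter game mode"),
    "levels/multiplayer/mylevel/player.network.spawnable")  # hypothetical spawnable path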

@ -32,3 +32,10 @@ class TestAutomation(TestAutomationBase):
from .tests import Multiplayer_AutoComponent_NetworkInput as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_Multiplayer_AutoComponent_RPC(self, request, workspace, editor, launcher_platform):
from .tests import Multiplayer_AutoComponent_RPC as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_Multiplayer_SimpleNetworkLevelEntity(self, request, workspace, editor, launcher_platform):
from .tests import Multiplayer_SimpleNetworkLevelEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)

@ -0,0 +1,83 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test Case Title : Check that the four network RPCs can be sent and received
# fmt: off
class TestSuccessFailTuples():
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
find_network_player = ("Found network player", "Couldn't find network player")
# fmt: on
def Multiplayer_AutoComponent_RPC():
r"""
Summary:
Runs a test to make sure that RPCs can be sent and received via script canvas
Level Description:
- Dynamic
1. Although the level is nearly empty, when the server and editor connect, the server will spawn and replicate the player network prefab.
a. The player network prefab has a NetworkTestPlayerComponent.AutoComponent and a script canvas attached which sends and receives various RPCs.
Print logs occur upon sending and receiving the RPCs; we are testing to make sure the expected events and values are received.
- Static
1. NetLevelEntity. This is a networked entity which has a script attached. Used for cross-entity communication. The net-player prefab will send this level entity Server->Authority RPCs.
Expected Outcome:
We should see editor logs stating that RPCs have been sent and received.
However, if the script receives unexpected values for the Process event we will see print logs for bad data as well.
:return:
"""
import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import Tracer
from editor_python_test_tools.utils import TestHelper as helper
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
level_name = "AutoComponent_RPC"
player_prefab_name = "Player"
player_prefab_path = f"levels/multiplayer/{level_name}/{player_prefab_name}.network.spawnable"
helper.init_idle()
# 1) Open Level
helper.open_level("Multiplayer", level_name)
with Tracer() as section_tracer:
# 2) Enter game mode
helper.multiplayer_enter_game_mode(TestSuccessFailTuples.enter_game_mode, player_prefab_path.lower())
# 3) Make sure the network player was spawned
player_id = general.find_game_entity(player_prefab_name)
Report.critical_result(TestSuccessFailTuples.find_network_player, player_id.IsValid())
# 4) Check the editor logs for expected and unexpected log output
PLAYERID_RPC_WAIT_TIME_SECONDS = 1.0 # The player id is sent from the server as soon as the player script is spawned. 1 second should be more than enough time to send/receive that RPC.
helper.succeed_if_log_line_found('EditorServer', 'Script: AutoComponent_RPC: Sending client PlayerNumber 1', section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC: I'm Player #1", section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
# Uncomment once editor game-play mode supports level entities with net-binding
#PLAYFX_RPC_WAIT_TIME_SECONDS = 1.1 # The server will send an RPC to play an fx on the client every second.
#helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity Activated on entity: NetLevelEntity", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
#helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity: Authority sending RPC to play some fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
#helper.succeed_if_log_line_found('Script', "AutoComponent_RPC_NetLevelEntity: I'm a client playing some superficial fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
# Exit game mode
helper.exit_game_mode(TestSuccessFailTuples.exit_game_mode)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(Multiplayer_AutoComponent_RPC)

@ -0,0 +1,76 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test Case Title : Check that level entities with network bindings are properly replicated.
# Note: This test should be run on a fresh editor run; some bugs with spawnables occur only on the first editor play-mode.
# fmt: off
class TestSuccessFailTuples():
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
find_network_player = ("Found network player", "Couldn't find network player")
# fmt: on
def Multiplayer_SimpleNetworkLevelEntity():
r"""
Summary:
Test to make sure that network entities in a level function and are replicated to clients as expected
Level Description:
- Static
1. NetLevelEntity. This is a networked entity which has a script attached which prints logs to ensure it's replicated.
Expected Outcome:
We should see logs stating that the net-sync'd level entity exists on both server and client.
:return:
"""
import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import Tracer
from editor_python_test_tools.utils import TestHelper as helper
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
level_name = "SimpleNetworkLevelEntity"
player_prefab_name = "Player"
player_prefab_path = f"levels/multiplayer/{level_name}/{player_prefab_name}.network.spawnable"
helper.init_idle()
# 1) Open Level
helper.open_level("Multiplayer", level_name)
with Tracer() as section_tracer:
# 2) Enter game mode
helper.multiplayer_enter_game_mode(TestSuccessFailTuples.enter_game_mode, player_prefab_path.lower())
# 3) Make sure the network player was spawned
player_id = general.find_game_entity(player_prefab_name)
Report.critical_result(TestSuccessFailTuples.find_network_player, player_id.IsValid())
# 4) Check the editor logs for network spawnable errors
ATTEMPTING_INVALID_NETSPAWN_WAIT_TIME_SECONDS = 0.0 # The editor will try to net-spawn its networked level entity before it's even a client. Make sure this didn't happen.
helper.fail_if_log_line_found('NetworkEntityManager', "RequestNetSpawnableInstantiation: Requested spawnable Root.network.spawnable doesn't exist in the NetworkSpawnableLibrary. Please make sure it is a network spawnable", section_tracer.errors, ATTEMPTING_INVALID_NETSPAWN_WAIT_TIME_SECONDS)
# 5) Ensure the script graph attached to the level entity is running on the server
SCRIPTGRAPH_ENABLED_WAIT_TIME_SECONDS = 0.25
helper.succeed_if_log_line_found('EditorServer', "Script: SimpleNetworkLevelEntity: On Graph Start", section_tracer.prints, SCRIPTGRAPH_ENABLED_WAIT_TIME_SECONDS)
# Exit game mode
helper.exit_game_mode(TestSuccessFailTuples.exit_game_mode)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(Multiplayer_SimpleNetworkLevelEntity)

@ -127,7 +127,7 @@ def Collider_SameCollisionGroupSameCustomLayerCollide():
# Main Script
# 1) Load the level
helper.init_idle()
helper.open_level("physics", "Collider_SameCollisionGroupSameCustomLayerCollide")
helper.open_level("Physics", "Collider_SameCollisionGroupSameCustomLayerCollide")
# 2) Enter Game Mode
helper.enter_game_mode(Tests.enter_game_mode)

@ -162,7 +162,7 @@ def ForceRegion_MultipleForcesInSameComponentCombineForces():
helper.init_idle()
# 1) Load Level
helper.open_level("physics", "ForceRegion_MultipleForcesInSameComponentCombineForces")
helper.open_level("Physics", "ForceRegion_MultipleForcesInSameComponentCombineForces")
# 2) Enter Game Mode
helper.enter_game_mode(Tests.enter_game_mode)

@ -229,8 +229,8 @@ def Material_DefaultLibraryUpdatedAcrossLevels_after():
for test in test_list:
# 1) Ensure the correct level is open
helper.open_level(
"physics",
f"Material_DefaultLibraryUpdatedAcrossLevels\\{test.level}"
"Physics",
os.path.join("Material_DefaultLibraryUpdatedAcrossLevels", str(test.level))
)
# 2) Enter Game Mode

@ -189,7 +189,7 @@ def Material_DefaultLibraryUpdatedAcrossLevels_before():
# 1) Ensure the correct level is open
helper.open_level(
"Physics",
f"Material_DefaultLibraryUpdatedAcrossLevels\\{test.level}"
os.path.join("Material_DefaultLibraryUpdatedAcrossLevels", str(test.level))
)
# 2) Enter Game Mode

@ -252,10 +252,8 @@ def Material_LibraryUpdatedAcrossLevels():
for test in test_list:
# 1) Open the correct level for the test
helper.open_level(
"physics",
"Material_LibraryUpdatedAcrossLevels\\Material_LibraryUpdatedAcrossLevels_{}".format(
test.level_index
),
"Physics",
os.path.join("Material_LibraryUpdatedAcrossLevels", str(test.level_index))
)
# 2) Open Game Mode
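The repeated physics fixes above swap hard-coded backslash paths for os.path.join, which keeps sub-level paths portable; a quick illustration:
# "Material_LibraryUpdatedAcrossLevels\\..." only resolves on Windows;
# os.path.join uses the host separator, so the same test also runs on Linux.
import os
level = os.path.join("Material_LibraryUpdatedAcrossLevels", "Material_LibraryUpdatedAcrossLevels_1")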

@ -106,7 +106,7 @@ def ScriptCanvas_SpawnEntityWithPhysComponents():
# Main Script
helper.init_idle()
# 1) Open Level
helper.open_level("physics", "ScriptCanvas_SpawnEntityWithPhysComponents")
helper.open_level("Physics", "ScriptCanvas_SpawnEntityWithPhysComponents")
# 2) Enter Game Mode
helper.enter_game_mode(Tests.enter_game_mode)

@ -38,6 +38,10 @@ class TestAutomation(TestAutomationBase):
from Prefab.tests.instantiate_prefab import InstantiatePrefab_ContainingASingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_InstantiatePrefab_FromCreatedPrefabWithSingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.instantiate_prefab import InstantiatePrefab_FromCreatedPrefabWithSingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_DeletePrefab_ContainingASingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.delete_prefab import DeletePrefab_ContainingASingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)

@ -45,8 +45,12 @@ class TestAutomationNoAutoTestMode(EditorTestSuite):
class test_InstantiatePrefab_ContainingASingleEntity(EditorSharedTest):
from .tests.instantiate_prefab import InstantiatePrefab_ContainingASingleEntity as test_module
class test_InstantiatePrefab_FromCreatedPrefabWithSingleEntity(EditorSharedTest):
from .tests.instantiate_prefab import InstantiatePrefab_FromCreatedPrefabWithSingleEntity as test_module
class test_DeletePrefab_ContainingASingleEntity(EditorSharedTest):
from .tests.delete_prefab import DeletePrefab_ContainingASingleEntity as test_module
class test_DuplicatePrefab_ContainingASingleEntity(EditorSharedTest):
from .tests.duplicate_prefab import DuplicatePrefab_ContainingASingleEntity as test_module

@ -13,7 +13,8 @@ def CreatePrefab_UnderAnEntity():
Test is successful if the new instanced prefab of the child has the parent entity id
"""
CAR_PREFAB_FILE_NAME = 'car_prefab'
from pathlib import Path
CAR_PREFAB_FILE_NAME = Path(__file__).stem + 'car_prefab'
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.prefab_utils import Prefab

@ -7,7 +7,8 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
def CreatePrefab_WithSingleEntity():
CAR_PREFAB_FILE_NAME = 'car_prefab'
from pathlib import Path
CAR_PREFAB_FILE_NAME = Path(__file__).stem + 'car_prefab'
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.utils import Report

@ -7,7 +7,8 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
def DeletePrefab_ContainingASingleEntity():
CAR_PREFAB_FILE_NAME = 'car_prefab'
from pathlib import Path
CAR_PREFAB_FILE_NAME = Path(__file__).stem + 'car_prefab'
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.prefab_utils import Prefab

@ -7,8 +7,9 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
def DetachPrefab_UnderAnotherPrefab():
CAR_PREFAB_FILE_NAME = 'car_prefab2'
WHEEL_PREFAB_FILE_NAME = 'wheel_prefab2'
from pathlib import Path
CAR_PREFAB_FILE_NAME = Path(__file__).stem + 'car_prefab'
WHEEL_PREFAB_FILE_NAME = Path(__file__).stem + 'wheel_prefab'
import editor_python_test_tools.pyside_utils as pyside_utils
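Prefixing each prefab name with Path(__file__).stem, as these hunks do, makes the prefab file unique per test module so batched editor runs don't collide on the same path; for example:
# In a test file named DetachPrefab_UnderAnotherPrefab.py:
from pathlib import Path
CAR_PREFAB_FILE_NAME = Path(__file__).stem + 'car_prefab'
# -> 'DetachPrefab_UnderAnotherPrefabcar_prefab', distinct for every test module.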
