merging latest dev
Signed-off-by: antonmic <56370189+antonmic@users.noreply.github.com>monroegm-disable-blank-issue-2
commit
9f0b6d65f4
@ -0,0 +1,14 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 24 24" style="enable-background:new 0 0 24 24;" xml:space="preserve">
|
||||||
|
<path d="M3.145,8.433c0-1.47,1.196-2.666,2.666-2.666h9.544c-0.158-0.819-0.88-1.443-1.744-1.443H3.487
|
||||||
|
c-0.978,0-1.778,0.8-1.778,1.778v5.356c0,0.861,0.62,1.582,1.436,1.743V8.433z" fill="#FFFFFF"/>
|
||||||
|
<g>
|
||||||
|
<path d="M6.833,11.654c0-1.47,1.196-2.666,2.666-2.666h9.069c-0.158-0.819-0.88-1.443-1.744-1.443H6.7
|
||||||
|
c-0.978,0-1.778,0.8-1.778,1.778v5.356c0,0.978,0.8,1.778,1.778,1.778h0.133V11.654z" fill="#FFFFFF"/>
|
||||||
|
</g>
|
||||||
|
<path d="M20.513,10.765H10.388c-0.978,0-1.778,0.8-1.778,1.777v5.356c0,0.978,0.8,1.778,1.778,1.778h10.125
|
||||||
|
c0.978,0,1.778-0.8,1.778-1.778v-5.356C22.29,11.565,21.49,10.765,20.513,10.765z M19.332,15.967h-7.763
|
||||||
|
c-0.264,0-0.478-0.355-0.478-0.793c0-0.438,0.214-0.793,0.478-0.793h7.763c0.264,0,0.478,0.355,0.478,0.793
|
||||||
|
C19.81,15.612,19.597,15.967,19.332,15.967z" fill="#FFFFFF"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 1.0 KiB |
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:bfc2ef8c6dbf2fba078e27e4e94384099e090468e679327dd826a5cbf22b04ed
|
|
||||||
size 1019
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:708b12d41229afab78e0f7d59097ae3de855fea8525a920c5c214fc0ce79f1bd
|
|
||||||
size 1209
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:fab63af9b50790dca25330058e70517987ea8bf11c00f9353dd951ebdbd1dbe5
|
|
||||||
size 5008
|
|
||||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:7f94f2634eacb4d7bee20dacc45edef96e4d268f1adb7960b8aa8f3b6e2906ed
|
||||||
|
size 6867609
|
||||||
@ -0,0 +1,129 @@
|
|||||||
|
{
|
||||||
|
"ContainerEntity": {
|
||||||
|
"Id": "ContainerEntity",
|
||||||
|
"Name": "Bush",
|
||||||
|
"Components": {
|
||||||
|
"Component_[1140272189295067758]": {
|
||||||
|
"$type": "EditorInspectorComponent",
|
||||||
|
"Id": 1140272189295067758
|
||||||
|
},
|
||||||
|
"Component_[13437832196484687256]": {
|
||||||
|
"$type": "EditorOnlyEntityComponent",
|
||||||
|
"Id": 13437832196484687256
|
||||||
|
},
|
||||||
|
"Component_[1553903646452669645]": {
|
||||||
|
"$type": "EditorDisabledCompositionComponent",
|
||||||
|
"Id": 1553903646452669645
|
||||||
|
},
|
||||||
|
"Component_[15914009348632444632]": {
|
||||||
|
"$type": "EditorEntitySortComponent",
|
||||||
|
"Id": 15914009348632444632,
|
||||||
|
"Child Entity Order": [
|
||||||
|
"Entity_[7511491868318]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"Component_[18046340308818780248]": {
|
||||||
|
"$type": "EditorPrefabComponent",
|
||||||
|
"Id": 18046340308818780248
|
||||||
|
},
|
||||||
|
"Component_[1948833233489872938]": {
|
||||||
|
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
|
||||||
|
"Id": 1948833233489872938,
|
||||||
|
"Parent Entity": ""
|
||||||
|
},
|
||||||
|
"Component_[2903632350157981339]": {
|
||||||
|
"$type": "SelectionComponent",
|
||||||
|
"Id": 2903632350157981339
|
||||||
|
},
|
||||||
|
"Component_[48827510535192710]": {
|
||||||
|
"$type": "EditorPendingCompositionComponent",
|
||||||
|
"Id": 48827510535192710
|
||||||
|
},
|
||||||
|
"Component_[5609536793322429681]": {
|
||||||
|
"$type": "EditorLockComponent",
|
||||||
|
"Id": 5609536793322429681
|
||||||
|
},
|
||||||
|
"Component_[5859168386298620990]": {
|
||||||
|
"$type": "EditorEntityIconComponent",
|
||||||
|
"Id": 5859168386298620990
|
||||||
|
},
|
||||||
|
"Component_[6604616929271524505]": {
|
||||||
|
"$type": "EditorVisibilityComponent",
|
||||||
|
"Id": 6604616929271524505
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Entities": {
|
||||||
|
"Entity_[7511491868318]": {
|
||||||
|
"Id": "Entity_[7511491868318]",
|
||||||
|
"Name": "Bush",
|
||||||
|
"Components": {
|
||||||
|
"Component_[10227459330338484901]": {
|
||||||
|
"$type": "EditorInspectorComponent",
|
||||||
|
"Id": 10227459330338484901,
|
||||||
|
"ComponentOrderEntryArray": [
|
||||||
|
{
|
||||||
|
"ComponentId": 4998941225335869157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ComponentId": 9922994635792843826,
|
||||||
|
"SortIndex": 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"Component_[10972351222359420947]": {
|
||||||
|
"$type": "EditorOnlyEntityComponent",
|
||||||
|
"Id": 10972351222359420947
|
||||||
|
},
|
||||||
|
"Component_[12101122374155214392]": {
|
||||||
|
"$type": "EditorPendingCompositionComponent",
|
||||||
|
"Id": 12101122374155214392
|
||||||
|
},
|
||||||
|
"Component_[1535264614652988260]": {
|
||||||
|
"$type": "SelectionComponent",
|
||||||
|
"Id": 1535264614652988260
|
||||||
|
},
|
||||||
|
"Component_[16367811417907891218]": {
|
||||||
|
"$type": "EditorVisibilityComponent",
|
||||||
|
"Id": 16367811417907891218
|
||||||
|
},
|
||||||
|
"Component_[17044216787716682880]": {
|
||||||
|
"$type": "EditorEntitySortComponent",
|
||||||
|
"Id": 17044216787716682880
|
||||||
|
},
|
||||||
|
"Component_[2129822594969629430]": {
|
||||||
|
"$type": "EditorEntityIconComponent",
|
||||||
|
"Id": 2129822594969629430
|
||||||
|
},
|
||||||
|
"Component_[2838015156782745450]": {
|
||||||
|
"$type": "EditorLockComponent",
|
||||||
|
"Id": 2838015156782745450
|
||||||
|
},
|
||||||
|
"Component_[4998941225335869157]": {
|
||||||
|
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
|
||||||
|
"Id": 4998941225335869157,
|
||||||
|
"Parent Entity": "ContainerEntity"
|
||||||
|
},
|
||||||
|
"Component_[8773358049076362578]": {
|
||||||
|
"$type": "EditorDisabledCompositionComponent",
|
||||||
|
"Id": 8773358049076362578
|
||||||
|
},
|
||||||
|
"Component_[9922994635792843826]": {
|
||||||
|
"$type": "AZ::Render::EditorMeshComponent",
|
||||||
|
"Id": 9922994635792843826,
|
||||||
|
"Controller": {
|
||||||
|
"Configuration": {
|
||||||
|
"ModelAsset": {
|
||||||
|
"assetId": {
|
||||||
|
"guid": "{1201406D-FB20-5B5F-B9B5-6A6E8DE00A14}",
|
||||||
|
"subId": 276506120
|
||||||
|
},
|
||||||
|
"assetHint": "assets/objects/foliage/bush_privet_01.azmodel"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,129 @@
|
|||||||
|
{
|
||||||
|
"ContainerEntity": {
|
||||||
|
"Id": "ContainerEntity",
|
||||||
|
"Name": "PurpleFlower",
|
||||||
|
"Components": {
|
||||||
|
"Component_[10519928302743666073]": {
|
||||||
|
"$type": "EditorPrefabComponent",
|
||||||
|
"Id": 10519928302743666073
|
||||||
|
},
|
||||||
|
"Component_[13894087802180240181]": {
|
||||||
|
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
|
||||||
|
"Id": 13894087802180240181,
|
||||||
|
"Parent Entity": ""
|
||||||
|
},
|
||||||
|
"Component_[15788541052719571801]": {
|
||||||
|
"$type": "EditorEntityIconComponent",
|
||||||
|
"Id": 15788541052719571801
|
||||||
|
},
|
||||||
|
"Component_[15842981265136092481]": {
|
||||||
|
"$type": "SelectionComponent",
|
||||||
|
"Id": 15842981265136092481
|
||||||
|
},
|
||||||
|
"Component_[16360384897559021149]": {
|
||||||
|
"$type": "EditorInspectorComponent",
|
||||||
|
"Id": 16360384897559021149
|
||||||
|
},
|
||||||
|
"Component_[16713545675046303279]": {
|
||||||
|
"$type": "EditorVisibilityComponent",
|
||||||
|
"Id": 16713545675046303279
|
||||||
|
},
|
||||||
|
"Component_[1806734194268113785]": {
|
||||||
|
"$type": "EditorPendingCompositionComponent",
|
||||||
|
"Id": 1806734194268113785
|
||||||
|
},
|
||||||
|
"Component_[5392020700593853313]": {
|
||||||
|
"$type": "EditorEntitySortComponent",
|
||||||
|
"Id": 5392020700593853313,
|
||||||
|
"Child Entity Order": [
|
||||||
|
"Entity_[14335611090324]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"Component_[5995854518752659458]": {
|
||||||
|
"$type": "EditorLockComponent",
|
||||||
|
"Id": 5995854518752659458
|
||||||
|
},
|
||||||
|
"Component_[6963022284400845376]": {
|
||||||
|
"$type": "EditorDisabledCompositionComponent",
|
||||||
|
"Id": 6963022284400845376
|
||||||
|
},
|
||||||
|
"Component_[8055275578170091546]": {
|
||||||
|
"$type": "EditorOnlyEntityComponent",
|
||||||
|
"Id": 8055275578170091546
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Entities": {
|
||||||
|
"Entity_[14335611090324]": {
|
||||||
|
"Id": "Entity_[14335611090324]",
|
||||||
|
"Name": "PurpleFlower",
|
||||||
|
"Components": {
|
||||||
|
"Component_[10887353073528055802]": {
|
||||||
|
"$type": "EditorPendingCompositionComponent",
|
||||||
|
"Id": 10887353073528055802
|
||||||
|
},
|
||||||
|
"Component_[12641127425852859189]": {
|
||||||
|
"$type": "AZ::Render::EditorMeshComponent",
|
||||||
|
"Id": 12641127425852859189,
|
||||||
|
"Controller": {
|
||||||
|
"Configuration": {
|
||||||
|
"ModelAsset": {
|
||||||
|
"assetId": {
|
||||||
|
"guid": "{D493A670-6D82-5AE9-A2C8-A2EB02684F71}",
|
||||||
|
"subId": 284799939
|
||||||
|
},
|
||||||
|
"assetHint": "assets/objects/foliage/grass_flower_purple.azmodel"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Component_[14406733303466080015]": {
|
||||||
|
"$type": "EditorInspectorComponent",
|
||||||
|
"Id": 14406733303466080015,
|
||||||
|
"ComponentOrderEntryArray": [
|
||||||
|
{
|
||||||
|
"ComponentId": 9231452352781000222
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ComponentId": 12641127425852859189,
|
||||||
|
"SortIndex": 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"Component_[1452384341905923012]": {
|
||||||
|
"$type": "EditorLockComponent",
|
||||||
|
"Id": 1452384341905923012
|
||||||
|
},
|
||||||
|
"Component_[2215454016415585892]": {
|
||||||
|
"$type": "EditorDisabledCompositionComponent",
|
||||||
|
"Id": 2215454016415585892
|
||||||
|
},
|
||||||
|
"Component_[4104108067383423623]": {
|
||||||
|
"$type": "EditorVisibilityComponent",
|
||||||
|
"Id": 4104108067383423623
|
||||||
|
},
|
||||||
|
"Component_[4197335450471807917]": {
|
||||||
|
"$type": "SelectionComponent",
|
||||||
|
"Id": 4197335450471807917
|
||||||
|
},
|
||||||
|
"Component_[6877680739064997650]": {
|
||||||
|
"$type": "EditorOnlyEntityComponent",
|
||||||
|
"Id": 6877680739064997650
|
||||||
|
},
|
||||||
|
"Component_[7372550507186490390]": {
|
||||||
|
"$type": "EditorEntityIconComponent",
|
||||||
|
"Id": 7372550507186490390
|
||||||
|
},
|
||||||
|
"Component_[7673532337364366244]": {
|
||||||
|
"$type": "EditorEntitySortComponent",
|
||||||
|
"Id": 7673532337364366244
|
||||||
|
},
|
||||||
|
"Component_[9231452352781000222]": {
|
||||||
|
"$type": "{27F1E1A1-8D9D-4C3B-BD3A-AFB9762449C0} TransformComponent",
|
||||||
|
"Id": 9231452352781000222,
|
||||||
|
"Parent Entity": "ContainerEntity"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:3789abdf439a6d70438fd4bb1e06881ae6686a4699209c6bc371d22d161e5347
|
||||||
|
size 26476
|
||||||
@ -0,0 +1,3 @@
|
|||||||
|
version https://git-lfs.github.com/spec/v1
|
||||||
|
oid sha256:c987d7d79685fda83efcffb7e1afbcd356c37fc68ec5c663a89b02d4df10caea
|
||||||
|
size 46412
|
||||||
@ -0,0 +1,15 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
|
||||||
|
<Component
|
||||||
|
Name="NetworkTestLevelEntityComponent"
|
||||||
|
Namespace="AutomatedTesting"
|
||||||
|
OverrideComponent="false"
|
||||||
|
OverrideController="false"
|
||||||
|
OverrideInclude=""
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||||
|
|
||||||
|
<ComponentRelation Constraint="Required" HasController="true" Name="NetworkTransformComponent" Namespace="Multiplayer" Include="Multiplayer/Components/NetworkTransformComponent.h" />
|
||||||
|
|
||||||
|
<RemoteProcedure Name="AuthorityToClientNoParams_PlayFx" InvokeFrom="Authority" HandleOn="Client" IsPublic="false" IsReliable="true" GenerateEventBindings="true" Description="" />
|
||||||
|
|
||||||
|
</Component>
|
||||||
@ -1,6 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
@ -1,289 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import pytest
|
|
||||||
import typing
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
import ly_test_tools.log.log_monitor
|
|
||||||
|
|
||||||
from AWS.common import constants
|
|
||||||
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
|
||||||
from .aws_metrics_custom_thread import AWSMetricsThread
|
|
||||||
|
|
||||||
# fixture imports
|
|
||||||
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
|
||||||
from .aws_metrics_utils import aws_metrics_utils
|
|
||||||
|
|
||||||
AWS_METRICS_FEATURE_NAME = 'AWSMetrics'
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def setup(launcher: pytest.fixture,
|
|
||||||
asset_processor: pytest.fixture) -> pytest.fixture:
|
|
||||||
"""
|
|
||||||
Set up the resource mapping configuration and start the log monitor.
|
|
||||||
:param launcher: Client launcher for running the test level.
|
|
||||||
:param asset_processor: asset_processor fixture.
|
|
||||||
:return log monitor object.
|
|
||||||
"""
|
|
||||||
asset_processor.start()
|
|
||||||
asset_processor.wait_for_idle()
|
|
||||||
|
|
||||||
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
|
|
||||||
|
|
||||||
# Initialize the log monitor.
|
|
||||||
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
|
|
||||||
|
|
||||||
return log_monitor
|
|
||||||
|
|
||||||
|
|
||||||
def monitor_metrics_submission(log_monitor: pytest.fixture) -> None:
|
|
||||||
"""
|
|
||||||
Monitor the messages and notifications for submitting metrics.
|
|
||||||
:param log_monitor: Log monitor to check the log messages.
|
|
||||||
"""
|
|
||||||
expected_lines = [
|
|
||||||
'(Script) - Submitted metrics without buffer.',
|
|
||||||
'(Script) - Submitted metrics with buffer.',
|
|
||||||
'(Script) - Flushed the buffered metrics.',
|
|
||||||
'(Script) - Metrics is sent successfully.'
|
|
||||||
]
|
|
||||||
|
|
||||||
unexpected_lines = [
|
|
||||||
'(Script) - Failed to submit metrics without buffer.',
|
|
||||||
'(Script) - Failed to submit metrics with buffer.',
|
|
||||||
'(Script) - Failed to send metrics.'
|
|
||||||
]
|
|
||||||
|
|
||||||
result = log_monitor.monitor_log_for_lines(
|
|
||||||
expected_lines=expected_lines,
|
|
||||||
unexpected_lines=unexpected_lines,
|
|
||||||
halt_on_unexpected=True)
|
|
||||||
|
|
||||||
# Assert the log monitor detected expected lines and did not detect any unexpected lines.
|
|
||||||
assert result, (
|
|
||||||
f'Log monitoring failed. Used expected_lines values: {expected_lines} & '
|
|
||||||
f'unexpected_lines values: {unexpected_lines}')
|
|
||||||
|
|
||||||
|
|
||||||
def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
|
|
||||||
"""
|
|
||||||
Verify that the metrics events are delivered to the S3 bucket and can be queried.
|
|
||||||
:param aws_metrics_utils: aws_metrics_utils fixture.
|
|
||||||
:param resource_mappings: resource_mappings fixture.
|
|
||||||
"""
|
|
||||||
aws_metrics_utils.verify_s3_delivery(
|
|
||||||
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName')
|
|
||||||
)
|
|
||||||
logger.info('Metrics are sent to S3.')
|
|
||||||
|
|
||||||
aws_metrics_utils.run_glue_crawler(
|
|
||||||
resource_mappings.get_resource_name_id('AWSMetrics.EventsCrawlerName'))
|
|
||||||
|
|
||||||
# Remove the events_json table if exists so that the sample query can create a table with the same name.
|
|
||||||
aws_metrics_utils.delete_table(resource_mappings.get_resource_name_id('AWSMetrics.EventDatabaseName'), 'events_json')
|
|
||||||
aws_metrics_utils.run_named_queries(resource_mappings.get_resource_name_id('AWSMetrics.AthenaWorkGroupName'))
|
|
||||||
logger.info('Query metrics from S3 successfully.')
|
|
||||||
|
|
||||||
|
|
||||||
def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
|
|
||||||
resource_mappings: pytest.fixture, start_time: datetime) -> None:
|
|
||||||
"""
|
|
||||||
Verify that operational health metrics are delivered to CloudWatch.
|
|
||||||
:param aws_metrics_utils: aws_metrics_utils fixture.
|
|
||||||
:param resource_mappings: resource_mappings fixture.
|
|
||||||
:param start_time: Time when the game launcher starts.
|
|
||||||
"""
|
|
||||||
aws_metrics_utils.verify_cloud_watch_delivery(
|
|
||||||
'AWS/Lambda',
|
|
||||||
'Invocations',
|
|
||||||
[{'Name': 'FunctionName',
|
|
||||||
'Value': resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsProcessingLambdaName')}],
|
|
||||||
start_time)
|
|
||||||
logger.info('AnalyticsProcessingLambda metrics are sent to CloudWatch.')
|
|
||||||
|
|
||||||
aws_metrics_utils.verify_cloud_watch_delivery(
|
|
||||||
'AWS/Lambda',
|
|
||||||
'Invocations',
|
|
||||||
[{'Name': 'FunctionName',
|
|
||||||
'Value': resource_mappings.get_resource_name_id('AWSMetrics.EventProcessingLambdaName')}],
|
|
||||||
start_time)
|
|
||||||
logger.info('EventsProcessingLambda metrics are sent to CloudWatch.')
|
|
||||||
|
|
||||||
|
|
||||||
def update_kinesis_analytics_application_status(aws_metrics_utils: pytest.fixture,
|
|
||||||
resource_mappings: pytest.fixture, start_application: bool) -> None:
|
|
||||||
"""
|
|
||||||
Update the Kinesis analytics application to start or stop it.
|
|
||||||
:param aws_metrics_utils: aws_metrics_utils fixture.
|
|
||||||
:param resource_mappings: resource_mappings fixture.
|
|
||||||
:param start_application: whether to start or stop the application.
|
|
||||||
"""
|
|
||||||
if start_application:
|
|
||||||
aws_metrics_utils.start_kinesis_data_analytics_application(
|
|
||||||
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
|
|
||||||
else:
|
|
||||||
aws_metrics_utils.stop_kinesis_data_analytics_application(
|
|
||||||
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
|
|
||||||
|
|
||||||
@pytest.mark.SUITE_awsi
|
|
||||||
@pytest.mark.usefixtures('automatic_process_killer')
|
|
||||||
@pytest.mark.usefixtures('aws_credentials')
|
|
||||||
@pytest.mark.usefixtures('resource_mappings')
|
|
||||||
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
|
|
||||||
@pytest.mark.parametrize('feature_name', [AWS_METRICS_FEATURE_NAME])
|
|
||||||
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
|
|
||||||
@pytest.mark.parametrize('project', ['AutomatedTesting'])
|
|
||||||
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
|
|
||||||
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
|
|
||||||
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
|
|
||||||
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_METRICS_FEATURE_NAME}-{constants.AWS_REGION}']])
|
|
||||||
class TestAWSMetricsWindows(object):
|
|
||||||
"""
|
|
||||||
Test class to verify the real-time and batch analytics for metrics.
|
|
||||||
"""
|
|
||||||
@pytest.mark.parametrize('level', ['AWS/Metrics'])
|
|
||||||
def test_realtime_and_batch_analytics(self,
|
|
||||||
level: str,
|
|
||||||
launcher: pytest.fixture,
|
|
||||||
asset_processor: pytest.fixture,
|
|
||||||
workspace: pytest.fixture,
|
|
||||||
aws_utils: pytest.fixture,
|
|
||||||
resource_mappings: pytest.fixture,
|
|
||||||
aws_metrics_utils: pytest.fixture):
|
|
||||||
"""
|
|
||||||
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
|
|
||||||
"""
|
|
||||||
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
|
|
||||||
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, True))
|
|
||||||
kinesis_analytics_application_thread.start()
|
|
||||||
|
|
||||||
log_monitor = setup(launcher, asset_processor)
|
|
||||||
|
|
||||||
# Kinesis analytics application needs to be in the running state before we start the game launcher.
|
|
||||||
kinesis_analytics_application_thread.join()
|
|
||||||
launcher.args = ['+LoadLevel', level]
|
|
||||||
launcher.args.extend(['-rhi=null'])
|
|
||||||
start_time = datetime.utcnow()
|
|
||||||
with launcher.start(launch_ap=False):
|
|
||||||
monitor_metrics_submission(log_monitor)
|
|
||||||
|
|
||||||
# Verify that real-time analytics metrics are delivered to CloudWatch.
|
|
||||||
aws_metrics_utils.verify_cloud_watch_delivery(
|
|
||||||
AWS_METRICS_FEATURE_NAME,
|
|
||||||
'TotalLogins',
|
|
||||||
[],
|
|
||||||
start_time)
|
|
||||||
logger.info('Real-time metrics are sent to CloudWatch.')
|
|
||||||
|
|
||||||
# Run time-consuming operations on separate threads to avoid blocking the test.
|
|
||||||
operational_threads = list()
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=query_metrics_from_s3,
|
|
||||||
args=(aws_metrics_utils, resource_mappings)))
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=verify_operational_metrics,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, start_time)))
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, False)))
|
|
||||||
for thread in operational_threads:
|
|
||||||
thread.start()
|
|
||||||
for thread in operational_threads:
|
|
||||||
thread.join()
|
|
||||||
|
|
||||||
@pytest.mark.parametrize('level', ['AWS/Metrics'])
|
|
||||||
def test_realtime_and_batch_analytics_no_global_accountid(self,
|
|
||||||
level: str,
|
|
||||||
launcher: pytest.fixture,
|
|
||||||
asset_processor: pytest.fixture,
|
|
||||||
workspace: pytest.fixture,
|
|
||||||
aws_utils: pytest.fixture,
|
|
||||||
resource_mappings: pytest.fixture,
|
|
||||||
aws_metrics_utils: pytest.fixture):
|
|
||||||
"""
|
|
||||||
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
|
|
||||||
"""
|
|
||||||
# Remove top-level account ID from resource mappings
|
|
||||||
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
|
|
||||||
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
|
|
||||||
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, True))
|
|
||||||
kinesis_analytics_application_thread.start()
|
|
||||||
|
|
||||||
log_monitor = setup(launcher, asset_processor)
|
|
||||||
|
|
||||||
# Kinesis analytics application needs to be in the running state before we start the game launcher.
|
|
||||||
kinesis_analytics_application_thread.join()
|
|
||||||
launcher.args = ['+LoadLevel', level]
|
|
||||||
launcher.args.extend(['-rhi=null'])
|
|
||||||
start_time = datetime.utcnow()
|
|
||||||
with launcher.start(launch_ap=False):
|
|
||||||
monitor_metrics_submission(log_monitor)
|
|
||||||
|
|
||||||
# Verify that real-time analytics metrics are delivered to CloudWatch.
|
|
||||||
aws_metrics_utils.verify_cloud_watch_delivery(
|
|
||||||
AWS_METRICS_FEATURE_NAME,
|
|
||||||
'TotalLogins',
|
|
||||||
[],
|
|
||||||
start_time)
|
|
||||||
logger.info('Real-time metrics are sent to CloudWatch.')
|
|
||||||
|
|
||||||
# Run time-consuming operations on separate threads to avoid blocking the test.
|
|
||||||
operational_threads = list()
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=query_metrics_from_s3,
|
|
||||||
args=(aws_metrics_utils, resource_mappings)))
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=verify_operational_metrics,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, start_time)))
|
|
||||||
operational_threads.append(
|
|
||||||
AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
|
||||||
args=(aws_metrics_utils, resource_mappings, False)))
|
|
||||||
for thread in operational_threads:
|
|
||||||
thread.start()
|
|
||||||
for thread in operational_threads:
|
|
||||||
thread.join()
|
|
||||||
|
|
||||||
@pytest.mark.parametrize('level', ['AWS/Metrics'])
|
|
||||||
def test_unauthorized_user_request_rejected(self,
|
|
||||||
level: str,
|
|
||||||
launcher: pytest.fixture,
|
|
||||||
asset_processor: pytest.fixture,
|
|
||||||
workspace: pytest.fixture):
|
|
||||||
"""
|
|
||||||
Verify that unauthorized users cannot send metrics events to the AWS backed backend.
|
|
||||||
"""
|
|
||||||
log_monitor = setup(launcher, asset_processor)
|
|
||||||
|
|
||||||
# Set invalid AWS credentials.
|
|
||||||
launcher.args = ['+LoadLevel', level, '+cl_awsAccessKey', 'AKIAIOSFODNN7EXAMPLE',
|
|
||||||
'+cl_awsSecretKey', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY']
|
|
||||||
launcher.args.extend(['-rhi=null'])
|
|
||||||
|
|
||||||
with launcher.start(launch_ap=False):
|
|
||||||
result = log_monitor.monitor_log_for_lines(
|
|
||||||
expected_lines=['(Script) - Failed to send metrics.'],
|
|
||||||
unexpected_lines=['(Script) - Metrics is sent successfully.'],
|
|
||||||
halt_on_unexpected=True)
|
|
||||||
assert result, 'Metrics events are sent successfully by unauthorized user'
|
|
||||||
logger.info('Unauthorized user is rejected to send metrics.')
|
|
||||||
|
|
||||||
def test_clean_up_s3_bucket(self,
|
|
||||||
aws_utils: pytest.fixture,
|
|
||||||
resource_mappings: pytest.fixture,
|
|
||||||
aws_metrics_utils: pytest.fixture):
|
|
||||||
"""
|
|
||||||
Clear the analytics bucket objects so that the S3 bucket can be destroyed during tear down.
|
|
||||||
"""
|
|
||||||
aws_metrics_utils.empty_bucket(
|
|
||||||
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName'))
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
from threading import Thread
|
|
||||||
|
|
||||||
|
|
||||||
class AWSMetricsThread(Thread):
|
|
||||||
"""
|
|
||||||
Custom thread for raising assertion errors on the main thread.
|
|
||||||
"""
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
self._error = None
|
|
||||||
|
|
||||||
def run(self) -> None:
|
|
||||||
try:
|
|
||||||
super().run()
|
|
||||||
except AssertionError as e:
|
|
||||||
self._error = e
|
|
||||||
|
|
||||||
def join(self, **kwargs) -> None:
|
|
||||||
super().join(**kwargs)
|
|
||||||
|
|
||||||
if self._error:
|
|
||||||
raise AssertionError(self._error)
|
|
||||||
@ -1,239 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import pathlib
|
|
||||||
import pytest
|
|
||||||
import typing
|
|
||||||
|
|
||||||
from datetime import datetime
|
|
||||||
from botocore.exceptions import WaiterError
|
|
||||||
|
|
||||||
from .aws_metrics_waiters import KinesisAnalyticsApplicationUpdatedWaiter, \
|
|
||||||
CloudWatchMetricsDeliveredWaiter, DataLakeMetricsDeliveredWaiter, GlueCrawlerReadyWaiter
|
|
||||||
|
|
||||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
|
||||||
|
|
||||||
# Expected directory and file extension for the S3 objects.
|
|
||||||
EXPECTED_S3_DIRECTORY = 'firehose_events/'
|
|
||||||
EXPECTED_S3_OBJECT_EXTENSION = '.parquet'
|
|
||||||
|
|
||||||
|
|
||||||
class AWSMetricsUtils:
|
|
||||||
"""
|
|
||||||
Provide utils functions for the AWSMetrics gem to interact with the deployed resources.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, aws_utils: pytest.fixture):
|
|
||||||
self._aws_util = aws_utils
|
|
||||||
|
|
||||||
def start_kinesis_data_analytics_application(self, application_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Start the Kenisis Data Analytics application for real-time analytics.
|
|
||||||
:param application_name: Name of the Kenisis Data Analytics application.
|
|
||||||
"""
|
|
||||||
input_id = self.get_kinesis_analytics_application_input_id(application_name)
|
|
||||||
assert input_id, 'invalid Kinesis Data Analytics application input.'
|
|
||||||
|
|
||||||
client = self._aws_util.client('kinesisanalytics')
|
|
||||||
try:
|
|
||||||
client.start_application(
|
|
||||||
ApplicationName=application_name,
|
|
||||||
InputConfigurations=[
|
|
||||||
{
|
|
||||||
'Id': input_id,
|
|
||||||
'InputStartingPositionConfiguration': {
|
|
||||||
'InputStartingPosition': 'NOW'
|
|
||||||
}
|
|
||||||
},
|
|
||||||
]
|
|
||||||
)
|
|
||||||
except client.exceptions.ResourceInUseException:
|
|
||||||
# The application has been started.
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
KinesisAnalyticsApplicationUpdatedWaiter(client, 'RUNNING').wait(application_name=application_name)
|
|
||||||
except WaiterError as e:
|
|
||||||
assert False, f'Failed to start the Kinesis Data Analytics application: {str(e)}.'
|
|
||||||
|
|
||||||
def get_kinesis_analytics_application_input_id(self, application_name: str) -> str:
|
|
||||||
"""
|
|
||||||
Get the input ID for the Kenisis Data Analytics application.
|
|
||||||
:param application_name: Name of the Kenisis Data Analytics application.
|
|
||||||
:return: Input ID for the Kenisis Data Analytics application.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('kinesisanalytics')
|
|
||||||
response = client.describe_application(
|
|
||||||
ApplicationName=application_name
|
|
||||||
)
|
|
||||||
if not response:
|
|
||||||
return ''
|
|
||||||
input_descriptions = response.get('ApplicationDetail', {}).get('InputDescriptions', [])
|
|
||||||
if len(input_descriptions) != 1:
|
|
||||||
return ''
|
|
||||||
|
|
||||||
return input_descriptions[0].get('InputId', '')
|
|
||||||
|
|
||||||
def stop_kinesis_data_analytics_application(self, application_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Stop the Kenisis Data Analytics application.
|
|
||||||
:param application_name: Name of the Kenisis Data Analytics application.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('kinesisanalytics')
|
|
||||||
client.stop_application(
|
|
||||||
ApplicationName=application_name
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
KinesisAnalyticsApplicationUpdatedWaiter(client, 'READY').wait(application_name=application_name)
|
|
||||||
except WaiterError as e:
|
|
||||||
assert False, f'Failed to stop the Kinesis Data Analytics application: {str(e)}.'
|
|
||||||
|
|
||||||
def verify_cloud_watch_delivery(self, namespace: str, metrics_name: str,
|
|
||||||
dimensions: typing.List[dict], start_time: datetime) -> None:
|
|
||||||
"""
|
|
||||||
Verify that the expected metrics is delivered to CloudWatch.
|
|
||||||
:param namespace: Namespace of the metrics.
|
|
||||||
:param metrics_name: Name of the metrics.
|
|
||||||
:param dimensions: Dimensions of the metrics.
|
|
||||||
:param start_time: Start time for generating the metrics.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('cloudwatch')
|
|
||||||
|
|
||||||
try:
|
|
||||||
CloudWatchMetricsDeliveredWaiter(client).wait(
|
|
||||||
namespace=namespace,
|
|
||||||
metrics_name=metrics_name,
|
|
||||||
dimensions=dimensions,
|
|
||||||
start_time=start_time
|
|
||||||
)
|
|
||||||
except WaiterError as e:
|
|
||||||
assert False, f'Failed to deliver metrics to CloudWatch: {str(e)}.'
|
|
||||||
|
|
||||||
def verify_s3_delivery(self, analytics_bucket_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Verify that metrics are delivered to S3 for batch analytics successfully.
|
|
||||||
:param analytics_bucket_name: Name of the deployed S3 bucket.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('s3')
|
|
||||||
bucket_name = analytics_bucket_name
|
|
||||||
|
|
||||||
try:
|
|
||||||
DataLakeMetricsDeliveredWaiter(client).wait(bucket_name=bucket_name, prefix=EXPECTED_S3_DIRECTORY)
|
|
||||||
except WaiterError as e:
|
|
||||||
assert False, f'Failed to find the S3 directory for storing metrics data: {str(e)}.'
|
|
||||||
|
|
||||||
# Check whether the data is converted to the expected data format.
|
|
||||||
response = client.list_objects_v2(
|
|
||||||
Bucket=bucket_name,
|
|
||||||
Prefix=EXPECTED_S3_DIRECTORY
|
|
||||||
)
|
|
||||||
assert response.get('KeyCount', 0) != 0, f'Failed to deliver metrics to the S3 bucket {bucket_name}.'
|
|
||||||
|
|
||||||
s3_objects = response.get('Contents', [])
|
|
||||||
for s3_object in s3_objects:
|
|
||||||
key = s3_object.get('Key', '')
|
|
||||||
assert pathlib.Path(key).suffix == EXPECTED_S3_OBJECT_EXTENSION, \
|
|
||||||
f'Invalid data format is found in the S3 bucket {bucket_name}'
|
|
||||||
|
|
||||||
def run_glue_crawler(self, crawler_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Run the Glue crawler and wait for it to finish.
|
|
||||||
:param crawler_name: Name of the Glue crawler
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('glue')
|
|
||||||
try:
|
|
||||||
client.start_crawler(
|
|
||||||
Name=crawler_name
|
|
||||||
)
|
|
||||||
except client.exceptions.CrawlerRunningException:
|
|
||||||
# The crawler has already been started.
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
GlueCrawlerReadyWaiter(client).wait(crawler_name=crawler_name)
|
|
||||||
except WaiterError as e:
|
|
||||||
assert False, f'Failed to run the Glue crawler: {str(e)}.'
|
|
||||||
|
|
||||||
def run_named_queries(self, work_group: str) -> None:
|
|
||||||
"""
|
|
||||||
Run the named queries under the specific Athena work group.
|
|
||||||
:param work_group: Name of the Athena work group.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('athena')
|
|
||||||
# List all the named queries.
|
|
||||||
response = client.list_named_queries(
|
|
||||||
WorkGroup=work_group
|
|
||||||
)
|
|
||||||
named_query_ids = response.get('NamedQueryIds', [])
|
|
||||||
|
|
||||||
# Run each of the queries.
|
|
||||||
for named_query_id in named_query_ids:
|
|
||||||
get_named_query_response = client.get_named_query(
|
|
||||||
NamedQueryId=named_query_id
|
|
||||||
)
|
|
||||||
named_query = get_named_query_response.get('NamedQuery', {})
|
|
||||||
|
|
||||||
start_query_execution_response = client.start_query_execution(
|
|
||||||
QueryString=named_query.get('QueryString', ''),
|
|
||||||
QueryExecutionContext={
|
|
||||||
'Database': named_query.get('Database', '')
|
|
||||||
},
|
|
||||||
WorkGroup=work_group
|
|
||||||
)
|
|
||||||
|
|
||||||
# Wait for the query to finish.
|
|
||||||
state = 'RUNNING'
|
|
||||||
while state == 'QUEUED' or state == 'RUNNING':
|
|
||||||
get_query_execution_response = client.get_query_execution(
|
|
||||||
QueryExecutionId=start_query_execution_response.get('QueryExecutionId', '')
|
|
||||||
)
|
|
||||||
|
|
||||||
state = get_query_execution_response.get('QueryExecution', {}).get('Status', {}).get('State', '')
|
|
||||||
|
|
||||||
assert state == 'SUCCEEDED', f'Failed to run the named query {named_query.get("Name", {})}'
|
|
||||||
|
|
||||||
def empty_bucket(self, bucket_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Empty the S3 bucket following:
|
|
||||||
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/migrations3.html
|
|
||||||
|
|
||||||
:param bucket_name: Name of the S3 bucket.
|
|
||||||
"""
|
|
||||||
s3 = self._aws_util.resource('s3')
|
|
||||||
bucket = s3.Bucket(bucket_name)
|
|
||||||
|
|
||||||
for key in bucket.objects.all():
|
|
||||||
key.delete()
|
|
||||||
|
|
||||||
def delete_table(self, database_name: str, table_name: str) -> None:
|
|
||||||
"""
|
|
||||||
Delete an existing Glue table.
|
|
||||||
|
|
||||||
:param database_name: Name of the Glue database.
|
|
||||||
:param table_name: Name of the table to delete.
|
|
||||||
"""
|
|
||||||
client = self._aws_util.client('glue')
|
|
||||||
client.delete_table(
|
|
||||||
DatabaseName=database_name,
|
|
||||||
Name=table_name
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope='function')
def aws_metrics_utils(
        request: pytest.fixture,
        aws_utils: pytest.fixture):
    """
    Fixture providing the AWS metrics util functions.

    :param request: _pytest.fixtures.SubRequest class that handles getting
        a pytest fixture from a pytest function/fixture.
    :param aws_utils: aws_utils fixture.
    :return: An AWSMetricsUtils instance wrapping the aws_utils fixture.
    """
    return AWSMetricsUtils(aws_utils)
|
|
||||||
@ -1,139 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import botocore.client
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from datetime import timedelta
|
|
||||||
from AWS.common.custom_waiter import CustomWaiter, WaitState
|
|
||||||
|
|
||||||
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
|
||||||
|
|
||||||
|
|
||||||
class KinesisAnalyticsApplicationUpdatedWaiter(CustomWaiter):
    """
    Custom waiter that blocks until a Kinesis analytics application reaches a
    caller-specified status, polling via DescribeApplication.
    """
    def __init__(self, client: botocore.client, status: str):
        """
        Initialize the waiter.

        :param client: Boto3 client to use.
        :param status: Status that signals success.
        """
        super().__init__(
            'KinesisAnalyticsApplicationUpdated',
            'DescribeApplication',
            'ApplicationDetail.ApplicationStatus',
            {status: WaitState.SUCCESS},
            client)

    def wait(self, application_name: str):
        """
        Block until the application reports the expected status.

        :param application_name: Name of the Kinesis analytics application.
        """
        self._wait(ApplicationName=application_name)
|
|
||||||
|
|
||||||
|
|
||||||
class GlueCrawlerReadyWaiter(CustomWaiter):
    """
    Custom waiter that blocks until a Glue crawler finishes its processing.

    Returns as soon as the crawler enters the "STOPPING" state, to avoid
    spending automation-test time on the crawler's shutdown process.
    """
    def __init__(self, client: botocore.client):
        """
        Initialize the waiter.

        :param client: Boto3 client to use.
        """
        super().__init__(
            'GlueCrawlerReady',
            'GetCrawler',
            'Crawler.State',
            {'STOPPING': WaitState.SUCCESS},
            client)

    def wait(self, crawler_name):
        """
        Block until the crawler reaches the expected state.

        :param crawler_name: Name of the Glue crawler.
        """
        self._wait(Name=crawler_name)
|
|
||||||
|
|
||||||
|
|
||||||
class DataLakeMetricsDeliveredWaiter(CustomWaiter):
    """
    Custom waiter that blocks until at least one object exists under the given
    prefix in an S3 bucket (i.e. the expected directory has been created).
    """
    def __init__(self, client: botocore.client):
        """
        Initialize the waiter.

        :param client: Boto3 client to use.
        """
        super().__init__(
            'DataLakeMetricsDelivered',
            'ListObjectsV2',
            'KeyCount > `0`',
            {True: WaitState.SUCCESS},
            client)

    def wait(self, bucket_name, prefix):
        """
        Block until the expected directory has been created.

        :param bucket_name: Name of the S3 bucket.
        :param prefix: Name of the expected directory prefix.
        """
        self._wait(Bucket=bucket_name, Prefix=prefix)
|
|
||||||
|
|
||||||
|
|
||||||
class CloudWatchMetricsDeliveredWaiter(CustomWaiter):
    """
    Custom waiter that blocks until at least one datapoint for the expected
    metrics shows up in CloudWatch (GetMetricStatistics).
    """
    def __init__(self, client: botocore.client):
        """
        Initialize the waiter.

        :param client: Boto3 client to use.
        """
        super().__init__(
            'CloudWatchMetricsDelivered',
            'GetMetricStatistics',
            'length(Datapoints) > `0`',
            {True: WaitState.SUCCESS},
            client)

    def wait(self, namespace, metrics_name, dimensions, start_time):
        """
        Block until the expected metrics have been delivered.

        :param namespace: Namespace of the metrics.
        :param metrics_name: Name of the metrics.
        :param dimensions: Dimensions of the metrics.
        :param start_time: Start time for generating the metrics.
        """
        # The query window runs from start_time for the duration of the waiter's
        # timeout; SampleCount at 60s granularity is enough to detect delivery.
        self._wait(
            Namespace=namespace,
            MetricName=metrics_name,
            Dimensions=dimensions,
            StartTime=start_time,
            EndTime=start_time + timedelta(0, self.timeout),
            Period=60,
            Statistics=['SampleCount'],
            Unit='Count'
        )
|
|
||||||
@ -1,7 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
@ -1,170 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
import ly_test_tools.log.log_monitor
|
|
||||||
|
|
||||||
from AWS.common import constants
|
|
||||||
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
|
||||||
|
|
||||||
# fixture imports
|
|
||||||
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
|
||||||
|
|
||||||
AWS_CLIENT_AUTH_FEATURE_NAME = 'AWSClientAuth'
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('aws_utils')
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('feature_name', [AWS_CLIENT_AUTH_FEATURE_NAME])
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CLIENT_AUTH_FEATURE_NAME}-Stack-{constants.AWS_REGION}']])
class TestAWSClientAuthWindows(object):
    """
    Test class to verify AWS Client Auth gem features on Windows.
    """

    @pytest.mark.parametrize('level', ['AWS/ClientAuth'])
    def test_anonymous_credentials(self,
                                   level: str,
                                   launcher: pytest.fixture,
                                   resource_mappings: pytest.fixture,
                                   workspace: pytest.fixture,
                                   asset_processor: pytest.fixture
                                   ):
        """
        Test to verify AWS Cognito Identity pool anonymous authorization.

        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Getting credentials when no credentials are configured.
        Verification: Log monitor looks for the success credentials log line.
        """
        asset_processor.start()
        asset_processor.wait_for_idle()

        file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
        log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)

        launcher.args = ['+LoadLevel', level]
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            result = log_monitor.monitor_log_for_lines(
                expected_lines=['(Script) - Success anonymous credentials'],
                unexpected_lines=['(Script) - Fail anonymous credentials'],
                halt_on_unexpected=True,
            )
            # Fix: the assertion message is only shown on failure, so it must
            # describe the failure (the original message described success).
            assert result, 'Failed to fetch anonymous credentials.'

    @pytest.mark.parametrize('level', ['AWS/ClientAuth'])
    def test_anonymous_credentials_no_global_accountid(self,
                                                       level: str,
                                                       launcher: pytest.fixture,
                                                       resource_mappings: pytest.fixture,
                                                       workspace: pytest.fixture,
                                                       asset_processor: pytest.fixture
                                                       ):
        """
        Test to verify AWS Cognito Identity pool anonymous authorization when
        the resource mapping file has no top-level account ID.

        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Getting credentials when no credentials are configured.
        Verification: Log monitor looks for the success credentials log line.
        """
        # Remove top-level account ID from resource mappings
        resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])

        asset_processor.start()
        asset_processor.wait_for_idle()

        file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
        log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)

        launcher.args = ['+LoadLevel', level]
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            result = log_monitor.monitor_log_for_lines(
                expected_lines=['(Script) - Success anonymous credentials'],
                unexpected_lines=['(Script) - Fail anonymous credentials'],
                halt_on_unexpected=True,
            )
            # Failure-describing assertion message (see note in
            # test_anonymous_credentials).
            assert result, 'Failed to fetch anonymous credentials.'

    def test_password_signin_credentials(self,
                                         launcher: pytest.fixture,
                                         resource_mappings: pytest.fixture,
                                         workspace: pytest.fixture,
                                         asset_processor: pytest.fixture,
                                         aws_utils: pytest.fixture
                                         ):
        """
        Test to verify AWS Cognito IDP Password sign in and Cognito Identity
        pool authenticated authorization.

        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Sign up new test user, admin confirm the user, sign in and get aws credentials.
        Verification: Log monitor looks for success credentials log.
        """
        asset_processor.start()
        asset_processor.wait_for_idle()

        file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
        log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)

        cognito_idp = aws_utils.client('cognito-idp')
        user_pool_id = resource_mappings.get_resource_name_id(f'{AWS_CLIENT_AUTH_FEATURE_NAME}.CognitoUserPoolId')
        logger.info(f'UserPoolId:{user_pool_id}')

        # Remove the user if already exists
        try:
            cognito_idp.admin_delete_user(
                UserPoolId=user_pool_id,
                Username='test1'
            )
        except cognito_idp.exceptions.UserNotFoundException:
            pass

        launcher.args = ['+LoadLevel', 'AWS/ClientAuthPasswordSignUp']
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            result = log_monitor.monitor_log_for_lines(
                expected_lines=['(Script) - Signup Success'],
                unexpected_lines=['(Script) - Signup Fail'],
                halt_on_unexpected=True,
            )
            # Failure-describing assertion message (original said 'Sign Up Success.').
            assert result, 'Failed to sign up the test user.'

            launcher.stop()

        cognito_idp.admin_confirm_sign_up(
            UserPoolId=user_pool_id,
            Username='test1'
        )

        launcher.args = ['+LoadLevel', 'AWS/ClientAuthPasswordSignIn']
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            result = log_monitor.monitor_log_for_lines(
                expected_lines=['(Script) - SignIn Success', '(Script) - Success credentials'],
                unexpected_lines=['(Script) - SignIn Fail', '(Script) - Fail credentials'],
                halt_on_unexpected=True,
            )
            # Failure-describing assertion message.
            assert result, 'Failed to sign in or fetch authenticated AWS temp credentials.'
|
|
||||||
@ -1,192 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import shutil
|
|
||||||
import typing
|
|
||||||
from botocore.exceptions import ClientError
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
import ly_test_tools
|
|
||||||
import ly_test_tools.log.log_monitor
|
|
||||||
import ly_test_tools.environment.process_utils as process_utils
|
|
||||||
import ly_test_tools.o3de.asset_processor_utils as asset_processor_utils
|
|
||||||
|
|
||||||
from AWS.common import constants
|
|
||||||
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
|
||||||
|
|
||||||
# fixture imports
|
|
||||||
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
|
||||||
|
|
||||||
AWS_CORE_FEATURE_NAME = 'AWSCore'
|
|
||||||
|
|
||||||
process_utils.kill_processes_named("o3de", ignore_extensions=True) # Kill ProjectManager windows
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def setup(launcher: pytest.fixture, asset_processor: pytest.fixture) -> typing.Tuple[pytest.fixture, str]:
    """
    Prepare the test run: create the S3 download directory, start the asset
    processor, and attach a log monitor to the game log.

    :param launcher: Client launcher for running the test level.
    :param asset_processor: asset_processor fixture.
    :return: Tuple of (log monitor object, S3 download directory path).
    """
    # Temporary directory for downloading the test file from S3.
    user_dir = os.path.join(launcher.workspace.paths.project(), 'user')
    s3_download_dir = os.path.join(user_dir, 's3_download')
    os.makedirs(s3_download_dir, exist_ok=True)

    asset_processor_utils.kill_asset_processor()
    asset_processor.start()
    asset_processor.wait_for_idle()

    game_log_path = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
    log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=game_log_path)

    return log_monitor, s3_download_dir
|
|
||||||
|
|
||||||
|
|
||||||
def write_test_data_to_dynamodb_table(resource_mappings: pytest.fixture, aws_utils: pytest.fixture) -> None:
    """
    Write test data to the DynamoDB table created by the CDK application.

    :param resource_mappings: resource_mappings fixture.
    :param aws_utils: aws_utils fixture.
    :raises ClientError: re-raised (after logging) when the put_item call fails.
    """
    table_name = resource_mappings.get_resource_name_id(f'{AWS_CORE_FEATURE_NAME}.ExampleDynamoTableOutput')
    dynamodb_client = aws_utils.client('dynamodb')
    try:
        dynamodb_client.put_item(
            TableName=table_name,
            Item={'id': {'S': 'Item1'}}
        )
    except ClientError:
        logger.exception(f'Failed to load data into table {table_name}')
        raise
    logger.info(f'Loaded data into table {table_name}')
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.SUITE_awsi
@pytest.mark.usefixtures('automatic_process_killer')
@pytest.mark.usefixtures('asset_processor')
@pytest.mark.parametrize('feature_name', [AWS_CORE_FEATURE_NAME])
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
@pytest.mark.usefixtures('workspace')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['AWS/Core'])
@pytest.mark.usefixtures('resource_mappings')
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}',
                                     f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}-Example-{constants.AWS_REGION}']])
@pytest.mark.usefixtures('aws_credentials')
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
class TestAWSCoreAWSResourceInteraction(object):
    """
    Test class to verify the scripting behavior for the AWSCore gem.
    """

    @pytest.mark.parametrize('expected_lines', [
        ['(Script) - [S3] Head object request is done',
         '(Script) - [S3] Head object success: Object example.txt is found.',
         '(Script) - [S3] Get object success: Object example.txt is downloaded.',
         '(Script) - [Lambda] Completed Invoke',
         '(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
         '(Script) - [DynamoDB] Results finished']])
    @pytest.mark.parametrize('unexpected_lines', [
        ['(Script) - [S3] Head object error: No response body.',
         '(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
         '(Script) - Request validation failed, output file miss full path.',
         '(Script) - ']])
    def test_scripting_behavior(self,
                                level: str,
                                launcher: pytest.fixture,
                                workspace: pytest.fixture,
                                asset_processor: pytest.fixture,
                                resource_mappings: pytest.fixture,
                                aws_utils: pytest.fixture,
                                expected_lines: typing.List[str],
                                unexpected_lines: typing.List[str]):
        """
        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Interact with AWS S3, DynamoDB and Lambda services.
        Verification: Script canvas nodes can communicate with AWS services successfully.
        """
        log_monitor, s3_download_dir = setup(launcher, asset_processor)
        write_test_data_to_dynamodb_table(resource_mappings, aws_utils)

        launcher.args = ['+LoadLevel', level]
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            monitor_result = log_monitor.monitor_log_for_lines(
                expected_lines=expected_lines,
                unexpected_lines=unexpected_lines,
                halt_on_unexpected=True
            )

        assert monitor_result, "Expected lines weren't found."

        downloaded_file = os.path.join(s3_download_dir, 'output.txt')
        assert os.path.exists(downloaded_file), \
            'The expected file wasn\'t successfully downloaded.'
        # clean up the file directories.
        shutil.rmtree(s3_download_dir)

    @pytest.mark.parametrize('expected_lines', [
        ['(Script) - [S3] Head object request is done',
         '(Script) - [S3] Head object success: Object example.txt is found.',
         '(Script) - [S3] Get object success: Object example.txt is downloaded.',
         '(Script) - [Lambda] Completed Invoke',
         '(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
         '(Script) - [DynamoDB] Results finished']])
    @pytest.mark.parametrize('unexpected_lines', [
        ['(Script) - [S3] Head object error: No response body.',
         '(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
         '(Script) - Request validation failed, output file miss full path.',
         '(Script) - ']])
    def test_scripting_behavior_no_global_accountid(self,
                                                    level: str,
                                                    launcher: pytest.fixture,
                                                    workspace: pytest.fixture,
                                                    asset_processor: pytest.fixture,
                                                    resource_mappings: pytest.fixture,
                                                    aws_utils: pytest.fixture,
                                                    expected_lines: typing.List[str],
                                                    unexpected_lines: typing.List[str]):
        """
        Same scenario as test_scripting_behavior, but with the top-level account
        ID removed from the resource mapping file first.

        Setup: Updates resource mapping file using existing CloudFormation stacks.
        Tests: Interact with AWS S3, DynamoDB and Lambda services.
        Verification: Script canvas nodes can communicate with AWS services successfully.
        """
        resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
        log_monitor, s3_download_dir = setup(launcher, asset_processor)
        write_test_data_to_dynamodb_table(resource_mappings, aws_utils)

        launcher.args = ['+LoadLevel', level]
        launcher.args.extend(['-rhi=null'])

        with launcher.start(launch_ap=False):
            monitor_result = log_monitor.monitor_log_for_lines(
                expected_lines=expected_lines,
                unexpected_lines=unexpected_lines,
                halt_on_unexpected=True
            )

        assert monitor_result, "Expected lines weren't found."

        downloaded_file = os.path.join(s3_download_dir, 'output.txt')
        assert os.path.exists(downloaded_file), \
            'The expected file wasn\'t successfully downloaded.'
        # clean up the file directories.
        shutil.rmtree(s3_download_dir)
|
|
||||||
@ -0,0 +1,289 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pytest
|
||||||
|
import typing
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
import ly_test_tools.log.log_monitor
|
||||||
|
|
||||||
|
from AWS.common import constants
|
||||||
|
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
||||||
|
from .aws_metrics_custom_thread import AWSMetricsThread
|
||||||
|
|
||||||
|
# fixture imports
|
||||||
|
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
||||||
|
from .aws_metrics_utils import aws_metrics_utils
|
||||||
|
|
||||||
|
AWS_METRICS_FEATURE_NAME = 'AWSMetrics'
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def setup(launcher: pytest.fixture,
          asset_processor: pytest.fixture) -> pytest.fixture:
    """
    Start the asset processor and attach a log monitor to the game log.

    :param launcher: Client launcher for running the test level.
    :param asset_processor: asset_processor fixture.
    :return: Log monitor object for the launcher's game log.
    """
    asset_processor.start()
    asset_processor.wait_for_idle()

    game_log_path = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)

    # Initialize the log monitor.
    return ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=game_log_path)
|
||||||
|
|
||||||
|
|
||||||
|
def monitor_metrics_submission(log_monitor: pytest.fixture) -> None:
    """
    Monitor the game log for the metrics-submission messages and notifications,
    failing the test when any failure message appears.

    :param log_monitor: Log monitor to check the log messages.
    """
    expected_lines = [
        '(Script) - Submitted metrics without buffer.',
        '(Script) - Submitted metrics with buffer.',
        '(Script) - Flushed the buffered metrics.',
        '(Script) - Metrics is sent successfully.'
    ]
    unexpected_lines = [
        '(Script) - Failed to submit metrics without buffer.',
        '(Script) - Failed to submit metrics with buffer.',
        '(Script) - Failed to send metrics.'
    ]

    monitor_result = log_monitor.monitor_log_for_lines(
        expected_lines=expected_lines,
        unexpected_lines=unexpected_lines,
        halt_on_unexpected=True)

    # Fail with full context when expected lines are missing or an unexpected
    # line was seen.
    assert monitor_result, (
        f'Log monitoring failed. Used expected_lines values: {expected_lines} & '
        f'unexpected_lines values: {unexpected_lines}')
|
||||||
|
|
||||||
|
|
||||||
|
def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
    """
    Verify that the metrics events are delivered to the S3 bucket and can be
    queried via Glue/Athena.

    :param aws_metrics_utils: aws_metrics_utils fixture.
    :param resource_mappings: resource_mappings fixture.
    """
    analytics_bucket = resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName')
    aws_metrics_utils.verify_s3_delivery(analytics_bucket)
    logger.info('Metrics are sent to S3.')

    aws_metrics_utils.run_glue_crawler(
        resource_mappings.get_resource_name_id('AWSMetrics.EventsCrawlerName'))

    # Remove the events_json table if it exists so that the sample query can
    # create a table with the same name.
    aws_metrics_utils.delete_table(resource_mappings.get_resource_name_id('AWSMetrics.EventDatabaseName'), 'events_json')
    aws_metrics_utils.run_named_queries(resource_mappings.get_resource_name_id('AWSMetrics.AthenaWorkGroupName'))
    logger.info('Query metrics from S3 successfully.')
|
||||||
|
|
||||||
|
|
||||||
|
def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture, start_time: datetime) -> None:
|
||||||
|
"""
|
||||||
|
Verify that operational health metrics are delivered to CloudWatch.
|
||||||
|
:param aws_metrics_utils: aws_metrics_utils fixture.
|
||||||
|
:param resource_mappings: resource_mappings fixture.
|
||||||
|
:param start_time: Time when the game launcher starts.
|
||||||
|
"""
|
||||||
|
aws_metrics_utils.verify_cloud_watch_delivery(
|
||||||
|
'AWS/Lambda',
|
||||||
|
'Invocations',
|
||||||
|
[{'Name': 'FunctionName',
|
||||||
|
'Value': resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsProcessingLambdaName')}],
|
||||||
|
start_time)
|
||||||
|
logger.info('AnalyticsProcessingLambda metrics are sent to CloudWatch.')
|
||||||
|
|
||||||
|
aws_metrics_utils.verify_cloud_watch_delivery(
|
||||||
|
'AWS/Lambda',
|
||||||
|
'Invocations',
|
||||||
|
[{'Name': 'FunctionName',
|
||||||
|
'Value': resource_mappings.get_resource_name_id('AWSMetrics.EventProcessingLambdaName')}],
|
||||||
|
start_time)
|
||||||
|
logger.info('EventsProcessingLambda metrics are sent to CloudWatch.')
|
||||||
|
|
||||||
|
|
||||||
|
def update_kinesis_analytics_application_status(aws_metrics_utils: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture, start_application: bool) -> None:
|
||||||
|
"""
|
||||||
|
Update the Kinesis analytics application to start or stop it.
|
||||||
|
:param aws_metrics_utils: aws_metrics_utils fixture.
|
||||||
|
:param resource_mappings: resource_mappings fixture.
|
||||||
|
:param start_application: whether to start or stop the application.
|
||||||
|
"""
|
||||||
|
if start_application:
|
||||||
|
aws_metrics_utils.start_kinesis_data_analytics_application(
|
||||||
|
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
|
||||||
|
else:
|
||||||
|
aws_metrics_utils.stop_kinesis_data_analytics_application(
|
||||||
|
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsApplicationName'))
|
||||||
|
|
||||||
|
@pytest.mark.SUITE_awsi
|
||||||
|
@pytest.mark.usefixtures('automatic_process_killer')
|
||||||
|
@pytest.mark.usefixtures('aws_credentials')
|
||||||
|
@pytest.mark.usefixtures('resource_mappings')
|
||||||
|
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
|
||||||
|
@pytest.mark.parametrize('feature_name', [AWS_METRICS_FEATURE_NAME])
|
||||||
|
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
|
||||||
|
@pytest.mark.parametrize('project', ['AutomatedTesting'])
|
||||||
|
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
|
||||||
|
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
|
||||||
|
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
|
||||||
|
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_METRICS_FEATURE_NAME}-{constants.AWS_REGION}']])
|
||||||
|
class TestAWSMetricsWindows(object):
|
||||||
|
"""
|
||||||
|
Test class to verify the real-time and batch analytics for metrics.
|
||||||
|
"""
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
|
||||||
|
def test_realtime_and_batch_analytics(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
aws_metrics_utils: pytest.fixture):
|
||||||
|
"""
|
||||||
|
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
|
||||||
|
"""
|
||||||
|
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
|
||||||
|
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, True))
|
||||||
|
kinesis_analytics_application_thread.start()
|
||||||
|
|
||||||
|
log_monitor = setup(launcher, asset_processor)
|
||||||
|
|
||||||
|
# Kinesis analytics application needs to be in the running state before we start the game launcher.
|
||||||
|
kinesis_analytics_application_thread.join()
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
start_time = datetime.utcnow()
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
monitor_metrics_submission(log_monitor)
|
||||||
|
|
||||||
|
# Verify that real-time analytics metrics are delivered to CloudWatch.
|
||||||
|
aws_metrics_utils.verify_cloud_watch_delivery(
|
||||||
|
AWS_METRICS_FEATURE_NAME,
|
||||||
|
'TotalLogins',
|
||||||
|
[],
|
||||||
|
start_time)
|
||||||
|
logger.info('Real-time metrics are sent to CloudWatch.')
|
||||||
|
|
||||||
|
# Run time-consuming operations on separate threads to avoid blocking the test.
|
||||||
|
operational_threads = list()
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=query_metrics_from_s3,
|
||||||
|
args=(aws_metrics_utils, resource_mappings)))
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=verify_operational_metrics,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, start_time)))
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, False)))
|
||||||
|
for thread in operational_threads:
|
||||||
|
thread.start()
|
||||||
|
for thread in operational_threads:
|
||||||
|
thread.join()
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
|
||||||
|
def test_realtime_and_batch_analytics_no_global_accountid(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
aws_metrics_utils: pytest.fixture):
|
||||||
|
"""
|
||||||
|
Verify that the metrics events are sent to CloudWatch and S3 for analytics.
|
||||||
|
"""
|
||||||
|
# Remove top-level account ID from resource mappings
|
||||||
|
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
|
||||||
|
# Start Kinesis analytics application on a separate thread to avoid blocking the test.
|
||||||
|
kinesis_analytics_application_thread = AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, True))
|
||||||
|
kinesis_analytics_application_thread.start()
|
||||||
|
|
||||||
|
log_monitor = setup(launcher, asset_processor)
|
||||||
|
|
||||||
|
# Kinesis analytics application needs to be in the running state before we start the game launcher.
|
||||||
|
kinesis_analytics_application_thread.join()
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
start_time = datetime.utcnow()
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
monitor_metrics_submission(log_monitor)
|
||||||
|
|
||||||
|
# Verify that real-time analytics metrics are delivered to CloudWatch.
|
||||||
|
aws_metrics_utils.verify_cloud_watch_delivery(
|
||||||
|
AWS_METRICS_FEATURE_NAME,
|
||||||
|
'TotalLogins',
|
||||||
|
[],
|
||||||
|
start_time)
|
||||||
|
logger.info('Real-time metrics are sent to CloudWatch.')
|
||||||
|
|
||||||
|
# Run time-consuming operations on separate threads to avoid blocking the test.
|
||||||
|
operational_threads = list()
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=query_metrics_from_s3,
|
||||||
|
args=(aws_metrics_utils, resource_mappings)))
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=verify_operational_metrics,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, start_time)))
|
||||||
|
operational_threads.append(
|
||||||
|
AWSMetricsThread(target=update_kinesis_analytics_application_status,
|
||||||
|
args=(aws_metrics_utils, resource_mappings, False)))
|
||||||
|
for thread in operational_threads:
|
||||||
|
thread.start()
|
||||||
|
for thread in operational_threads:
|
||||||
|
thread.join()
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/metrics/metrics.spawnable'])
|
||||||
|
def test_unauthorized_user_request_rejected(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
workspace: pytest.fixture):
|
||||||
|
"""
|
||||||
|
Verify that unauthorized users cannot send metrics events to the AWS backed backend.
|
||||||
|
"""
|
||||||
|
log_monitor = setup(launcher, asset_processor)
|
||||||
|
|
||||||
|
# Set invalid AWS credentials.
|
||||||
|
launcher.args = ['+LoadLevel', level, '+cl_awsAccessKey', 'AKIAIOSFODNN7EXAMPLE',
|
||||||
|
'+cl_awsSecretKey', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY']
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=['(Script) - Failed to send metrics.'],
|
||||||
|
unexpected_lines=['(Script) - Metrics is sent successfully.'],
|
||||||
|
halt_on_unexpected=True)
|
||||||
|
assert result, 'Metrics events are sent successfully by unauthorized user'
|
||||||
|
logger.info('Unauthorized user is rejected to send metrics.')
|
||||||
|
|
||||||
|
def test_clean_up_s3_bucket(self,
|
||||||
|
aws_utils: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
aws_metrics_utils: pytest.fixture):
|
||||||
|
"""
|
||||||
|
Clear the analytics bucket objects so that the S3 bucket can be destroyed during tear down.
|
||||||
|
"""
|
||||||
|
aws_metrics_utils.empty_bucket(
|
||||||
|
resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName'))
|
||||||
@ -0,0 +1,29 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
from threading import Thread
|
||||||
|
|
||||||
|
|
||||||
|
class AWSMetricsThread(Thread):
|
||||||
|
"""
|
||||||
|
Custom thread for raising assertion errors on the main thread.
|
||||||
|
"""
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
super().__init__(**kwargs)
|
||||||
|
self._error = None
|
||||||
|
|
||||||
|
def run(self) -> None:
|
||||||
|
try:
|
||||||
|
super().run()
|
||||||
|
except AssertionError as e:
|
||||||
|
self._error = e
|
||||||
|
|
||||||
|
def join(self, **kwargs) -> None:
|
||||||
|
super().join(**kwargs)
|
||||||
|
|
||||||
|
if self._error:
|
||||||
|
raise AssertionError(self._error)
|
||||||
@ -0,0 +1,239 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import pathlib
|
||||||
|
import pytest
|
||||||
|
import typing
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
|
from botocore.exceptions import WaiterError
|
||||||
|
|
||||||
|
from .aws_metrics_waiters import KinesisAnalyticsApplicationUpdatedWaiter, \
|
||||||
|
CloudWatchMetricsDeliveredWaiter, DataLakeMetricsDeliveredWaiter, GlueCrawlerReadyWaiter
|
||||||
|
|
||||||
|
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||||
|
|
||||||
|
# Expected directory and file extension for the S3 objects.
|
||||||
|
EXPECTED_S3_DIRECTORY = 'firehose_events/'
|
||||||
|
EXPECTED_S3_OBJECT_EXTENSION = '.parquet'
|
||||||
|
|
||||||
|
|
||||||
|
class AWSMetricsUtils:
|
||||||
|
"""
|
||||||
|
Provide utils functions for the AWSMetrics gem to interact with the deployed resources.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, aws_utils: pytest.fixture):
|
||||||
|
self._aws_util = aws_utils
|
||||||
|
|
||||||
|
def start_kinesis_data_analytics_application(self, application_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Start the Kenisis Data Analytics application for real-time analytics.
|
||||||
|
:param application_name: Name of the Kenisis Data Analytics application.
|
||||||
|
"""
|
||||||
|
input_id = self.get_kinesis_analytics_application_input_id(application_name)
|
||||||
|
assert input_id, 'invalid Kinesis Data Analytics application input.'
|
||||||
|
|
||||||
|
client = self._aws_util.client('kinesisanalytics')
|
||||||
|
try:
|
||||||
|
client.start_application(
|
||||||
|
ApplicationName=application_name,
|
||||||
|
InputConfigurations=[
|
||||||
|
{
|
||||||
|
'Id': input_id,
|
||||||
|
'InputStartingPositionConfiguration': {
|
||||||
|
'InputStartingPosition': 'NOW'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
)
|
||||||
|
except client.exceptions.ResourceInUseException:
|
||||||
|
# The application has been started.
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
KinesisAnalyticsApplicationUpdatedWaiter(client, 'RUNNING').wait(application_name=application_name)
|
||||||
|
except WaiterError as e:
|
||||||
|
assert False, f'Failed to start the Kinesis Data Analytics application: {str(e)}.'
|
||||||
|
|
||||||
|
def get_kinesis_analytics_application_input_id(self, application_name: str) -> str:
|
||||||
|
"""
|
||||||
|
Get the input ID for the Kenisis Data Analytics application.
|
||||||
|
:param application_name: Name of the Kenisis Data Analytics application.
|
||||||
|
:return: Input ID for the Kenisis Data Analytics application.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('kinesisanalytics')
|
||||||
|
response = client.describe_application(
|
||||||
|
ApplicationName=application_name
|
||||||
|
)
|
||||||
|
if not response:
|
||||||
|
return ''
|
||||||
|
input_descriptions = response.get('ApplicationDetail', {}).get('InputDescriptions', [])
|
||||||
|
if len(input_descriptions) != 1:
|
||||||
|
return ''
|
||||||
|
|
||||||
|
return input_descriptions[0].get('InputId', '')
|
||||||
|
|
||||||
|
def stop_kinesis_data_analytics_application(self, application_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Stop the Kenisis Data Analytics application.
|
||||||
|
:param application_name: Name of the Kenisis Data Analytics application.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('kinesisanalytics')
|
||||||
|
client.stop_application(
|
||||||
|
ApplicationName=application_name
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
KinesisAnalyticsApplicationUpdatedWaiter(client, 'READY').wait(application_name=application_name)
|
||||||
|
except WaiterError as e:
|
||||||
|
assert False, f'Failed to stop the Kinesis Data Analytics application: {str(e)}.'
|
||||||
|
|
||||||
|
def verify_cloud_watch_delivery(self, namespace: str, metrics_name: str,
|
||||||
|
dimensions: typing.List[dict], start_time: datetime) -> None:
|
||||||
|
"""
|
||||||
|
Verify that the expected metrics is delivered to CloudWatch.
|
||||||
|
:param namespace: Namespace of the metrics.
|
||||||
|
:param metrics_name: Name of the metrics.
|
||||||
|
:param dimensions: Dimensions of the metrics.
|
||||||
|
:param start_time: Start time for generating the metrics.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('cloudwatch')
|
||||||
|
|
||||||
|
try:
|
||||||
|
CloudWatchMetricsDeliveredWaiter(client).wait(
|
||||||
|
namespace=namespace,
|
||||||
|
metrics_name=metrics_name,
|
||||||
|
dimensions=dimensions,
|
||||||
|
start_time=start_time
|
||||||
|
)
|
||||||
|
except WaiterError as e:
|
||||||
|
assert False, f'Failed to deliver metrics to CloudWatch: {str(e)}.'
|
||||||
|
|
||||||
|
def verify_s3_delivery(self, analytics_bucket_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Verify that metrics are delivered to S3 for batch analytics successfully.
|
||||||
|
:param analytics_bucket_name: Name of the deployed S3 bucket.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('s3')
|
||||||
|
bucket_name = analytics_bucket_name
|
||||||
|
|
||||||
|
try:
|
||||||
|
DataLakeMetricsDeliveredWaiter(client).wait(bucket_name=bucket_name, prefix=EXPECTED_S3_DIRECTORY)
|
||||||
|
except WaiterError as e:
|
||||||
|
assert False, f'Failed to find the S3 directory for storing metrics data: {str(e)}.'
|
||||||
|
|
||||||
|
# Check whether the data is converted to the expected data format.
|
||||||
|
response = client.list_objects_v2(
|
||||||
|
Bucket=bucket_name,
|
||||||
|
Prefix=EXPECTED_S3_DIRECTORY
|
||||||
|
)
|
||||||
|
assert response.get('KeyCount', 0) != 0, f'Failed to deliver metrics to the S3 bucket {bucket_name}.'
|
||||||
|
|
||||||
|
s3_objects = response.get('Contents', [])
|
||||||
|
for s3_object in s3_objects:
|
||||||
|
key = s3_object.get('Key', '')
|
||||||
|
assert pathlib.Path(key).suffix == EXPECTED_S3_OBJECT_EXTENSION, \
|
||||||
|
f'Invalid data format is found in the S3 bucket {bucket_name}'
|
||||||
|
|
||||||
|
def run_glue_crawler(self, crawler_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Run the Glue crawler and wait for it to finish.
|
||||||
|
:param crawler_name: Name of the Glue crawler
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('glue')
|
||||||
|
try:
|
||||||
|
client.start_crawler(
|
||||||
|
Name=crawler_name
|
||||||
|
)
|
||||||
|
except client.exceptions.CrawlerRunningException:
|
||||||
|
# The crawler has already been started.
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
GlueCrawlerReadyWaiter(client).wait(crawler_name=crawler_name)
|
||||||
|
except WaiterError as e:
|
||||||
|
assert False, f'Failed to run the Glue crawler: {str(e)}.'
|
||||||
|
|
||||||
|
def run_named_queries(self, work_group: str) -> None:
|
||||||
|
"""
|
||||||
|
Run the named queries under the specific Athena work group.
|
||||||
|
:param work_group: Name of the Athena work group.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('athena')
|
||||||
|
# List all the named queries.
|
||||||
|
response = client.list_named_queries(
|
||||||
|
WorkGroup=work_group
|
||||||
|
)
|
||||||
|
named_query_ids = response.get('NamedQueryIds', [])
|
||||||
|
|
||||||
|
# Run each of the queries.
|
||||||
|
for named_query_id in named_query_ids:
|
||||||
|
get_named_query_response = client.get_named_query(
|
||||||
|
NamedQueryId=named_query_id
|
||||||
|
)
|
||||||
|
named_query = get_named_query_response.get('NamedQuery', {})
|
||||||
|
|
||||||
|
start_query_execution_response = client.start_query_execution(
|
||||||
|
QueryString=named_query.get('QueryString', ''),
|
||||||
|
QueryExecutionContext={
|
||||||
|
'Database': named_query.get('Database', '')
|
||||||
|
},
|
||||||
|
WorkGroup=work_group
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait for the query to finish.
|
||||||
|
state = 'RUNNING'
|
||||||
|
while state == 'QUEUED' or state == 'RUNNING':
|
||||||
|
get_query_execution_response = client.get_query_execution(
|
||||||
|
QueryExecutionId=start_query_execution_response.get('QueryExecutionId', '')
|
||||||
|
)
|
||||||
|
|
||||||
|
state = get_query_execution_response.get('QueryExecution', {}).get('Status', {}).get('State', '')
|
||||||
|
|
||||||
|
assert state == 'SUCCEEDED', f'Failed to run the named query {named_query.get("Name", {})}'
|
||||||
|
|
||||||
|
def empty_bucket(self, bucket_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Empty the S3 bucket following:
|
||||||
|
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/migrations3.html
|
||||||
|
|
||||||
|
:param bucket_name: Name of the S3 bucket.
|
||||||
|
"""
|
||||||
|
s3 = self._aws_util.resource('s3')
|
||||||
|
bucket = s3.Bucket(bucket_name)
|
||||||
|
|
||||||
|
for key in bucket.objects.all():
|
||||||
|
key.delete()
|
||||||
|
|
||||||
|
def delete_table(self, database_name: str, table_name: str) -> None:
|
||||||
|
"""
|
||||||
|
Delete an existing Glue table.
|
||||||
|
|
||||||
|
:param database_name: Name of the Glue database.
|
||||||
|
:param table_name: Name of the table to delete.
|
||||||
|
"""
|
||||||
|
client = self._aws_util.client('glue')
|
||||||
|
client.delete_table(
|
||||||
|
DatabaseName=database_name,
|
||||||
|
Name=table_name
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope='function')
|
||||||
|
def aws_metrics_utils(
|
||||||
|
request: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture):
|
||||||
|
"""
|
||||||
|
Fixture for the AWS metrics util functions.
|
||||||
|
:param request: _pytest.fixtures.SubRequest class that handles getting
|
||||||
|
a pytest fixture from a pytest function/fixture.
|
||||||
|
:param aws_utils: aws_utils fixture.
|
||||||
|
"""
|
||||||
|
aws_utils_obj = AWSMetricsUtils(aws_utils)
|
||||||
|
return aws_utils_obj
|
||||||
@ -0,0 +1,139 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
import botocore.client
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from datetime import timedelta
|
||||||
|
from AWS.common.custom_waiter import CustomWaiter, WaitState
|
||||||
|
|
||||||
|
logging.getLogger('boto').setLevel(logging.CRITICAL)
|
||||||
|
|
||||||
|
|
||||||
|
class KinesisAnalyticsApplicationUpdatedWaiter(CustomWaiter):
|
||||||
|
"""
|
||||||
|
Subclass of the base custom waiter class.
|
||||||
|
Wait for the Kinesis analytics application being updated to a specific status.
|
||||||
|
"""
|
||||||
|
def __init__(self, client: botocore.client, status: str):
|
||||||
|
"""
|
||||||
|
Initialize the waiter.
|
||||||
|
|
||||||
|
:param client: Boto3 client to use.
|
||||||
|
:param status: Expected status.
|
||||||
|
"""
|
||||||
|
super().__init__(
|
||||||
|
'KinesisAnalyticsApplicationUpdated',
|
||||||
|
'DescribeApplication',
|
||||||
|
'ApplicationDetail.ApplicationStatus',
|
||||||
|
{status: WaitState.SUCCESS},
|
||||||
|
client)
|
||||||
|
|
||||||
|
def wait(self, application_name: str):
|
||||||
|
"""
|
||||||
|
Wait for the expected status.
|
||||||
|
|
||||||
|
:param application_name: Name of the Kinesis analytics application.
|
||||||
|
"""
|
||||||
|
self._wait(ApplicationName=application_name)
|
||||||
|
|
||||||
|
|
||||||
|
class GlueCrawlerReadyWaiter(CustomWaiter):
|
||||||
|
"""
|
||||||
|
Subclass of the base custom waiter class.
|
||||||
|
Wait for the Glue crawler to finish its processing. Return when the crawler is in the "Stopping" status
|
||||||
|
to avoid wasting too much time in the automation tests on its shutdown process.
|
||||||
|
"""
|
||||||
|
def __init__(self, client: botocore.client):
|
||||||
|
"""
|
||||||
|
Initialize the waiter.
|
||||||
|
|
||||||
|
:param client: Boto3 client to use.
|
||||||
|
"""
|
||||||
|
super().__init__(
|
||||||
|
'GlueCrawlerReady',
|
||||||
|
'GetCrawler',
|
||||||
|
'Crawler.State',
|
||||||
|
{'STOPPING': WaitState.SUCCESS},
|
||||||
|
client)
|
||||||
|
|
||||||
|
def wait(self, crawler_name):
|
||||||
|
"""
|
||||||
|
Wait for the expected status.
|
||||||
|
|
||||||
|
:param crawler_name: Name of the Glue crawler.
|
||||||
|
"""
|
||||||
|
self._wait(Name=crawler_name)
|
||||||
|
|
||||||
|
|
||||||
|
class DataLakeMetricsDeliveredWaiter(CustomWaiter):
|
||||||
|
"""
|
||||||
|
Subclass of the base custom waiter class.
|
||||||
|
Wait for the expected directory being created in the S3 bucket.
|
||||||
|
"""
|
||||||
|
def __init__(self, client: botocore.client):
|
||||||
|
"""
|
||||||
|
Initialize the waiter.
|
||||||
|
|
||||||
|
:param client: Boto3 client to use.
|
||||||
|
"""
|
||||||
|
super().__init__(
|
||||||
|
'DataLakeMetricsDelivered',
|
||||||
|
'ListObjectsV2',
|
||||||
|
'KeyCount > `0`',
|
||||||
|
{True: WaitState.SUCCESS},
|
||||||
|
client)
|
||||||
|
|
||||||
|
def wait(self, bucket_name, prefix):
|
||||||
|
"""
|
||||||
|
Wait for the expected directory being created.
|
||||||
|
|
||||||
|
:param bucket_name: Name of the S3 bucket.
|
||||||
|
:param prefix: Name of the expected directory prefix.
|
||||||
|
"""
|
||||||
|
self._wait(Bucket=bucket_name, Prefix=prefix)
|
||||||
|
|
||||||
|
|
||||||
|
class CloudWatchMetricsDeliveredWaiter(CustomWaiter):
|
||||||
|
"""
|
||||||
|
Subclass of the base custom waiter class.
|
||||||
|
Wait for the expected metrics being delivered to CloudWatch.
|
||||||
|
"""
|
||||||
|
def __init__(self, client: botocore.client):
|
||||||
|
"""
|
||||||
|
Initialize the waiter.
|
||||||
|
|
||||||
|
:param client: Boto3 client to use.
|
||||||
|
"""
|
||||||
|
super().__init__(
|
||||||
|
'CloudWatchMetricsDelivered',
|
||||||
|
'GetMetricStatistics',
|
||||||
|
'length(Datapoints) > `0`',
|
||||||
|
{True: WaitState.SUCCESS},
|
||||||
|
client)
|
||||||
|
|
||||||
|
def wait(self, namespace, metrics_name, dimensions, start_time):
|
||||||
|
"""
|
||||||
|
Wait for the expected metrics being delivered.
|
||||||
|
|
||||||
|
:param namespace: Namespace of the metrics.
|
||||||
|
:param metrics_name: Name of the metrics.
|
||||||
|
:param dimensions: Dimensions of the metrics.
|
||||||
|
:param start_time: Start time for generating the metrics.
|
||||||
|
"""
|
||||||
|
self._wait(
|
||||||
|
Namespace=namespace,
|
||||||
|
MetricName=metrics_name,
|
||||||
|
Dimensions=dimensions,
|
||||||
|
StartTime=start_time,
|
||||||
|
EndTime=start_time + timedelta(0, self.timeout),
|
||||||
|
Period=60,
|
||||||
|
Statistics=[
|
||||||
|
'SampleCount'
|
||||||
|
],
|
||||||
|
Unit='Count'
|
||||||
|
)
|
||||||
@ -0,0 +1,170 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import ly_test_tools.log.log_monitor
|
||||||
|
|
||||||
|
from AWS.common import constants
|
||||||
|
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
||||||
|
|
||||||
|
# fixture imports
|
||||||
|
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
||||||
|
|
||||||
|
AWS_CLIENT_AUTH_FEATURE_NAME = 'AWSClientAuth'
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.SUITE_awsi
|
||||||
|
@pytest.mark.usefixtures('asset_processor')
|
||||||
|
@pytest.mark.usefixtures('automatic_process_killer')
|
||||||
|
@pytest.mark.usefixtures('aws_utils')
|
||||||
|
@pytest.mark.usefixtures('workspace')
|
||||||
|
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
|
||||||
|
@pytest.mark.parametrize('feature_name', [AWS_CLIENT_AUTH_FEATURE_NAME])
|
||||||
|
@pytest.mark.parametrize('project', ['AutomatedTesting'])
|
||||||
|
@pytest.mark.usefixtures('resource_mappings')
|
||||||
|
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
|
||||||
|
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
|
||||||
|
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
|
||||||
|
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CLIENT_AUTH_FEATURE_NAME}-Stack-{constants.AWS_REGION}']])
|
||||||
|
class TestAWSClientAuthWindows(object):
|
||||||
|
"""
|
||||||
|
Test class to verify AWS Client Auth gem features on Windows.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/clientauth/clientauth.spawnable'])
|
||||||
|
def test_anonymous_credentials(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Test to verify AWS Cognito Identity pool anonymous authorization.
|
||||||
|
|
||||||
|
Setup: Updates resource mapping file using existing CloudFormation stacks.
|
||||||
|
Tests: Getting credentials when no credentials are configured
|
||||||
|
Verification: Log monitor looks for success credentials log.
|
||||||
|
"""
|
||||||
|
asset_processor.start()
|
||||||
|
asset_processor.wait_for_idle()
|
||||||
|
|
||||||
|
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
|
||||||
|
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=['(Script) - Success anonymous credentials'],
|
||||||
|
unexpected_lines=['(Script) - Fail anonymous credentials'],
|
||||||
|
halt_on_unexpected=True,
|
||||||
|
)
|
||||||
|
assert result, 'Anonymous credentials fetched successfully.'
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/clientauth/clientauth.spawnable'])
|
||||||
|
def test_anonymous_credentials_no_global_accountid(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Test to verify AWS Cognito Identity pool anonymous authorization.
|
||||||
|
|
||||||
|
Setup: Updates resource mapping file using existing CloudFormation stacks.
|
||||||
|
Tests: Getting credentials when no credentials are configured
|
||||||
|
Verification: Log monitor looks for success credentials log.
|
||||||
|
"""
|
||||||
|
# Remove top-level account ID from resource mappings
|
||||||
|
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
|
||||||
|
|
||||||
|
asset_processor.start()
|
||||||
|
asset_processor.wait_for_idle()
|
||||||
|
|
||||||
|
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
|
||||||
|
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=['(Script) - Success anonymous credentials'],
|
||||||
|
unexpected_lines=['(Script) - Fail anonymous credentials'],
|
||||||
|
halt_on_unexpected=True,
|
||||||
|
)
|
||||||
|
assert result, 'Anonymous credentials fetched successfully.'
|
||||||
|
|
||||||
|
def test_password_signin_credentials(self,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Test to verify AWS Cognito IDP Password sign in and Cognito Identity pool authenticated authorization.
|
||||||
|
|
||||||
|
Setup: Updates resource mapping file using existing CloudFormation stacks.
|
||||||
|
Tests: Sign up new test user, admin confirm the user, sign in and get aws credentials.
|
||||||
|
Verification: Log monitor looks for success credentials log.
|
||||||
|
"""
|
||||||
|
asset_processor.start()
|
||||||
|
asset_processor.wait_for_idle()
|
||||||
|
|
||||||
|
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
|
||||||
|
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
|
||||||
|
|
||||||
|
cognito_idp = aws_utils.client('cognito-idp')
|
||||||
|
user_pool_id = resource_mappings.get_resource_name_id(f'{AWS_CLIENT_AUTH_FEATURE_NAME}.CognitoUserPoolId')
|
||||||
|
logger.info(f'UserPoolId:{user_pool_id}')
|
||||||
|
|
||||||
|
# Remove the user if already exists
|
||||||
|
try:
|
||||||
|
cognito_idp.admin_delete_user(
|
||||||
|
UserPoolId=user_pool_id,
|
||||||
|
Username='test1'
|
||||||
|
)
|
||||||
|
except cognito_idp.exceptions.UserNotFoundException:
|
||||||
|
pass
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', 'levels/aws/clientauthpasswordsignup/clientauthpasswordsignup.spawnable']
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=['(Script) - Signup Success'],
|
||||||
|
unexpected_lines=['(Script) - Signup Fail'],
|
||||||
|
halt_on_unexpected=True,
|
||||||
|
)
|
||||||
|
assert result, 'Sign Up Success.'
|
||||||
|
|
||||||
|
launcher.stop()
|
||||||
|
|
||||||
|
cognito_idp.admin_confirm_sign_up(
|
||||||
|
UserPoolId=user_pool_id,
|
||||||
|
Username='test1'
|
||||||
|
)
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', 'levels/aws/clientauthpasswordsignin/clientauthpasswordsignin.spawnable']
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=['(Script) - SignIn Success', '(Script) - Success credentials'],
|
||||||
|
unexpected_lines=['(Script) - SignIn Fail', '(Script) - Fail credentials'],
|
||||||
|
halt_on_unexpected=True,
|
||||||
|
)
|
||||||
|
assert result, 'Sign in Success, fetched authenticated AWS temp credentials.'
|
||||||
@ -0,0 +1,6 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
@ -0,0 +1,192 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import typing
|
||||||
|
from botocore.exceptions import ClientError
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import ly_test_tools
|
||||||
|
import ly_test_tools.log.log_monitor
|
||||||
|
import ly_test_tools.environment.process_utils as process_utils
|
||||||
|
import ly_test_tools.o3de.asset_processor_utils as asset_processor_utils
|
||||||
|
|
||||||
|
from AWS.common import constants
|
||||||
|
from AWS.common.resource_mappings import AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY
|
||||||
|
|
||||||
|
# fixture imports
|
||||||
|
from assetpipeline.ap_fixtures.asset_processor_fixture import asset_processor
|
||||||
|
|
||||||
|
AWS_CORE_FEATURE_NAME = 'AWSCore'
|
||||||
|
|
||||||
|
process_utils.kill_processes_named("o3de", ignore_extensions=True) # Kill ProjectManager windows
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def setup(launcher: pytest.fixture, asset_processor: pytest.fixture) -> typing.Tuple[pytest.fixture, str]:
|
||||||
|
"""
|
||||||
|
Set up the resource mapping configuration and start the log monitor.
|
||||||
|
:param launcher: Client launcher for running the test level.
|
||||||
|
:param asset_processor: asset_processor fixture.
|
||||||
|
:return log monitor object, metrics file path and the metrics stack name.
|
||||||
|
"""
|
||||||
|
# Create the temporary directory for downloading test file from S3.
|
||||||
|
user_dir = os.path.join(launcher.workspace.paths.project(), 'user')
|
||||||
|
s3_download_dir = os.path.join(user_dir, 's3_download')
|
||||||
|
if not os.path.exists(s3_download_dir):
|
||||||
|
os.makedirs(s3_download_dir)
|
||||||
|
|
||||||
|
asset_processor_utils.kill_asset_processor()
|
||||||
|
asset_processor.start()
|
||||||
|
asset_processor.wait_for_idle()
|
||||||
|
|
||||||
|
file_to_monitor = os.path.join(launcher.workspace.paths.project_log(), constants.GAME_LOG_NAME)
|
||||||
|
log_monitor = ly_test_tools.log.log_monitor.LogMonitor(launcher=launcher, log_file_path=file_to_monitor)
|
||||||
|
|
||||||
|
return log_monitor, s3_download_dir
|
||||||
|
|
||||||
|
|
||||||
|
def write_test_data_to_dynamodb_table(resource_mappings: pytest.fixture, aws_utils: pytest.fixture) -> None:
|
||||||
|
"""
|
||||||
|
Write test data to the DynamoDB table created by the CDK application.
|
||||||
|
:param resource_mappings: resource_mappings fixture.
|
||||||
|
:param aws_utils: aws_utils fixture.
|
||||||
|
"""
|
||||||
|
table_name = resource_mappings.get_resource_name_id(f'{AWS_CORE_FEATURE_NAME}.ExampleDynamoTableOutput')
|
||||||
|
try:
|
||||||
|
aws_utils.client('dynamodb').put_item(
|
||||||
|
TableName=table_name,
|
||||||
|
Item={
|
||||||
|
'id': {
|
||||||
|
'S': 'Item1'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
logger.info(f'Loaded data into table {table_name}')
|
||||||
|
except ClientError:
|
||||||
|
logger.exception(f'Failed to load data into table {table_name}')
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.SUITE_awsi
|
||||||
|
@pytest.mark.usefixtures('automatic_process_killer')
|
||||||
|
@pytest.mark.usefixtures('asset_processor')
|
||||||
|
@pytest.mark.parametrize('feature_name', [AWS_CORE_FEATURE_NAME])
|
||||||
|
@pytest.mark.parametrize('region_name', [constants.AWS_REGION])
|
||||||
|
@pytest.mark.parametrize('assume_role_arn', [constants.ASSUME_ROLE_ARN])
|
||||||
|
@pytest.mark.parametrize('session_name', [constants.SESSION_NAME])
|
||||||
|
@pytest.mark.usefixtures('workspace')
|
||||||
|
@pytest.mark.parametrize('project', ['AutomatedTesting'])
|
||||||
|
@pytest.mark.parametrize('level', ['levels/aws/core/core.spawnable'])
|
||||||
|
@pytest.mark.usefixtures('resource_mappings')
|
||||||
|
@pytest.mark.parametrize('resource_mappings_filename', [constants.AWS_RESOURCE_MAPPING_FILE_NAME])
|
||||||
|
@pytest.mark.parametrize('stacks', [[f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}',
|
||||||
|
f'{constants.AWS_PROJECT_NAME}-{AWS_CORE_FEATURE_NAME}-Example-{constants.AWS_REGION}']])
|
||||||
|
@pytest.mark.usefixtures('aws_credentials')
|
||||||
|
@pytest.mark.parametrize('profile_name', ['AWSAutomationTest'])
|
||||||
|
class TestAWSCoreAWSResourceInteraction(object):
|
||||||
|
"""
|
||||||
|
Test class to verify the scripting behavior for the AWSCore gem.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('expected_lines', [
|
||||||
|
['(Script) - [S3] Head object request is done',
|
||||||
|
'(Script) - [S3] Head object success: Object example.txt is found.',
|
||||||
|
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
|
||||||
|
'(Script) - [Lambda] Completed Invoke',
|
||||||
|
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
|
||||||
|
'(Script) - [DynamoDB] Results finished']])
|
||||||
|
@pytest.mark.parametrize('unexpected_lines', [
|
||||||
|
['(Script) - [S3] Head object error: No response body.',
|
||||||
|
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
|
||||||
|
'(Script) - Request validation failed, output file miss full path.',
|
||||||
|
'(Script) - ']])
|
||||||
|
def test_scripting_behavior(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture,
|
||||||
|
expected_lines: typing.List[str],
|
||||||
|
unexpected_lines: typing.List[str]):
|
||||||
|
"""
|
||||||
|
Setup: Updates resource mapping file using existing CloudFormation stacks.
|
||||||
|
Tests: Interact with AWS S3, DynamoDB and Lambda services.
|
||||||
|
Verification: Script canvas nodes can communicate with AWS services successfully.
|
||||||
|
"""
|
||||||
|
|
||||||
|
log_monitor, s3_download_dir = setup(launcher, asset_processor)
|
||||||
|
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=expected_lines,
|
||||||
|
unexpected_lines=unexpected_lines,
|
||||||
|
halt_on_unexpected=True
|
||||||
|
)
|
||||||
|
|
||||||
|
assert result, "Expected lines weren't found."
|
||||||
|
|
||||||
|
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
|
||||||
|
'The expected file wasn\'t successfully downloaded.'
|
||||||
|
# clean up the file directories.
|
||||||
|
shutil.rmtree(s3_download_dir)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('expected_lines', [
|
||||||
|
['(Script) - [S3] Head object request is done',
|
||||||
|
'(Script) - [S3] Head object success: Object example.txt is found.',
|
||||||
|
'(Script) - [S3] Get object success: Object example.txt is downloaded.',
|
||||||
|
'(Script) - [Lambda] Completed Invoke',
|
||||||
|
'(Script) - [Lambda] Invoke success: {"statusCode": 200, "body": {}}',
|
||||||
|
'(Script) - [DynamoDB] Results finished']])
|
||||||
|
@pytest.mark.parametrize('unexpected_lines', [
|
||||||
|
['(Script) - [S3] Head object error: No response body.',
|
||||||
|
'(Script) - [S3] Get object error: Request validation failed, output file directory doesn\'t exist.',
|
||||||
|
'(Script) - Request validation failed, output file miss full path.',
|
||||||
|
'(Script) - ']])
|
||||||
|
def test_scripting_behavior_no_global_accountid(self,
|
||||||
|
level: str,
|
||||||
|
launcher: pytest.fixture,
|
||||||
|
workspace: pytest.fixture,
|
||||||
|
asset_processor: pytest.fixture,
|
||||||
|
resource_mappings: pytest.fixture,
|
||||||
|
aws_utils: pytest.fixture,
|
||||||
|
expected_lines: typing.List[str],
|
||||||
|
unexpected_lines: typing.List[str]):
|
||||||
|
"""
|
||||||
|
Setup: Updates resource mapping file using existing CloudFormation stacks.
|
||||||
|
Tests: Interact with AWS S3, DynamoDB and Lambda services.
|
||||||
|
Verification: Script canvas nodes can communicate with AWS services successfully.
|
||||||
|
"""
|
||||||
|
|
||||||
|
resource_mappings.clear_select_keys([AWS_RESOURCE_MAPPINGS_ACCOUNT_ID_KEY])
|
||||||
|
log_monitor, s3_download_dir = setup(launcher, asset_processor)
|
||||||
|
write_test_data_to_dynamodb_table(resource_mappings, aws_utils)
|
||||||
|
|
||||||
|
launcher.args = ['+LoadLevel', level]
|
||||||
|
launcher.args.extend(['-rhi=null'])
|
||||||
|
|
||||||
|
with launcher.start(launch_ap=False):
|
||||||
|
result = log_monitor.monitor_log_for_lines(
|
||||||
|
expected_lines=expected_lines,
|
||||||
|
unexpected_lines=unexpected_lines,
|
||||||
|
halt_on_unexpected=True
|
||||||
|
)
|
||||||
|
|
||||||
|
assert result, "Expected lines weren't found."
|
||||||
|
|
||||||
|
assert os.path.exists(os.path.join(s3_download_dir, 'output.txt')), \
|
||||||
|
'The expected file wasn\'t successfully downloaded.'
|
||||||
|
# clean up the file directories.
|
||||||
|
shutil.rmtree(s3_download_dir)
|
||||||
@ -1,213 +0,0 @@
|
|||||||
"""
|
|
||||||
Copyright (c) Contributors to the Open 3D Engine Project.
|
|
||||||
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import azlmbr.bus as bus
|
|
||||||
import azlmbr.editor as editor
|
|
||||||
import azlmbr.math as math
|
|
||||||
import azlmbr.paths
|
|
||||||
import azlmbr.legacy.general as general
|
|
||||||
|
|
||||||
sys.path.append(os.path.join(azlmbr.paths.projectroot, "Gem", "PythonTests"))
|
|
||||||
|
|
||||||
import editor_python_test_tools.hydra_editor_utils as hydra
|
|
||||||
from Atom.atom_utils.atom_constants import LIGHT_TYPES
|
|
||||||
|
|
||||||
LIGHT_TYPE_PROPERTY = 'Controller|Configuration|Light type'
|
|
||||||
SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES = [
|
|
||||||
("Controller|Configuration|Shadows|Enable shadow", True),
|
|
||||||
("Controller|Configuration|Shadows|Shadowmap size", 0), # 256
|
|
||||||
("Controller|Configuration|Shadows|Shadowmap size", 1), # 512
|
|
||||||
("Controller|Configuration|Shadows|Shadowmap size", 2), # 1024
|
|
||||||
("Controller|Configuration|Shadows|Shadowmap size", 3), # 2048
|
|
||||||
("Controller|Configuration|Shadows|Shadow filter method", 1), # PCF
|
|
||||||
("Controller|Configuration|Shadows|Filtering sample count", 4.0),
|
|
||||||
("Controller|Configuration|Shadows|Filtering sample count", 64.0),
|
|
||||||
("Controller|Configuration|Shadows|Shadow filter method", 2), # ECM
|
|
||||||
("Controller|Configuration|Shadows|ESM exponent", 50),
|
|
||||||
("Controller|Configuration|Shadows|ESM exponent", 5000),
|
|
||||||
("Controller|Configuration|Shadows|Shadow filter method", 3), # ESM+PCF
|
|
||||||
]
|
|
||||||
QUAD_LIGHT_PROPERTIES = [
|
|
||||||
("Controller|Configuration|Both directions", True),
|
|
||||||
("Controller|Configuration|Fast approximation", True),
|
|
||||||
]
|
|
||||||
SIMPLE_POINT_LIGHT_PROPERTIES = [
|
|
||||||
("Controller|Configuration|Attenuation radius|Mode", 0),
|
|
||||||
("Controller|Configuration|Attenuation radius|Radius", 100.0),
|
|
||||||
]
|
|
||||||
SIMPLE_SPOT_LIGHT_PROPERTIES = [
|
|
||||||
("Controller|Configuration|Shutters|Inner angle", 45.0),
|
|
||||||
("Controller|Configuration|Shutters|Outer angle", 90.0),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def verify_required_component_property_value(entity_name, component, property_path, expected_property_value):
|
|
||||||
"""
|
|
||||||
Compares the property value of component against the expected_property_value.
|
|
||||||
:param entity_name: name of the entity to use (for test verification purposes).
|
|
||||||
:param component: component to check on a given entity for its current property value.
|
|
||||||
:param property_path: the path to the property inside the component.
|
|
||||||
:param expected_property_value: The value expected from the value inside property_path.
|
|
||||||
:return: None, but prints to general.log() which the test uses to verify against.
|
|
||||||
"""
|
|
||||||
property_value = editor.EditorComponentAPIBus(
|
|
||||||
bus.Broadcast, "GetComponentProperty", component, property_path).GetValue()
|
|
||||||
general.log(f"{entity_name}_test: Property value is {property_value} "
|
|
||||||
f"which matches {expected_property_value}")
|
|
||||||
|
|
||||||
|
|
||||||
def run():
|
|
||||||
"""
|
|
||||||
Test Case - Light Component
|
|
||||||
1. Creates a "light_entity" Entity and attaches a "Light" component to it.
|
|
||||||
2. Updates the Light component to each light type option from the LIGHT_TYPES constant.
|
|
||||||
3. The test will check the Editor log to ensure each light type was selected.
|
|
||||||
4. Prints the string "Light component test (non-GPU) completed" after completion.
|
|
||||||
|
|
||||||
Tests will fail immediately if any of these log lines are found:
|
|
||||||
1. Trace::Assert
|
|
||||||
2. Trace::Error
|
|
||||||
3. Traceback (most recent call last):
|
|
||||||
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
# Create a "light_entity" entity with "Light" component.
|
|
||||||
light_entity_name = "light_entity"
|
|
||||||
light_component = "Light"
|
|
||||||
light_entity = hydra.Entity(light_entity_name)
|
|
||||||
light_entity.create_entity(math.Vector3(-1.0, -2.0, 3.0), [light_component])
|
|
||||||
general.log(
|
|
||||||
f"{light_entity_name}_test: Component added to the entity: "
|
|
||||||
f"{hydra.has_components(light_entity.id, [light_component])}")
|
|
||||||
|
|
||||||
# Populate the light_component_id_pair value so that it can be used to select all Light component options.
|
|
||||||
light_component_id_pair = None
|
|
||||||
component_type_id_list = azlmbr.editor.EditorComponentAPIBus(
|
|
||||||
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', [light_component], 0)
|
|
||||||
if len(component_type_id_list) < 1:
|
|
||||||
general.log(f"ERROR: A component class with name {light_component} doesn't exist")
|
|
||||||
light_component_id_pair = None
|
|
||||||
elif len(component_type_id_list) > 1:
|
|
||||||
general.log(f"ERROR: Found more than one component classes with same name: {light_component}")
|
|
||||||
light_component_id_pair = None
|
|
||||||
entity_component_id_pair = azlmbr.editor.EditorComponentAPIBus(
|
|
||||||
azlmbr.bus.Broadcast, 'GetComponentOfType', light_entity.id, component_type_id_list[0])
|
|
||||||
if entity_component_id_pair.IsSuccess():
|
|
||||||
light_component_id_pair = entity_component_id_pair.GetValue()
|
|
||||||
|
|
||||||
# Test each Light component option can be selected and it's properties updated.
|
|
||||||
# Point (sphere) light type checks.
|
|
||||||
light_type_property_test(
|
|
||||||
light_type=LIGHT_TYPES['sphere'],
|
|
||||||
light_properties=SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES,
|
|
||||||
light_component_id_pair=light_component_id_pair,
|
|
||||||
light_entity_name=light_entity_name,
|
|
||||||
light_entity=light_entity
|
|
||||||
)
|
|
||||||
|
|
||||||
# Spot (disk) light type checks.
|
|
||||||
light_type_property_test(
|
|
||||||
light_type=LIGHT_TYPES['spot_disk'],
|
|
||||||
light_properties=SPHERE_AND_SPOT_DISK_LIGHT_PROPERTIES,
|
|
||||||
light_component_id_pair=light_component_id_pair,
|
|
||||||
light_entity_name=light_entity_name,
|
|
||||||
light_entity=light_entity
|
|
||||||
)
|
|
||||||
|
|
||||||
# Capsule light type checks.
|
|
||||||
azlmbr.editor.EditorComponentAPIBus(
|
|
||||||
azlmbr.bus.Broadcast,
|
|
||||||
'SetComponentProperty',
|
|
||||||
light_component_id_pair,
|
|
||||||
LIGHT_TYPE_PROPERTY,
|
|
||||||
LIGHT_TYPES['capsule']
|
|
||||||
)
|
|
||||||
verify_required_component_property_value(
|
|
||||||
entity_name=light_entity_name,
|
|
||||||
component=light_entity.components[0],
|
|
||||||
property_path=LIGHT_TYPE_PROPERTY,
|
|
||||||
expected_property_value=LIGHT_TYPES['capsule']
|
|
||||||
)
|
|
||||||
|
|
||||||
# Quad light type checks.
|
|
||||||
light_type_property_test(
|
|
||||||
light_type=LIGHT_TYPES['quad'],
|
|
||||||
light_properties=QUAD_LIGHT_PROPERTIES,
|
|
||||||
light_component_id_pair=light_component_id_pair,
|
|
||||||
light_entity_name=light_entity_name,
|
|
||||||
light_entity=light_entity
|
|
||||||
)
|
|
||||||
|
|
||||||
# Polygon light type checks.
|
|
||||||
azlmbr.editor.EditorComponentAPIBus(
|
|
||||||
azlmbr.bus.Broadcast,
|
|
||||||
'SetComponentProperty',
|
|
||||||
light_component_id_pair,
|
|
||||||
LIGHT_TYPE_PROPERTY,
|
|
||||||
LIGHT_TYPES['polygon']
|
|
||||||
)
|
|
||||||
verify_required_component_property_value(
|
|
||||||
entity_name=light_entity_name,
|
|
||||||
component=light_entity.components[0],
|
|
||||||
property_path=LIGHT_TYPE_PROPERTY,
|
|
||||||
expected_property_value=LIGHT_TYPES['polygon']
|
|
||||||
)
|
|
||||||
|
|
||||||
# Point (simple punctual) light type checks.
|
|
||||||
light_type_property_test(
|
|
||||||
light_type=LIGHT_TYPES['simple_point'],
|
|
||||||
light_properties=SIMPLE_POINT_LIGHT_PROPERTIES,
|
|
||||||
light_component_id_pair=light_component_id_pair,
|
|
||||||
light_entity_name=light_entity_name,
|
|
||||||
light_entity=light_entity
|
|
||||||
)
|
|
||||||
|
|
||||||
# Spot (simple punctual) light type checks.
|
|
||||||
light_type_property_test(
|
|
||||||
light_type=LIGHT_TYPES['simple_spot'],
|
|
||||||
light_properties=SIMPLE_SPOT_LIGHT_PROPERTIES,
|
|
||||||
light_component_id_pair=light_component_id_pair,
|
|
||||||
light_entity_name=light_entity_name,
|
|
||||||
light_entity=light_entity
|
|
||||||
)
|
|
||||||
|
|
||||||
general.log("Light component test (non-GPU) completed.")
|
|
||||||
|
|
||||||
|
|
||||||
def light_type_property_test(light_type, light_properties, light_component_id_pair, light_entity_name, light_entity):
|
|
||||||
"""
|
|
||||||
Updates the current light type and modifies its properties, then verifies they are accurate to what was set.
|
|
||||||
:param light_type: The type of light to update, must match a value in LIGHT_TYPES
|
|
||||||
:param light_properties: List of tuples detailing properties to modify with update values.
|
|
||||||
:param light_component_id_pair: Entity + component ID pair for updating the light component on a given entity.
|
|
||||||
:param light_entity_name: the name of the Entity holding the light component.
|
|
||||||
:param light_entity: the Entity object containing the light component.
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
azlmbr.editor.EditorComponentAPIBus(
|
|
||||||
azlmbr.bus.Broadcast,
|
|
||||||
'SetComponentProperty',
|
|
||||||
light_component_id_pair,
|
|
||||||
LIGHT_TYPE_PROPERTY,
|
|
||||||
light_type
|
|
||||||
)
|
|
||||||
verify_required_component_property_value(
|
|
||||||
entity_name=light_entity_name,
|
|
||||||
component=light_entity.components[0],
|
|
||||||
property_path=LIGHT_TYPE_PROPERTY,
|
|
||||||
expected_property_value=light_type
|
|
||||||
)
|
|
||||||
|
|
||||||
for light_property in light_properties:
|
|
||||||
light_entity.get_set_test(0, light_property[0], light_property[1])
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
run()
|
|
||||||
@ -0,0 +1,72 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def Atom_LevelLoadTest():
|
||||||
|
"""
|
||||||
|
Summary:
|
||||||
|
Loads all graphics levels within the AutomatedTesting project in editor. For each level this script will verify that
|
||||||
|
the level loads, and can enter/exit gameplay without crashing the editor.
|
||||||
|
|
||||||
|
Test setup:
|
||||||
|
- Store all available levels in a list.
|
||||||
|
- Set up a for loop to run all checks for each level.
|
||||||
|
|
||||||
|
Expected Behavior:
|
||||||
|
Test verifies that each level loads, enters/exits game mode, and reports success for all test actions.
|
||||||
|
|
||||||
|
Test Steps for each level:
|
||||||
|
1) Create tuple with level load success and failure messages
|
||||||
|
2) Open the level using the python test tools command
|
||||||
|
3) Verify level is loaded using a separate command, and report success/failure
|
||||||
|
4) Enter gameplay and report result using a tuple
|
||||||
|
5) Exit Gameplay and report result using a tuple
|
||||||
|
6) Look for errors or asserts.
|
||||||
|
|
||||||
|
:return: None
|
||||||
|
"""
|
||||||
|
|
||||||
|
import azlmbr.legacy.general as general
|
||||||
|
|
||||||
|
from editor_python_test_tools.utils import Report, Tracer, TestHelper
|
||||||
|
from Atom.atom_utils.atom_constants import LEVEL_LIST
|
||||||
|
|
||||||
|
with Tracer() as error_tracer:
|
||||||
|
|
||||||
|
for level in LEVEL_LIST:
|
||||||
|
|
||||||
|
# 1. Create tuple with level load success and failure messages
|
||||||
|
level_check_tuple = (f"loaded {level}", f"failed to load {level}")
|
||||||
|
|
||||||
|
# 2. Open the level using the python test tools command
|
||||||
|
TestHelper.init_idle()
|
||||||
|
TestHelper.open_level("Graphics", level)
|
||||||
|
|
||||||
|
# 3. Verify level is loaded using a separate command, and report success/failure
|
||||||
|
Report.result(level_check_tuple, level == general.get_current_level_name())
|
||||||
|
|
||||||
|
# 4. Enter gameplay and report result using a tuple
|
||||||
|
enter_game_mode_tuple = (f"{level} entered gameplay successfully ", f"{level} failed to enter gameplay")
|
||||||
|
TestHelper.enter_game_mode(enter_game_mode_tuple)
|
||||||
|
general.idle_wait_frames(1)
|
||||||
|
|
||||||
|
# 5. Exit gameplay and report result using a tuple
|
||||||
|
exit_game_mode_tuple = (f"{level} exited gameplay successfully ", f"{level} failed to exit gameplay")
|
||||||
|
TestHelper.exit_game_mode(exit_game_mode_tuple)
|
||||||
|
|
||||||
|
|
||||||
|
# 6. Look for errors or asserts.
|
||||||
|
TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
|
||||||
|
for error_info in error_tracer.errors:
|
||||||
|
Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
|
||||||
|
for assert_info in error_tracer.asserts:
|
||||||
|
Report.info(f"Assert: {assert_info.filename} {assert_info.function} | {assert_info.message}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
from editor_python_test_tools.utils import Report
|
||||||
|
Report.start_test(Atom_LevelLoadTest)
|
||||||
@ -0,0 +1,83 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
# Test Case Title : Check that the four network RPCs can be sent and received
|
||||||
|
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
class TestSuccessFailTuples():
|
||||||
|
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
|
||||||
|
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
|
||||||
|
find_network_player = ("Found network player", "Couldn't find network player")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
def Multiplayer_AutoComponent_RPC():
|
||||||
|
r"""
|
||||||
|
Summary:
|
||||||
|
Runs a test to make sure that RPCs can be sent and received via script canvas
|
||||||
|
|
||||||
|
Level Description:
|
||||||
|
- Dynamic
|
||||||
|
1. Although the level is nearly empty, when the server and editor connect the server will spawn and replicate the player network prefab.
|
||||||
|
a. The player network prefab has a NetworkTestPlayerComponent.AutoComponent and a script canvas attached which sends and receives various RPCs.
|
||||||
|
Print logs occur upon sending and receiving the RPCs; we are testing to make sure the expected events and values are received.
|
||||||
|
- Static
|
||||||
|
1. NetLevelEntity. This is a networked entity which has a script attached. Used for cross-entity communication. The net-player prefab will send this level entity Server->Authority RPCs
|
||||||
|
|
||||||
|
|
||||||
|
Expected Outcome:
|
||||||
|
We should see editor logs stating that RPCs have been sent and received.
|
||||||
|
However, if the script receives unexpected values for the Process event we will see print logs for bad data as well.
|
||||||
|
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
import azlmbr.legacy.general as general
|
||||||
|
from editor_python_test_tools.utils import Report
|
||||||
|
from editor_python_test_tools.utils import Tracer
|
||||||
|
|
||||||
|
from editor_python_test_tools.utils import TestHelper as helper
|
||||||
|
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
|
||||||
|
|
||||||
|
level_name = "AutoComponent_RPC"
|
||||||
|
player_prefab_name = "Player"
|
||||||
|
player_prefab_path = f"levels/multiplayer/{level_name}/{player_prefab_name}.network.spawnable"
|
||||||
|
|
||||||
|
helper.init_idle()
|
||||||
|
|
||||||
|
# 1) Open Level
|
||||||
|
helper.open_level("Multiplayer", level_name)
|
||||||
|
|
||||||
|
with Tracer() as section_tracer:
|
||||||
|
# 2) Enter game mode
|
||||||
|
helper.multiplayer_enter_game_mode(TestSuccessFailTuples.enter_game_mode, player_prefab_path.lower())
|
||||||
|
|
||||||
|
# 3) Make sure the network player was spawned
|
||||||
|
player_id = general.find_game_entity(player_prefab_name)
|
||||||
|
Report.critical_result(TestSuccessFailTuples.find_network_player, player_id.IsValid())
|
||||||
|
|
||||||
|
# 4) Check the editor logs for expected and unexpected log output
|
||||||
|
PLAYERID_RPC_WAIT_TIME_SECONDS = 1.0 # The player id is sent from the server as soon as the player script is spawned. 1 second should be more than enough time to send/receive that RPC.
|
||||||
|
helper.succeed_if_log_line_found('EditorServer', 'Script: AutoComponent_RPC: Sending client PlayerNumber 1', section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
|
||||||
|
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC: I'm Player #1", section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
|
||||||
|
|
||||||
|
# Uncomment once editor game-play mode supports level entities with net-binding
|
||||||
|
#PLAYFX_RPC_WAIT_TIME_SECONDS = 1.1 # The server will send an RPC to play an fx on the client every second.
|
||||||
|
#helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity Activated on entity: NetLevelEntity", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
|
||||||
|
#helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity: Authority sending RPC to play some fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
|
||||||
|
#helper.succeed_if_log_line_found('Script', "AutoComponent_RPC_NetLevelEntity: I'm a client playing some superficial fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
|
||||||
|
|
||||||
|
|
||||||
|
# Exit game mode
|
||||||
|
helper.exit_game_mode(TestSuccessFailTuples.exit_game_mode)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
from editor_python_test_tools.utils import Report
|
||||||
|
Report.start_test(Multiplayer_AutoComponent_RPC)
|
||||||
@ -0,0 +1,76 @@
|
|||||||
|
"""
|
||||||
|
Copyright (c) Contributors to the Open 3D Engine Project.
|
||||||
|
For complete copyright and license terms please see the LICENSE at the root of this distribution.
|
||||||
|
|
||||||
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
# Test Case Title : Check that level entities with network bindings are properly replicated.
|
||||||
|
# Note: This test should be ran on a fresh editor run; some bugs with spawnables occur only on the first editor play-mode.
|
||||||
|
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
class TestSuccessFailTuples():
|
||||||
|
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
|
||||||
|
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
|
||||||
|
find_network_player = ("Found network player", "Couldn't find network player")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
def Multiplayer_SimpleNetworkLevelEntity():
|
||||||
|
r"""
|
||||||
|
Summary:
|
||||||
|
Test to make sure that network entities in a level function and are replicated to clients as expected
|
||||||
|
|
||||||
|
Level Description:
|
||||||
|
- Static
|
||||||
|
1. NetLevelEntity. This is a networked entity which has a script attached which prints logs to ensure it's replicated.
|
||||||
|
|
||||||
|
|
||||||
|
Expected Outcome:
|
||||||
|
We should see logs stating that the net-sync'd level entity exists on both server and client.
|
||||||
|
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
import azlmbr.legacy.general as general
|
||||||
|
from editor_python_test_tools.utils import Report
|
||||||
|
from editor_python_test_tools.utils import Tracer
|
||||||
|
|
||||||
|
from editor_python_test_tools.utils import TestHelper as helper
|
||||||
|
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
|
||||||
|
|
||||||
|
level_name = "SimpleNetworkLevelEntity"
|
||||||
|
player_prefab_name = "Player"
|
||||||
|
player_prefab_path = f"levels/multiplayer/{level_name}/{player_prefab_name}.network.spawnable"
|
||||||
|
|
||||||
|
helper.init_idle()
|
||||||
|
|
||||||
|
# 1) Open Level
|
||||||
|
helper.open_level("Multiplayer", level_name)
|
||||||
|
|
||||||
|
with Tracer() as section_tracer:
|
||||||
|
# 2) Enter game mode
|
||||||
|
helper.multiplayer_enter_game_mode(TestSuccessFailTuples.enter_game_mode, player_prefab_path.lower())
|
||||||
|
|
||||||
|
# 3) Make sure the network player was spawned
|
||||||
|
player_id = general.find_game_entity(player_prefab_name)
|
||||||
|
Report.critical_result(TestSuccessFailTuples.find_network_player, player_id.IsValid())
|
||||||
|
|
||||||
|
# 4) Check the editor logs for network spawnable errors
|
||||||
|
ATTEMPTING_INVALID_NETSPAWN_WAIT_TIME_SECONDS = 0.0 # The editor will try to net-spawn its networked level entity before it's even a client. Make sure this didn't happen.
|
||||||
|
helper.fail_if_log_line_found('NetworkEntityManager', "RequestNetSpawnableInstantiation: Requested spawnable Root.network.spawnable doesn't exist in the NetworkSpawnableLibrary. Please make sure it is a network spawnable", section_tracer.errors, ATTEMPTING_INVALID_NETSPAWN_WAIT_TIME_SECONDS)
|
||||||
|
|
||||||
|
# 5) Ensure the script graph attached to the level entity is running on the server
|
||||||
|
SCRIPTGRAPH_ENABLED_WAIT_TIME_SECONDS = 0.25
|
||||||
|
helper.succeed_if_log_line_found('EditorServer', "Script: SimpleNetworkLevelEntity: On Graph Start", section_tracer.prints, SCRIPTGRAPH_ENABLED_WAIT_TIME_SECONDS)
|
||||||
|
|
||||||
|
|
||||||
|
# Exit game mode
|
||||||
|
helper.exit_game_mode(TestSuccessFailTuples.exit_game_mode)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
from editor_python_test_tools.utils import Report
|
||||||
|
Report.start_test(Multiplayer_SimpleNetworkLevelEntity)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue