[AWSI Automation] Export more AWS resource names via CloudFormation stack outputs for the automation tests (#4161)

* Read resource names via CloudFormation stack outputs and trim resource names when they are too long

Signed-off-by: Junbo Liang <junbo@amazon.com>
Junbo Liang authored 4 years ago, committed by GitHub
parent 6c4c64959a
commit ac5cc04b59
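In short, the CDK stacks now export the generated resource names as CloudFormation stack outputs, and the automation tests read those outputs back instead of reconstructing names from the stack name by hand. A minimal sketch of the read-back half of that pattern, assuming boto3 and a hypothetical deployed stack called 'MyProject-AWSMetrics' (not a name from this commit):

import boto3

def read_stack_outputs(stack_name: str) -> dict:
    # Return a deployed CloudFormation stack's outputs as an OutputKey -> OutputValue dict.
    client = boto3.client('cloudformation')
    stack = client.describe_stacks(StackName=stack_name)['Stacks'][0]
    return {output['OutputKey']: output['OutputValue']
            for output in stack.get('Outputs', [])}

outputs = read_stack_outputs('MyProject-AWSMetrics')
print(outputs.get('AthenaWorkGroupName'))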

@@ -31,7 +31,7 @@ def setup(launcher: pytest.fixture,
     Set up the resource mapping configuration and start the log monitor.
     :param launcher: Client launcher for running the test level.
    :param asset_processor: asset_processor fixture.
-    :return log monitor object, metrics file path and the metrics stack name.
+    :return log monitor object.
     """
     asset_processor.start()
     asset_processor.wait_for_idle()
@@ -73,12 +73,11 @@ def monitor_metrics_submission(log_monitor: pytest.fixture) -> None:
                 f'unexpected_lines values: {unexpected_lines}')

-def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture, stack_name: str) -> None:
+def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
     """
     Verify that the metrics events are delivered to the S3 bucket and can be queried.
     :param aws_metrics_utils: aws_metrics_utils fixture.
     :param resource_mappings: resource_mappings fixture.
-    :param stack_name: name of the CloudFormation stack.
     """
     aws_metrics_utils.verify_s3_delivery(
         resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsBucketName')
@@ -89,23 +88,24 @@ def query_metrics_from_s3(aws_metrics_utils: pytest.fixture, resource_mappings: pytest.fixture) -> None:
         resource_mappings.get_resource_name_id('AWSMetrics.EventsCrawlerName'))
     # Remove the events_json table if exists so that the sample query can create a table with the same name.
-    aws_metrics_utils.delete_table(f'{stack_name}-eventsdatabase', 'events_json')
-    aws_metrics_utils.run_named_queries(f'{stack_name}-AthenaWorkGroup')
+    aws_metrics_utils.delete_table(resource_mappings.get_resource_name_id('AWSMetrics.EventDatabaseName'), 'events_json')
+    aws_metrics_utils.run_named_queries(resource_mappings.get_resource_name_id('AWSMetrics.AthenaWorkGroupName'))
     logger.info('Query metrics from S3 successfully.')

-def verify_operational_metrics(aws_metrics_utils: pytest.fixture, stack_name: str, start_time: datetime) -> None:
+def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
+                               resource_mappings: pytest.fixture, start_time: datetime) -> None:
     """
     Verify that operational health metrics are delivered to CloudWatch.
-    aws_metrics_utils: aws_metrics_utils fixture.
-    stack_name: name of the CloudFormation stack.
-    start_time: Time when the game launcher starts.
+    :param aws_metrics_utils: aws_metrics_utils fixture.
+    :param resource_mappings: resource_mappings fixture.
+    :param start_time: Time when the game launcher starts.
     """
     aws_metrics_utils.verify_cloud_watch_delivery(
         'AWS/Lambda',
         'Invocations',
         [{'Name': 'FunctionName',
-          'Value': f'{stack_name}-AnalyticsProcessingLambda'}],
+          'Value': resource_mappings.get_resource_name_id('AWSMetrics.AnalyticsProcessingLambdaName')}],
         start_time)
     logger.info('AnalyticsProcessingLambda metrics are sent to CloudWatch.')
@@ -113,7 +113,7 @@ def verify_operational_metrics(aws_metrics_utils: pytest.fixture,
         'AWS/Lambda',
         'Invocations',
         [{'Name': 'FunctionName',
-          'Value': f'{stack_name}-EventsProcessingLambda'}],
+          'Value': resource_mappings.get_resource_name_id('AWSMetrics.EventProcessingLambdaName')}],
         start_time)
     logger.info('EventsProcessingLambda metrics are sent to CloudWatch.')
@@ -157,7 +157,6 @@ class TestAWSMetricsWindows(object):
                           workspace: pytest.fixture,
                           aws_utils: pytest.fixture,
                           resource_mappings: pytest.fixture,
-                          stacks: typing.List,
                           aws_metrics_utils: pytest.fixture):
         """
         Verify that the metrics events are sent to CloudWatch and S3 for analytics.
@@ -189,10 +188,10 @@ class TestAWSMetricsWindows(object):
         operational_threads = list()
         operational_threads.append(
             AWSMetricsThread(target=query_metrics_from_s3,
-                             args=(aws_metrics_utils, resource_mappings, stacks[0])))
+                             args=(aws_metrics_utils, resource_mappings)))
         operational_threads.append(
             AWSMetricsThread(target=verify_operational_metrics,
-                             args=(aws_metrics_utils, stacks[0], start_time)))
+                             args=(aws_metrics_utils, resource_mappings, start_time)))
         operational_threads.append(
             AWSMetricsThread(target=update_kinesis_analytics_application_status,
                              args=(aws_metrics_utils, resource_mappings, False)))

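The tests above resolve every resource through resource_mappings.get_resource_name_id(...), whose implementation is not part of this diff. A hedged sketch of what such a lookup might do, assuming a JSON resource-mapping file with an 'AWSResourceMappings' section keyed the same way as the calls above; the file layout is an assumption, not the fixture's actual code:

import json

def get_resource_name_id(mapping_file: str, key: str) -> str:
    # Assumed layout: {"AWSResourceMappings": {"<key>": {"Name/ID": "<value>"}}}
    with open(mapping_file) as f:
        mappings = json.load(f)
    return mappings['AWSResourceMappings'][key]['Name/ID']

name = get_resource_name_id('resource_mappings.json', 'AWSMetrics.AnalyticsBucketName')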
@@ -6,10 +6,11 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
 """
 import re

 from aws_cdk import core
+from .resource_name_sanitizer import sanitize_resource_name


 def format_aws_resource_name(feature_name: str, project_name: str, env: core.Environment, resource_type: str):
-    return f'{project_name}-{feature_name}-{resource_type}-{env.region}'
+    return sanitize_resource_name(f'{project_name}-{feature_name}-{resource_type}-{env.region}', resource_type)


 def format_aws_resource_id(feature_name: str, project_name: str, env: core.Environment, resource_type: str):
@@ -31,4 +32,5 @@ def format_aws_resource_authenticated_id(feature_name: str, project_name: str, env: core.Environment,
 def format_aws_resource_authenticated_name(feature_name: str, project_name: str, env: core.Environment,
                                            resource_type: str, authenticated: bool):
     authenticated_string = 'Authenticated' if authenticated else 'Unauthenticated'
-    return f'{project_name}{feature_name}{resource_type}{authenticated_string}-{env.region}'
+    return sanitize_resource_name(
+        f'{project_name}{feature_name}{resource_type}{authenticated_string}-{env.region}', resource_type)

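For illustration, calling the sanitized formatter above might look like the following; the account, region, project, and feature names are placeholders:

from aws_cdk import core

env = core.Environment(account='123456789012', region='us-west-2')
name = format_aws_resource_name('AWSMetrics', 'MyProject', env, 'Lambda')
# 'MyProject-AWSMetrics-Lambda-us-west-2'; sanitize_resource_name only rewrites
# the value if it exceeds the length limit registered for the resource type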
@@ -0,0 +1,45 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import hashlib
+
+from aws_cdk import (
+    core,
+    aws_cognito as cognito,
+    aws_iam as iam
+)
+
+MAX_RESOURCE_NAME_LENGTH_MAPPING = {
+    core.Stack.__name__: 128,
+    iam.Role.__name__: 64,
+    iam.ManagedPolicy.__name__: 144,
+    cognito.CfnUserPoolClient.__name__: 128,
+    cognito.CfnUserPool.__name__: 128,
+    cognito.CfnIdentityPool.__name__: 128
+}
+
+
+def sanitize_resource_name(resource_name: str, resource_type: str) -> str:
+    """
+    Truncate the resource name if its length exceeds the limit.
+    This is a best effort at sanitizing resource names based on the AWS documentation, since each AWS
+    service has its own restrictions. Customers can extend this function for validation or sanitization.
+    :param resource_name: Original name of the resource.
+    :param resource_type: Type of the resource.
+    :return: Sanitized resource name that can be deployed with AWS.
+    """
+    result = resource_name
+    if not MAX_RESOURCE_NAME_LENGTH_MAPPING.get(resource_type):
+        return result
+
+    if len(resource_name) > MAX_RESOURCE_NAME_LENGTH_MAPPING[resource_type]:
+        # PYTHONHASHSEED is set to "random" by default in Python 3.3 and up. We cannot use the
+        # built-in hash function here since it would return a different value in each session.
+        digest = "-%x" % (int(hashlib.md5(resource_name.encode('ascii', 'ignore')).hexdigest(), 16) & 0xffffffff)
+        result = resource_name[:MAX_RESOURCE_NAME_LENGTH_MAPPING[resource_type] - len(digest)] + digest
+
+    return result

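A quick behavior check for the new helper: names over the per-type limit are cut down to the limit and end in a deterministic 32-bit md5 suffix, so distinct long names stay distinct across sessions, while unmapped resource types pass through unchanged. The role name below is a made-up example:

long_name = 'MyProject-AWSMetrics-AnalyticsProcessingLambdaRole-' + 'x' * 19  # 70 chars
short = sanitize_resource_name(long_name, 'Role')  # iam.Role limit is 64
assert len(short) == 64
assert short == sanitize_resource_name(long_name, 'Role')  # stable across sessions
assert sanitize_resource_name('any-name', 'Queue') == 'any-name'  # unmapped type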
@@ -53,6 +53,7 @@ class AWSMetricsStack(core.Stack):
         self._batch_processing = BatchProcessing(
             self,
             input_stream_arn=self._data_ingestion.input_stream_arn,
+            application_name=application_name,
             analytics_bucket_arn=self._data_lake_integration.analytics_bucket_arn,
             events_database_name=self._data_lake_integration.events_database_name,
             events_table_name=self._data_lake_integration.events_table_name
@@ -60,6 +61,7 @@ class AWSMetricsStack(core.Stack):
         self._batch_analytics = BatchAnalytics(
             self,
+            application_name=application_name,
             analytics_bucket_name=self._data_lake_integration.analytics_bucket_name,
             events_database_name=self._data_lake_integration.events_database_name,
             events_table_name=self._data_lake_integration.events_table_name

@@ -20,10 +20,12 @@ class BatchAnalytics:
     """
     def __init__(self,
                  stack: core.Construct,
+                 application_name: str,
                  analytics_bucket_name: str,
                  events_database_name: str,
                  events_table_name) -> None:
         self._stack = stack
+        self._application_name = application_name
         self._analytics_bucket_name = analytics_bucket_name
         self._events_database_name = events_database_name
         self._events_table_name = events_table_name
@@ -58,6 +60,12 @@ class BatchAnalytics:
             )
         )
+        core.CfnOutput(
+            self._stack,
+            id='AthenaWorkGroupName',
+            description='Name of the Athena work group that contains sample queries',
+            export_name=f"{self._application_name}:AthenaWorkGroup",
+            value=self._athena_work_group.name)

     def _create_athena_queries(self) -> None:
         """

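Each new core.CfnOutput above also sets export_name, which makes the value visible account-wide rather than only on the owning stack. A sketch of resolving such an export from outside the stack, assuming boto3 and the hypothetical application name 'MyProject-AWSMetrics':

import boto3

def find_export(export_name: str) -> str:
    # Walk the account's CloudFormation exports and return the matching value.
    client = boto3.client('cloudformation')
    for page in client.get_paginator('list_exports').paginate():
        for export in page['Exports']:
            if export['Name'] == export_name:
                return export['Value']
    raise KeyError(export_name)

work_group = find_export('MyProject-AWSMetrics:AthenaWorkGroup')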
@@ -26,11 +26,13 @@ class BatchProcessing:
     """
     def __init__(self,
                  stack: core.Construct,
+                 application_name: str,
                  input_stream_arn: str,
                  analytics_bucket_arn: str,
                  events_database_name: str,
                  events_table_name) -> None:
         self._stack = stack
+        self._application_name = application_name
         self._input_stream_arn = input_stream_arn
         self._analytics_bucket_arn = analytics_bucket_arn
         self._events_database_name = events_database_name
@@ -60,6 +62,12 @@ class BatchProcessing:
                 os.path.join(os.path.dirname(__file__), 'lambdas', 'events_processing_lambda')),
             role=self._events_processing_lambda_role
         )
+        core.CfnOutput(
+            self._stack,
+            id='EventProcessingLambdaName',
+            description='Lambda function for processing metrics events data.',
+            export_name=f"{self._application_name}:EventProcessingLambda",
+            value=self._events_processing_lambda.function_name)

     def _create_events_processing_lambda_role(self, function_name: str) -> None:
         """

@@ -52,7 +52,7 @@ class Dashboard:
                 max_width=aws_metrics_constants.DASHBOARD_MAX_WIDGET_WIDTH)
         )

-        dashboard_output = core.CfnOutput(
+        core.CfnOutput(
             stack,
             id='DashboardName',
             description='CloudWatch dashboard to monitor the operational health and real-time metrics',

@@ -69,14 +69,14 @@ class DataIngestion:
         cfn_rest_api.add_property_deletion_override("BodyS3Location")
         cfn_rest_api.add_property_override("FailOnWarnings", True)

-        api_id_output = core.CfnOutput(
+        core.CfnOutput(
             self._stack,
             id='RESTApiId',
             description='Service API Id for the analytics pipeline',
             export_name=f"{application_name}:RestApiId",
             value=self._rest_api.rest_api_id)

-        stage_output = core.CfnOutput(
+        core.CfnOutput(
             self._stack,
             id='RESTApiStage',
             description='Stage for the REST API deployment',

@@ -67,7 +67,7 @@ class DataLakeIntegration:
         cfn_bucket = self._analytics_bucket.node.find_child('Resource')
         cfn_bucket.apply_removal_policy(core.RemovalPolicy.DESTROY)

-        analytics_bucket_output = core.CfnOutput(
+        core.CfnOutput(
             self._stack,
             id='AnalyticsBucketName',
             description='Name of the S3 bucket for storing metrics event data',
@@ -89,6 +89,12 @@ class DataLakeIntegration:
                 name=f'{self._stack.stack_name}-EventsDatabase'.lower()
             )
         )
+        core.CfnOutput(
+            self._stack,
+            id='EventDatabaseName',
+            description='Glue database for metrics events.',
+            export_name=f"{self._application_name}:EventsDatabase",
+            value=self._events_database.ref)

     def _create_events_table(self) -> None:
         """
@@ -199,7 +205,7 @@ class DataLakeIntegration:
             configuration=aws_metrics_constants.CRAWLER_CONFIGURATION
         )

-        events_crawler_output = core.CfnOutput(
+        core.CfnOutput(
             self._stack,
             id='EventsCrawlerName',
             description='Glue Crawler to populate the AWS Glue Data Catalog with metrics events tables',

@@ -113,7 +113,7 @@ class RealTimeDataProcessing:
             ),
         )

-        analytics_application_output = core.CfnOutput(
+        core.CfnOutput(
             self._stack,
             id='AnalyticsApplicationName',
             description='Kinesis Data Analytics application to process the real-time metrics data',
@@ -199,6 +199,12 @@ class RealTimeDataProcessing:
                 os.path.join(os.path.dirname(__file__), 'lambdas', 'analytics_processing_lambda')),
             role=self._analytics_processing_lambda_role
         )
+        core.CfnOutput(
+            self._stack,
+            id='AnalyticsProcessingLambdaName',
+            description='Lambda function for sending processed data to CloudWatch.',
+            export_name=f"{self._application_name}:AnalyticsProcessingLambda",
+            value=self._analytics_processing_lambda.function_name)

     def _create_analytics_processing_lambda_role(self, function_name: str) -> iam.Role:
         """

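The exported Lambda names ultimately feed verify_cloud_watch_delivery in the tests above. That helper's implementation is not shown in this diff; a minimal sketch of the kind of check it implies, assuming boto3:

import boto3
from datetime import datetime

def lambda_was_invoked(function_name: str, start_time: datetime) -> bool:
    # Check whether the function reported any Invocations datapoints since start_time.
    cloudwatch = boto3.client('cloudwatch')
    response = cloudwatch.get_metric_statistics(
        Namespace='AWS/Lambda',
        MetricName='Invocations',
        Dimensions=[{'Name': 'FunctionName', 'Value': function_name}],
        StartTime=start_time,
        EndTime=datetime.utcnow(),
        Period=60,
        Statistics=['Sum'])
    return any(point['Sum'] > 0 for point in response['Datapoints'])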