[AWS][Metrics] Resource names are too long leading to CDK deployment failures (#2291)

Sanitize resource names to prevent CDK deployment failures caused by AWS resource name length limits.
Junbo Liang authored 4 years ago · committed by GitHub
parent 5b8b51924a
commit c8b55af401

@@ -13,6 +13,7 @@ from aws_cdk import (
from .aws_metrics_stack import AWSMetricsStack
from aws_metrics.policy_statements_builder.user_policy_statements_builder import UserPolicyStatementsBuilder
from aws_metrics.policy_statements_builder.admin_policy_statements_builder import AdminPolicyStatementsBuilder
from .aws_utils import resource_name_sanitizer
class AuthPolicy:
@@ -58,12 +59,13 @@ class AuthPolicy:
policy = iam.ManagedPolicy(
self._stack,
policy_id,
managed_policy_name=f'{self._stack.stack_name}-{role_name}Policy',
managed_policy_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-{role_name}Policy', 'iam_managed_policy'),
statements=policy_statements)
policy_output = core.CfnOutput(
self._stack,
id=f'{policy_id}Output',
description=f'{role_name} policy arn to call service',
export_name=f"{self._application_name}:{policy_id}",
export_name=f'{self._application_name}:{policy_id}',
value=policy.managed_policy_arn)

@@ -8,6 +8,7 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
from aws_cdk import core
from .aws_metrics_stack import AWSMetricsStack
from .auth import AuthPolicy
from .aws_utils import resource_name_sanitizer
class AWSMetrics(core.Construct):
@@ -23,7 +24,8 @@ class AWSMetrics(core.Construct):
env: core.Environment) -> None:
super().__init__(scope, id_)
# Set up the stack name(s) to be unique within the account
stack_name = f'{project_name}-{feature_name}-{env.region}'
stack_name = resource_name_sanitizer.sanitize_resource_name(
f'{project_name}-{feature_name}-{env.region}', 'cloudformation_stack')
application_name = f'{project_name}-{feature_name}'
# Check context variables to get enabled optional features

@@ -0,0 +1,6 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""

@@ -0,0 +1,45 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import hashlib
MAX_RESOURCE_NAME_LENGTH_MAPPING = {
'athena_work_group': 128,
'athena_named_query': 128,
'cloudformation_stack': 128,
'cloudwatch_dashboard': 255,
'cloudwatch_log_group': 512,
'firehose_delivery_stream': 64,
'iam_managed_policy': 144,
'iam_role': 64,
'kinesis_application': 128,
'kinesis_stream': 128,
'lambda_function': 64,
's3_bucket': 63
}
def sanitize_resource_name(resource_name: str, resource_type: str) -> str:
"""
Truncate the resource name if its length exceeds the limit.
This is a best-effort sanitization based on the AWS documentation, since each AWS service
has its own naming restrictions. Customers can extend this function for additional validation or sanitization.
:param resource_name: Original name of the resource.
:param resource_type: Type of the resource.
:return: Sanitized resource name that can be deployed with AWS.
"""
result = resource_name
if not MAX_RESOURCE_NAME_LENGTH_MAPPING.get(resource_type):
return result
if len(resource_name) > MAX_RESOURCE_NAME_LENGTH_MAPPING[resource_type]:
# PYTHONHASHSEED is set to "random" by default in Python 3.3 and up, so the built-in
# hash() function cannot be used here: it would return a different value in each session.
digest = "-%x" % (int(hashlib.md5(resource_name.encode('ascii', 'ignore')).hexdigest(), 16) & 0xffffffff)
result = resource_name[:MAX_RESOURCE_NAME_LENGTH_MAPPING[resource_type] - len(digest)] + digest
return result
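
A minimal usage sketch (not part of this commit; the import path and all names below are assumed for illustration) showing both behaviors of the new helper: names within the limit pass through unchanged, while longer names are truncated and given a stable md5-derived suffix so they still fit the AWS limit.

from aws_metrics.aws_utils import resource_name_sanitizer

# Hypothetical IAM role name that exceeds the 64-character limit in the mapping above.
long_name = 'O3DE-MyVeryLongProjectName-AWSMetrics-us-west-2-EventsProcessingLambdaRole'
sanitized = resource_name_sanitizer.sanitize_resource_name(long_name, 'iam_role')
print(sanitized)  # truncated prefix + '-' + up to 8 hex digits; exactly 64 characters
assert len(sanitized) <= 64
# Names within the limit, and resource types missing from the mapping, pass through unchanged.
assert resource_name_sanitizer.sanitize_resource_name('ShortRoleName', 'iam_role') == 'ShortRoleName'
assert resource_name_sanitizer.sanitize_resource_name(long_name, 'unknown_type') == long_name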

@@ -11,6 +11,7 @@ from aws_cdk import (
)
from . import aws_metrics_constants
from .aws_utils import resource_name_sanitizer
class BatchAnalytics:
@@ -37,7 +38,8 @@ class BatchAnalytics:
self._athena_work_group = athena.CfnWorkGroup(
self._stack,
id='AthenaWorkGroup',
name=f'{self._stack.stack_name}-AthenaWorkGroup',
name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AthenaWorkGroup', 'athena_work_group'),
recursive_delete_option=True,
state='ENABLED',
work_group_configuration=athena.CfnWorkGroup.WorkGroupConfigurationProperty(
@@ -65,7 +67,8 @@ class BatchAnalytics:
athena.CfnNamedQuery(
self._stack,
id='NamedQuery-CreatePartitionedEventsJson',
name=f'{self._stack.stack_name}-NamedQuery-CreatePartitionedEventsJson',
name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-NamedQuery-CreatePartitionedEventsJson', 'athena_named_query'),
database=self._events_database_name,
query_string="CREATE TABLE events_json "
"WITH (format='JSON',partitioned_by=ARRAY['application_id']) "
@@ -78,7 +81,8 @@ class BatchAnalytics:
athena.CfnNamedQuery(
self._stack,
id='NamedQuery-TotalEventsLastMonth',
name=f'{self._stack.stack_name}-NamedQuery-TotalEventsLastMonth',
name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-NamedQuery-TotalEventsLastMonth', 'athena_named_query'),
database=self._events_database_name,
query_string="WITH detail AS "
"(SELECT date_trunc('month', date(date_parse(CONCAT(year, '-', month, '-', day), '%Y-%m-%d'))) as event_month, * "
@@ -93,7 +97,8 @@ class BatchAnalytics:
athena.CfnNamedQuery(
self._stack,
id='NamedQuery-NewUsersLastMonth',
name=f'{self._stack.stack_name}-NamedQuery-NewUsersLastMonth',
name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-NamedQuery-NewUsersLastMonth', 'athena_named_query'),
database=self._events_database_name,
query_string="WITH detail AS ("
"SELECT date_trunc('month', date(date_parse(CONCAT(year, '-', month, '-', day), '%Y-%m-%d'))) as event_month, * "

@@ -16,6 +16,7 @@ from aws_cdk import (
import os
from . import aws_metrics_constants
from .aws_utils import resource_name_sanitizer
class BatchProcessing:
@@ -42,7 +43,8 @@ class BatchProcessing:
"""
Generate the events processing lambda to filter out invalid metrics events.
"""
events_processing_lambda_name = f'{self._stack.stack_name}-EventsProcessingLambda'
events_processing_lambda_name = resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-EventsProcessingLambda', 'lambda_function')
self._create_events_processing_lambda_role(events_processing_lambda_name)
self._events_processing_lambda = lambda_.Function(
@@ -89,7 +91,8 @@ class BatchProcessing:
self._events_processing_lambda_role = iam.Role(
self._stack,
id='EventsProcessingLambdaRole',
role_name=f'{self._stack.stack_name}-EventsProcessingLambdaRole',
role_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-EventsProcessingLambdaRole', 'iam_role'),
assumed_by=iam.ServicePrincipal(
service='lambda.amazonaws.com'
),
@@ -107,8 +110,10 @@ class BatchProcessing:
self._events_firehose_delivery_stream = kinesisfirehose.CfnDeliveryStream(
self._stack,
id=f'{self._stack.stack_name}-EventsFirehoseDeliveryStream',
id=f'EventsFirehoseDeliveryStream',
delivery_stream_type='KinesisStreamAsSource',
delivery_stream_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-EventsFirehoseDeliveryStream', 'firehose_delivery_stream'),
kinesis_stream_source_configuration=kinesisfirehose.CfnDeliveryStream.KinesisStreamSourceConfigurationProperty(
kinesis_stream_arn=self._input_stream_arn,
role_arn=self._firehose_delivery_stream_role.role_arn
@@ -192,7 +197,8 @@ class BatchProcessing:
self._firehose_delivery_stream_log_group = logs.LogGroup(
self._stack,
id='FirehoseLogGroup',
log_group_name=f'{self._stack.stack_name}-FirehoseLogGroup',
log_group_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-FirehoseLogGroup', 'cloudwatch_log_group'),
removal_policy=core.RemovalPolicy.DESTROY,
retention=logs.RetentionDays.ONE_MONTH
)
@@ -299,7 +305,8 @@ class BatchProcessing:
self._firehose_delivery_stream_role = iam.Role(
self._stack,
id='GameEventsFirehoseRole',
role_name=f'{self._stack.stack_name}-GameEventsFirehoseRole',
role_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-GameEventsFirehoseRole', 'iam_role'),
assumed_by=iam.ServicePrincipal(
service='firehose.amazonaws.com'
),

@@ -12,6 +12,7 @@ from aws_cdk import (
from . import aws_metrics_constants
from .layout_widget_construct import LayoutWidget
from .aws_utils import resource_name_sanitizer
class Dashboard:
@@ -28,7 +29,8 @@ class Dashboard:
events_processing_lambda_name: str = '',
) -> None:
self._dashboard_name = f"{stack.stack_name}-Dashboard"
self._dashboard_name = resource_name_sanitizer.sanitize_resource_name(
f'{stack.stack_name}-Dashboard', 'cloudwatch_dashboard')
self._dashboard = cloudwatch.Dashboard(
stack,
id="DashBoard",

@@ -12,10 +12,11 @@ from aws_cdk import (
aws_kinesis as kinesis
)
from . import aws_metrics_constants
import json
from . import aws_metrics_constants
from .aws_utils import resource_name_sanitizer
class DataIngestion:
"""
@@ -29,7 +30,8 @@ class DataIngestion:
self._input_stream = kinesis.Stream(
self._stack,
id='InputStream',
stream_name=f'{self._stack.stack_name}-InputStream',
stream_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-InputStream', 'kinesis_stream'),
shard_count=1
)

@@ -13,6 +13,7 @@ from aws_cdk import (
)
from . import aws_metrics_constants
from .aws_utils import resource_name_sanitizer
class DataLakeIntegration:
@@ -49,7 +50,9 @@ class DataLakeIntegration:
# a specific name here, only one customer can deploy the bucket successfully.
self._analytics_bucket = s3.Bucket(
self._stack,
id=f'{self._stack.stack_name}-AnalyticsBucket'.lower(),
id=f'AnalyticsBucket'.lower(),
bucket_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AnalyticsBucket'.lower(), 's3_bucket'),
encryption=s3.BucketEncryption.S3_MANAGED,
block_public_access=s3.BlockPublicAccess(
block_public_acls=True,
@@ -297,7 +300,8 @@ class DataLakeIntegration:
self._events_crawler_role = iam.Role(
self._stack,
id='EventsCrawlerRole',
role_name=f'{self._stack.stack_name}-EventsCrawlerRole',
role_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-EventsCrawlerRole', 'iam_role'),
assumed_by=iam.ServicePrincipal(
service='glue.amazonaws.com'
),

@@ -16,6 +16,7 @@ from aws_cdk import (
import os
from . import aws_metrics_constants
from .aws_utils import resource_name_sanitizer
class RealTimeDataProcessing:
@@ -44,7 +45,8 @@ class RealTimeDataProcessing:
self._analytics_application = analytics.CfnApplication(
self._stack,
'AnalyticsApplication',
application_name=f'{self._stack.stack_name}-AnalyticsApplication',
application_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AnalyticsApplication', 'kinesis_application'),
inputs=[
analytics.CfnApplication.InputProperty(
input_schema=analytics.CfnApplication.InputSchemaProperty(
@@ -162,7 +164,8 @@ class RealTimeDataProcessing:
kinesis_analytics_role = iam.Role(
self._stack,
id='AnalyticsApplicationRole',
role_name=f'{self._stack.stack_name}-AnalyticsApplicationRole',
role_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AnalyticsApplicationRole', 'iam_role'),
assumed_by=iam.ServicePrincipal(
service='kinesisanalytics.amazonaws.com'
),
@@ -178,7 +181,8 @@ class RealTimeDataProcessing:
"""
Generate the analytics processing lambda to send processed data to CloudWatch for visualization.
"""
analytics_processing_function_name = f'{self._stack.stack_name}-AnalyticsProcessingLambdaName'
analytics_processing_function_name = resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AnalyticsProcessingLambdaName', 'lambda_function')
self._analytics_processing_lambda_role = self._create_analytics_processing_lambda_role(
analytics_processing_function_name
)
@@ -246,7 +250,8 @@ class RealTimeDataProcessing:
analytics_processing_lambda_role = iam.Role(
self._stack,
id='AnalyticsLambdaRole',
role_name=f'{self._stack.stack_name}-AnalyticsLambdaRole',
role_name=resource_name_sanitizer.sanitize_resource_name(
f'{self._stack.stack_name}-AnalyticsLambdaRole', 'iam_role'),
assumed_by=iam.ServicePrincipal(
service='lambda.amazonaws.com'
),
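
Why the helper hashes with md5 rather than the built-in hash(): as the comment in the new module notes, hash randomization (PYTHONHASHSEED) makes hash() return a different value in each interpreter session, which would yield a different resource name on every deployment. A quick sketch of the stable suffix computation, using a hypothetical name and the same logic as the module:

import hashlib

name = 'SomeProject-AWSMetrics-us-east-1-AnalyticsApplicationRole'
suffix = '-%x' % (int(hashlib.md5(name.encode('ascii', 'ignore')).hexdigest(), 16) & 0xffffffff)
print(suffix)  # identical on every run; hash(name) would vary between sessions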
