Removed AutomatedLauncherTesting Gem and LauncherTestTools.

Signed-off-by: scspaldi <scspaldi@amazon.com>
monroegm-disable-blank-issue-2
scspaldi 4 years ago
parent f278bbcd65
commit b2a4cf711e

@ -1,8 +0,0 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#

# Gem root CMake list: all build targets for this gem are defined under Code/.
add_subdirectory(Code)
@ -1,44 +0,0 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#

# Static library carrying the gem's implementation; consumed by the module
# target below. Public deps are re-exported to anything linking the static lib.
ly_add_target(
    NAME AutomatedLauncherTesting.Static STATIC
    NAMESPACE Gem
    FILES_CMAKE
        automatedlaunchertesting_files.cmake
    INCLUDE_DIRECTORIES
        PRIVATE
            Source
        PUBLIC
            Include
    BUILD_DEPENDENCIES
        PUBLIC
            AZ::AzCore
            Legacy::CryCommon
            Gem::LmbrCentral
)

# Gem module target; built as a shared module or folded into monolithic builds
# depending on the platform trait.
ly_add_target(
    NAME AutomatedLauncherTesting ${PAL_TRAIT_MONOLITHIC_DRIVEN_MODULE_TYPE}
    NAMESPACE Gem
    FILES_CMAKE
        automatedlaunchertesting_shared_files.cmake
    INCLUDE_DIRECTORIES
        PRIVATE
            Source
        PUBLIC
            Include
    BUILD_DEPENDENCIES
        PRIVATE
            Gem::AutomatedLauncherTesting.Static
    RUNTIME_DEPENDENCIES
        Gem::LmbrCentral
)

# servers and clients use the above module.
ly_create_alias(NAME AutomatedLauncherTesting.Servers NAMESPACE Gem TARGETS Gem::AutomatedLauncherTesting)
ly_create_alias(NAME AutomatedLauncherTesting.Clients NAMESPACE Gem TARGETS Gem::AutomatedLauncherTesting)

@ -1,27 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/EBus/EBus.h>
namespace AutomatedLauncherTesting
{
    // Request interface for the automated launcher testing gem. Test logic
    // broadcasts CompleteTest() on this bus to report its final result.
    class AutomatedLauncherTestingRequests
        : public AZ::EBusTraits
    {
    public:
        //////////////////////////////////////////////////////////////////////////
        // EBusTraits overrides
        // Single handler at a single address: exactly one component (the gem's
        // system component) services these requests.
        static const AZ::EBusHandlerPolicy HandlerPolicy = AZ::EBusHandlerPolicy::Single;
        static const AZ::EBusAddressPolicy AddressPolicy = AZ::EBusAddressPolicy::Single;
        //////////////////////////////////////////////////////////////////////////

        // Call this method from your test logic when a test is complete.
        // |success| selects pass/fail; |message| is an optional extra log line.
        virtual void CompleteTest(bool success, const AZStd::string& message) = 0;
    };
    using AutomatedLauncherTestingRequestBus = AZ::EBus<AutomatedLauncherTestingRequests>;
} // namespace AutomatedLauncherTesting

@ -1,47 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include <AzCore/Memory/SystemAllocator.h>
#include <AzCore/Module/Module.h>
#include <AutomatedLauncherTestingSystemComponent.h>
namespace AutomatedLauncherTesting
{
    // Gem module: registers the gem's component descriptors and declares the
    // system components that must exist on the system entity.
    class AutomatedLauncherTestingModule
        : public AZ::Module
    {
    public:
        AZ_RTTI(AutomatedLauncherTestingModule, "{3FC3E44A-0AC0-47C5-BD02-ADB2BA4338CA}", AZ::Module);
        AZ_CLASS_ALLOCATOR(AutomatedLauncherTestingModule, AZ::SystemAllocator, 0);

        AutomatedLauncherTestingModule()
            : AZ::Module()
        {
            // Push results of [MyComponent]::CreateDescriptor() into m_descriptors here.
            m_descriptors.insert(m_descriptors.end(), {
                AutomatedLauncherTestingSystemComponent::CreateDescriptor(),
            });
        }

        /**
         * Add required SystemComponents to the SystemEntity.
         */
        AZ::ComponentTypeList GetRequiredSystemComponents() const override
        {
            return AZ::ComponentTypeList{
                azrtti_typeid<AutomatedLauncherTestingSystemComponent>(),
            };
        }
    };
}

// DO NOT MODIFY THIS LINE UNLESS YOU RENAME THE GEM
// The first parameter should be GemName_GemIdLower
// The second should be the fully qualified name of the class above
AZ_DECLARE_MODULE_CLASS(Gem_AutomatedLauncherTesting, AutomatedLauncherTesting::AutomatedLauncherTestingModule)

@ -1,230 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include <AutomatedLauncherTestingSystemComponent.h>
#include <AzCore/Serialization/SerializeContext.h>
#include <AzCore/Serialization/EditContext.h>
#include <AzCore/Serialization/EditContextConstants.inl>
#include <AzCore/RTTI/BehaviorContext.h>
#include <AzFramework/StringFunc/StringFunc.h>
#include <CryCommon/platform.h>
#include <CryCommon/ICmdLine.h>
#include <CryCommon/IConsole.h>
#include <CryCommon/ISystem.h>
#include "SpawnDynamicSlice.h"
#include <AzCore/Component/Entity.h>
namespace AutomatedLauncherTesting
{
    // Reflects the component to the serialize, edit, and behavior contexts.
    // Serialization is version 0 with no serialized fields; the behavior
    // context exposes CompleteTest to script under the "Testing" category.
    void AutomatedLauncherTestingSystemComponent::Reflect(AZ::ReflectContext* context)
    {
        if (AZ::SerializeContext* serialize = azrtti_cast<AZ::SerializeContext*>(context))
        {
            serialize->Class<AutomatedLauncherTestingSystemComponent, AZ::Component>()
                ->Version(0);

            if (AZ::EditContext* ec = serialize->GetEditContext())
            {
                ec->Class<AutomatedLauncherTestingSystemComponent>("AutomatedLauncherTesting", "[Description of functionality provided by this System Component]")
                    ->ClassElement(AZ::Edit::ClassElements::EditorData, "")
                    // Shown under the "System" add-component menu category.
                    ->Attribute(AZ::Edit::Attributes::AppearsInAddComponentMenu, AZ_CRC("System"))
                    ->Attribute(AZ::Edit::Attributes::AutoExpand, true)
                    ;
            }
        }

        if (AZ::BehaviorContext* behaviorContext = azrtti_cast<AZ::BehaviorContext*>(context))
        {
            behaviorContext->EBus<AutomatedLauncherTestingRequestBus>("AutomatedLauncherTestingRequestBus")
                ->Attribute(AZ::Script::Attributes::Category, "Testing")
                ->Event("CompleteTest", &AutomatedLauncherTestingRequestBus::Events::CompleteTest)
                ;
        }
    }
    // Advertises the service this component provides.
    void AutomatedLauncherTestingSystemComponent::GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided)
    {
        provided.push_back(AZ_CRC("AutomatedLauncherTestingService"));
    }

    // Incompatible with its own service: at most one instance may be active.
    void AutomatedLauncherTestingSystemComponent::GetIncompatibleServices(AZ::ComponentDescriptor::DependencyArrayType& incompatible)
    {
        incompatible.push_back(AZ_CRC("AutomatedLauncherTestingService"));
    }

    // No required services.
    void AutomatedLauncherTestingSystemComponent::GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType& required)
    {
        AZ_UNUSED(required);
    }

    // No dependent services.
    void AutomatedLauncherTestingSystemComponent::GetDependentServices(AZ::ComponentDescriptor::DependencyArrayType& dependent)
    {
        AZ_UNUSED(dependent);
    }
    // Nothing to initialize before activation.
    void AutomatedLauncherTestingSystemComponent::Init()
    {
    }

    // Connects the three buses this component services; Deactivate()
    // disconnects them in the reverse order.
    void AutomatedLauncherTestingSystemComponent::Activate()
    {
        AutomatedLauncherTestingRequestBus::Handler::BusConnect();
        CrySystemEventBus::Handler::BusConnect();
        AZ::TickBus::Handler::BusConnect();
    }

    void AutomatedLauncherTestingSystemComponent::Deactivate()
    {
        AZ::TickBus::Handler::BusDisconnect();
        CrySystemEventBus::Handler::BusDisconnect();
        AutomatedLauncherTestingRequestBus::Handler::BusDisconnect();
    }
void AutomatedLauncherTestingSystemComponent::CompleteTest(bool success, const AZStd::string& message)
{
AZ_Assert(
m_phase == Phase::RunningTest,
"Expected current phase to be RunningTest (%d), got %d, will skip printing CompleteTest message.",
Phase::RunningTest, m_phase);
if (m_phase == Phase::RunningTest)
{
if (!message.empty())
{
LogAlways("AutomatedLauncher: %s", message.c_str());
}
// Make sure this is always printed, in case log severity is turned down.
LogAlways("AutomatedLauncher: %s", success ? "AUTO_LAUNCHER_TEST_COMPLETE" : "AUTO_LAUNCHER_TEST_FAIL");
m_phase = Phase::Complete;
}
}
    // Caches the engine pointer and, in non-release builds, parses the
    // launcher-test command line arguments into the operation queue.
    void AutomatedLauncherTestingSystemComponent::OnCrySystemInitialized(ISystem& system, [[maybe_unused]] const SSystemInitParams& systemInitParams)
    {
        m_system = &system;

        // Only allow any testing to actually happen in non-release builds.
#if !defined(_RELEASE)
        ICmdLine* cmdLine = m_system->GetICmdLine();
        if (cmdLine)
        {
            AZ_Printf("AutomatedLauncher", "Checking for automated launcher testing command line arguments.");

            // ltest_map=<name>: queue a map load. The special value "default"
            // instead lets the default menu load and watches for the next
            // level load (see OnTick).
            const ICmdLineArg* mapArg = cmdLine->FindArg(eCLAT_Pre, "ltest_map");
            if (mapArg)
            {
                AZStd::string map = mapArg->GetValue();
                AZ_Printf("AutomatedLauncher", "Found ltest_map arg %s.", map.c_str());
                if(map.compare("default") != 0)
                {
                    AZStd::lock_guard<MutexType> lock(m_testOperationsMutex);
                    m_testOperations.push_back(TestOperation(TestOperationType::LoadMap, map.c_str()));
                }
                else
                {
                    // Allow the default menu to load, watch for the next level to load
                    m_phase = Phase::LoadingMap;
                    m_nextLevelLoad = NextLevelLoad::WatchForNextLevelLoad;
                }
            }

            // ltest_slice=<a,b,...>: comma-separated dynamic slice paths. The
            // first is queued immediately; the remainder stay in m_slices and
            // are queued after subsequent level loads (see OnTick).
            const ICmdLineArg* sliceArg = cmdLine->FindArg(eCLAT_Pre, "ltest_slice");
            if (sliceArg)
            {
                AZStd::string slice = sliceArg->GetValue();
                AZ_Printf("AutomatedLauncher", "Found ltest_slice arg %s.", slice.c_str());
                AzFramework::StringFunc::Tokenize(slice.c_str(), m_slices, ",");
                if (!m_slices.empty())
                {
                    AZStd::lock_guard<MutexType> lock(m_testOperationsMutex);
                    m_testOperations.push_back(TestOperation(TestOperationType::SpawnDynamicSlice, m_slices[0].c_str()));
                    m_slices.erase(m_slices.begin());
                }
            }
        }
#endif
    }
    // Drops the cached engine pointer; it must not be used after shutdown.
    void AutomatedLauncherTestingSystemComponent::OnCrySystemShutdown([[maybe_unused]] ISystem& system)
    {
        m_system = nullptr;
    }
    // Per-frame driver of the test state machine: finishes the in-flight map
    // load, starts the next queued operation, or watches for follow-up level
    // loads to trigger the remaining slice spawns.
    void AutomatedLauncherTestingSystemComponent::OnTick([[maybe_unused]] float deltaTime, [[maybe_unused]] AZ::ScriptTimePoint time)
    {
        // Check to see if there is a load map operation in flight
        if (m_currentTestOperation.m_type == TestOperationType::LoadMap && !m_currentTestOperation.m_complete)
        {
            // The map load counts as done once the engine reports level-load-complete.
            if (m_system->GetSystemGlobalState() == ESYSTEM_GLOBAL_STATE_LEVEL_LOAD_COMPLETE)
            {
                m_currentTestOperation.m_complete = true;
            }
        }
        // Only start a new operation if there isn't already one in flight right now.
        // NOTE(review): this empty() check happens outside the mutex that
        // guards m_testOperations — confirm producers run on this same thread.
        else if (!m_testOperations.empty())
        {
            // Grab the first operation from the list
            {
                AZStd::lock_guard<MutexType> lock(m_testOperationsMutex);
                m_currentTestOperation = m_testOperations.at(0);
                m_testOperations.erase(m_testOperations.begin());
            }

            // If it was a map command, go ahead and launch it.
            if (m_currentTestOperation.m_type == TestOperationType::LoadMap)
            {
                AZ_Assert(m_phase == Phase::None, "Expected current phase to be None (%d), got %d", Phase::None, m_phase);
                AZStd::string command = AZStd::string::format("map %s", m_currentTestOperation.m_value.c_str());
                m_system->GetIConsole()->ExecuteString(command.c_str());
                m_phase = Phase::LoadingMap;
            }
            // If it was a spawn dynamic slice command, go ahead and spawn it.
            else if (m_currentTestOperation.m_type == TestOperationType::SpawnDynamicSlice)
            {
                AZ_Assert((m_phase == Phase::LoadingMap) || (m_phase == Phase::RunningTest), "Expected current phase to be LoadMap or RunningTest (%d), got %d", Phase::LoadingMap, m_phase);
                AZ::Entity* spawnedEntity = SpawnDynamicSlice::CreateSpawner(m_currentTestOperation.m_value, "Automated Testing Dynamic Slice Spawner");
                if (spawnedEntity)
                {
                    // Keep the spawner entity alive for the remainder of the run.
                    m_spawnedEntities.emplace_back(std::move(spawnedEntity));
                }
                m_phase = Phase::RunningTest;
            }
        }
        // Test is running and the engine is back in its running state: start
        // watching for a follow-up level load.
        else if ((m_nextLevelLoad == NextLevelLoad::None) && (m_phase == Phase::RunningTest) && (m_system->GetSystemGlobalState() == ESYSTEM_GLOBAL_STATE_RUNNING))
        {
            AZ_Printf("AutomatedLauncher", "Running Test - Watching for a next level load");
            m_nextLevelLoad = NextLevelLoad::WatchForNextLevelLoad;
        }
        // A watched level load finished: queue the next slice spawn if any
        // slice paths remain from the command line, else mark loads complete.
        else if ((m_nextLevelLoad == NextLevelLoad::WatchForNextLevelLoad) && (m_system->GetSystemGlobalState() == ESYSTEM_GLOBAL_STATE_LEVEL_LOAD_COMPLETE))
        {
            AZ_Printf("AutomatedLauncher", "Next level loaded, adding operations");
            if (!m_slices.empty())
            {
                m_testOperations.push_back(TestOperation(TestOperationType::SpawnDynamicSlice, m_slices[0].c_str()));
                m_slices.erase(m_slices.begin());
                m_nextLevelLoad = NextLevelLoad::None;
            }
            else
            {
                m_nextLevelLoad = NextLevelLoad::LevelLoadsComplete;
            }
        }
    }
    // printf-style logging routed through the engine log at eAlways severity,
    // so messages are emitted regardless of the configured log verbosity.
    void AutomatedLauncherTestingSystemComponent::LogAlways(const char* format, ...)
    {
        va_list args;
        va_start(args, format);
        m_system->GetILog()->LogV(ILog::eAlways, format, args);
        va_end(args);
    }
}

@ -1,114 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/Component/Component.h>
#include <AutomatedLauncherTesting/AutomatedLauncherTestingBus.h>
#include <CrySystemBus.h>
#include <AzCore/Component/TickBus.h>
#include <AzCore/std/smart_ptr/shared_ptr.h>
#include <AzCore/std/containers/vector.h>
namespace AZ
{
class Entity;
}
namespace AutomatedLauncherTesting
{
    // System component that drives automated launcher test runs: it queues
    // test operations parsed from the command line, executes them on tick,
    // and receives the final pass/fail result via the request bus.
    class AutomatedLauncherTestingSystemComponent
        : public AZ::Component
        , protected AutomatedLauncherTestingRequestBus::Handler
        , private CrySystemEventBus::Handler
        , private AZ::TickBus::Handler
    {
    private:
        // Overall lifecycle of a test run.
        enum class Phase
        {
            None,
            LoadingMap,
            RunningTest,
            Complete
        };

        // Whether we are watching for a subsequent level load to finish.
        enum class NextLevelLoad
        {
            None,
            WatchForNextLevelLoad,
            LevelLoadsComplete
        };

        enum class TestOperationType
        {
            None,
            LoadMap,
            SpawnDynamicSlice
        };

        // One queued operation: its type, its argument (map name or dynamic
        // slice path), and whether it has finished executing.
        struct TestOperation
        {
            TestOperation()
            {
            }

            TestOperation(TestOperationType type, const AZStd::string& value)
                : m_type(type)
                , m_value(value)
            {
            }

            TestOperationType m_type = TestOperationType::None;
            AZStd::string m_value;
            bool m_complete = false;
        };

    public:
        AZ_COMPONENT(AutomatedLauncherTestingSystemComponent, "{87A405E2-390B-43A9-9A96-94BDC0DF680B}");

        static void Reflect(AZ::ReflectContext* context);

        static void GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided);
        static void GetIncompatibleServices(AZ::ComponentDescriptor::DependencyArrayType& incompatible);
        static void GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType& required);
        static void GetDependentServices(AZ::ComponentDescriptor::DependencyArrayType& dependent);

    protected:
        ////////////////////////////////////////////////////////////////////////
        // AutomatedLauncherTestingRequestBus interface implementation
        void CompleteTest(bool success, const AZStd::string& message) override;
        ////////////////////////////////////////////////////////////////////////

    protected:
        ////////////////////////////////////////////////////////////////////////////
        // CrySystemEvents
        void OnCrySystemInitialized(ISystem& system, const SSystemInitParams& systemInitParams) override;
        void OnCrySystemShutdown(ISystem& system) override;

        // AZ::Component interface implementation
        void Init() override;
        void Activate() override;
        void Deactivate() override;

        // TickBus
        void OnTick(float deltaTime, AZ::ScriptTimePoint time) override;

        // printf-style logging emitted regardless of log verbosity.
        void LogAlways(const char* format, ...);

    private:
        // Cached engine pointer; set/cleared by the CrySystem events above.
        ISystem* m_system = nullptr;
        // Pending operations; pushes are guarded by m_testOperationsMutex.
        AZStd::vector<TestOperation> m_testOperations;
        // Remaining dynamic slice paths parsed from the command line.
        AZStd::vector<AZStd::string> m_slices;
        // NOTE(review): MutexType is not declared in this header or its visible
        // includes — presumably supplied by a CrySystemBus/CryCommon header; confirm.
        MutexType m_testOperationsMutex;
        TestOperation m_currentTestOperation;
        // Keeps spawner entities alive for the duration of the run.
        AZStd::vector<AZStd::shared_ptr<AZ::Entity>> m_spawnedEntities;
        Phase m_phase = Phase::None;
        NextLevelLoad m_nextLevelLoad = NextLevelLoad::None;
    };
}

@ -1,53 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include "SpawnDynamicSlice.h"
#include <AzCore/Asset/AssetManager.h>
#include <AzCore/Asset/AssetManagerBus.h>
#include <AzCore/Component/Entity.h>
#include <LmbrCentral/Scripting/SpawnerComponentBus.h>
namespace AutomatedLauncherTesting
{
    // Creates and activates an entity named |entityName| carrying a
    // SpawnerComponent configured to spawn the dynamic slice found at |path|
    // as soon as the entity activates. Returns the spawner entity, or nullptr
    // when the slice asset cannot be resolved or the component cannot be made.
    AZ::Entity* SpawnDynamicSlice::CreateSpawner(const AZStd::string& path, const AZStd::string& entityName)
    {
        AZ::Entity* spawnerEntity = nullptr;
        AZ::Data::AssetId sliceAssetId;
        AZ::Data::AssetCatalogRequestBus::BroadcastResult(sliceAssetId, &AZ::Data::AssetCatalogRequestBus::Events::GetAssetIdByPath, path.c_str(), AZ::Data::s_invalidAssetType, false);
        if (sliceAssetId.IsValid())
        {
            // Fixed typo in the log message: "slide" -> "slice".
            AZ_Printf("System", "Spawning dynamic slice %s", path.c_str());
            spawnerEntity = aznew AZ::Entity(entityName.c_str());
            spawnerEntity->Init();

            LmbrCentral::SpawnerConfig spawnerConfig;
            AZ::Data::Asset<AZ::DynamicSliceAsset> sliceAssetData = AZ::Data::AssetManager::Instance().GetAsset<AZ::DynamicSliceAsset>(sliceAssetId, spawnerConfig.m_sliceAsset.GetAutoLoadBehavior());
            sliceAssetData.BlockUntilLoadComplete();

            AZ::Component* spawnerComponent = nullptr;
            AZ::ComponentDescriptorBus::EventResult(spawnerComponent, LmbrCentral::SpawnerComponentTypeId, &AZ::ComponentDescriptorBus::Events::CreateComponent);
            // Guard against a failed component creation: previously this would
            // dereference a null pointer and leak the spawner entity.
            if (!spawnerComponent)
            {
                AZ_Warning("System", false, "Could not create spawner component for dynamic slice %s", path.c_str());
                delete spawnerEntity;
                return nullptr;
            }

            // Spawn immediately on activation.
            spawnerConfig.m_sliceAsset = sliceAssetData;
            spawnerConfig.m_spawnOnActivate = true;
            spawnerComponent->SetConfiguration(spawnerConfig);
            spawnerEntity->AddComponent(spawnerComponent);
            spawnerEntity->Activate();
        }
        else
        {
            // Fixed typo in the log message: "slide" -> "slice".
            AZ_Warning("System", false, "Could not create asset for dynamic slice %s", path.c_str());
        }
        return spawnerEntity;
    }
} // namespace AutomatedLauncherTesting

@ -1,24 +0,0 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/std/string/string.h>
namespace AZ
{
class Entity;
}
namespace AutomatedLauncherTesting
{
    // Helper for spawning a dynamic slice at runtime via a spawner entity.
    class SpawnDynamicSlice
    {
    public:
        // Creates and activates an entity named |entityName| that spawns the
        // dynamic slice asset found at |path|; returns it, or nullptr when the
        // slice asset cannot be resolved (see SpawnDynamicSlice.cpp).
        static AZ::Entity* CreateSpawner(const AZStd::string& path, const AZStd::string& entityName);
    };
} // namespace AutomatedLauncherTesting

@ -1,14 +0,0 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#

# Sources and headers compiled into AutomatedLauncherTesting.Static.
set(FILES
    Include/AutomatedLauncherTesting/AutomatedLauncherTestingBus.h
    Source/AutomatedLauncherTestingSystemComponent.cpp
    Source/AutomatedLauncherTestingSystemComponent.h
    Source/SpawnDynamicSlice.cpp
    Source/SpawnDynamicSlice.h
)

@ -1,10 +0,0 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
set(FILES
Source/AutomatedLauncherTestingModule.cpp
)

@ -1,12 +0,0 @@
{
"gem_name": "AutomatedLauncherTesting",
"display_name": "Automated Launcher Testing",
"license": "Apache-2.0 Or MIT",
"origin": "Open 3D Engine - o3de.org",
"type": "Code",
"summary": "The Automated Launcher Testing Gem manages automated Open 3D Engine (O3DE) launcher tests.",
"canonical_tags": ["Gem"],
"user_tags": ["Debug", "Tools"],
"icon_path": "preview.png",
"requirements": ""
}

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d6204c6730e5675791765ca194e9b1cbec282208e280507de830afc2805e5fa
size 41127

@ -1,5 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""

@ -1,80 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Device Farm Create Bundle
"""
import argparse
import logging
import os
import shutil
import stat
logger = logging.getLogger(__name__)
def on_rm_error( func, path, exc_info):
    """shutil.rmtree onerror hook: assume the file is read-only, clear the
    read-only bit, and remove it."""
    # path contains the path of the file that couldn't be removed; make it
    # writable first, then unlink it directly.
    os.chmod( path, stat.S_IWRITE )
    os.unlink( path )
def copy_python_code_tree(src, dest):
    """Copy the tree at src to dest, skipping compiled Python artifacts."""
    skip_compiled = shutil.ignore_patterns('*.pyc', '__pycache__')
    shutil.copytree(src, dest, ignore=skip_compiled)
def create_test_bundle(project, project_launcher_tests_folder, python_test_tools_folder):
    """Assemble the Device Farm test bundle zip under temp/<project>/.

    Copies the runner scripts, the project's launcher tests, and the
    remote-console helper into a staging folder, then zips the staging
    folder to temp/<project>/test_bundle.zip.
    """
    temp_folder = os.path.join('temp', project)
    # Place all artifacts to send to device farm in this output folder
    zip_output_folder = os.path.join(temp_folder, 'zip_output')
    # clear the old virtual env folder
    logger.info("deleting old zip folder ...")
    if os.path.isdir(zip_output_folder):
        logger.info("Removing virtual env folder \"{}\" ...".format(zip_output_folder))
        # on_rm_error clears the read-only bit so Windows files can be deleted.
        shutil.rmtree(zip_output_folder, onerror = on_rm_error)
    # create the output folder where we dump everything to be zipped up.
    os.makedirs(zip_output_folder)
    # core files to add (iOS won't be referenced on Android, but it won't hurt anything)
    core_files = [
        'run_launcher_tests.py',
        'run_launcher_tests_ios.py',
        'run_launcher_tests_android.py',
        os.path.join('..', '..', project, 'project.json')]
    for file in core_files:
        shutil.copy2(file, os.path.join(zip_output_folder, os.path.basename(file)))
    logger.info("Including test code ...")
    test_output_folder = os.path.join(zip_output_folder, 'tests')
    copy_python_code_tree(project_launcher_tests_folder, test_output_folder)
    # Copy remote console from PythonTestTools
    logger.info("Including python PythonTestTools remote console ...")
    shutil.copy2(
        os.path.join(python_test_tools_folder, 'shared', 'remote_console_commands.py'),
        os.path.join(test_output_folder, 'remote_console_commands.py'))
    # Zip the tests/ folder, wheelhouse/ folder, and the requirements.txt file into a single archive:
    test_bundle_path = os.path.join(temp_folder, 'test_bundle')
    logger.info("Generating test bundle zip {} ...".format(test_bundle_path))
    shutil.make_archive(test_bundle_path, 'zip', zip_output_folder)
def main():
    """Parse command line arguments and build the Device Farm test bundle."""
    parser = argparse.ArgumentParser(description='Create the test bundle zip file for use on the Device Farm.')
    parser.add_argument('--project', required=True, help='Lumberyard Project')
    parser.add_argument('--project-launcher-tests-folder', required=True, help='Absolute path of the folder that contains the test code source.')
    parser.add_argument('--python-test-tools-folder', required=True, help='Absolute path of the PythonTestTools folder.')
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG)
    create_test_bundle(args.project, args.project_launcher_tests_folder, args.python_test_tools_folder)


if __name__== "__main__":
    main()

@ -1,10 +0,0 @@
@echo off
REM
REM Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
REM
REM SPDX-License-Identifier: Apache-2.0 OR MIT
REM
REM
REM
REM Build the Device Farm test bundle for the StarterGame project from the
REM launcher tests and PythonTestTools folders relative to this script.
python device_farm_create_bundle.py --project StarterGame --project-launcher-tests-folder "../../StarterGame/LauncherTests" --python-test-tools-folder "../PythonTestTools/test_tools"

@ -1,7 +0,0 @@
[
{
"attribute": "ARN",
"operator": "IN",
"value": "[%DEVICE_ARN_LIST%]"
}
]

@ -1,286 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Device Farm Schedule Run
"""
import argparse
import datetime
import json
import logging
import os
import subprocess
import sys
import time
import requests
logger = logging.getLogger(__name__)
def bake_template(filename, values):
    """Open a template and replace values. Return path to baked file.

    Each key in `values` is replaced (as a literal substring) with the
    stringified value; the result is written to temp/<filename>.
    """
    # Open the options json template and replace with real values.
    with open(filename, 'r') as in_file:
        data = in_file.read()
    # items() rather than the Python 2-only iteritems(): works on both 2 and 3.
    for key, value in values.items():
        data = data.replace(key, str(value))
    filename_out = os.path.join('temp', filename)
    with open(filename_out, 'w') as out_file:
        out_file.write(data)
    return filename_out
def execute_aws_command(args):
    """Execute the aws cli devicefarm command and return its stdout.

    Raises Exception (with the captured output) when the CLI exits non-zero.
    """
    # Use .cmd on Windows, not sure exactly why, but aws will not be found without it.
    aws_executable = 'aws.cmd' if sys.platform.startswith('win') else 'aws'
    aws_args = [aws_executable, 'devicefarm', '--region', 'us-west-2'] + args
    logger.info("Running {} ...".format(" ".join(aws_args)))
    # Capture stderr too: previously it was not piped, so `err` was always
    # None and the failure message lost the CLI's diagnostics.
    p = subprocess.Popen(aws_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        msg = "Command '{}' failed. return code: {} out: {} err: {}".format(
            " ".join(aws_args),
            p.returncode,
            out,
            err
        )
        raise Exception(msg)
    return out
def find_or_create_project(project_name):
    """ Find the project by name, or create a new one. """
    existing = json.loads(execute_aws_command(['list-projects']))
    # Reuse an existing project whose name matches.
    for project in existing['projects']:
        if project['name'] != project_name:
            continue
        logger.info("Found existing project named {}.".format(project_name))
        return project['arn']
    # No match found: create a project with the requested name.
    created = json.loads(execute_aws_command(['create-project', '--name', project_name]))
    return created['project']['arn']
def find_or_create_device_pool(project_name, device_pool_name, device_arns):
    """ Find the device pool in the project by name, or create a new one.

    NOTE(review): despite the parameter name, callers pass a project ARN as
    `project_name` — it is used directly with --arn / --project-arn below.
    """
    list_device_pools_data = json.loads(execute_aws_command(['list-device-pools', '--arn', project_name]))
    # return the arn if it is found
    for device_pool_data in list_device_pools_data['devicePools']:
        if device_pool_data['name'] == device_pool_name:
            logger.info("Found existing device pool named {}.".format(device_pool_name))
            return device_pool_data['arn']
    # Bake the device ARN list into the rules template for the new pool.
    device_pool_json_path_out = bake_template(
        'device_farm_default_device_pool_template.json',
        {'%DEVICE_ARN_LIST%' : device_arns})
    # create a default device pool
    args = [
        'create-device-pool',
        '--project-arn',
        project_name,
        '--name',
        device_pool_name,
        '--rules',
        "file://{}".format(device_pool_json_path_out)]
    device_pools_data = json.loads(execute_aws_command(args))
    return device_pools_data['devicePool']['arn']
def create_upload(project_arn, path, type):
    """ Create an upload and return the ARN """
    # Register the upload with Device Farm; the response carries both the
    # upload's ARN and the pre-signed URL to PUT the file to.
    response = json.loads(execute_aws_command([
        'create-upload',
        '--project-arn', project_arn,
        '--name', os.path.basename(path),
        '--type', type,
    ]))
    upload_info = response['upload']
    return upload_info['arn'], upload_info['url']
def send_upload(filename, url):
    """ Upload a file with a put request. """
    logger.info("Sending upload {} ...".format(filename))
    # Read the whole file into memory and PUT it to the upload URL returned
    # by create_upload.
    with open(filename, 'rb') as uploadfile:
        data = uploadfile.read()
    headers = {"content-type": "application/octet-stream"}
    output = requests.put(url, data=data, allow_redirects=True, headers=headers)
    logger.info("Sent upload {}.".format(output))
def wait_for_upload_to_finish(poll_time, upload_arn):
    """ Wait for an upload to finish by polling for status """
    logger.info("Waiting for upload {} ...".format(upload_arn))

    def fetch():
        # One get-upload round trip; the status lives under upload.status.
        return json.loads(execute_aws_command(['get-upload', '--arn', upload_arn]))

    upload_data = fetch()
    while upload_data['upload']['status'] not in ('SUCCEEDED', 'FAILED'):
        time.sleep(poll_time)
        upload_data = fetch()
    if upload_data['upload']['status'] != 'SUCCEEDED':
        raise Exception('Upload failed.')
def upload(poll_time, project_arn, path, type):
    """ Create the upload on the Device Farm, upload the file and wait for completion. """
    # Register, send, then block until Device Farm reports SUCCEEDED.
    upload_arn, upload_url = create_upload(project_arn, path, type)
    send_upload(path, upload_url)
    wait_for_upload_to_finish(poll_time, upload_arn)
    return upload_arn
def schedule_run(project_arn, app_arn, device_pool_arn, test_spec_arn, test_bundle_arn, execution_timeout):
    """ Schedule the test run on the Device Farm """
    # Timestamped run name so repeated runs are distinguishable in the console.
    run_name = "LY LT {}".format(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y"))
    logger.info("Scheduling run {} ...".format(run_name))
    # Bake the uploaded ARNs and the timeout into the JSON files the CLI reads.
    schedule_run_test_json_path_out = bake_template(
        'device_farm_schedule_run_test_template.json',
        {'%TEST_SPEC_ARN%' : test_spec_arn, '%TEST_PACKAGE_ARN%' : test_bundle_arn})
    execution_configuration_json_path_out = bake_template(
        'device_farm_schedule_run_execution_configuration_template.json',
        {'%EXECUTION_TIMEOUT%' : execution_timeout})
    args = [
        'schedule-run',
        '--project-arn',
        project_arn,
        '--app-arn',
        app_arn,
        '--device-pool-arn',
        device_pool_arn,
        '--name',
        "\"{}\"".format(run_name),
        '--test',
        "file://{}".format(schedule_run_test_json_path_out),
        '--execution-configuration',
        "file://{}".format(execution_configuration_json_path_out)]
    schedule_run_data = json.loads(execute_aws_command(args))
    return schedule_run_data['run']['arn']
def download_file(url, output_path):
    """ download a file from a url, save in output_path

    Returns True on success; failures are logged and reported as False so
    the caller can continue with remaining artifacts.
    """
    try:
        # Stream in 8 KB chunks so large artifacts are not held in memory.
        r = requests.get(url, stream=True)
        r.raise_for_status()
        output_folder = os.path.dirname(output_path)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        with open(output_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
    except requests.exceptions.RequestException as e:
        logging.exception("Failed request for downloading file from {}.".format(url))
        return False
    except IOError as e:
        logging.exception("Failed writing to file {}.".format(output_path))
        return False
    return True
def download_artifacts(run_arn, artifacts_output_folder):
    """
    Download run artifacts and write to path set in artifacts_output_folder.
    """
    # NOTE: configuring logging inside a helper is unusual; kept for
    # backward compatibility with existing callers.
    logging.basicConfig(level=logging.DEBUG)
    list_jobs_data = json.loads(execute_aws_command(['list-jobs', '--arn', run_arn]))
    for job_data in list_jobs_data['jobs']:
        logger.info("Downloading artifacts for {} ...".format(job_data['name']))
        # Sanitize the job name so it is safe to use as a folder name.
        safe_job_name = "".join(x for x in job_data['name'] if x.isalnum())
        list_artifacts_data = json.loads(execute_aws_command(['list-artifacts', '--arn', job_data['arn'], '--type', 'FILE']))
        for artifact_data in list_artifacts_data['artifacts']:
            # A run may contain many jobs. Usually each job is one device type.
            # Each job has 3 stages: setup, test and shutdown. You can tell what
            # stage an artifact is from based on the ARN.
            # We only care about artifacts from the main stage of the job,
            # not the setup or tear down artifacts. So parse the ARN and look
            # for the 00001 identifier.
            # Log through the module logger instead of the Python 2-only bare
            # `print` statement, which is a syntax error on Python 3.
            logger.info(artifact_data['arn'])
            if artifact_data['arn'].split('/')[3] == '00001':
                logger.info("Downloading artifacts {} ...".format(artifact_data['name']))
                output_filename = "{}.{}".format(
                    "".join(x for x in artifact_data['name'] if x.isalnum()),
                    artifact_data['extension'])
                output_path = os.path.join(artifacts_output_folder, safe_job_name, output_filename)
                if not download_file(artifact_data['url'], output_path):
                    msg = "Failed to download file from {} and save to {}".format(artifact_data['url'], output_path)
                    logger.error(msg)
def main():
parser = argparse.ArgumentParser(description='Upload and app and schedule a run on the Device Farm.')
parser.add_argument('--app-path', required=True, help='Path of the app file.')
parser.add_argument('--test-spec-path', required=True, help='Path of the test spec yaml.')
parser.add_argument('--test-bundle-path', required=True, help='Path of the test bundle zip.')
parser.add_argument('--project-name', required=True, help='The name of the project.')
parser.add_argument('--device-pool-name', required=True, help='The name of the device pool.')
parser.add_argument('--device-arns',
default='\\"arn:aws:devicefarm:us-west-2::device:6CCDF49186B64E3FB27B9346AC9FAEC1\\"',
help='List of device ARNs. Used when existing pool is not found by name. Default is Galaxy S8.')
parser.add_argument('--wait-for-result', default="true", help='Set to "true" to wait for result of run.')
parser.add_argument('--download-artifacts', default="true", help='Set to "true" to download artifacts after run. requires --wait-for-result')
parser.add_argument('--artifacts-output-folder', default="temp", help='Folder to place the downloaded artifacts.')
parser.add_argument('--upload-poll-time', default=10, help='How long to wait between polling upload status.')
parser.add_argument('--run-poll-time', default=60, help='How long to wait between polling run status.')
parser.add_argument('--run-execution-timeout', default=60, help='Run execution timeout.')
parser.add_argument('--test-names', nargs='+', help='A list of test names to run, default runs all tests.')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
# Find the project by name, or create a new one.
project_arn = find_or_create_project(args.project_name)
# Find the device pool in the project by name, or create a new one.
device_pool_arn = find_or_create_device_pool(project_arn, args.device_pool_name, args.device_arns)
# Bake out EXTRA_ARGS option with args.test_names
extra_args = ""
if args.test_names:
extra_args = "--test-names {}".format(" ".join("\"{}\"".format(test_name) for test_name in args.test_names))
test_spec_path_out = bake_template(
args.test_spec_path,
{'%EXTRA_ARGS%' : extra_args})
# Upload test spec and test bundle (Appium js is just a generic avenue to our own custom code).
test_spec_arn = upload(args.upload_poll_time, project_arn, test_spec_path_out, 'APPIUM_NODE_TEST_SPEC')
test_bundle_arn = upload(args.upload_poll_time, project_arn, args.test_bundle_path, 'APPIUM_NODE_TEST_PACKAGE')
# Upload the app.
type = 'ANDROID_APP' if args.app_path.lower().endswith('.apk') else 'IOS_APP'
app_arn = upload(args.upload_poll_time, project_arn, args.app_path, type)
# Schedule the test run.
run_arn = schedule_run(project_arn, app_arn, device_pool_arn, test_spec_arn, test_bundle_arn, args.run_execution_timeout)
logger.info('Run scheduled.')
# Wait for run, exit with failure if test run fails.
# String-compare against 'true' for ease of use with Jenkins boolean env vars.
if args.wait_for_result.lower() == 'true':
# Runs can take a long time, so just poll once a minute by default.
run_data = json.loads(execute_aws_command(['get-run', '--arn', run_arn]))
while run_data['run']['result'] == 'PENDING':
logger.info("Run status: {} waiting {} seconds ...".format(run_data['run']['result'], args.run_poll_time))
time.sleep(args.run_poll_time)
run_data = json.loads(execute_aws_command(['get-run', '--arn', run_arn]))
# Download run artifacts. String-compare against 'true' for ease of use with Jenkins boolean env vars.
if args.download_artifacts.lower() == 'true':
download_artifacts(run_arn, args.artifacts_output_folder)
# If the run did not pass raise an exception to fail this jenkins job.
if run_data['run']['result'] != 'PASSED':
# Dump all of the run info.
logger.info(run_data)
# Raise an exception to fail this test.
msg = "Run fail with result {}\nRun ARN: {}".format(run_data['run']['result'], run_arn)
raise Exception(msg)
logger.info('Run passed.')
# Script entry point when invoked directly (not on import).
if __name__== "__main__":
    main()

@ -1,14 +0,0 @@
@echo off
REM
REM Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
REM
REM SPDX-License-Identifier: Apache-2.0 OR MIT
REM
REM
REM
REM Validate the local StarterGame launcher-test setup first; only bundle and
REM schedule the AWS Device Farm run if validation succeeds (ERRORLEVEL 0).
call ../../python/python.cmd run_launcher_tests_local_validation.py --dev-root-folder "../.." --project "StarterGame"
if %ERRORLEVEL% == 0 (
    REM Package the project's launcher tests plus the python test tools, then
    REM schedule a run against the "LyAndroid" device pool using the Android test spec.
    call ../../python/python.cmd device_farm_create_bundle.py --project StarterGame --project-launcher-tests-folder "../../StarterGame/LauncherTests" --python-test-tools-folder "../PythonTestTools/test_tools"
    call ../../python/python.cmd device_farm_schedule_run.py --app-path "../../BinAndroidArmv8Clang/StarterGameLauncher_w_assets.apk" --project-name "LyAutomatedLauncher" --device-pool-name "LyAndroid" --test-spec-path "device_farm_test_spec_android.yaml" --test-bundle-path "temp/StarterGame/test_bundle.zip" --artifacts-output-folder "temp/StarterGame"
)

@ -1,4 +0,0 @@
{
"jobTimeoutMinutes": %EXECUTION_TIMEOUT%,
"videoCapture": true
}

@ -1,9 +0,0 @@
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
# Validate the local StarterGame launcher-test setup; abort immediately on failure.
../../python/python.sh run_launcher_tests_local_validation.py --dev-root-folder "../.." --project "StarterGame" || exit
# Package the project's launcher tests together with the python test tools.
../../python/python.sh device_farm_create_bundle.py --project StarterGame --project-launcher-tests-folder "../../StarterGame/LauncherTests" --python-test-tools-folder "../PythonTestTools/test_tools"
# Current known limitation on iOS, only one test at a time is supported, see run_launcher_tests_ios.py run_test
# Schedule the run on the "LyIOS" pool; $1 is the path to the built .ipa/app.
../../python/python.sh device_farm_schedule_run.py --app-path "$1" --project-name "LyAutomatedLauncherIOS" --device-pool-name "LyIOS" --test-spec-path "device_farm_test_spec_ios.yaml" --test-bundle-path "temp/StarterGame/test_bundle.zip" --artifacts-output-folder "temp/StarterGame" --device-arns "\\\"arn:aws:devicefarm:us-west-2::device:D125AEEE8614463BAE106865CAF4470E\\\"" --test-names "progress"

@ -1,5 +0,0 @@
{
"type": "APPIUM_NODE",
"testPackageArn": "%TEST_PACKAGE_ARN%",
"testSpecArn": "%TEST_SPEC_ARN%"
}

@ -1,16 +0,0 @@
# AWS Device Farm custom-environment test spec (Android).
version: 0.1
phases:
  install:
    commands:
  pre_test:
    commands:
      # Install the uploaded APK on the allocated Device Farm device.
      - adb -P 5037 -s "$DEVICEFARM_DEVICE_UDID" install -r $DEVICEFARM_APP_PATH
  test:
    commands:
      # %EXTRA_ARGS% is baked in by device_farm_schedule_run.py (e.g. --test-names ...).
      - python ./run_launcher_tests_android.py --project-json-path "./project.json" --project-launcher-tests-folder "./tests" --screenshots-folder "$SCREENSHOT_PATH" %EXTRA_ARGS%
  post_test:
    commands:

@ -1,16 +0,0 @@
# AWS Device Farm custom-environment test spec (iOS).
version: 0.1
phases:
  install:
    commands:
  pre_test:
    commands:
      # Launch the app once via idevicedebug, then give it time to settle.
      - idevicedebug -u $DEVICEFARM_DEVICE_UDID run com.amazon.lumberyard.startergame
      - sleep 10s
  test:
    commands:
      # %EXTRA_ARGS% is baked in by device_farm_schedule_run.py (e.g. --test-names ...).
      - python ./run_launcher_tests_ios.py --project-json-path "./project.json" --project-launcher-tests-folder "./tests" --screenshots-folder "$SCREENSHOT_PATH" %EXTRA_ARGS%
  post_test:
    commands:

@ -1,131 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import json
import logging
import os
import time
import shutil
import subprocess
import sys
logger = logging.getLogger(__name__)
class PlatformDriver:
    """Base driver for running launcher tests on a platform.

    Subclasses (Android/iOS/Windows drivers) implement run_test() and
    take_screenshot(); this base class owns test discovery from
    launcher_tests.json, project.json parsing, and monitoring of the
    launcher's output for pass/fail markers.
    """

    def __init__(self, project_json_path, project_launcher_tests_folder, test_names, screenshots_folder, screenshots_interval):
        # Path to the project's project.json settings file.
        self.project_json_path = project_json_path
        # Folder containing launcher_tests.json and the test assets.
        self.project_launcher_tests_folder = project_launcher_tests_folder
        # Optional list of test names to run; falsy runs all tests.
        self.test_names = test_names
        # Output folder for periodic screenshots (wiped before each run).
        self.screenshots_folder = screenshots_folder
        # Seconds between screenshots while a test is running.
        self.screenshots_interval = screenshots_interval

    def read_json_data(self, path):
        """Read a json file and return the data; exits with code 1 on any failure."""
        try:
            with open(path) as json_file:
                json_data = json.load(json_file)
        # Fixed py2-only "except Exception, e" syntax; "as" works on py2.6+ and py3.
        except Exception as e:
            logger.error("Failed to read json file: '{}'".format(path))
            logger.error("Exception: '{}'".format(e))
            sys.exit(1)
        return json_data

    def read_project_name(self):
        """Return the 'project_name' entry from the project.json file."""
        project_data = self.read_json_data(self.project_json_path)
        return project_data['project_name']

    def run_test(self, test_name, map, dynamic_slice, timeout, pass_string, fail_string):
        """Meant to be overridden in derived classes.

        NOTE: test_name added to the signature so the base stub matches both
        the 6-argument call in run_launcher_tests() and the overrides in the
        platform drivers (previously it would raise TypeError if reached).
        """
        return True

    def run_launcher_tests(self):
        """Discovers all of the available tests in launcher_tests.json and runs them.

        Returns True only if every selected test reported success.
        """
        # Delete the old screenshots folder if it exists
        if os.path.exists(self.screenshots_folder):
            shutil.rmtree(self.screenshots_folder)
        # Read the launcher_tests.json file.
        launcher_tests_data = self.read_json_data(os.path.join(self.project_launcher_tests_folder, 'launcher_tests.json'))
        # Run each of the tests found in the launcher_tests.json file.
        ok = True
        for launcher_test_data in launcher_tests_data['launcher_tests']:
            # Skip over this test if specific tests are specified and this is not one of them.
            if self.test_names and launcher_test_data.get('name').lower() not in [x.lower() for x in self.test_names]:
                continue
            ok = ok and self.run_test(
                launcher_test_data.get('name'),
                launcher_test_data.get('map'),
                launcher_test_data.get('dynamic_slice'),
                launcher_test_data.get('timeout'),
                launcher_test_data.get('pass_string', 'AUTO_LAUNCHER_TEST_COMPLETE'),
                launcher_test_data.get('fail_string', 'AUTO_LAUNCHER_TEST_FAIL'))
        return ok

    def monitor_process_output(self, test_name, command, pass_string, fail_string, timeout, log_file=None):
        """Launch *command* and watch its output for pass/fail markers.

        Monitors either the process stdout or, when *log_file* is given, the
        log file the process writes to. Takes a screenshot every
        screenshots_interval seconds. Returns True when pass_string is seen,
        False on fail_string, timeout, or process exit without a verdict.
        """
        self.process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # On windows, function was failing (I think) because it checked the poll before the process
        # had a chance to start, so added a short delay to give it some time to startup.
        # It also failed if the log_file was open()'d when it didn't exist yet.
        # Delay seems to have fixed the problem. 0.25 sec was too short.
        time.sleep(0.5)
        fp = None
        if log_file:
            # The process we're starting sends its output to the log file instead of stdout
            # so we need to monitor that instead of the stdout.
            fp = open(log_file)
        # Detect log output messages or timeout exceeded
        start = time.time()
        last_time = start
        screenshot_time_remaining = self.screenshots_interval
        screenshot_index = 0
        logger.info('Waiting for test to complete.')
        message = ""
        result = False
        while True:
            if log_file:
                line = fp.readline()
            else:
                line = self.process.stdout.readline()
            # EOF with a dead process: the launcher exited without a verdict.
            if line == '' and self.process.poll() is not None:
                break
            if line:
                logger.info(line.rstrip())
                if pass_string in line:
                    message = "Detected {}. Test completed.".format(pass_string)
                    result = True
                    break
                if fail_string in line:
                    message = "Detected {}. Test failed.".format(fail_string)
                    break
            # Timeout check at loop level so a quiet log file (readline
            # returning '') cannot spin forever without ever timing out.
            if time.time() - start > timeout:
                message = "Timeout of {} reached. Test failed.".format(timeout)
                break
            # Periodically capture a screenshot of the running test.
            cur_time = time.time()
            screenshot_time_remaining = screenshot_time_remaining - (cur_time - last_time)
            last_time = cur_time
            if screenshot_time_remaining <= 0:
                self.take_screenshot(os.path.join(self.screenshots_folder, "{}_screen{}".format(test_name.replace(' ', '_'), screenshot_index)))
                screenshot_index = screenshot_index + 1
                screenshot_time_remaining = self.screenshots_interval
        logger.info(message)
        if log_file:
            fp.close()
        return result

@ -1,108 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import argparse
import itertools
import logging
import os
from run_launcher_tests import PlatformDriver
import subprocess
import time
logger = logging.getLogger(__name__)
class AndroidDriver(PlatformDriver):
    """PlatformDriver implementation that drives launcher tests on an Android device via adb."""

    def read_package_name(self):
        """Return the Android package name from the project.json settings."""
        project_data = self.read_json_data(self.project_json_path)
        return project_data['android_settings']['package_name']

    def run_test(self, test_name, map, dynamic_slice, timeout, pass_string, fail_string):
        """Launch the activity with the test args and watch logcat for the result.

        Raises an Exception on failure. Returns the boolean test result on
        success (the return was previously missing, which made
        run_launcher_tests() report a falsy overall result even when passing).
        """
        package = self.read_package_name()
        project = self.read_project_name()
        package_and_activity = "{}/{}.{}Activity".format(package, package, project)
        # clear any old logcat
        command_line = ['adb', 'logcat', '-c']
        p = subprocess.Popen(command_line)
        p.communicate()
        # increase ring buffer size because we are going to be printing a large amount of data in a short time.
        command_line = ['adb', 'logcat', '-G', '10m']
        p = subprocess.Popen(command_line)
        p.communicate()
        # Start the process and pass in the test args
        logger.info("Start the activity {} ...".format(package_and_activity))
        command_line = ['adb', 'shell', 'am', 'start', '-a', 'android.intent.action.MAIN', '-n', package_and_activity]
        command_line += ['-e', 'ltest_map', map]
        command_line += ['-e', 'ltest_slice', dynamic_slice]
        p = subprocess.Popen(command_line)
        p.communicate()
        # Get the pid of the app to use in the logcat monitoring. If we don't
        # do this we might get residual output from previous test run. Even
        # though we do a clear.
        pid = None
        for _ in itertools.repeat(None, 10):
            command_line = ['adb', 'shell', 'pidof', package]
            p = subprocess.Popen(command_line, stdout=subprocess.PIPE)
            stdoutdata, stderrdata = p.communicate()
            pid = stdoutdata.strip()
            if pid:
                logger.info("Get pid of {}".format(pid))
                break
            else:
                logger.info('Failed to get pid, waiting 1 second to retry ...')
                time.sleep(1)
        if not pid:
            # Fixed typo in message: "determin" -> "determine".
            raise Exception('Unable to determine the pid of the process.')
        # Filter logcat to this pid and to the LMBR tag only.
        command_line = ['adb', '-d', 'logcat', "--pid={}".format(pid), 'LMBR:I', '*:S']
        test_result = self.monitor_process_output(test_name, command_line, pass_string, fail_string, timeout)
        logger.info("Kill the app ...")
        p = subprocess.Popen(['adb', 'shell', 'am', 'force-stop', package])
        p.communicate()
        # Stop here if we failed.
        if not test_result:
            raise Exception("Test failed.")
        return test_result

    def take_screenshot(self, output_path_no_ext):
        """Capture a device screenshot and pull it to <output_path_no_ext>.png."""
        # Create the output folder if it is not there
        output_folder = os.path.dirname(output_path_no_ext)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # Take the screenshot
        p = subprocess.Popen(['adb', 'shell', 'screencap', '-p', '/sdcard/screen.png'])
        p.communicate()
        # copy it off of the device
        p = subprocess.Popen(['adb', 'pull', '/sdcard/screen.png', "{}.png".format(output_path_no_ext)])
        p.communicate()
def main():
    """Parse command-line arguments and run the Android launcher tests."""
    parser = argparse.ArgumentParser(description='Sets up and runs Android Launcher Tests.')
    parser.add_argument('--project-json-path', required=True, help='Path to the project.json project settings file.')
    parser.add_argument('--project-launcher-tests-folder', required=True, help='Path to the LauncherTests folder in a Project.')
    parser.add_argument('--test-names', nargs='+', help='A list of test names to run, default runs all tests.')
    parser.add_argument('--screenshots-folder', default="./temp/Android/screenshots", help='Output folder for screenshots.')
    # type=float so a CLI-supplied value is numeric; without it argparse hands
    # over a string, which breaks the interval arithmetic in monitor_process_output.
    parser.add_argument('--screenshots-interval', default=5, type=float, help='Time interval between taking screenshots in seconds.')
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logger.info("Running Launcher tests at {} ...".format(args.project_launcher_tests_folder))
    driver = AndroidDriver(args.project_json_path, args.project_launcher_tests_folder, args.test_names, args.screenshots_folder, args.screenshots_interval)
    driver.run_launcher_tests()


if __name__ == "__main__":
    main()

@ -1,91 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import argparse
import logging
import os
from run_launcher_tests import PlatformDriver
import subprocess
logger = logging.getLogger(__name__)
class IOSDriver(PlatformDriver):
    """PlatformDriver implementation that drives launcher tests on an iOS device
    via the libimobiledevice tools (idevicedebug / idevicescreenshot)."""

    def __init__(self, project_json_path, project_launcher_tests_folder, test_names, screenshots_folder, screenshots_interval, device_udid):
        self.project_json_path = project_json_path
        self.project_launcher_tests_folder = project_launcher_tests_folder
        self.test_names = test_names
        self.screenshots_folder = screenshots_folder
        self.screenshots_interval = screenshots_interval
        # UDID of the target device (auto-detected by discover_device when not passed on the CLI).
        self.device_udid = device_udid

    def run_test(self, test_name, map, dynamic_slice, timeout, pass_string, fail_string):
        """Launch the app via idevicedebug and monitor its output for the result.

        Raises an Exception on failure. Returns the boolean test result on
        success (the return was previously missing, which made
        run_launcher_tests() report a falsy overall result even when passing).
        """
        project = self.read_project_name()
        bundle_id = "com.amazon.lumberyard.{}".format(project)
        # Start the process and pass in the test args
        command_line = ['idevicedebug', '-u', self.device_udid, 'run', bundle_id]
        command_line += ['-ltest_map', map]
        command_line += ['-ltest_slice', dynamic_slice]
        test_result = self.monitor_process_output(test_name, command_line, pass_string, fail_string, timeout)
        # TODO: Figure out some way to kill the running app. Because we don't know how to do this,
        # we currently have a limitation on iOS we can only run one test at a time.
        # Stop here if we failed.
        if not test_result:
            raise Exception("Test failed.")
        return test_result

    def take_screenshot(self, output_path_no_ext):
        """Capture a device screenshot to <output_path_no_ext>.tiff."""
        # Create the output folder if it is not there
        output_folder = os.path.dirname(output_path_no_ext)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # idevicescreenshot to take a screenshot and save to output path.
        p = subprocess.Popen(['idevicescreenshot', "{}.tiff".format(output_path_no_ext)])
        p.communicate()
def discover_device():
    """ Discover the connected device and get the UDID."""
    logger.info("Getting the connected device UDID ...")
    # Ask libimobiledevice for the list of attached device UDIDs, one per line.
    proc = subprocess.Popen(['idevice_id', '--list'], stdout=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    if not stdout_data:
        raise Exception("No output.\nout:{}\nerr:{}\n".format(stdout_data, stderr_data))
    udid_lines = stdout_data.splitlines()
    if len(udid_lines) == 0:
        raise Exception("No devices connected.\nout:{}\nerr:{}\n".format(stdout_data, stderr_data))
    if len(udid_lines) > 1:
        raise Exception("More than one device connected. Use --device-udid\nout:{}\nerr:{}\n".format(stdout_data, stderr_data))
    # Exactly one device attached: return its UDID.
    return udid_lines[0]
def main():
    """Parse command-line arguments, resolve the target device, and run the iOS launcher tests."""
    parser = argparse.ArgumentParser(description='Sets up and runs iOS Launcher Tests.')
    parser.add_argument('--project-json-path', required=True, help='Path to the project.json project settings file.')
    parser.add_argument('--project-launcher-tests-folder', required=True, help='Path to the LauncherTests folder in a Project.')
    parser.add_argument('--device-udid', help='The UDID of the iOS device. Will auto detect if just one device is attached.')
    parser.add_argument('--test-names', nargs='+', help='A list of test names to run, default runs all tests.')
    parser.add_argument('--screenshots-folder', default="./temp/iOS/screenshots", help='Output folder for screenshots.')
    # type=float so a CLI-supplied value is numeric; without it argparse hands
    # over a string, which breaks the interval arithmetic in monitor_process_output.
    parser.add_argument('--screenshots-interval', default=5, type=float, help='Time interval between taking screenshots in seconds.')
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG)
    device_udid = args.device_udid
    if not device_udid:
        device_udid = discover_device()
    logger.info("Running Launcher tests at {} ...".format(args.project_launcher_tests_folder))
    driver = IOSDriver(args.project_json_path, args.project_launcher_tests_folder, args.test_names, args.screenshots_folder, args.screenshots_interval, device_udid)
    driver.run_launcher_tests()


if __name__ == "__main__":
    main()

@ -1,77 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import argparse
import json
import logging
import os
import sys
logger = logging.getLogger(__name__)
def report_error_and_exit(msg):
    """Log an error message and exit with error code 1."""
    logger.error(msg)
    # Equivalent to sys.exit(1): sys.exit just raises SystemExit.
    raise SystemExit(1)
def check_autoexe_cfg(dev_root_folder, project):
    """
    Make sure that the project's autoexec.cfg does not contain a Map command.
    The Launcher Test framework is responsible for loading the map.
    Exits the process with code 1 on any failure.
    """
    # Open the autoexec.cfg file and read the contents
    autoexec_cfg_path = os.path.join(dev_root_folder, project, 'autoexec.cfg')
    try:
        with open(autoexec_cfg_path) as f:
            content = f.readlines()
    # Catch Exception rather than using a bare except so KeyboardInterrupt
    # and SystemExit still propagate.
    except Exception:
        report_error_and_exit("Failed to read contents of {}".format(autoexec_cfg_path))
    # Make sure no map command is detected, the Launcher Test code will be in charge of loading a map
    for line in content:
        if line.lower().startswith('map '):
            report_error_and_exit("Map command '{}' detected in {}".format(line.strip(), autoexec_cfg_path))
def check_gems_enabled(dev_root_folder, project):
    """Check the project's gems to make sure the AutomatedLauncherTesting gem is enabled.

    Exits the process with code 1 when gems.json is unreadable or the gem is missing.
    """
    # Read the gems.json file
    gems_json_path = os.path.join(dev_root_folder, project, 'gems.json')
    try:
        with open(gems_json_path) as f:
            json_data = json.load(f)
    # Catch Exception rather than using a bare except so KeyboardInterrupt
    # and SystemExit still propagate.
    except Exception:
        report_error_and_exit("Failed to read contents of {}".format(gems_json_path))
    # Make sure AutomatedLauncherTesting is enabled
    found = any('AutomatedLauncherTesting' in gem_data['Path'] for gem_data in json_data['Gems'])
    if not found:
        # Fixed typo in message: "Launcer" -> "Launcher".
        report_error_and_exit("Automated Launcher Testing GEM not enabled in {}".format(gems_json_path))
def main():
    """Entry point: validate the project's local Launcher Tests configuration."""
    arg_parser = argparse.ArgumentParser(description='Run validation on the local environment to check for required Launcher Tests config.')
    arg_parser.add_argument('--dev-root-folder', required=True, help='Path to the root Lumberyard dev folder.')
    arg_parser.add_argument('--project', required=True, help='Lumberyard project.')
    parsed_args = arg_parser.parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logger.info("Running validation for project {} ...".format(parsed_args.project))
    # Each check exits the process with code 1 when it fails.
    check_autoexe_cfg(parsed_args.dev_root_folder, parsed_args.project)
    check_gems_enabled(parsed_args.dev_root_folder, parsed_args.project)
    logger.info('Validation complete.')


if __name__ == '__main__':
    main()

@ -1,66 +0,0 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import argparse
import logging
import os
from run_launcher_tests import PlatformDriver
import test_tools.platforms.win.win as win
from test_tools.shared.platform_map import PLATFORM_MAP
logger = logging.getLogger(__name__)
class WinDriver(PlatformDriver):
    """PlatformDriver implementation that runs the launcher tests on Windows
    by launching <Project>Launcher.exe and monitoring Game.log."""

    def __init__(self, project_json_path, project_launcher_tests_folder, test_names=None, screenshots_folder="./temp/Win/screenshots", screenshots_interval=5):
        self.project_json_path = project_json_path
        self.project_launcher_tests_folder = project_launcher_tests_folder
        # The base class (run_launcher_tests / monitor_process_output) reads
        # test_names, screenshots_folder and screenshots_interval; they were
        # previously never set here, causing an AttributeError at runtime.
        # New parameters are defaulted so existing 2-argument callers still work.
        self.test_names = test_names
        self.screenshots_folder = screenshots_folder
        self.screenshots_interval = screenshots_interval
        self.platform_map = PLATFORM_MAP[win.platform_name() + "_" + win.default_compiler_option()]

    def run_test(self, test_name, map, dynamic_slice, timeout, pass_string, fail_string):
        """Run a single launcher test and monitor Game.log for the result.

        test_name added to match the 6-argument call in the base class's
        run_launcher_tests() (previously missing -> TypeError) and forwarded
        to monitor_process_output, which requires it for screenshot naming.
        Raises an Exception on failure; returns the boolean result on success.
        """
        project = self.read_project_name()
        # Start the process and pass in the test args
        dev_dir = os.path.dirname(os.path.realpath(__file__))
        dev_dir = os.path.join(dev_dir, os.path.pardir)
        dev_dir = os.path.join(dev_dir, os.path.pardir)
        dev_dir = os.path.realpath(dev_dir)
        launcher_dir = os.path.join(dev_dir, self.platform_map["bin_dir"])
        command_line = [os.path.join(launcher_dir, project + 'Launcher.exe')]
        command_line += ['-ltest_map', map]
        command_line += ['-ltest_slice', dynamic_slice]
        log_file = os.path.join(dev_dir, "cache", project, "pc", "user", "log", "Game.log")
        test_result = self.monitor_process_output(test_name, command_line, pass_string, fail_string, timeout, log_file)
        if self.process:
            self.process.kill()
        # Stop here if we failed.
        if not test_result:
            raise Exception("Test failed.")
        return test_result

    def take_screenshot(self, output_path_no_ext):
        """Screenshot capture is not implemented for Windows; log and continue.

        The base class calls take_screenshot periodically; without this
        override the run would crash with AttributeError once the interval elapsed.
        """
        logger.info("Screenshot capture not implemented on Windows ({}).".format(output_path_no_ext))
def main():
    """Parse command-line arguments and run the Windows launcher tests."""
    arg_parser = argparse.ArgumentParser(description='Sets up and runs Windows Launcher Tests.')
    arg_parser.add_argument('--project-json-path', required=True, help='Path to the project.json project settings file.')
    arg_parser.add_argument('--project-launcher-tests-folder', required=True, help='Path to the LauncherTests folder in a Project.')
    parsed_args = arg_parser.parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logger.info("Running Launcher tests at {} ...".format(parsed_args.project_launcher_tests_folder))
    # Build the driver and kick off all discovered tests.
    WinDriver(parsed_args.project_json_path, parsed_args.project_launcher_tests_folder).run_launcher_tests()


if __name__ == "__main__":
    main()

@ -1,11 +0,0 @@
@echo off
REM
REM Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
REM
REM SPDX-License-Identifier: Apache-2.0 OR MIT
REM
REM
REM
REM Provided as an example of how to run the Automated Launcher Test on a developer's local machine.
REM Runs the SamplesProject launcher tests against the locally built Windows launcher.
../../python/python.cmd run_launcher_tests_win.py --project-json-path "../../SamplesProject/project.json" --project-launcher-tests-folder "../../SamplesProject/LauncherTests"

@ -15,7 +15,6 @@
"Gems/AtomTressFX",
"Gems/AudioEngineWwise",
"Gems/AudioSystem",
"Gems/AutomatedLauncherTesting",
"Gems/AWSClientAuth",
"Gems/AWSCore",
"Gems/AWSGameLift",

Loading…
Cancel
Save