Merge pull request #1849 from aws-lumberyard-dev/jonawals_SPEC-7604

Jonawals spec 7604
monroegm-disable-blank-issue-2
jonawals 5 years ago committed by GitHub
commit 81f9afdf13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -43,7 +43,7 @@ namespace TestImpact
namespace AZStd
{
//! Hash function for ParentTarget types for use in maps and sets
//! Hash function for ParentTarget types for use in maps and sets.
template<> struct hash<TestImpact::ParentTarget>
{
size_t operator()(const TestImpact::ParentTarget& parentTarget) const noexcept

@ -40,7 +40,7 @@ namespace TestImpact
//! Returns true if the specified target is in the list, otherwise false.
bool HasTarget(const AZStd::string& name) const;
// Returns the number of targets in the list.
//! Returns the number of targets in the list.
size_t GetNumTargets() const;
private:

@ -28,17 +28,6 @@ namespace TestImpact
return m_coverageArtifact;
}
//! Parses the run and coverage artifacts produced by an instrumented test job
//! into the runner's job payload (the parsed test run paired with its coverage).
//! @param runFile      Path to the gtest run artifact on disk.
//! @param coverageFile Path to the Cobertura coverage artifact on disk.
//! @param duration     Wall-clock duration of the job, stored in the TestRun.
InstrumentedTestRunner::JobPayload ParseTestRunAndCoverageFiles(
const RepoPath& runFile,
const RepoPath& coverageFile,
AZStd::chrono::milliseconds duration)
{
// NOTE(review): ReadFileContents is instantiated with TestEngineException here,
// which suggests read failures surface as that exception type — confirm
// against the ReadFileContents declaration.
TestRun run(GTest::TestRunSuitesFactory(ReadFileContents<TestEngineException>(runFile)), duration);
AZStd::vector<ModuleCoverage> moduleCoverages = Cobertura::ModuleCoveragesFactory(ReadFileContents<TestEngineException>(coverageFile));
// Move the parsed artifacts into the payload to avoid copying coverage data.
TestCoverage coverage(AZStd::move(moduleCoverages));
return {AZStd::move(run), AZStd::move(coverage)};
}
InstrumentedTestRunner::InstrumentedTestRunner(size_t maxConcurrentRuns)
: JobRunner(maxConcurrentRuns)
{
@ -58,16 +47,35 @@ namespace TestImpact
const auto& [meta, jobInfo] = jobData;
if (meta.m_result == JobResult::ExecutedWithSuccess || meta.m_result == JobResult::ExecutedWithFailure)
{
const auto printException = [](const Exception& e)
{
AZ_Printf("RunInstrumentedTests", AZStd::string::format("%s\n.", e.what()).c_str());
};
AZStd::optional<TestRun> run;
try
{
runs[jobId] = ParseTestRunAndCoverageFiles(
jobInfo->GetRunArtifactPath(),
jobInfo->GetCoverageArtifactPath(),
run = TestRun(
GTest::TestRunSuitesFactory(ReadFileContents<TestEngineException>(jobInfo->GetRunArtifactPath())),
meta.m_duration.value());
}
catch (const Exception& e)
{
AZ_Printf("RunInstrumentedTests", AZStd::string::format("%s\n", e.what()).c_str());
// A missing run result is not necessarily a failure (e.g. test targets not using gtest)
printException(e);
}
try
{
AZStd::vector<ModuleCoverage> moduleCoverages =
Cobertura::ModuleCoveragesFactory(ReadFileContents<TestEngineException>(jobInfo->GetCoverageArtifactPath()));
TestCoverage coverage(AZStd::move(moduleCoverages));
runs[jobId] = { run, AZStd::move(coverage) };
}
catch (const Exception& e)
{
printException(e);
// No coverage, however, is a failure
runs[jobId] = AZStd::nullopt;
}
}

@ -30,9 +30,9 @@ namespace TestImpact
//! Runs a batch of test targets to determine the test coverage and passes/failures.
class InstrumentedTestRunner
: public TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<TestRun, TestCoverage>>
: public TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>
{
using JobRunner = TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<TestRun, TestCoverage>>;
using JobRunner = TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>;
public:
//! Constructs an instrumented test runner with the specified parameters common to all job runs of this runner.

@ -13,9 +13,9 @@ namespace TestImpact
{
namespace
{
AZStd::optional<TestRun> ReleaseTestRun(AZStd::optional<AZStd::pair<TestRun, TestCoverage>>& testRunAndCoverage)
AZStd::optional<TestRun> ReleaseTestRun(AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>& testRunAndCoverage)
{
if (testRunAndCoverage.has_value())
if (testRunAndCoverage.has_value() && testRunAndCoverage->first.has_value())
{
return AZStd::move(testRunAndCoverage.value().first);
}
@ -23,7 +23,8 @@ namespace TestImpact
return AZStd::nullopt;
}
AZStd::optional<TestCoverage> ReleaseTestCoverage(AZStd::optional<AZStd::pair<TestRun, TestCoverage>>& testRunAndCoverage)
AZStd::optional<TestCoverage> ReleaseTestCoverage(
AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>& testRunAndCoverage)
{
if (testRunAndCoverage.has_value())
{
@ -34,7 +35,8 @@ namespace TestImpact
}
}
TestEngineInstrumentedRun::TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<TestRun, TestCoverage>>&& testRunAndCoverage)
TestEngineInstrumentedRun::TestEngineInstrumentedRun(
TestEngineJob&& testJob, AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>&& testRunAndCoverage)
: TestEngineRegularRun(AZStd::move(testJob), ReleaseTestRun(testRunAndCoverage))
, m_testCoverage(ReleaseTestCoverage(testRunAndCoverage))
{

@ -17,7 +17,7 @@ namespace TestImpact
: public TestEngineRegularRun
{
public:
TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<TestRun, TestCoverage>>&& testRunAndCoverage);
TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>&& testRunAndCoverage);
//! Returns the test coverage payload for this job (if any).
const AZStd::optional<TestCoverage>& GetTestCoverge() const;

@ -22,6 +22,8 @@ namespace TestImpact
{
namespace
{
static const char* const LogCallSite = "TestImpact";
//! Simple helper class for tracking basic timing information.
class Timer
{
@ -149,7 +151,8 @@ namespace TestImpact
}
catch ([[maybe_unused]]const Exception& e)
{
AZ_Printf("TestImpactRuntime",
AZ_Printf(
LogCallSite,
AZStd::string::format(
"No test impact analysis data found for suite '%s' at %s\n", GetSuiteTypeName(m_suiteFilter).c_str(), m_sparTIAFile.c_str()).c_str());
}
@ -283,8 +286,8 @@ namespace TestImpact
job.GetTestCoverge().has_value(),
RuntimeException,
AZStd::string::format(
"Test target '%s' completed its test run successfully but produced no coverage data",
job.GetTestTarget()->GetName().c_str()));
"Test target '%s' completed its test run successfully but produced no coverage data. Command string: '%s'",
job.GetTestTarget()->GetName().c_str(), job.GetCommandString().c_str()));
}
if (!job.GetTestCoverge().has_value())
@ -313,7 +316,7 @@ namespace TestImpact
}
else
{
AZ_Warning("TestImpact", false, "Ignoring source, source it outside of repo: '%s'", sourcePath.c_str());
AZ_Warning(LogCallSite, false, "Ignoring source, source it outside of repo: '%s'", sourcePath.c_str());
}
}
@ -322,17 +325,31 @@ namespace TestImpact
void Runtime::UpdateAndSerializeDynamicDependencyMap(const AZStd::vector<TestEngineInstrumentedRun>& jobs)
{
const auto sourceCoverageTestsList = CreateSourceCoveringTestFromTestCoverages(jobs);
if (!sourceCoverageTestsList.GetNumSources())
try
{
return;
}
const auto sourceCoverageTestsList = CreateSourceCoveringTestFromTestCoverages(jobs);
if (sourceCoverageTestsList.GetNumSources() == 0)
{
return;
}
m_dynamicDependencyMap->ReplaceSourceCoverage(sourceCoverageTestsList);
const auto sparTIA = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTIAData = SerializeSourceCoveringTestsList(sparTIA);
WriteFileContents<RuntimeException>(sparTIAData, m_sparTIAFile);
m_hasImpactAnalysisData = true;
m_dynamicDependencyMap->ReplaceSourceCoverage(sourceCoverageTestsList);
const auto sparTIA = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTIAData = SerializeSourceCoveringTestsList(sparTIA);
WriteFileContents<RuntimeException>(sparTIAData, m_sparTIAFile);
m_hasImpactAnalysisData = true;
}
catch(const RuntimeException& e)
{
if (m_integrationFailurePolicy == Policy::IntegrityFailure::Abort)
{
throw e;
}
else
{
AZ_Error(LogCallSite, false, e.what());
}
}
}
TestSequenceResult Runtime::RegularTestSequence(

@ -4,10 +4,6 @@
"timestamp": "${timestamp}"
},
"jenkins": {
"pipeline_of_truth" : [
"nightly-incremental",
"nightly-clean"
],
"use_test_impact_analysis": ${use_tiaf}
},
"repo": {

@ -13,8 +13,17 @@ EMPTY_JSON = readJSON text: '{}'
ENGINE_REPOSITORY_NAME = 'o3de'
BUILD_SNAPSHOTS = ['development', 'stabilization/2106', '']
DEFAULT_BUILD_SNAPSHOT = BUILD_SNAPSHOTS.get(0)
// Branches with build snapshots
BUILD_SNAPSHOTS = ['development', 'stabilization/2106']
// Build snapshots with empty snapshot (for use with 'SNAPSHOT' pipeline parameter)
BUILD_SNAPSHOTS_WITH_EMPTY = BUILD_SNAPSHOTS + ''
// The default build snapshot to be selected in the 'SNAPSHOT' pipeline parameter
DEFAULT_BUILD_SNAPSHOT = BUILD_SNAPSHOTS_WITH_EMPTY.get(0)
// Branches with build snapshots as comma separated value string
env.BUILD_SNAPSHOTS = BUILD_SNAPSHOTS.join(",")
def pipelineProperties = []
@ -476,7 +485,7 @@ try {
}
} else {
// Non-PR builds
pipelineParameters.add(choice(defaultValue: DEFAULT_BUILD_SNAPSHOT, name: 'SNAPSHOT', choices: BUILD_SNAPSHOTS, description: 'Selects the build snapshot to use. A more diverted snapshot will cause longer build times, but will not cause build failures.'))
pipelineParameters.add(choice(defaultValue: DEFAULT_BUILD_SNAPSHOT, name: 'SNAPSHOT', choices: BUILD_SNAPSHOTS_WITH_EMPTY, description: 'Selects the build snapshot to use. A more diverted snapshot will cause longer build times, but will not cause build failures.'))
snapshot = env.SNAPSHOT
echo "Snapshot \"${snapshot}\" selected."
}

@ -27,9 +27,7 @@
},
"profile_vs2019_pipe": {
"TAGS": [
"default",
"nightly-incremental",
"nightly-clean"
"default"
],
"steps": [
"profile_vs2019",
@ -90,7 +88,8 @@
"OUTPUT_DIRECTORY": "build/windows_vs2019",
"CONFIGURATION": "profile",
"SCRIPT_PATH": "scripts/build/TestImpactAnalysis/tiaf_driver.py",
"SCRIPT_PARAMETERS": "--testFailurePolicy=continue --suite main --pipeline !PIPELINE_NAME! --destCommit !CHANGE_ID! --config \"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/persistent/tiaf.profile.json\""
"SCRIPT_PARAMETERS":
"--config=\"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/persistent/tiaf.profile.json\" --suite=main --testFailurePolicy=continue --destBranch=!CHANGE_TARGET! --pipeline=!PIPELINE_NAME! --destCommit=!CHANGE_ID! --branchesOfTruth=!BUILD_SNAPSHOTS! --pipelinesOfTruth=default"
}
},
"debug_vs2019": {

@ -20,33 +20,51 @@ def is_child_path(parent_path, child_path):
return os.path.commonpath([os.path.abspath(parent_path)]) == os.path.commonpath([os.path.abspath(parent_path), os.path.abspath(child_path)])
class TestImpact:
def __init__(self, config_file, pipeline, dst_commit):
self.__pipeline = pipeline
def __init__(self, config_file, dst_commit, dst_branch, pipeline, branches_of_truth, pipelines_of_truth):
# Commit
self.__dst_commit = dst_commit
print(f"Commit: '{self.__dst_commit}'.")
self.__src_commit = None
self.__has_src_commit = False
# Branch
self.__dst_branch = dst_branch
print(f"Destination branch: '{self.__dst_branch}'.")
self.__branches_of_truth = branches_of_truth
print(f"Branches of truth: '{self.__branches_of_truth}'.")
if self.__dst_branch in self.__branches_of_truth:
self.__is_branch_of_truth = True
else:
self.__is_branch_of_truth = False
print(f"Is branch of truth: '{self.__is_branch_of_truth}'.")
# Pipeline
self.__pipeline = pipeline
print(f"Pipeline: '{self.__pipeline}'.")
self.__pipelines_of_truth = pipelines_of_truth
print(f"Pipelines of truth: '{self.__pipelines_of_truth}'.")
if self.__pipeline in self.__pipelines_of_truth:
self.__is_pipeline_of_truth = True
else:
self.__is_pipeline_of_truth = False
print(f"Is pipeline of truth: '{self.__is_pipeline_of_truth}'.")
# Config
self.__parse_config_file(config_file)
if self.__use_test_impact_analysis and not self.__is_pipeline_of_truth:
self.__generate_change_list()
# Sequence
if self.__use_test_impact_analysis:
if self.__is_pipeline_of_truth and self.__is_branch_of_truth:
self.__is_seeding = True
else:
self.__is_seeding = False
self.__generate_change_list()
# Parse the configuration file and retrieve the data needed for launching the test impact analysis runtime
def __parse_config_file(self, config_file):
print(f"Attempting to parse configuration file '{config_file}'...")
with open(config_file, "r") as config_data:
config = json.load(config_data)
# Repository
self.__repo_dir = config["repo"]["root"]
# Jenkins
self.__repo = Repo(self.__repo_dir)
# TIAF
self.__use_test_impact_analysis = config["jenkins"]["use_test_impact_analysis"]
self.__pipeline_of_truth = config["jenkins"]["pipeline_of_truth"]
print(f"Pipeline of truth: '{self.__pipeline_of_truth}'.")
print(f"This pipeline: '{self.__pipeline}'.")
if self.__pipeline in self.__pipeline_of_truth:
self.__is_pipeline_of_truth = True
else:
self.__is_pipeline_of_truth = False
print(f"Is pipeline of truth: '{self.__is_pipeline_of_truth}'.")
# TIAF binary
self.__tiaf_bin = config["repo"]["tiaf_bin"]
if self.__use_test_impact_analysis and not os.path.isfile(self.__tiaf_bin):
raise FileNotFoundError("Could not find tiaf binary")
@ -143,7 +161,7 @@ class TestImpact:
# Runs the specified test sequence
def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeout):
args = []
pipeline_of_truth_test_failure_policy = "continue"
seed_sequence_test_failure_policy = "continue"
# Suite
args.append(f"--suite={suite}")
print(f"Test suite is set to '{suite}'.")
@ -156,15 +174,15 @@ class TestImpact:
print(f"Global sequence timeout is set to {test_timeout} seconds.")
if self.__use_test_impact_analysis:
print("Test impact analysis is enabled.")
# Pipeline of truth sequence
if self.__is_pipeline_of_truth:
# Seed sequences
if self.__is_seeding:
# Sequence type
args.append("--sequence=seed")
print("Sequence type is set to 'seed'.")
# Test failure policy
args.append(f"--fpolicy={pipeline_of_truth_test_failure_policy}")
print(f"Test failure policy is set to '{pipeline_of_truth_test_failure_policy}'.")
# Non pipeline of truth sequence
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Impact analysis sequences
else:
if self.__has_change_list:
# Change list
@ -194,8 +212,8 @@ class TestImpact:
# Pipeline of truth sequence
if self.__is_pipeline_of_truth:
# Test failure policy
args.append(f"--fpolicy={pipeline_of_truth_test_failure_policy}")
print(f"Test failure policy is set to '{pipeline_of_truth_test_failure_policy}'.")
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Non pipeline of truth sequence
else:
# Test failure policy
@ -205,7 +223,7 @@ class TestImpact:
print("Args: ", end='')
print(*args)
result = subprocess.run([self.__tiaf_bin] + args)
# If the sequence completed 9with or without failures) we will update the historical meta-data
# If the sequence completed (with or without failures) we will update the historical meta-data
if result.returncode == 0 or result.returncode == 7:
print("Test impact analysis runtime returned successfully.")
if self.__is_pipeline_of_truth:

@ -35,14 +35,16 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest="config", type=file_path, help="Path to the test impact analysis framework configuration file", required=True)
parser.add_argument('--destBranch', dest="dst_branch", help="For PR builds, the destination branch to be merged to, otherwise empty")
parser.add_argument('--branchesOfTruth', dest="branches_of_truth", type=lambda arg: arg.split(','), help="Comma separated branches that seeding will occur on", required=True)
parser.add_argument('--pipeline', dest="pipeline", help="Pipeline the test impact analysis framework is running on", required=True)
parser.add_argument('--pipelinesOfTruth', dest="pipelines_of_truth", type=lambda arg: arg.split(','), help="Comma separated pipeline that seeding will occur on", required=True)
parser.add_argument('--destCommit', dest="dst_commit", help="Commit to run test impact analysis on (ignored when seeding)", required=True)
parser.add_argument('--suite', dest="suite", help="Test suite to run", required=True)
parser.add_argument('--testFailurePolicy', dest="test_failure_policy", type=test_failure_policy, help="Test failure policy for regular and test impact sequences (ignored when seeding)", required=True)
parser.add_argument('--safeMode', dest="safe_mode", action='store_true', help="Run impact analysis tests in safe mode (ignored when seeding)")
parser.add_argument('--testTimeout', dest="test_timeout", type=timout_type, help="Maximum run time (in seconds) of any test target before being terminated", required=False)
parser.add_argument('--globalTimeout', dest="global_timeout", type=timout_type, help="Maximum run time of the sequence before being terminated", required=False)
parser.set_defaults(test_failure_policy="abort")
parser.set_defaults(test_timeout=None)
parser.set_defaults(global_timeout=None)
args = parser.parse_args()
@ -52,7 +54,7 @@ def parse_args():
if __name__ == "__main__":
try:
args = parse_args()
tiaf = TestImpact(args.config, args.pipeline, args.dst_commit)
tiaf = TestImpact(args.config, args.dst_commit, args.dst_branch, args.pipeline, args.branches_of_truth, args.pipelines_of_truth)
return_code = tiaf.run(args.suite, args.test_failure_policy, args.safe_mode, args.test_timeout, args.global_timeout)
# Non-gating will be removed from this script and handled at the job level in SPEC-7413
#sys.exit(return_code)

Loading…
Cancel
Save