Implement TIAF sequence reporting for MARS

Merge pull request #3010 from aws-lumberyard-dev/TIF/Runtime_merge
hultonha committed 4 years ago via GitHub
commit 01ed06c7e1

@ -6,6 +6,8 @@
*
*/
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactCommandLineOptions.h>
#include <TestImpactCommandLineOptionsUtils.h>
@ -19,8 +21,9 @@ namespace TestImpact
{
// Options
ConfigKey,
DataFileKey,
ChangeListKey,
OutputChangeListKey,
SequenceReportKey,
SequenceKey,
TestPrioritizationPolicyKey,
ExecutionFailurePolicyKey,
@ -55,8 +58,9 @@ namespace TestImpact
{
// Options
"config",
"datafile",
"changelist",
"ochangelist",
"report",
"sequence",
"ppolicy",
"epolicy",
@ -92,14 +96,19 @@ namespace TestImpact
return ParsePathOption(OptionKeys[ConfigKey], cmd).value_or(LY_TEST_IMPACT_DEFAULT_CONFIG_FILE);
}
AZStd::optional<RepoPath> ParseDataFile(const AZ::CommandLine& cmd)
{
return ParsePathOption(OptionKeys[DataFileKey], cmd);
}
AZStd::optional<RepoPath> ParseChangeListFile(const AZ::CommandLine& cmd)
{
return ParsePathOption(OptionKeys[ChangeListKey], cmd);
}
bool ParseOutputChangeList(const AZ::CommandLine& cmd)
AZStd::optional<RepoPath> ParseSequenceReportFile(const AZ::CommandLine& cmd)
{
return ParseOnOffOption(OptionKeys[OutputChangeListKey], BinaryStateValue<bool>{ false, true }, cmd).value_or(false);
return ParsePathOption(OptionKeys[SequenceReportKey], cmd);
}
TestSequenceType ParseTestSequenceType(const AZ::CommandLine& cmd)
@ -255,9 +264,9 @@ namespace TestImpact
{
const AZStd::vector<AZStd::pair<AZStd::string, SuiteType>> states =
{
{GetSuiteTypeName(SuiteType::Main), SuiteType::Main},
{GetSuiteTypeName(SuiteType::Periodic), SuiteType::Periodic},
{GetSuiteTypeName(SuiteType::Sandbox), SuiteType::Sandbox}
{ SuiteTypeAsString(SuiteType::Main), SuiteType::Main },
{ SuiteTypeAsString(SuiteType::Periodic), SuiteType::Periodic },
{ SuiteTypeAsString(SuiteType::Sandbox), SuiteType::Sandbox }
};
return ParseMultiStateOption(OptionKeys[SuiteFilterKey], states, cmd).value_or(SuiteType::Main);
@ -270,8 +279,9 @@ namespace TestImpact
cmd.Parse(argc, argv);
m_configurationFile = ParseConfigurationFile(cmd);
m_dataFile = ParseDataFile(cmd);
m_changeListFile = ParseChangeListFile(cmd);
m_outputChangeList = ParseOutputChangeList(cmd);
m_sequenceReportFile = ParseSequenceReportFile(cmd);
m_testSequenceType = ParseTestSequenceType(cmd);
m_testPrioritizationPolicy = ParseTestPrioritizationPolicy(cmd);
m_executionFailurePolicy = ParseExecutionFailurePolicy(cmd);
@ -286,28 +296,43 @@ namespace TestImpact
m_safeMode = ParseSafeMode(cmd);
m_suiteFilter = ParseSuiteFilter(cmd);
}
bool CommandLineOptions::HasDataFilePath() const
{
return m_dataFile.has_value();
}
bool CommandLineOptions::HasChangeListFile() const
bool CommandLineOptions::HasChangeListFilePath() const
{
return m_changeListFile.has_value();
}
bool CommandLineOptions::HasSequenceReportFilePath() const
{
return m_sequenceReportFile.has_value();
}
bool CommandLineOptions::HasSafeMode() const
{
return m_safeMode;
}
const AZStd::optional<RepoPath>& CommandLineOptions::GetChangeListFile() const
const AZStd::optional<RepoPath>& CommandLineOptions::GetDataFilePath() const
{
return m_dataFile;
}
const AZStd::optional<RepoPath>& CommandLineOptions::GetChangeListFilePath() const
{
return m_changeListFile;
}
bool CommandLineOptions::HasOutputChangeList() const
const AZStd::optional<RepoPath>& CommandLineOptions::GetSequenceReportFilePath() const
{
return m_outputChangeList;
return m_sequenceReportFile;
}
const RepoPath& CommandLineOptions::GetConfigurationFile() const
const RepoPath& CommandLineOptions::GetConfigurationFilePath() const
{
return m_configurationFile;
}
@ -379,8 +404,12 @@ namespace TestImpact
" options:\n"
" -config=<filename> Path to the configuration file for the TIAF runtime (default: \n"
" <tiaf binay build dir>.<tiaf binary build type>.json).\n"
" -datafile=<filename> Optional path to a test impact data file that will used instead of that\n"
" specified in the config file.\n"
" -changelist=<filename> Path to the JSON of source file changes to perform test impact \n"
" analysis on.\n"
" -report=<filename> Path to where the sequence report file will be written (if this option \n"
" is not specified, no report will be written).\n"
" -gtimeout=<seconds> Global timeout value to terminate the entire test sequence should it \n"
" be exceeded.\n"
" -ttimeout=<seconds> Timeout value to terminate individual test targets should it be \n"
@ -443,7 +472,6 @@ namespace TestImpact
" available, no prioritization will occur).\n"
" -maxconcurrency=<number> The maximum number of concurrent test targets/shards to be in flight at \n"
" any given moment.\n"
" -ochangelist=<on,off> Outputs the change list used for test selection.\n"
" -suite=<main, periodic, sandbox> The test suite to select from for this test sequence.";
return help;

@ -36,20 +36,29 @@ namespace TestImpact
CommandLineOptions(int argc, char** argv);
static AZStd::string GetCommandLineUsageString();
//! Returns true if a test impact data file path has been supplied, otherwise false.
bool HasDataFilePath() const;
//! Returns true if a change list file path has been supplied, otherwise false.
bool HasChangeListFile() const;
bool HasChangeListFilePath() const;
//! Returns true if a sequence report file path has been supplied, otherwise false.
bool HasSequenceReportFilePath() const;
//! Returns true if the safe mode option has been enabled, otherwise false.
bool HasSafeMode() const;
//! Returns true if the output change list option has been enabled, otherwise false.
bool HasOutputChangeList() const;
//! Returns the path to the runtime configuration file.
const RepoPath& GetConfigurationFile() const;
const RepoPath& GetConfigurationFilePath() const;
//! Returns the path to the data file (if any).
const AZStd::optional<RepoPath>& GetDataFilePath() const;
//! Returns the path to the change list file (if any).
const AZStd::optional<RepoPath>& GetChangeListFile() const;
const AZStd::optional<RepoPath>& GetChangeListFilePath() const;
//! Returns the path to the sequence report file (if any).
const AZStd::optional<RepoPath>& GetSequenceReportFilePath() const;
//! Returns the test sequence type to run.
TestSequenceType GetTestSequenceType() const;
@ -89,8 +98,9 @@ namespace TestImpact
private:
RepoPath m_configurationFile;
AZStd::optional<RepoPath> m_dataFile;
AZStd::optional<RepoPath> m_changeListFile;
bool m_outputChangeList = false;
AZStd::optional<RepoPath> m_sequenceReportFile;
TestSequenceType m_testSequenceType;
Policy::TestPrioritization m_testPrioritizationPolicy = Policy::TestPrioritization::None;
Policy::ExecutionFailure m_executionFailurePolicy = Policy::ExecutionFailure::Continue;

@ -9,14 +9,16 @@
#include <TestImpactFramework/TestImpactException.h>
#include <TestImpactFramework/TestImpactChangeListException.h>
#include <TestImpactFramework/TestImpactConfigurationException.h>
#include <TestImpactFramework/TestImpactSequenceReportException.h>
#include <TestImpactFramework/TestImpactRuntimeException.h>
#include <TestImpactFramework/TestImpactConsoleMain.h>
#include <TestImpactFramework/TestImpactChangeListSerializer.h>
#include <TestImpactFramework/TestImpactChangeList.h>
#include <TestImpactFramework/TestImpactRuntime.h>
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactFramework/TestImpactClientTestSelection.h>
#include <TestImpactFramework/TestImpactRuntime.h>
#include <TestImpactFramework/TestImpactClientSequenceReportSerializer.h>
#include <TestImpactConsoleTestSequenceEventHandler.h>
#include <TestImpactCommandLineOptions.h>
@ -33,31 +35,6 @@ namespace TestImpact
{
namespace Console
{
//! Generates a string to be used for printing to the console for the specified change list.
AZStd::string GenerateChangeListString(const ChangeList& changeList)
{
AZStd::string output;
const auto& outputFiles = [&output](const AZStd::vector<RepoPath>& files)
{
for (const auto& file : files)
{
output += AZStd::string::format("\t%s\n", file.c_str());
}
};
output += AZStd::string::format("Created files (%u):\n", changeList.m_createdFiles.size());
outputFiles(changeList.m_createdFiles);
output += AZStd::string::format("Updated files (%u):\n", changeList.m_updatedFiles.size());
outputFiles(changeList.m_updatedFiles);
output += AZStd::string::format("Deleted files (%u):\n", changeList.m_deletedFiles.size());
outputFiles(changeList.m_deletedFiles);
return output;
}
//! Gets the appropriate console return code for the specified test sequence result.
ReturnCode GetReturnCodeForTestSequenceResult(TestSequenceResult result)
{
@ -75,6 +52,20 @@ namespace TestImpact
}
}
//! Wrapper around sequence reports to optionally serialize them and transform the result into a return code.
template<typename SequenceReportType>
ReturnCode ConsumeSequenceReportAndGetReturnCode(const SequenceReportType& sequenceReport, const CommandLineOptions& options)
{
if (options.HasSequenceReportFilePath())
{
std::cout << "Exporting sequence report '" << options.GetSequenceReportFilePath().value().c_str() << "'" << std::endl;
const auto sequenceReportJson = SerializeSequenceReport(sequenceReport);
WriteFileContents<SequenceReportException>(sequenceReportJson, options.GetSequenceReportFilePath().value());
}
return GetReturnCodeForTestSequenceResult(sequenceReport.GetResult());
}
//! Wrapper around impact analysis sequences to handle the case where the safe mode option is active.
ReturnCode WrappedImpactAnalysisTestSequence(
const CommandLineOptions& options,
@ -88,35 +79,34 @@ namespace TestImpact
CommandLineOptionsException,
"Expected a change list for impact analysis but none was provided");
TestSequenceResult result = TestSequenceResult::Failure;
if (options.HasSafeMode())
{
if (options.GetTestSequenceType() == TestSequenceType::ImpactAnalysis)
{
auto safeImpactAnalysisSequenceReport = runtime.SafeImpactAnalysisTestSequence(
changeList.value(),
options.GetTestPrioritizationPolicy(),
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
SafeImpactAnalysisTestSequenceStartCallback,
SafeImpactAnalysisTestSequenceCompleteCallback,
TestRunCompleteCallback);
result = safeImpactAnalysisSequenceReport.GetResult();
return ConsumeSequenceReportAndGetReturnCode(
runtime.SafeImpactAnalysisTestSequence(
changeList.value(),
options.GetTestPrioritizationPolicy(),
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
SafeImpactAnalysisTestSequenceStartCallback,
SafeImpactAnalysisTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
else if (options.GetTestSequenceType() == TestSequenceType::ImpactAnalysisNoWrite)
{
// A no-write impact analysis sequence with safe mode enabled is functionally identical to a regular sequence type
// due to a) the selected tests being run without instrumentation and b) the discarded tests also being run without
// instrumentation
auto sequenceReport = runtime.RegularTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
TestSequenceCompleteCallback,
TestRunCompleteCallback);
result = sequenceReport.GetResult();
return ConsumeSequenceReportAndGetReturnCode(
runtime.RegularTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
RegularTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
else
{
@ -139,20 +129,18 @@ namespace TestImpact
throw(Exception("Unexpected sequence type"));
}
auto impactAnalysisSequenceReport = runtime.ImpactAnalysisTestSequence(
changeList.value(),
options.GetTestPrioritizationPolicy(),
dynamicDependencyMapPolicy,
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
ImpactAnalysisTestSequenceStartCallback,
ImpactAnalysisTestSequenceCompleteCallback,
TestRunCompleteCallback);
result = impactAnalysisSequenceReport.GetResult();
return ConsumeSequenceReportAndGetReturnCode(
runtime.ImpactAnalysisTestSequence(
changeList.value(),
options.GetTestPrioritizationPolicy(),
dynamicDependencyMapPolicy,
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
ImpactAnalysisTestSequenceStartCallback,
ImpactAnalysisTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
return GetReturnCodeForTestSequenceResult(result);
};
//! Entry point for the test impact analysis framework console front end application.
@ -164,28 +152,22 @@ namespace TestImpact
AZStd::optional<ChangeList> changeList;
// If a change list file has been supplied, read and deserialize the change list
if (options.HasChangeListFile())
if (options.HasChangeListFilePath())
{
changeList = DeserializeChangeList(ReadFileContents<CommandLineOptionsException>(*options.GetChangeListFile()));
if (options.HasOutputChangeList())
{
std::cout << "Change List:\n";
std::cout << GenerateChangeListString(*changeList).c_str();
if (options.GetTestSequenceType() == TestSequenceType::None)
{
return ReturnCode::Success;
}
}
changeList = DeserializeChangeList(ReadFileContents<CommandLineOptionsException>(*options.GetChangeListFilePath()));
}
// As of now, there are no other non-test operations other than printing a change list so getting this far is considered an error
AZ_TestImpact_Eval(options.GetTestSequenceType() != TestSequenceType::None, CommandLineOptionsException, "No action specified");
// As of now, there are no non-test operations but leave this door open for the future
if (options.GetTestSequenceType() == TestSequenceType::None)
{
return ReturnCode::Success;
}
std::cout << "Constructing in-memory model of source tree and test coverage for test suite ";
std::cout << GetSuiteTypeName(options.GetSuiteFilter()).c_str() << ", this may take a moment...\n";
std::cout << SuiteTypeAsString(options.GetSuiteFilter()).c_str() << ", this may take a moment...\n";
Runtime runtime(
RuntimeConfigurationFactory(ReadFileContents<CommandLineOptionsException>(options.GetConfigurationFile())),
RuntimeConfigurationFactory(ReadFileContents<CommandLineOptionsException>(options.GetConfigurationFilePath())),
options.GetDataFilePath(),
options.GetSuiteFilter(),
options.GetExecutionFailurePolicy(),
options.GetFailedTestCoveragePolicy(),
@ -208,25 +190,25 @@ namespace TestImpact
{
case TestSequenceType::Regular:
{
const auto sequenceReport = runtime.RegularTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
TestSequenceCompleteCallback,
TestRunCompleteCallback);
return GetReturnCodeForTestSequenceResult(sequenceReport.GetResult());
return ConsumeSequenceReportAndGetReturnCode(
runtime.RegularTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
RegularTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
case TestSequenceType::Seed:
{
const auto sequenceReport = runtime.SeededTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
TestSequenceCompleteCallback,
TestRunCompleteCallback);
return GetReturnCodeForTestSequenceResult(sequenceReport.GetResult());
return ConsumeSequenceReportAndGetReturnCode(
runtime.SeededTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
SeedTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
case TestSequenceType::ImpactAnalysisNoWrite:
case TestSequenceType::ImpactAnalysis:
@ -241,14 +223,14 @@ namespace TestImpact
}
else
{
const auto sequenceReport = runtime.SeededTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
TestSequenceCompleteCallback,
TestRunCompleteCallback);
return GetReturnCodeForTestSequenceResult(sequenceReport.GetResult());
return ConsumeSequenceReportAndGetReturnCode(
runtime.SeededTestSequence(
options.GetTestTargetTimeout(),
options.GetGlobalTimeout(),
TestSequenceStartCallback,
SeedTestSequenceCompleteCallback,
TestRunCompleteCallback),
options);
}
}
default:

@ -6,8 +6,9 @@
*
*/
#include <TestImpactConsoleTestSequenceEventHandler.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactConsoleTestSequenceEventHandler.h>
#include <TestImpactConsoleUtils.h>
#include <iostream>
@ -20,7 +21,7 @@ namespace TestImpact
{
void TestSuiteFilter(SuiteType filter)
{
std::cout << "Test suite filter: " << GetSuiteTypeName(filter).c_str() << "\n";
std::cout << "Test suite filter: " << SuiteTypeAsString(filter).c_str() << "\n";
}
void ImpactAnalysisTestSelection(size_t numSelectedTests, size_t numDiscardedTests, size_t numExcludedTests, size_t numDraftedTests)
@ -36,69 +37,66 @@ namespace TestImpact
{
std::cout << "Sequence completed in " << (testRunReport.GetDuration().count() / 1000.f) << "s with";
if (!testRunReport.GetExecutionFailureTests().empty() ||
!testRunReport.GetFailingTests().empty() ||
!testRunReport.GetTimedOutTests().empty() ||
!testRunReport.GetUnexecutedTests().empty())
if (!testRunReport.GetExecutionFailureTestRuns().empty() ||
!testRunReport.GetFailingTestRuns().empty() ||
!testRunReport.GetTimedOutTestRuns().empty() ||
!testRunReport.GetUnexecutedTestRuns().empty())
{
std::cout << ":\n";
std::cout << SetColor(Foreground::White, Background::Red).c_str()
<< testRunReport.GetFailingTests().size()
<< testRunReport.GetFailingTestRuns().size()
<< ResetColor().c_str() << " test failures\n";
std::cout << SetColor(Foreground::White, Background::Red).c_str()
<< testRunReport.GetExecutionFailureTests().size()
<< testRunReport.GetExecutionFailureTestRuns().size()
<< ResetColor().c_str() << " execution failures\n";
std::cout << SetColor(Foreground::White, Background::Red).c_str()
<< testRunReport.GetTimedOutTests().size()
<< testRunReport.GetTimedOutTestRuns().size()
<< ResetColor().c_str() << " test timeouts\n";
std::cout << SetColor(Foreground::White, Background::Red).c_str()
<< testRunReport.GetUnexecutedTests().size()
<< testRunReport.GetUnexecutedTestRuns().size()
<< ResetColor().c_str() << " unexecuted tests\n";
if (!testRunReport.GetFailingTests().empty())
if (!testRunReport.GetFailingTestRuns().empty())
{
std::cout << "\nTest failures:\n";
for (const auto& testRunFailure : testRunReport.GetFailingTests())
for (const auto& testRunFailure : testRunReport.GetFailingTestRuns())
{
for (const auto& testCaseFailure : testRunFailure.GetTestCaseFailures())
for (const auto& test : testRunFailure.GetTests())
{
for (const auto& testFailure : testCaseFailure.GetTestFailures())
if (test.GetResult() == Client::TestResult::Failed)
{
std::cout << " "
<< testRunFailure.GetTargetName().c_str()
<< "." << testCaseFailure.GetName().c_str()
<< "." << testFailure.GetName().c_str() << "\n";
std::cout << " " << test.GetName().c_str() << "\n";
}
}
}
}
if (!testRunReport.GetExecutionFailureTests().empty())
if (!testRunReport.GetExecutionFailureTestRuns().empty())
{
std::cout << "\nExecution failures:\n";
for (const auto& executionFailure : testRunReport.GetExecutionFailureTests())
for (const auto& executionFailure : testRunReport.GetExecutionFailureTestRuns())
{
std::cout << " " << executionFailure.GetTargetName().c_str() << "\n";
std::cout << executionFailure.GetCommandString().c_str() << "\n";
}
}
if (!testRunReport.GetTimedOutTests().empty())
if (!testRunReport.GetTimedOutTestRuns().empty())
{
std::cout << "\nTimed out tests:\n";
for (const auto& testTimeout : testRunReport.GetTimedOutTests())
for (const auto& testTimeout : testRunReport.GetTimedOutTestRuns())
{
std::cout << " " << testTimeout.GetTargetName().c_str() << "\n";
}
}
if (!testRunReport.GetUnexecutedTests().empty())
if (!testRunReport.GetUnexecutedTestRuns().empty())
{
std::cout << "\nUnexecuted tests:\n";
for (const auto& unexecutedTest : testRunReport.GetUnexecutedTests())
for (const auto& unexecutedTest : testRunReport.GetUnexecutedTestRuns())
{
std::cout << " " << unexecutedTest.GetTargetName().c_str() << "\n";
}
@ -106,7 +104,7 @@ namespace TestImpact
}
else
{
std::cout << SetColor(Foreground::White, Background::Green).c_str() << " \100% passes!\n" << ResetColor().c_str() << "\n";
std::cout << " " << SetColor(Foreground::White, Background::Green).c_str() << "100% passes!\n" << ResetColor().c_str() << "\n";
}
}
}
@ -149,13 +147,17 @@ namespace TestImpact
draftedTests.size());
}
void TestSequenceCompleteCallback(const Client::SequenceReport& sequenceReport)
void RegularTestSequenceCompleteCallback(const Client::RegularSequenceReport& sequenceReport)
{
Output::FailureReport(sequenceReport.GetSelectedTestRunReport());
std::cout << "Updating and serializing the test impact analysis data, this may take a moment...\n";
}
void SeedTestSequenceCompleteCallback(const Client::SeedSequenceReport& sequenceReport)
{
Output::FailureReport(sequenceReport.GetSelectedTestRunReport());
}
void ImpactAnalysisTestSequenceCompleteCallback(const Client::ImpactAnalysisSequenceReport& sequenceReport)
{
std::cout << "Selected test run:\n";
@ -181,7 +183,7 @@ namespace TestImpact
std::cout << "Updating and serializing the test impact analysis data, this may take a moment...\n";
}
void TestRunCompleteCallback(const Client::TestRun& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns)
void TestRunCompleteCallback(const Client::TestRunBase& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns)
{
const auto progress =
AZStd::string::format("(%03u/%03u)", numTestRunsCompleted, totalNumTestRuns, testRun.GetTargetName().c_str());

@ -38,8 +38,11 @@ namespace TestImpact
const Client::TestRunSelection& discardedTests,
const AZStd::vector<AZStd::string>& draftedTests);
//! Handler for TestSequenceCompleteCallback event.
void TestSequenceCompleteCallback(const Client::SequenceReport& sequenceReport);
//! Handler for RegularTestSequenceCompleteCallback event.
void RegularTestSequenceCompleteCallback(const Client::RegularSequenceReport& sequenceReport);
//! Handler for SeedTestSequenceCompleteCallback event.
void SeedTestSequenceCompleteCallback(const Client::SeedSequenceReport& sequenceReport);
//! Handler for ImpactAnalysisTestSequenceCompleteCallback event.
void ImpactAnalysisTestSequenceCompleteCallback(const Client::ImpactAnalysisSequenceReport& sequenceReport);
@ -48,6 +51,6 @@ namespace TestImpact
void SafeImpactAnalysisTestSequenceCompleteCallback(const Client::SafeImpactAnalysisSequenceReport& sequenceReport);
//! Handler for TestRunCompleteCallback event.
void TestRunCompleteCallback(const Client::TestRun& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns);
void TestRunCompleteCallback(const Client::TestRunBase& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns);
} // namespace Console
} // namespace TestImpact

@ -7,6 +7,7 @@
*/
#include <TestImpactFramework/TestImpactConfigurationException.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactRuntimeConfigurationFactory.h>
@ -140,17 +141,17 @@ namespace TestImpact
return tempWorkspaceConfig;
}
AZStd::array<RepoPath, 3> ParseTestImpactAnalysisDataFiles(const RepoPath& root, const rapidjson::Value& sparTIAFile)
AZStd::array<RepoPath, 3> ParseTestImpactAnalysisDataFiles(const RepoPath& root, const rapidjson::Value& sparTiaFile)
{
AZStd::array<RepoPath, 3> sparTIAFiles;
sparTIAFiles[static_cast<size_t>(SuiteType::Main)] =
GetAbsPathFromRelPath(root, sparTIAFile[GetSuiteTypeName(SuiteType::Main).c_str()].GetString());
sparTIAFiles[static_cast<size_t>(SuiteType::Periodic)] =
GetAbsPathFromRelPath(root, sparTIAFile[GetSuiteTypeName(SuiteType::Periodic).c_str()].GetString());
sparTIAFiles[static_cast<size_t>(SuiteType::Sandbox)] =
GetAbsPathFromRelPath(root, sparTIAFile[GetSuiteTypeName(SuiteType::Sandbox).c_str()].GetString());
AZStd::array<RepoPath, 3> sparTiaFiles;
sparTiaFiles[static_cast<size_t>(SuiteType::Main)] =
GetAbsPathFromRelPath(root, sparTiaFile[SuiteTypeAsString(SuiteType::Main).c_str()].GetString());
sparTiaFiles[static_cast<size_t>(SuiteType::Periodic)] =
GetAbsPathFromRelPath(root, sparTiaFile[SuiteTypeAsString(SuiteType::Periodic).c_str()].GetString());
sparTiaFiles[static_cast<size_t>(SuiteType::Sandbox)] =
GetAbsPathFromRelPath(root, sparTiaFile[SuiteTypeAsString(SuiteType::Sandbox).c_str()].GetString());
return sparTIAFiles;
return sparTiaFiles;
}
WorkspaceConfig::Active ParseActiveWorkspaceConfig(const rapidjson::Value& activeWorkspace)
@ -160,7 +161,7 @@ namespace TestImpact
activeWorkspaceConfig.m_root = activeWorkspace[Config::Keys[Config::Root]].GetString();
activeWorkspaceConfig.m_enumerationCacheDirectory
= GetAbsPathFromRelPath(activeWorkspaceConfig.m_root, relativePaths[Config::Keys[Config::EnumerationCacheDir]].GetString());
activeWorkspaceConfig.m_sparTIAFiles =
activeWorkspaceConfig.m_sparTiaFiles =
ParseTestImpactAnalysisDataFiles(activeWorkspaceConfig.m_root, relativePaths[Config::Keys[Config::TestImpactDataFiles]]);
return activeWorkspaceConfig;
}

@ -1,6 +1,7 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
@ -11,10 +12,25 @@
#include <TestImpactFramework/TestImpactClientTestSelection.h>
#include <TestImpactFramework/TestImpactTestSequence.h>
#include <AzCore/std/optional.h>
#include <AzCore/std/chrono/chrono.h>
namespace TestImpact
{
namespace Client
{
//! The report types generated by each sequence.
enum class SequenceReportType : AZ::u8
{
RegularSequence,
SeedSequence,
ImpactAnalysisSequence,
SafeImpactAnalysisSequence
};
//! Calculates the final sequence result for a composite of multiple sequences.
TestSequenceResult CalculateMultiTestSequenceResult(const AZStd::vector<TestSequenceResult>& results);
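The definition of CalculateMultiTestSequenceResult is not part of this diff; a minimal sketch of one plausible "worst result wins" aggregation (assuming TestSequenceResult also defines a Timeout value, which is not visible in this change) might look like:
// Illustrative sketch only, not the committed implementation.
TestSequenceResult CalculateMultiTestSequenceResult(const AZStd::vector<TestSequenceResult>& results)
{
    TestSequenceResult aggregateResult = TestSequenceResult::Success;
    for (const auto result : results)
    {
        if (result == TestSequenceResult::Failure)
        {
            return TestSequenceResult::Failure; // Any failing sub-sequence fails the composite sequence.
        }
        else if (result == TestSequenceResult::Timeout)
        {
            aggregateResult = TestSequenceResult::Timeout; // Remember a timeout unless a failure is encountered.
        }
    }
    return aggregateResult;
}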
//! Report detailing the result and duration of a given set of test runs along with the details of each individual test run.
class TestRunReport
{
@ -23,20 +39,20 @@ namespace TestImpact
//! @param result The result of this set of test runs.
//! @param startTime The time point this set of test runs started.
//! @param duration The duration this set of test runs took to complete.
//! @param passingTests The set of test runs that executed successfully with no failing tests.
//! @param failing tests The set of test runs that executed successfully but had one or more failing tests.
//! @param executionFailureTests The set of test runs that failed to execute.
//! @param timedOutTests The set of test runs that executed successfully but were terminated prematurely due to timing out.
//! @param unexecutedTests The set of test runs that were queued up for execution but did not get the opportunity to execute.
//! @param passingTestRuns The set of test runs that executed successfully with no failing tests.
//! @param failingTestRuns The set of test runs that executed successfully but had one or more failing tests.
//! @param executionFailureTestRuns The set of test runs that failed to execute.
//! @param timedOutTestRuns The set of test runs that executed successfully but were terminated prematurely due to timing out.
//! @param unexecutedTestRuns The set of test runs that were queued up for execution but did not get the opportunity to execute.
TestRunReport(
TestSequenceResult result,
AZStd::chrono::high_resolution_clock::time_point startTime,
AZStd::chrono::milliseconds duration,
AZStd::vector<TestRun>&& passingTests,
AZStd::vector<TestRunWithTestFailures>&& failingTests,
AZStd::vector<TestRun>&& executionFailureTests,
AZStd::vector<TestRun>&& timedOutTests,
AZStd::vector<TestRun>&& unexecutedTests);
AZStd::vector<PassingTestRun>&& passingTestRuns,
AZStd::vector<FailingTestRun>&& failingTestRuns,
AZStd::vector<TestRunWithExecutionFailure>&& executionFailureTestRuns,
AZStd::vector<TimedOutTestRun>&& timedOutTestRuns,
AZStd::vector<UnexecutedTestRun>&& unexecutedTestRuns);
//! Returns the result of this sequence of test runs.
TestSequenceResult GetResult() const;
@ -50,190 +66,478 @@ namespace TestImpact
//! Returns the duration this sequence of test runs took to complete.
AZStd::chrono::milliseconds GetDuration() const;
//! Returns the total number of test runs.
size_t GetTotalNumTestRuns() const;
//! Returns the number of passing test runs.
size_t GetNumPassingTests() const;
size_t GetNumPassingTestRuns() const;
//! Returns the number of failing test runs.
size_t GetNumFailingTests() const;
size_t GetNumFailingTestRuns() const;
//! Returns the number of test runs that failed to execute.
size_t GetNumExecutionFailureTestRuns() const;
//! Returns the number of timed out test runs.
size_t GetNumTimedOutTests() const;
size_t GetNumTimedOutTestRuns() const;
//! Returns the number of unexecuted test runs.
size_t GetNumUnexecutedTests() const;
size_t GetNumUnexecutedTestRuns() const;
//! Returns the total number of passing tests across all test runs in the report.
size_t GetTotalNumPassingTests() const;
//! Returns the total number of failing tests across all test runs in the report.
size_t GetTotalNumFailingTests() const;
//! Returns the total number of disabled tests across all test runs in the report.
size_t GetTotalNumDisabledTests() const;
//! Returns the set of test runs that executed successfully with no failing tests.
const AZStd::vector<TestRun>& GetPassingTests() const;
const AZStd::vector<PassingTestRun>& GetPassingTestRuns() const;
//! Returns the set of test runs that executed successfully but had one or more failing tests.
const AZStd::vector<TestRunWithTestFailures>& GetFailingTests() const;
const AZStd::vector<FailingTestRun>& GetFailingTestRuns() const;
//! Returns the set of test runs that failed to execute.
const AZStd::vector<TestRun>& GetExecutionFailureTests() const;
const AZStd::vector<TestRunWithExecutionFailure>& GetExecutionFailureTestRuns() const;
//! Returns the set of test runs that executed successfully but were terminated prematurely due to timing out.
const AZStd::vector<TestRun>& GetTimedOutTests() const;
const AZStd::vector<TimedOutTestRun>& GetTimedOutTestRuns() const;
//! Returns the set of test runs that were queued up for execution but did not get the opportunity to execute.
const AZStd::vector<TestRun>& GetUnexecutedTests() const;
const AZStd::vector<UnexecutedTestRun>& GetUnexecutedTestRuns() const;
private:
TestSequenceResult m_result;
TestSequenceResult m_result = TestSequenceResult::Success;
AZStd::chrono::high_resolution_clock::time_point m_startTime;
AZStd::chrono::milliseconds m_duration;
AZStd::vector<TestRun> m_passingTests;
AZStd::vector<TestRunWithTestFailures> m_failingTests;
AZStd::vector<TestRun> m_executionFailureTests;
AZStd::vector<TestRun> m_timedOutTests;
AZStd::vector<TestRun> m_unexecutedTests;
AZStd::chrono::milliseconds m_duration = AZStd::chrono::milliseconds{ 0 };
AZStd::vector<PassingTestRun> m_passingTestRuns;
AZStd::vector<FailingTestRun> m_failingTestRuns;
AZStd::vector<TestRunWithExecutionFailure> m_executionFailureTestRuns;
AZStd::vector<TimedOutTestRun> m_timedOutTestRuns;
AZStd::vector<UnexecutedTestRun> m_unexecutedTestRuns;
size_t m_totalNumPassingTests = 0;
size_t m_totalNumFailingTests = 0;
size_t m_totalNumDisabledTests = 0;
};
//! Report detailing a test run sequence of selected tests.
class SequenceReport
//! Base class for all sequence report types.
template<typename PolicyStateType>
class SequenceReportBase
{
public:
//! Constructs the report for a sequence of selected tests.
//! @param type The type of sequence this report is generated for.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTests The target names of the selected tests.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
SequenceReport(SuiteType suiteType, const TestRunSelection& selectedTests, TestRunReport&& selectedTestRunReport);
SequenceReportBase(
SequenceReportType type,
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const PolicyStateType& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
TestRunReport&& selectedTestRunReport)
: m_type(type)
, m_maxConcurrency(maxConcurrency)
, m_testTargetTimeout(testTargetTimeout)
, m_globalTimeout(globalTimeout)
, m_policyState(policyState)
, m_suite(suiteType)
, m_selectedTestRuns(selectedTestRuns)
, m_selectedTestRunReport(AZStd::move(selectedTestRunReport))
{
}
virtual ~SequenceReportBase() = default;
//! Returns the identifying type for this sequence report.
SequenceReportType GetType() const
{
return m_type;
}
//! Returns the maximum concurrency for this sequence.
size_t GetMaxConcurrency() const
{
return m_maxConcurrency;
}
//! Returns the global timeout for this sequence.
const AZStd::optional<AZStd::chrono::milliseconds>& GetGlobalTimeout() const
{
return m_globalTimeout;
}
//! Returns the test target timeout for this sequence.
const AZStd::optional<AZStd::chrono::milliseconds>& GetTestTargetTimeout() const
{
return m_testTargetTimeout;
}
//! Returns the policy state for this sequence.
const PolicyStateType& GetPolicyState() const
{
return m_policyState;
}
//! Returns the suite for this sequence.
SuiteType GetSuite() const
{
return m_suite;
}
//! Returns the result of the sequence.
virtual TestSequenceResult GetResult() const
{
return m_selectedTestRunReport.GetResult();
}
//! Returns the tests selected for running in the sequence.
TestRunSelection GetSelectedTests() const;
TestRunSelection GetSelectedTestRuns() const
{
return m_selectedTestRuns;
}
//! Returns the report for the selected test runs.
TestRunReport GetSelectedTestRunReport() const;
TestRunReport GetSelectedTestRunReport() const
{
return m_selectedTestRunReport;
}
//! Returns the start time of the sequence.
AZStd::chrono::high_resolution_clock::time_point GetStartTime() const;
AZStd::chrono::high_resolution_clock::time_point GetStartTime() const
{
return m_selectedTestRunReport.GetStartTime();
}
//! Returns the end time of the sequence.
AZStd::chrono::high_resolution_clock::time_point GetEndTime() const;
//! Returns the result of the sequence.
virtual TestSequenceResult GetResult() const;
AZStd::chrono::high_resolution_clock::time_point GetEndTime() const
{
return GetStartTime() + GetDuration();
}
//! Returns the entire duration the sequence took from start to finish.
virtual AZStd::chrono::milliseconds GetDuration() const;
//! Get the total number of tests in the sequence that passed.
virtual size_t GetTotalNumPassingTests() const;
//! Get the total number of tests in the sequence that contain one or more test failures.
virtual size_t GetTotalNumFailingTests() const;
//! Get the total number of tests in the sequence that timed out whilst in flight.
virtual size_t GetTotalNumTimedOutTests() const;
//! Get the total number of tests in the sequence that were queued for execution but did not get the oppurtunity to execute.
virtual size_t GetTotalNumUnexecutedTests() const;
virtual AZStd::chrono::milliseconds GetDuration() const
{
return m_selectedTestRunReport.GetDuration();
}
//! Returns the total number of test runs across all test run reports.
virtual size_t GetTotalNumTestRuns() const
{
return m_selectedTestRunReport.GetTotalNumTestRuns();
}
//! Returns the total number of passing tests across all test targets in all test run reports.
virtual size_t GetTotalNumPassingTests() const
{
return m_selectedTestRunReport.GetTotalNumPassingTests();
}
//! Returns the total number of failing tests across all test targets in all test run reports.
virtual size_t GetTotalNumFailingTests() const
{
return m_selectedTestRunReport.GetTotalNumFailingTests();
}
//! Returns the total number of disabled tests across all test targets in all test run reports.
virtual size_t GetTotalNumDisabledTests() const
{
return m_selectedTestRunReport.GetTotalNumDisabledTests();
}
//! Get the total number of test runs in the sequence that passed.
virtual size_t GetTotalNumPassingTestRuns() const
{
return m_selectedTestRunReport.GetNumPassingTestRuns();
}
//! Get the total number of test runs in the sequence that contain one or more test failures.
virtual size_t GetTotalNumFailingTestRuns() const
{
return m_selectedTestRunReport.GetNumFailingTestRuns();
}
//! Returns the total number of test runs that failed to execute.
virtual size_t GetTotalNumExecutionFailureTestRuns() const
{
return m_selectedTestRunReport.GetNumExecutionFailureTestRuns();
}
//! Get the total number of test runs in the sequence that timed out whilst in flight.
virtual size_t GetTotalNumTimedOutTestRuns() const
{
return m_selectedTestRunReport.GetNumTimedOutTestRuns();
}
//! Get the total number of test runs in the sequence that were queued for execution but did not get the opportunity to execute.
virtual size_t GetTotalNumUnexecutedTestRuns() const
{
return m_selectedTestRunReport.GetNumUnexecutedTestRuns();
}
private:
SuiteType m_suite;
TestRunSelection m_selectedTests;
SequenceReportType m_type;
size_t m_maxConcurrency = 0;
AZStd::optional<AZStd::chrono::milliseconds> m_testTargetTimeout;
AZStd::optional<AZStd::chrono::milliseconds> m_globalTimeout;
PolicyStateType m_policyState;
SuiteType m_suite = SuiteType::Main;
TestRunSelection m_selectedTestRuns;
TestRunReport m_selectedTestRunReport;
};
//! Report type for regular test sequences.
class RegularSequenceReport
: public SequenceReportBase<SequencePolicyState>
{
public:
//! Constructs the report for a regular sequence.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
RegularSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
TestRunReport&& selectedTestRunReport);
};
//! Report type for seed test sequences.
class SeedSequenceReport
: public SequenceReportBase<SequencePolicyState>
{
public:
//! Constructs the report for a seed sequence.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
SeedSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
TestRunReport&& selectedTestRunReport);
};
//! Report detailing a test run sequence of selected and drafted tests.
class DraftingSequenceReport
: public SequenceReport
template<typename PolicyStateType>
class DraftingSequenceReportBase
: public SequenceReportBase<PolicyStateType>
{
public:
//! Constructs the report for a sequence of selected and drafted tests.
//! Constructs the report for sequences that draft in previously failed/newly added test targets.
//! @param type The type of sequence this report is generated for.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTests The target names of the selected tests.
//! @param draftedTests The target names of the drafted tests.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param draftedTestRuns The target names of the drafted test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
//! @param draftedTestRunReport The report for the set of drafted test runs.
DraftingSequenceReport(
DraftingSequenceReportBase(
SequenceReportType type,
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const PolicyStateType& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTests,
const AZStd::vector<AZStd::string>& draftedTests,
const TestRunSelection& selectedTestRuns,
const AZStd::vector<AZStd::string>& draftedTestRuns,
TestRunReport&& selectedTestRunReport,
TestRunReport&& draftedTestRunReport);
// SequenceReport overrides ...
TestSequenceResult GetResult() const override;
AZStd::chrono::milliseconds GetDuration() const override;
size_t GetTotalNumPassingTests() const override;
size_t GetTotalNumFailingTests() const override;
size_t GetTotalNumTimedOutTests() const override;
size_t GetTotalNumUnexecutedTests() const override;
//! Returns the tests drafted for running in the sequence.
const AZStd::vector<AZStd::string>& GetDraftedTests() const;
TestRunReport&& draftedTestRunReport)
: SequenceReportBase(
type,
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTestRuns,
AZStd::move(selectedTestRunReport))
, m_draftedTestRuns(draftedTestRuns)
, m_draftedTestRunReport(AZStd::move(draftedTestRunReport))
{
}
//! Returns the tests drafted for running in the sequence.
const AZStd::vector<AZStd::string>& GetDraftedTestRuns() const
{
return m_draftedTestRuns;
}
//! Returns the report for the drafted test runs.
TestRunReport GetDraftedTestRunReport() const;
TestRunReport GetDraftedTestRunReport() const
{
return m_draftedTestRunReport;
}
// SequenceReport overrides ...
AZStd::chrono::milliseconds GetDuration() const override
{
return SequenceReportBase::GetDuration() + m_draftedTestRunReport.GetDuration();
}
TestSequenceResult GetResult() const override
{
return CalculateMultiTestSequenceResult({ SequenceReportBase::GetResult(), m_draftedTestRunReport.GetResult() });
}
size_t GetTotalNumTestRuns() const override
{
return SequenceReportBase::GetTotalNumTestRuns() + m_draftedTestRunReport.GetTotalNumTestRuns();
}
size_t GetTotalNumPassingTests() const override
{
return SequenceReportBase::GetTotalNumPassingTests() + m_draftedTestRunReport.GetTotalNumPassingTests();
}
size_t GetTotalNumFailingTests() const override
{
return SequenceReportBase::GetTotalNumFailingTests() + m_draftedTestRunReport.GetTotalNumFailingTests();
}
size_t GetTotalNumDisabledTests() const override
{
return SequenceReportBase::GetTotalNumDisabledTests() + m_draftedTestRunReport.GetTotalNumDisabledTests();
}
size_t GetTotalNumPassingTestRuns() const override
{
return SequenceReportBase::GetTotalNumPassingTestRuns() + m_draftedTestRunReport.GetNumPassingTestRuns();
}
size_t GetTotalNumFailingTestRuns() const override
{
return SequenceReportBase::GetTotalNumFailingTestRuns() + m_draftedTestRunReport.GetNumFailingTestRuns();
}
size_t GetTotalNumExecutionFailureTestRuns() const override
{
return SequenceReportBase::GetTotalNumExecutionFailureTestRuns() + m_draftedTestRunReport.GetNumExecutionFailureTestRuns();
}
size_t GetTotalNumTimedOutTestRuns() const override
{
return SequenceReportBase::GetTotalNumTimedOutTestRuns() + m_draftedTestRunReport.GetNumTimedOutTestRuns();
}
size_t GetTotalNumUnexecutedTestRuns() const override
{
return SequenceReportBase::GetTotalNumUnexecutedTestRuns() + m_draftedTestRunReport.GetNumUnexecutedTestRuns();
}
private:
AZStd::vector<AZStd::string> m_draftedTests;
AZStd::vector<AZStd::string> m_draftedTestRuns;
TestRunReport m_draftedTestRunReport;
};
//! Report detailing an impact analysis sequence of selected, discarded and drafted tests.
class ImpactAnalysisSequenceReport
: public DraftingSequenceReport
: public DraftingSequenceReportBase<ImpactAnalysisSequencePolicyState>
{
public:
//! Constructs the report for a sequence of selected and drafted tests.
//! Constructs the report for an impact analysis sequence.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTests The target names of the selected tests.
//! @param discardedTests The target names of the discarded tests.
//! @param draftedTests The target names of the drafted tests.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param draftedTestRuns The target names of the drafted test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
//! @param draftedTestRunReport The report for the set of drafted test runs.
ImpactAnalysisSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const ImpactAnalysisSequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTests,
const AZStd::vector<AZStd::string>& discardedTests,
const AZStd::vector<AZStd::string>& draftedTests,
const TestRunSelection& selectedTestRuns,
const AZStd::vector<AZStd::string>& discardedTestRuns,
const AZStd::vector<AZStd::string>& draftedTestRuns,
TestRunReport&& selectedTestRunReport,
TestRunReport&& draftedTestRunReport);
//! Returns the tests discarded from running in the sequence.
const AZStd::vector<AZStd::string>& GetDiscardedTests() const;
//! Returns the test runs discarded from running in the sequence.
const AZStd::vector<AZStd::string>& GetDiscardedTestRuns() const;
private:
AZStd::vector<AZStd::string> m_discardedTests;
AZStd::vector<AZStd::string> m_discardedTestRuns;
};
//! Report detailing an impact analysis sequence of selected, discarded and drafted tests.
//! Report detailing an impact analysis sequence of selected, discarded and drafted test runs.
class SafeImpactAnalysisSequenceReport
: public DraftingSequenceReport
: public DraftingSequenceReportBase<SafeImpactAnalysisSequencePolicyState>
{
public:
//! Constructs the report for a sequence of selected and drafted tests.
//! Constructs the report for a sequence of selected, discarded and drafted test runs.
//! @param maxConcurrency The maximum number of concurrent test targets in flight at any given time.
//! @param testTargetTimeout The maximum duration individual test targets may be in flight for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param policyState The policy state this sequence was executed under.
//! @param suiteType The suite from which the tests have been selected.
//! @param selectedTests The target names of the selected tests.
//! @param discardedTests The target names of the discarded tests.
//! @param draftedTests The target names of the drafted tests.
//! @param selectedTestRuns The target names of the selected test runs.
//! @param discardedTestRuns The target names of the discarded test runs.
//! @param draftedTestRuns The target names of the drafted test runs.
//! @param selectedTestRunReport The report for the set of selected test runs.
//! @param discardedTestRunReport The report for the set of discarded test runs.
//! @param draftedTestRunReport The report for the set of drafted test runs.
SafeImpactAnalysisSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SafeImpactAnalysisSequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTests,
const TestRunSelection& discardedTests,
const AZStd::vector<AZStd::string>& draftedTests,
const TestRunSelection& selectedTestRuns,
const TestRunSelection& discardedTestRuns,
const AZStd::vector<AZStd::string>& draftedTestRuns,
TestRunReport&& selectedTestRunReport,
TestRunReport&& discardedTestRunReport,
TestRunReport&& draftedTestRunReport);
// DraftingSequenceReport overrides ...
TestSequenceResult GetResult() const override;
// SequenceReport overrides ...
AZStd::chrono::milliseconds GetDuration() const override;
TestSequenceResult GetResult() const override;
size_t GetTotalNumTestRuns() const override;
size_t GetTotalNumPassingTests() const override;
size_t GetTotalNumFailingTests() const override;
size_t GetTotalNumTimedOutTests() const override;
size_t GetTotalNumUnexecutedTests() const override;
size_t GetTotalNumDisabledTests() const override;
size_t GetTotalNumPassingTestRuns() const override;
size_t GetTotalNumFailingTestRuns() const override;
size_t GetTotalNumExecutionFailureTestRuns() const override;
size_t GetTotalNumTimedOutTestRuns() const override;
size_t GetTotalNumUnexecutedTestRuns() const override;
//! Returns the test runs discarded from running in the sequence.
const TestRunSelection GetDiscardedTests() const;
const TestRunSelection GetDiscardedTestRuns() const;
//! Returns the report for the discarded test runs.
TestRunReport GetDiscardedTestRunReport() const;
private:
TestRunSelection m_discardedTests;
TestRunSelection m_discardedTestRuns;
TestRunReport m_discardedTestRunReport;
};
} // namespace Client

@ -0,0 +1,28 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <TestImpactFramework/TestImpactClientSequenceReport.h>
#include <AzCore/std/string/string.h>
namespace TestImpact
{
//! Serializes a regular sequence report to JSON format.
AZStd::string SerializeSequenceReport(const Client::RegularSequenceReport& sequenceReport);
//! Serializes a seed sequence report to JSON format.
AZStd::string SerializeSequenceReport(const Client::SeedSequenceReport& sequenceReport);
//! Serializes an impact analysis sequence report to JSON format.
AZStd::string SerializeSequenceReport(const Client::ImpactAnalysisSequenceReport& sequenceReport);
//! Serializes a safe impact analysis sequence report to JSON format.
AZStd::string SerializeSequenceReport(const Client::SafeImpactAnalysisSequenceReport& sequenceReport);
} // namespace TestImpact
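For reference, a hedged usage sketch of these overloads, mirroring how the console front end consumes them elsewhere in this change (the sequenceReport and reportPath variables are placeholders):
// Illustrative only: serialize a completed regular sequence report to JSON and
// write it to disk, as done when the -report=<filename> option is supplied.
const AZStd::string sequenceReportJson = SerializeSequenceReport(sequenceReport);
WriteFileContents<SequenceReportException>(sequenceReportJson, reportPath);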

@ -26,8 +26,8 @@ namespace TestImpact
AllTestsPass //!< The test run completed its run and all tests passed.
};
//! Representation of a completed test run.
class TestRun
//! Representation of a test run.
class TestRunBase
{
public:
//! Constructs the client facing representation of a given test target's run.
@ -36,13 +36,15 @@ namespace TestImpact
//! @param startTime The start time, relative to the sequence start, that this run started.
//! @param duration The duration that this test run took to complete.
//! @param result The result of the run.
TestRun(
TestRunBase(
const AZStd::string& name,
const AZStd::string& commandString,
AZStd::chrono::high_resolution_clock::time_point startTime,
AZStd::chrono::milliseconds duration,
TestRunResult result);
virtual ~TestRunBase() = default;
//! Returns the test target name.
const AZStd::string& GetTargetName() const;
@ -69,75 +71,120 @@ namespace TestImpact
AZStd::chrono::milliseconds m_duration;
};
//! Represents an individual test of a test target that failed.
class TestFailure
//! Representation of a test run that failed to execute.
class TestRunWithExecutionFailure
: public TestRunBase
{
public:
TestFailure(const AZStd::string& testName, const AZStd::string& errorMessage);
using TestRunBase::TestRunBase;
TestRunWithExecutionFailure(TestRunBase&& testRun);
};
//! Returns the name of the test that failed.
const AZStd::string& GetName() const;
//! Representation of a test run that was terminated in-flight due to timing out.
class TimedOutTestRun
: public TestRunBase
{
public:
using TestRunBase::TestRunBase;
TimedOutTestRun(TestRunBase&& testRun);
};
//! Returns the error message of the test that failed.
const AZStd::string& GetErrorMessage() const;
//! Representation of a test run that was not executed.
class UnexecutedTestRun
: public TestRunBase
{
public:
using TestRunBase::TestRunBase;
UnexecutedTestRun(TestRunBase&& testRun);
};
private:
AZStd::string m_name;
AZStd::string m_errorMessage;
//! Result of an individual test during a test run.
enum class TestResult : AZ::u8
{
Passed,
Failed,
NotRun
};
//! Represents a collection of tests that failed.
//! @note Only the failing tests are included in the collection.
class TestCaseFailure
//! Representation of a single test in a test target.
class Test
{
public:
TestCaseFailure(const AZStd::string& testCaseName, AZStd::vector<TestFailure>&& testFailures);
//! Constructs the test with the specified name and result.
Test(const AZStd::string& testName, TestResult result);
//! Returns the name of the test case containing the failing tests.
//! Returns the name of this test.
const AZStd::string& GetName() const;
//! Returns the collection of tests in this test case that failed.
const AZStd::vector<TestFailure>& GetTestFailures() const;
//! Returns the result of executing this test.
TestResult GetResult() const;
private:
AZStd::string m_name;
AZStd::vector<TestFailure> m_testFailures;
TestResult m_result;
};
//! Representation of a test run's failing tests.
class TestRunWithTestFailures
: public TestRun
//! Representation of a test run that completed with or without test failures.
class CompletedTestRun
: public TestRunBase
{
public:
//! Constructs the client facing representation of a given test target's run.
//! @param name The name of the test target.
//! @param commandString The command string used to execute this test target.
//! @param startTime The start time, relative to the sequence start, that this run started.
//! Constructs the test run from the specified test target execution data.
//! @param name The name of the test target for this run.
//! @param commandString The command string used to execute the test target for this run.
//! @param startTime The start time, offset from the sequence start time, that this test run started.
//! @param duration The duration that this test run took to complete.
//! @param result The result of the run.
//! @param testFailures The failing tests for this test run.
TestRunWithTestFailures(
//! @param result The result of this test run.
//! @param tests The tests contained in the test target for this test run.
CompletedTestRun(
const AZStd::string& name,
const AZStd::string& commandString,
AZStd::chrono::high_resolution_clock::time_point startTime,
AZStd::chrono::milliseconds duration,
TestRunResult result,
AZStd::vector<TestCaseFailure>&& testFailures);
AZStd::vector<Test>&& tests);
//! Constructs the client facing representation of a given test target's run.
//! @param testRun The test run this run is to be derived from.
//! @param testFailures The failing tests for this run.
TestRunWithTestFailures(TestRun&& testRun, AZStd::vector<TestCaseFailure>&& testFailures);
//! Constructs the test run from the specified test target execution data.
CompletedTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests);
//! Returns the total number of tests in the run.
size_t GetTotalNumTests() const;
//! Returns the total number of failing tests in this run.
size_t GetNumTestFailures() const;
//! Returns the total number of passing tests in the run.
size_t GetTotalNumPassingTests() const;
//! Returns the test cases in this run containing failing tests.
const AZStd::vector<TestCaseFailure>& GetTestCaseFailures() const;
//! Returns the total number of failing tests in the run.
size_t GetTotalNumFailingTests() const;
//! Returns the total number of disabled tests in the run.
size_t GetTotalNumDisabledTests() const;
//! Returns the tests in the run.
const AZStd::vector<Test>& GetTests() const;
private:
AZStd::vector<TestCaseFailure> m_testCaseFailures;
size_t m_numTestFailures = 0;
AZStd::vector<Test> m_tests;
size_t m_totalNumPassingTests = 0;
size_t m_totalNumFailingTests = 0;
size_t m_totalNumDisabledTests = 0;
};
//! Representation of a test run that completed with no test failures.
class PassingTestRun
: public CompletedTestRun
{
public:
using CompletedTestRun::CompletedTestRun;
PassingTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests);
};
//! Representation of a test run that completed with one or more test failures.
class FailingTestRun
: public CompletedTestRun
{
public:
using CompletedTestRun::CompletedTestRun;
FailingTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests);
};
} // namespace Client
} // namespace TestImpact
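// Illustrative usage sketch (editorial addition, not part of this change). It shows how the new
// client types compose: individual Test results are collected and used to construct a
// CompletedTestRun. The helper name, target name and command string below are hypothetical; the
// TestRunResult value is taken as a parameter because its enumerators are declared earlier in this
// header and are not repeated here.
namespace TestImpactExamples
{
    inline TestImpact::Client::CompletedTestRun BuildExampleRun(TestImpact::Client::TestRunResult runResult)
    {
        AZStd::vector<TestImpact::Client::Test> tests;
        tests.emplace_back("SuiteA.TestFoo", TestImpact::Client::TestResult::Passed);
        tests.emplace_back("SuiteA.TestBar", TestImpact::Client::TestResult::Failed);

        return TestImpact::Client::CompletedTestRun(
            "ExampleTestTarget",                         // test target name (hypothetical)
            "ExampleTestTarget --gtest_filter=SuiteA.*", // command string (hypothetical)
            AZStd::chrono::high_resolution_clock::now(), // start time (illustrative; relative to the sequence start in practice)
            AZStd::chrono::milliseconds(1500),           // duration of the run
            runResult,                                   // result of the run
            AZStd::move(tests));                         // per-test results
    }
} // namespace TestImpactExamples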

@ -44,7 +44,7 @@ namespace TestImpact
{
RepoPath m_root; //!< Path to the persistent workspace tracked by the repository.
RepoPath m_enumerationCacheDirectory; //!< Path to the test enumerations cache.
AZStd::array<RepoPath, 3> m_sparTIAFiles; //!< Paths to the test impact analysis data files for each test suite.
AZStd::array<RepoPath, 3> m_sparTiaFiles; //!< Paths to the test impact analysis data files for each test suite.
};
Temp m_temp;

@ -0,0 +1,81 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/base.h>
namespace TestImpact
{
namespace Policy
{
//! Policy for handling of test targets that fail to execute (e.g. due to the binary not being found).
//! @note Test targets that fail to execute will be tagged such that their execution can be attempted at a later date. This is
//! important as otherwise it would be erroneously assumed that they cover no sources due to having no entries in the dynamic
//! dependency map.
enum class ExecutionFailure : AZ::u8
{
Abort, //!< Abort the test sequence and report a failure.
Continue, //!< Continue the test sequence but treat the execution failures as test failures after the run.
Ignore //!< Continue the test sequence and ignore the execution failures.
};
//! Policy for handling the coverage data of failed test targets (both tests that failed to execute and tests that ran but failed).
enum class FailedTestCoverage : AZ::u8
{
Discard, //!< Discard the coverage data produced by the failing tests, causing them to be drafted into future test runs.
Keep //!< Keep any existing coverage data and update the coverage data for failed test targets that produce coverage.
};
//! Policy for prioritizing selected tests.
enum class TestPrioritization : AZ::u8
{
None, //!< Do not attempt any test prioritization.
DependencyLocality //!< Prioritize test targets according to the locality of the production targets they cover in the build
//!< dependency graph.
};
//! Policy for handling test targets that report failing tests.
enum class TestFailure : AZ::u8
{
Abort, //!< Abort the test sequence and report the test failure.
Continue //!< Continue the test sequence and report the test failures after the run.
};
//! Policy for handling integrity failures of the dynamic dependency map and the source to target mappings.
enum class IntegrityFailure : AZ::u8
{
Abort, //!< Abort the test sequence and report the test failure.
Continue //!< Continue the test sequence and report the test failures after the run.
};
//! Policy for updating the dynamic dependency map with the coverage data produced by test sequences.
enum class DynamicDependencyMap : AZ::u8
{
Discard, //!< Discard the coverage data produced by test sequences.
Update //!< Update the dynamic dependency map with the coverage data produced by test sequences.
};
//! Policy for sharding test targets that have been marked for test sharding.
enum class TestSharding : AZ::u8
{
Never, //!< Do not shard any test targets.
Always //!< Shard all test targets that have been marked for test sharding.
};
//! Standard output capture of test target runs.
enum class TargetOutputCapture : AZ::u8
{
None, //!< Do not capture any output.
StdOut, //!< Send captured output to standard output.
File, //!< Write captured output to file.
StdOutAndFile //!< Send captured output to standard output and write to file.
};
} // namespace Policy
} // namespace TestImpact
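// Illustrative sketch (editorial addition, not part of this change): how a consumer might branch
// on Policy::ExecutionFailure. The function name and return semantics are hypothetical; the
// enumerators are the ones declared above.
namespace TestImpactExamples
{
    //! Returns true if a test sequence should keep running after a test target fails to execute.
    inline bool ShouldContinueAfterExecutionFailure(TestImpact::Policy::ExecutionFailure policy)
    {
        switch (policy)
        {
        case TestImpact::Policy::ExecutionFailure::Abort:
            return false; // abort the sequence and report a failure
        case TestImpact::Policy::ExecutionFailure::Continue: // treat execution failures as test failures
        case TestImpact::Policy::ExecutionFailure::Ignore:   // ignore execution failures entirely
            return true;
        default:
            return false;
        }
    }
} // namespace TestImpactExamples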

@ -78,7 +78,7 @@ namespace TestImpact
//! @param testRunMeta The test that has completed.
//! @param numTestRunsCompleted The number of test runs that have completed.
//! @param totalNumTestRuns The total number of test runs in the sequence.
using TestRunCompleteCallback = AZStd::function<void(Client::TestRun& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns)>;
using TestRunCompleteCallback = AZStd::function<void(Client::TestRunBase& testRun, size_t numTestRunsCompleted, size_t totalNumTestRuns)>;
//! The API exposed to the client responsible for all test runs and persistent data management.
class Runtime
@ -86,6 +86,7 @@ namespace TestImpact
public:
//! Constructs a runtime with the specified configuration and policies.
//! @param config The configuration used for this runtime instance.
//! @param dataFile The optional data file to be used instead of that specified in the config file.
//! @param suiteFilter The test suite for which the coverage data and test selection will draw from.
//! @param executionFailurePolicy Determines how to handle test targets that fail to execute.
//! @param executionFailureDraftingPolicy Determines how test targets that previously failed to execute are drafted into subsequent test sequences.
@ -94,6 +95,7 @@ namespace TestImpact
//! @param testShardingPolicy Determines how to handle test targets that have opted in to test sharding.
Runtime(
RuntimeConfig&& config,
AZStd::optional<RepoPath> dataFile,
SuiteType suiteFilter,
Policy::ExecutionFailure executionFailurePolicy,
Policy::FailedTestCoverage failedTestCoveragePolicy,
@ -112,11 +114,11 @@ namespace TestImpact
//! @param testSequenceCompleteCallback The client function to be called after the test sequence has completed.
//! @param testRunCompleteCallback The client function to be called after an individual test run has completed.
//! @returns The test run and sequence report for the selected test sequence.
Client::SequenceReport RegularTestSequence(
Client::RegularSequenceReport RegularTestSequence(
AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
AZStd::optional<TestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SequenceReport>> testSequenceCompleteCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::RegularSequenceReport>> testSequenceCompleteCallback,
AZStd::optional<TestRunCompleteCallback> testRunCompleteCallback);
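// Illustrative call sketch (editorial addition, not part of this change), assuming an already
// constructed Runtime instance named "runtime"; the timeout values and the progress lambda are
// hypothetical:
//
//   const auto report = runtime.RegularTestSequence(
//       AZStd::chrono::milliseconds(30 * 1000),      // per-test-target timeout
//       AZStd::chrono::milliseconds(30 * 60 * 1000), // global sequence timeout
//       AZStd::nullopt,                              // no sequence start callback
//       AZStd::nullopt,                              // no sequence complete callback
//       [](Client::TestRunBase& testRun, size_t numCompleted, size_t totalNumTestRuns)
//       {
//           AZ_Printf("TIAF", "%zu/%zu %s\n", numCompleted, totalNumTestRuns, testRun.GetTargetName().c_str());
//       });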
//! Runs a test sequence where tests are selected according to test impact analysis so long as they are not on the excluded list.
@ -164,11 +166,11 @@ namespace TestImpact
//! @param testSequenceCompleteCallback The client function to be called after the test sequence has completed.
//! @param testRunCompleteCallback The client function to be called after an individual test run has completed.
//! @returns The test run and sequence report for the selected test sequence.
Client::SequenceReport SeededTestSequence(
Client::SeedSequenceReport SeededTestSequence(
AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
AZStd::optional<TestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SequenceReport>> testSequenceCompleteCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SeedSequenceReport>> testSequenceCompleteCallback,
AZStd::optional<TestRunCompleteCallback> testRunCompleteCallback);
//! Returns true if the runtime has test impact analysis data (either preexisting or generated).
@ -184,7 +186,7 @@ namespace TestImpact
//! @param changeList The change list for which the covering tests and enumeration cache updates will be generated for.
//! @param testPrioritizationPolicy The test prioritization strategy to use for the selected test targets.
//! @returns The pair of selected test targets and discarded test targets.
AZStd::pair<AZStd::vector<const TestTarget*>, AZStd::vector<const TestTarget*>> SelectCoveringTestTargetsAndUpdateEnumerationCache(
AZStd::pair<AZStd::vector<const TestTarget*>, AZStd::vector<const TestTarget*>> SelectCoveringTestTargets(
const ChangeList& changeList,
Policy::TestPrioritization testPrioritizationPolicy);
@ -204,8 +206,22 @@ namespace TestImpact
//! Updates the dynamic dependency map and serializes the entire map to disk.
void UpdateAndSerializeDynamicDependencyMap(const AZStd::vector<TestEngineInstrumentedRun>& jobs);
//! Generates a base policy state for the current runtime policy configuration.
PolicyStateBase GeneratePolicyStateBase() const;
//! Generates a regular/seed sequence policy state for the current runtime policy configuration.
SequencePolicyState GenerateSequencePolicyState() const;
//! Generates a safe impact analysis sequence policy state for the current runtime policy configuration.
SafeImpactAnalysisSequencePolicyState GenerateSafeImpactAnalysisSequencePolicyState(
Policy::TestPrioritization testPrioritizationPolicy) const;
//! Generates an impact analysis sequence policy state for the current runtime policy configuration.
ImpactAnalysisSequencePolicyState GenerateImpactAnalysisSequencePolicyState(
Policy::TestPrioritization testPrioritizationPolicy, Policy::DynamicDependencyMap dynamicDependencyMapPolicy) const;
RuntimeConfig m_config;
RepoPath m_sparTIAFile;
RepoPath m_sparTiaFile;
SuiteType m_suiteFilter;
Policy::ExecutionFailure m_executionFailurePolicy;
Policy::FailedTestCoverage m_failedTestCoveragePolicy;

@ -0,0 +1,22 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <TestImpactFramework/TestImpactException.h>
namespace TestImpact
{
//! Exception for sequence report operations.
class SequenceReportException
: public Exception
{
public:
using Exception::Exception;
};
} // namespace TestImpact
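// Illustrative sketch (editorial addition, not part of this change): a consumer might raise this
// exception when a sequence report cannot be serialized or persisted, assuming the inherited
// Exception constructor accepts a message string:
//
//   throw SequenceReportException("Failed to write the sequence report to disk");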

@ -9,76 +9,12 @@
#pragma once
#include <TestImpactFramework/TestImpactRuntimeException.h>
#include <TestImpactFramework/TestImpactPolicy.h>
#include <AzCore/std/containers/array.h>
namespace TestImpact
{
namespace Policy
{
//! Policy for handling of test targets that fail to execute (e.g. due to the binary not being found).
//! @note Test targets that fail to execute will be tagged such that their execution can be attempted at a later date. This is
//! important as otherwise it would be erroneously assumed that they cover no sources due to having no entries in the dynamic
//! dependency map.
enum class ExecutionFailure
{
Abort, //!< Abort the test sequence and report a failure.
Continue, //!< Continue the test sequence but treat the execution failures as test failures after the run.
Ignore //!< Continue the test sequence and ignore the execution failures.
};
//! Policy for handling the coverage data of failed tests targets (both test that failed to execute and tests that ran but failed).
enum class FailedTestCoverage
{
Discard, //!< Discard the coverage data produced by the failing tests, causing them to be drafted into future test runs.
Keep //!< Keep any existing coverage data and update the coverage data for failed test targetss that produce coverage.
};
//! Policy for prioritizing selected tests.
enum class TestPrioritization
{
None, //!< Do not attempt any test prioritization.
DependencyLocality //!< Prioritize test targets according to the locality of the production targets they cover in the build dependency graph.
};
//! Policy for handling test targets that report failing tests.
enum class TestFailure
{
Abort, //!< Abort the test sequence and report the test failure.
Continue //!< Continue the test sequence and report the test failures after the run.
};
//! Policy for handling integrity failures of the dynamic dependency map and the source to target mappings.
enum class IntegrityFailure
{
Abort, //!< Abort the test sequence and report the test failure.
Continue //!< Continue the test sequence and report the test failures after the run.
};
//! Policy for updating the dynamic dependency map with the coverage data of produced by test sequences.
enum class DynamicDependencyMap
{
Discard, //!< Discard the coverage data produced by test sequences.
Update //!< Update the dynamic dependency map with the coverage data produced by test sequences.
};
//! Policy for sharding test targets that have been marked for test sharding.
enum class TestSharding
{
Never, //!< Do not shard any test targets.
Always //!< Shard all test targets that have been marked for test sharding.
};
//! Standard output capture of test target runs.
enum class TargetOutputCapture
{
None, //!< Do not capture any output.
StdOut, //!< Send captured output to standard output
File, //!< Write captured output to file.
StdOutAndFile //!< Send captured output to standard output and write to file.
};
}
//! Configuration for test targets that opt in to test sharding.
enum class ShardConfiguration
{
@ -97,22 +33,6 @@ namespace TestImpact
Sandbox
};
//! User-friendly names for the test suite types.
inline AZStd::string GetSuiteTypeName(SuiteType suiteType)
{
switch (suiteType)
{
case SuiteType::Main:
return "main";
case SuiteType::Periodic:
return "periodic";
case SuiteType::Sandbox:
return "sandbox";
default:
throw(RuntimeException("Unexpected suite type"));
}
}
//! Result of a test sequence that was run.
enum class TestSequenceResult
{
@ -120,4 +40,36 @@ namespace TestImpact
Failure, //!< One or more tests failed and/or timed out and/or failed to launch and/or an integrity failure was encountered.
Timeout //!< The global timeout for the sequence was exceeded.
};
//! Base representation of runtime policies.
struct PolicyStateBase
{
Policy::ExecutionFailure m_executionFailurePolicy = Policy::ExecutionFailure::Continue;
Policy::FailedTestCoverage m_failedTestCoveragePolicy = Policy::FailedTestCoverage::Keep;
Policy::TestFailure m_testFailurePolicy = Policy::TestFailure::Abort;
Policy::IntegrityFailure m_integrityFailurePolicy = Policy::IntegrityFailure::Abort;
Policy::TestSharding m_testShardingPolicy = Policy::TestSharding::Never;
Policy::TargetOutputCapture m_targetOutputCapture = Policy::TargetOutputCapture::None;
};
//! Representation of regular and seed sequence policies.
struct SequencePolicyState
{
PolicyStateBase m_basePolicies;
};
//! Representation of impact analysis sequence policies.
struct ImpactAnalysisSequencePolicyState
{
PolicyStateBase m_basePolicies;
Policy::TestPrioritization m_testPrioritizationPolicy = Policy::TestPrioritization::None;
Policy::DynamicDependencyMap m_dynamicDependencyMap = Policy::DynamicDependencyMap::Update;
};
//! Representation of safe impact analysis sequence policies.
struct SafeImpactAnalysisSequencePolicyState
{
PolicyStateBase m_basePolicies;
Policy::TestPrioritization m_testPrioritizationPolicy = Policy::TestPrioritization::None;
};
} // namespace TestImpact
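// Illustrative sketch (editorial addition, not part of this change): composing a policy state for
// an impact analysis sequence. The helper name and the chosen values are examples only; the
// members and their defaults are the ones declared above.
namespace TestImpactExamples
{
    inline TestImpact::ImpactAnalysisSequencePolicyState MakeExamplePolicyState()
    {
        TestImpact::ImpactAnalysisSequencePolicyState policyState;
        policyState.m_basePolicies.m_testFailurePolicy = TestImpact::Policy::TestFailure::Continue; // report failures after the run
        policyState.m_basePolicies.m_targetOutputCapture = TestImpact::Policy::TargetOutputCapture::StdOut;
        policyState.m_testPrioritizationPolicy = TestImpact::Policy::TestPrioritization::DependencyLocality;
        policyState.m_dynamicDependencyMap = TestImpact::Policy::DynamicDependencyMap::Update; // the default, shown for clarity
        return policyState;
    }
} // namespace TestImpactExamples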

@ -6,12 +6,12 @@
*
*/
#include <TestImpactFramework/TestImpactException.h>
#include <TestImpactFramework/TestImpactRuntime.h>
#include <TestImpactFramework/TestImpactRepoPath.h>
#include <TestImpactFramework/TestImpactTestSequence.h>
#include <TestImpactFramework/TestImpactClientSequenceReport.h>
#include <AzCore/IO/SystemFile.h>
#include <AzCore/std/containers/vector.h>
#include <AzCore/std/string/string.h>
#pragma once
@ -59,23 +59,48 @@ namespace TestImpact
//! Delete the files that match the pattern from the specified directory.
//! @param path The path to the directory to pattern match the files for deletion.
//! @param pattern The pattern to match files for deletion.
inline void DeleteFiles(const RepoPath& path, const AZStd::string& pattern)
{
AZ::IO::SystemFile::FindFiles(AZStd::string::format("%s/%s", path.c_str(), pattern.c_str()).c_str(),
[&path](const char* file, bool isFile)
{
if (isFile)
{
AZ::IO::SystemFile::Delete(AZStd::string::format("%s/%s", path.c_str(), file).c_str());
}
return true;
});
}
//! @return The number of files that were deleted.
size_t DeleteFiles(const RepoPath& path, const AZStd::string& pattern);
//! Deletes the specified file.
inline void DeleteFile(const RepoPath& file)
{
DeleteFiles(file.ParentPath(), file.Filename().Native());
}
void DeleteFile(const RepoPath& file);
//! User-friendly names for the test suite types.
AZStd::string SuiteTypeAsString(SuiteType suiteType);
//! User-friendly names for the sequence report types.
AZStd::string SequenceReportTypeAsString(Client::SequenceReportType type);
//! User-friendly names for the sequence result types.
AZStd::string TestSequenceResultAsString(TestSequenceResult result);
//! User-friendly names for the test run result types.
AZStd::string TestRunResultAsString(Client::TestRunResult result);
//! User-friendly names for the execution failure policy types.
AZStd::string ExecutionFailurePolicyAsString(Policy::ExecutionFailure executionFailurePolicy);
//! User-friendly names for the failed test coverage policy types.
AZStd::string FailedTestCoveragePolicyAsString(Policy::FailedTestCoverage failedTestCoveragePolicy);
//! User-friendly names for the test prioritization policy types.
AZStd::string TestPrioritizationPolicyAsString(Policy::TestPrioritization testPrioritizationPolicy);
//! User-friendly names for the test failure policy types.
AZStd::string TestFailurePolicyAsString(Policy::TestFailure testFailurePolicy);
//! User-friendly names for the integrity failure policy types.
AZStd::string IntegrityFailurePolicyAsString(Policy::IntegrityFailure integrityFailurePolicy);
//! User-friendly names for the dynamic dependency map policy types.
AZStd::string DynamicDependencyMapPolicyAsString(Policy::DynamicDependencyMap dynamicDependencyMapPolicy);
//! User-friendly names for the test sharding policy types.
AZStd::string TestShardingPolicyAsString(Policy::TestSharding testShardingPolicy);
//! User-friendly names for the target output capture policy types.
AZStd::string TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture targetOutputCapturePolicy);
//! User-friendly names for the client test result types.
AZStd::string ClientTestResultAsString(Client::TestResult result);
} // namespace TestImpact
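// Illustrative sketch (editorial addition, not part of this change): using the helpers above to
// clear previously written report files for a given suite. The helper name, directory parameter
// and "*.json" file pattern are hypothetical.
namespace TestImpactExamples
{
    inline size_t DeleteSuiteReports(const TestImpact::RepoPath& reportDirectory, TestImpact::SuiteType suiteType)
    {
        const AZStd::string pattern =
            AZStd::string::format("%s.*.json", TestImpact::SuiteTypeAsString(suiteType).c_str());
        return TestImpact::DeleteFiles(reportDirectory, pattern);
    }
} // namespace TestImpactExamples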

@ -6,6 +6,8 @@
*
*/
#include <TestImpactFramework/TestImpactUtils.h>
#include <Artifact/Factory/TestImpactTestTargetMetaMapFactory.h>
#include <Artifact/TestImpactArtifactException.h>
@ -67,7 +69,7 @@ namespace TestImpact
{
// Check to see if this test target has the suite we're looking for
if (const auto suiteName = suite[Keys[SuiteKey]].GetString();
strcmp(GetSuiteTypeName(suiteType).c_str(), suiteName) == 0)
strcmp(SuiteTypeAsString(suiteType).c_str(), suiteName) == 0)
{
testMeta.m_suite = suiteName;
testMeta.m_customArgs = suite[Keys[CommandKey]].GetString();

@ -156,8 +156,6 @@ namespace TestImpact
{
coveringTestTargetIt->second.erase(source);
}
}
// 2.

@ -9,7 +9,7 @@
#pragma once
#include <TestImpactFramework/TestImpactChangeList.h>
#include <TestImpactFramework/TestImpactTestSequence.h>
#include <TestImpactFramework/TestImpactPolicy.h>
#include <Artifact/Static/TestImpactProductionTargetDescriptor.h>
#include <Artifact/Static/TestImpactTestTargetDescriptor.h>

@ -8,7 +8,7 @@
#pragma once
#include <TestImpactFramework/TestImpactTestSequence.h>
#include <TestImpactFramework/TestImpactPolicy.h>
#include <Artifact/Static/TestImpactDependencyGraphData.h>
#include <Dependency/TestImpactChangeDependencyList.h>

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <Artifact/Factory/TestImpactTestEnumerationSuiteFactory.h>
#include <TestEngine/TestImpactTestEngineException.h>

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <Artifact/Factory/TestImpactModuleCoverageFactory.h>
#include <Artifact/Factory/TestImpactTestRunSuiteFactory.h>

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <Artifact/Factory/TestImpactTestRunSuiteFactory.h>
#include <TestEngine/TestImpactTestEngineException.h>

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <Target/TestImpactTestTarget.h>
#include <TestEngine/TestImpactTestEngineException.h>

@ -1,6 +1,7 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
@ -11,7 +12,6 @@ namespace TestImpact
{
namespace Client
{
//! Calculates the final sequence result for a composite of multiple sequences.
TestSequenceResult CalculateMultiTestSequenceResult(const AZStd::vector<TestSequenceResult>& results)
{
// Order of precedence:
@ -19,14 +19,12 @@ namespace TestImpact
// 2. TestSequenceResult::Timeout
// 3. TestSequenceResult::Success
if (const auto it = AZStd::find(results.begin(), results.end(), TestSequenceResult::Failure);
it != results.end())
if (const auto it = AZStd::find(results.begin(), results.end(), TestSequenceResult::Failure); it != results.end())
{
return TestSequenceResult::Failure;
}
if (const auto it = AZStd::find(results.begin(), results.end(), TestSequenceResult::Timeout);
it != results.end())
if (const auto it = AZStd::find(results.begin(), results.end(), TestSequenceResult::Timeout); it != results.end())
{
return TestSequenceResult::Timeout;
}
@ -38,20 +36,32 @@ namespace TestImpact
TestSequenceResult result,
AZStd::chrono::high_resolution_clock::time_point startTime,
AZStd::chrono::milliseconds duration,
AZStd::vector<TestRun>&& passingTests,
AZStd::vector<TestRunWithTestFailures>&& failingTests,
AZStd::vector<TestRun>&& executionFailureTests,
AZStd::vector<TestRun>&& timedOutTests,
AZStd::vector<TestRun>&& unexecutedTests)
AZStd::vector<PassingTestRun>&& passingTestRuns,
AZStd::vector<FailingTestRun>&& failingTestRuns,
AZStd::vector<TestRunWithExecutionFailure>&& executionFailureTestRuns,
AZStd::vector<TimedOutTestRun>&& timedOutTestRuns,
AZStd::vector<UnexecutedTestRun>&& unexecutedTestRuns)
: m_startTime(startTime)
, m_result(result)
, m_duration(duration)
, m_passingTests(AZStd::move(passingTests))
, m_failingTests(AZStd::move(failingTests))
, m_executionFailureTests(AZStd::move(executionFailureTests))
, m_timedOutTests(AZStd::move(timedOutTests))
, m_unexecutedTests(AZStd::move(unexecutedTests))
, m_passingTestRuns(AZStd::move(passingTestRuns))
, m_failingTestRuns(AZStd::move(failingTestRuns))
, m_executionFailureTestRuns(AZStd::move(executionFailureTestRuns))
, m_timedOutTestRuns(AZStd::move(timedOutTestRuns))
, m_unexecutedTestRuns(AZStd::move(unexecutedTestRuns))
{
for (const auto& failingTestRun : m_failingTestRuns)
{
m_totalNumPassingTests += failingTestRun.GetTotalNumPassingTests();
m_totalNumFailingTests += failingTestRun.GetTotalNumFailingTests();
m_totalNumDisabledTests += failingTestRun.GetTotalNumDisabledTests();
}
for (const auto& passingTestRun : m_passingTestRuns)
{
m_totalNumPassingTests += passingTestRun.GetTotalNumPassingTests();
m_totalNumDisabledTests += passingTestRun.GetTotalNumDisabledTests();
}
}
TestSequenceResult TestRunReport::GetResult() const
@ -74,234 +84,238 @@ namespace TestImpact
return m_duration;
}
size_t TestRunReport::GetNumPassingTests() const
{
return m_passingTests.size();
}
size_t TestRunReport::GetNumFailingTests() const
{
return m_failingTests.size();
}
size_t TestRunReport::GetNumTimedOutTests() const
{
return m_timedOutTests.size();
}
size_t TestRunReport::GetNumUnexecutedTests() const
size_t TestRunReport::GetTotalNumTestRuns() const
{
return m_unexecutedTests.size();
return
GetNumPassingTestRuns() +
GetNumFailingTestRuns() +
GetNumExecutionFailureTestRuns() +
GetNumTimedOutTestRuns() +
GetNumUnexecutedTestRuns();
}
const AZStd::vector<TestRun>& TestRunReport::GetPassingTests() const
size_t TestRunReport::GetNumPassingTestRuns() const
{
return m_passingTests;
return m_passingTestRuns.size();
}
const AZStd::vector<TestRunWithTestFailures>& TestRunReport::GetFailingTests() const
size_t TestRunReport::GetNumFailingTestRuns() const
{
return m_failingTests;
return m_failingTestRuns.size();
}
const AZStd::vector<TestRun>& TestRunReport::GetExecutionFailureTests() const
size_t TestRunReport::GetNumExecutionFailureTestRuns() const
{
return m_executionFailureTests;
return m_executionFailureTestRuns.size();
}
const AZStd::vector<TestRun>& TestRunReport::GetTimedOutTests() const
size_t TestRunReport::GetNumTimedOutTestRuns() const
{
return m_timedOutTests;
return m_timedOutTestRuns.size();
}
const AZStd::vector<TestRun>& TestRunReport::GetUnexecutedTests() const
size_t TestRunReport::GetNumUnexecutedTestRuns() const
{
return m_unexecutedTests;
return m_unexecutedTestRuns.size();
}
SequenceReport::SequenceReport(SuiteType suiteType, const TestRunSelection& selectedTests, TestRunReport&& selectedTestRunReport)
: m_suite(suiteType)
, m_selectedTests(selectedTests)
, m_selectedTestRunReport(AZStd::move(selectedTestRunReport))
const AZStd::vector<PassingTestRun>& TestRunReport::GetPassingTestRuns() const
{
return m_passingTestRuns;
}
TestSequenceResult SequenceReport::GetResult() const
const AZStd::vector<FailingTestRun>& TestRunReport::GetFailingTestRuns() const
{
return m_selectedTestRunReport.GetResult();
return m_failingTestRuns;
}
AZStd::chrono::high_resolution_clock::time_point SequenceReport::GetStartTime() const
const AZStd::vector<TestRunWithExecutionFailure>& TestRunReport::GetExecutionFailureTestRuns() const
{
return m_selectedTestRunReport.GetStartTime();
return m_executionFailureTestRuns;
}
AZStd::chrono::high_resolution_clock::time_point SequenceReport::GetEndTime() const
const AZStd::vector<TimedOutTestRun>& TestRunReport::GetTimedOutTestRuns() const
{
return GetStartTime() + GetDuration();
return m_timedOutTestRuns;
}
AZStd::chrono::milliseconds SequenceReport::GetDuration() const
const AZStd::vector<UnexecutedTestRun>& TestRunReport::GetUnexecutedTestRuns() const
{
return m_selectedTestRunReport.GetDuration();
return m_unexecutedTestRuns;
}
TestRunSelection SequenceReport::GetSelectedTests() const
size_t TestRunReport::GetTotalNumPassingTests() const
{
return m_selectedTests;
return m_totalNumPassingTests;
}
TestRunReport SequenceReport::GetSelectedTestRunReport() const
size_t TestRunReport::GetTotalNumFailingTests() const
{
return m_selectedTestRunReport;
return m_totalNumFailingTests;
}
size_t SequenceReport::GetTotalNumPassingTests() const
size_t TestRunReport::GetTotalNumDisabledTests() const
{
return m_selectedTestRunReport.GetNumPassingTests();
return m_totalNumDisabledTests;
}
size_t SequenceReport::GetTotalNumFailingTests() const
{
return m_selectedTestRunReport.GetNumFailingTests();
}
size_t SequenceReport::GetTotalNumTimedOutTests() const
RegularSequenceReport::RegularSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
TestRunReport&& selectedTestRunReport)
: SequenceReportBase(
SequenceReportType::RegularSequence,
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTestRuns,
AZStd::move(selectedTestRunReport))
{
return m_selectedTestRunReport.GetNumTimedOutTests();
}
size_t SequenceReport::GetTotalNumUnexecutedTests() const
SeedSequenceReport::SeedSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
TestRunReport&& selectedTestRunReport)
: SequenceReportBase(
SequenceReportType::SeedSequence,
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTestRuns,
AZStd::move(selectedTestRunReport))
{
return m_selectedTestRunReport.GetNumUnexecutedTests();
}
DraftingSequenceReport::DraftingSequenceReport(
ImpactAnalysisSequenceReport::ImpactAnalysisSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const ImpactAnalysisSequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTests,
const AZStd::vector<AZStd::string>& draftedTests,
const TestRunSelection& selectedTestRuns,
const AZStd::vector<AZStd::string>& discardedTestRuns,
const AZStd::vector<AZStd::string>& draftedTestRuns,
TestRunReport&& selectedTestRunReport,
TestRunReport&& draftedTestRunReport)
: SequenceReport(suiteType, selectedTests, AZStd::move(selectedTestRunReport))
, m_draftedTests(draftedTests)
, m_draftedTestRunReport(AZStd::move(draftedTestRunReport))
{
}
TestSequenceResult DraftingSequenceReport::GetResult() const
{
return CalculateMultiTestSequenceResult({SequenceReport::GetResult(), m_draftedTestRunReport.GetResult()});
}
AZStd::chrono::milliseconds DraftingSequenceReport::GetDuration() const
{
return SequenceReport::GetDuration() + m_draftedTestRunReport.GetDuration();
}
size_t DraftingSequenceReport::GetTotalNumPassingTests() const
: DraftingSequenceReportBase(
SequenceReportType::ImpactAnalysisSequence,
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTestRuns,
draftedTestRuns,
AZStd::move(selectedTestRunReport),
AZStd::move(draftedTestRunReport))
, m_discardedTestRuns(discardedTestRuns)
{
return SequenceReport::GetTotalNumPassingTests() + m_draftedTestRunReport.GetNumPassingTests();
}
size_t DraftingSequenceReport::GetTotalNumFailingTests() const
const AZStd::vector<AZStd::string>& ImpactAnalysisSequenceReport::GetDiscardedTestRuns() const
{
return SequenceReport::GetTotalNumFailingTests() + m_draftedTestRunReport.GetNumFailingTests();
return m_discardedTestRuns;
}
size_t DraftingSequenceReport::GetTotalNumTimedOutTests() const
SafeImpactAnalysisSequenceReport::SafeImpactAnalysisSequenceReport(
size_t maxConcurrency,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
const SafeImpactAnalysisSequencePolicyState& policyState,
SuiteType suiteType,
const TestRunSelection& selectedTestRuns,
const TestRunSelection& discardedTestRuns,
const AZStd::vector<AZStd::string>& draftedTestRuns,
TestRunReport&& selectedTestRunReport,
TestRunReport&& discardedTestRunReport,
TestRunReport&& draftedTestRunReport)
: DraftingSequenceReportBase(
SequenceReportType::SafeImpactAnalysisSequence,
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTestRuns,
draftedTestRuns,
AZStd::move(selectedTestRunReport),
AZStd::move(draftedTestRunReport))
, m_discardedTestRuns(discardedTestRuns)
, m_discardedTestRunReport(AZStd::move(discardedTestRunReport))
{
return SequenceReport::GetTotalNumTimedOutTests() + m_draftedTestRunReport.GetNumTimedOutTests();
}
size_t DraftingSequenceReport::GetTotalNumUnexecutedTests() const
TestSequenceResult SafeImpactAnalysisSequenceReport::GetResult() const
{
return SequenceReport::GetTotalNumUnexecutedTests() + m_draftedTestRunReport.GetNumUnexecutedTests();
return CalculateMultiTestSequenceResult({ DraftingSequenceReportBase::GetResult(), m_discardedTestRunReport.GetResult() });
}
const AZStd::vector<AZStd::string>& DraftingSequenceReport::GetDraftedTests() const
AZStd::chrono::milliseconds SafeImpactAnalysisSequenceReport::GetDuration() const
{
return m_draftedTests;
return DraftingSequenceReportBase::GetDuration() + m_discardedTestRunReport.GetDuration();
}
TestRunReport DraftingSequenceReport::GetDraftedTestRunReport() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumTestRuns() const
{
return m_draftedTestRunReport;
return DraftingSequenceReportBase::GetTotalNumTestRuns() + m_discardedTestRunReport.GetTotalNumTestRuns();
}
ImpactAnalysisSequenceReport::ImpactAnalysisSequenceReport(
SuiteType suiteType,
const TestRunSelection& selectedTests,
const AZStd::vector<AZStd::string>& discardedTests,
const AZStd::vector<AZStd::string>& draftedTests,
TestRunReport&& selectedTestRunReport,
TestRunReport&& draftedTestRunReport)
: DraftingSequenceReport(
suiteType,
selectedTests,
draftedTests,
AZStd::move(selectedTestRunReport),
AZStd::move(draftedTestRunReport))
, m_discardedTests(discardedTests)
{
}
const AZStd::vector<AZStd::string>& ImpactAnalysisSequenceReport::GetDiscardedTests() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumPassingTests() const
{
return m_discardedTests;
return DraftingSequenceReportBase::GetTotalNumPassingTests() + m_discardedTestRunReport.GetTotalNumPassingTests();
}
SafeImpactAnalysisSequenceReport::SafeImpactAnalysisSequenceReport(
SuiteType suiteType,
const TestRunSelection& selectedTests,
const TestRunSelection& discardedTests,
const AZStd::vector<AZStd::string>& draftedTests,
TestRunReport&& selectedTestRunReport,
TestRunReport&& discardedTestRunReport,
TestRunReport&& draftedTestRunReport)
: DraftingSequenceReport(
suiteType,
selectedTests,
draftedTests,
AZStd::move(selectedTestRunReport),
AZStd::move(draftedTestRunReport))
, m_discardedTests(discardedTests)
, m_discardedTestRunReport(AZStd::move(discardedTestRunReport))
size_t SafeImpactAnalysisSequenceReport::GetTotalNumFailingTests() const
{
return DraftingSequenceReportBase::GetTotalNumFailingTests() + m_discardedTestRunReport.GetTotalNumFailingTests();
}
TestSequenceResult SafeImpactAnalysisSequenceReport::GetResult() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumDisabledTests() const
{
return CalculateMultiTestSequenceResult({ DraftingSequenceReport::GetResult(), m_discardedTestRunReport.GetResult() });
return DraftingSequenceReportBase::GetTotalNumDisabledTests() + m_discardedTestRunReport.GetTotalNumDisabledTests();
}
AZStd::chrono::milliseconds SafeImpactAnalysisSequenceReport::GetDuration() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumPassingTestRuns() const
{
return DraftingSequenceReport::GetDuration() + m_discardedTestRunReport.GetDuration();
return DraftingSequenceReportBase::GetTotalNumPassingTestRuns() + m_discardedTestRunReport.GetNumPassingTestRuns();
}
size_t SafeImpactAnalysisSequenceReport::GetTotalNumPassingTests() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumFailingTestRuns() const
{
return DraftingSequenceReport::GetTotalNumPassingTests() + m_discardedTestRunReport.GetNumPassingTests();
return DraftingSequenceReportBase::GetTotalNumFailingTestRuns() + m_discardedTestRunReport.GetNumFailingTestRuns();
}
size_t SafeImpactAnalysisSequenceReport::GetTotalNumFailingTests() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumExecutionFailureTestRuns() const
{
return DraftingSequenceReport::GetTotalNumFailingTests() + m_discardedTestRunReport.GetNumFailingTests();
return DraftingSequenceReportBase::GetTotalNumExecutionFailureTestRuns() + m_discardedTestRunReport.GetNumExecutionFailureTestRuns();
}
size_t SafeImpactAnalysisSequenceReport::GetTotalNumTimedOutTests() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumTimedOutTestRuns() const
{
return DraftingSequenceReport::GetTotalNumTimedOutTests() + m_discardedTestRunReport.GetNumTimedOutTests();
return DraftingSequenceReportBase::GetTotalNumTimedOutTestRuns() + m_discardedTestRunReport.GetNumTimedOutTestRuns();
}
size_t SafeImpactAnalysisSequenceReport::GetTotalNumUnexecutedTests() const
size_t SafeImpactAnalysisSequenceReport::GetTotalNumUnexecutedTestRuns() const
{
return DraftingSequenceReport::GetTotalNumUnexecutedTests() + m_discardedTestRunReport.GetNumUnexecutedTests();
return DraftingSequenceReportBase::GetTotalNumUnexecutedTestRuns() + m_discardedTestRunReport.GetNumUnexecutedTestRuns();
}
const TestRunSelection SafeImpactAnalysisSequenceReport::GetDiscardedTests() const
const TestRunSelection SafeImpactAnalysisSequenceReport::GetDiscardedTestRuns() const
{
return m_discardedTests;
return m_discardedTestRuns;
}
TestRunReport SafeImpactAnalysisSequenceReport::GetDiscardedTestRunReport() const

@ -0,0 +1,606 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include <TestImpactFramework/TestImpactClientSequenceReportSerializer.h>
#include <TestImpactFramework/TestImpactSequenceReportException.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <AzCore/JSON/document.h>
#include <AzCore/JSON/prettywriter.h>
#include <AzCore/JSON/rapidjson.h>
#include <AzCore/JSON/stringbuffer.h>
namespace TestImpact
{
namespace
{
namespace SequenceReportFields
{
// Keys for pertinent JSON node and attribute names
constexpr const char* Keys[] =
{
"name",
"command_args",
"start_time",
"end_time",
"duration",
"result",
"num_passing_tests",
"num_failing_tests",
"num_disabled_tests",
"tests",
"num_passing_test_runs",
"num_failing_test_runs",
"num_execution_failure_test_runs",
"num_timed_out_test_runs",
"num_unexecuted_test_runs",
"passing_test_runs",
"failing_test_runs",
"execution_failure_test_runs",
"timed_out_test_runs",
"unexecuted_test_runs",
"total_num_passing_tests",
"total_num_failing_tests",
"total_num_disabled_tests",
"total_num_test_runs",
"num_included_test_runs",
"num_excluded_test_runs",
"included_test_runs",
"excluded_test_runs",
"execution_failure",
"coverage_failure",
"test_failure",
"integrity_failure",
"test_sharding",
"target_output_capture",
"test_prioritization",
"dynamic_dependency_map",
"type",
"test_target_timeout",
"global_timeout",
"max_concurrency",
"policy",
"suite",
"selected_test_runs",
"selected_test_run_report",
"total_num_passing_test_runs",
"total_num_failing_test_runs",
"total_num_execution_failure_test_runs",
"total_num_timed_out_test_runs",
"total_num_unexecuted_test_runs",
"drafted_test_runs",
"drafted_test_run_report",
"discarded_test_runs",
"discarded_test_run_report"
};
enum
{
Name,
CommandArgs,
StartTime,
EndTime,
Duration,
Result,
NumPassingTests,
NumFailingTests,
NumDisabledTests,
Tests,
NumPassingTestRuns,
NumFailingTestRuns,
NumExecutionFailureTestRuns,
NumTimedOutTestRuns,
NumUnexecutedTestRuns,
PassingTestRuns,
FailingTestRuns,
ExecutionFailureTestRuns,
TimedOutTestRuns,
UnexecutedTestRuns,
TotalNumPassingTests,
TotalNumFailingTests,
TotalNumDisabledTests,
TotalNumTestRuns,
NumIncludedTestRuns,
NumExcludedTestRuns,
IncludedTestRuns,
ExcludedTestRuns,
ExecutionFailure,
CoverageFailure,
TestFailure,
IntegrityFailure,
TestSharding,
TargetOutputCapture,
TestPrioritization,
DynamicDependencyMap,
Type,
TestTargetTimeout,
GlobalTimeout,
MaxConcurrency,
Policy,
Suite,
SelectedTestRuns,
SelectedTestRunReport,
TotalNumPassingTestRuns,
TotalNumFailingTestRuns,
TotalNumExecutionFailureTestRuns,
TotalNumTimedOutTestRuns,
TotalNumUnexecutedTestRuns,
DraftedTestRuns,
DraftedTestRunReport,
DiscardedTestRuns,
DiscardedTestRunReport
};
} // namespace SequenceReportFields
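// The Keys array and the anonymous enum above are kept in lockstep so a field name can be looked
// up by symbolic index, e.g. writer.Key(SequenceReportFields::Keys[SequenceReportFields::Name]).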
AZ::u64 TimePointInMsAsInt64(AZStd::chrono::high_resolution_clock::time_point timePoint)
{
return AZStd::chrono::duration_cast<AZStd::chrono::milliseconds>(timePoint.time_since_epoch()).count();
}
void SerializeTestRunMembers(const Client::TestRunBase& testRun, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
// Name
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Name]);
writer.String(testRun.GetTargetName().c_str());
// Command string
writer.Key(SequenceReportFields::Keys[SequenceReportFields::CommandArgs]);
writer.String(testRun.GetCommandString().c_str());
// Start time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::StartTime]);
writer.Int64(TimePointInMsAsInt64(testRun.GetStartTime()));
// End time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::EndTime]);
writer.Int64(TimePointInMsAsInt64(testRun.GetEndTime()));
// Duration
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Duration]);
writer.Uint64(testRun.GetDuration().count());
// Result
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Result]);
writer.String(TestRunResultAsString(testRun.GetResult()).c_str());
}
void SerializeTestRun(const Client::TestRunBase& testRun, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
writer.StartObject();
{
SerializeTestRunMembers(testRun, writer);
}
writer.EndObject();
}
void SerializeCompletedTestRun(const Client::CompletedTestRun& testRun, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
writer.StartObject();
{
SerializeTestRunMembers(testRun, writer);
// Number of passing test cases
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumPassingTests]);
writer.Uint64(testRun.GetTotalNumPassingTests());
// Number of failing test cases
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumFailingTests]);
writer.Uint64(testRun.GetTotalNumFailingTests());
// Number of disabled test cases
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumDisabledTests]);
writer.Uint64(testRun.GetTotalNumDisabledTests());
// Tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Tests]);
writer.StartArray();
for (const auto& test : testRun.GetTests())
{
// Test
writer.StartObject();
// Name
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Name]);
writer.String(test.GetName().c_str());
// Result
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Result]);
writer.String(ClientTestResultAsString(test.GetResult()).c_str());
writer.EndObject(); // Test
}
writer.EndArray(); // Tests
}
writer.EndObject();
}
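// For reference (editorial note, not part of this change), a completed test run serializes to
// approximately the following shape; the values shown are illustrative:
//
//   {
//     "name": "ExampleTestTarget",
//     "command_args": "...",
//     "start_time": 0,
//     "end_time": 1500,
//     "duration": 1500,
//     "result": "...",
//     "num_passing_tests": 1,
//     "num_failing_tests": 1,
//     "num_disabled_tests": 0,
//     "tests": [ { "name": "SuiteA.TestFoo", "result": "..." } ]
//   }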
void SerializeTestRunReport(
const Client::TestRunReport& testRunReport, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
writer.StartObject();
{
// Result
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Result]);
writer.String(TestSequenceResultAsString(testRunReport.GetResult()).c_str());
// Start time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::StartTime]);
writer.Int64(TimePointInMsAsInt64(testRunReport.GetStartTime()));
// End time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::EndTime]);
writer.Int64(TimePointInMsAsInt64(testRunReport.GetEndTime()));
// Duration
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Duration]);
writer.Uint64(testRunReport.GetDuration().count());
// Number of passing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumPassingTestRuns]);
writer.Uint64(testRunReport.GetNumPassingTestRuns());
// Number of failing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumFailingTestRuns]);
writer.Uint64(testRunReport.GetNumFailingTestRuns());
// Number of test runs that failed to execute
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumExecutionFailureTestRuns]);
writer.Uint64(testRunReport.GetNumExecutionFailureTestRuns());
// Number of timed out test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumTimedOutTestRuns]);
writer.Uint64(testRunReport.GetNumTimedOutTestRuns());
// Number of unexecuted test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumUnexecutedTestRuns]);
writer.Uint64(testRunReport.GetNumUnexecutedTestRuns());
// Passing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::PassingTestRuns]);
writer.StartArray();
for (const auto& testRun : testRunReport.GetPassingTestRuns())
{
SerializeCompletedTestRun(testRun, writer);
}
writer.EndArray(); // Passing test runs
// Failing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::FailingTestRuns]);
writer.StartArray();
for (const auto& testRun : testRunReport.GetFailingTestRuns())
{
SerializeCompletedTestRun(testRun, writer);
}
writer.EndArray(); // Failing test runs
// Execution failures
writer.Key(SequenceReportFields::Keys[SequenceReportFields::ExecutionFailureTestRuns]);
writer.StartArray();
for (const auto& testRun : testRunReport.GetExecutionFailureTestRuns())
{
SerializeTestRun(testRun, writer);
}
writer.EndArray(); // Execution failures
// Timed out test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TimedOutTestRuns]);
writer.StartArray();
for (const auto& testRun : testRunReport.GetTimedOutTestRuns())
{
SerializeTestRun(testRun, writer);
}
writer.EndArray(); // Timed out test runs
// Unexecuted test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::UnexecutedTestRuns]);
writer.StartArray();
for (const auto& testRun : testRunReport.GetUnexecutedTestRuns())
{
SerializeTestRun(testRun, writer);
}
writer.EndArray(); // Unexecuted test runs
// Number of passing tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumPassingTests]);
writer.Uint64(testRunReport.GetTotalNumPassingTests());
// Number of failing tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumFailingTests]);
writer.Uint64(testRunReport.GetTotalNumFailingTests());
// Number of disabled tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumDisabledTests]);
writer.Uint64(testRunReport.GetTotalNumDisabledTests());
}
writer.EndObject();
}
void SerializeTestSelection(
const Client::TestRunSelection& testSelection, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
writer.StartObject();
{
// Total number of test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumTestRuns]);
writer.Uint64(testSelection.GetTotalNumTests());
// Number of included test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumIncludedTestRuns]);
writer.Uint64(testSelection.GetNumIncludedTestRuns());
// Number of excluded test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::NumExcludedTestRuns]);
writer.Uint64(testSelection.GetNumExcludedTestRuns());
// Included test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::IncludedTestRuns]);
writer.StartArray();
for (const auto& testRun : testSelection.GetIncludededTestRuns())
{
writer.String(testRun.c_str());
}
writer.EndArray(); // Included test runs
// Excluded test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::ExcludedTestRuns]);
writer.StartArray();
for (const auto& testRun : testSelection.GetExcludedTestRuns())
{
writer.String(testRun.c_str());
}
writer.EndArray(); // Excluded test runs
}
writer.EndObject();
}
void SerializePolicyStateBaseMembers(const PolicyStateBase& policyState, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
// Execution failure
writer.Key(SequenceReportFields::Keys[SequenceReportFields::ExecutionFailure]);
writer.String(ExecutionFailurePolicyAsString(policyState.m_executionFailurePolicy).c_str());
// Failed test coverage
writer.Key(SequenceReportFields::Keys[SequenceReportFields::CoverageFailure]);
writer.String(FailedTestCoveragePolicyAsString(policyState.m_failedTestCoveragePolicy).c_str());
// Test failure
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TestFailure]);
writer.String(TestFailurePolicyAsString(policyState.m_testFailurePolicy).c_str());
// Integrity failure
writer.Key(SequenceReportFields::Keys[SequenceReportFields::IntegrityFailure]);
writer.String(IntegrityFailurePolicyAsString(policyState.m_integrityFailurePolicy).c_str());
// Test sharding
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TestSharding]);
writer.String(TestShardingPolicyAsString(policyState.m_testShardingPolicy).c_str());
// Target output capture
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TargetOutputCapture]);
writer.String(TargetOutputCapturePolicyAsString(policyState.m_targetOutputCapture).c_str());
}
void SerializePolicyStateMembers(
const SequencePolicyState& policyState, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
SerializePolicyStateBaseMembers(policyState.m_basePolicies, writer);
}
void SerializePolicyStateMembers(
const SafeImpactAnalysisSequencePolicyState& policyState, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
SerializePolicyStateBaseMembers(policyState.m_basePolicies, writer);
// Test prioritization
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TestPrioritization]);
writer.String(TestPrioritizationPolicyAsString(policyState.m_testPrioritizationPolicy).c_str());
}
void SerializePolicyStateMembers(
const ImpactAnalysisSequencePolicyState& policyState, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
SerializePolicyStateBaseMembers(policyState.m_basePolicies, writer);
// Test prioritization
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TestPrioritization]);
writer.String(TestPrioritizationPolicyAsString(policyState.m_testPrioritizationPolicy).c_str());
// Dynamic dependency map
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DynamicDependencyMap]);
writer.String(DynamicDependencyMapPolicyAsString(policyState.m_dynamicDependencyMap).c_str());
}
template<typename PolicyStateType>
void SerializeSequenceReportBaseMembers(
const Client::SequenceReportBase<PolicyStateType>& sequenceReport, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
// Type
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Type]);
writer.String(SequenceReportTypeAsString(sequenceReport.GetType()).c_str());
// Test target timeout
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TestTargetTimeout]);
writer.Uint64(sequenceReport.GetTestTargetTimeout().value_or(AZStd::chrono::milliseconds{0}).count());
// Global timeout
writer.Key(SequenceReportFields::Keys[SequenceReportFields::GlobalTimeout]);
writer.Uint64(sequenceReport.GetGlobalTimeout().value_or(AZStd::chrono::milliseconds{ 0 }).count());
// Maximum concurrency
writer.Key(SequenceReportFields::Keys[SequenceReportFields::MaxConcurrency]);
writer.Uint64(sequenceReport.GetMaxConcurrency());
// Policies
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Policy]);
writer.StartObject();
{
SerializePolicyStateMembers(sequenceReport.GetPolicyState(), writer);
}
writer.EndObject(); // Policies
// Suite
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Suite]);
writer.String(SuiteTypeAsString(sequenceReport.GetSuite()).c_str());
// Selected test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::SelectedTestRuns]);
SerializeTestSelection(sequenceReport.GetSelectedTestRuns(), writer);
// Selected test run report
writer.Key(SequenceReportFields::Keys[SequenceReportFields::SelectedTestRunReport]);
SerializeTestRunReport(sequenceReport.GetSelectedTestRunReport(), writer);
// Start time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::StartTime]);
writer.Int64(TimePointInMsAsInt64(sequenceReport.GetStartTime()));
// End time
writer.Key(SequenceReportFields::Keys[SequenceReportFields::EndTime]);
writer.Int64(TimePointInMsAsInt64(sequenceReport.GetEndTime()));
// Duration
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Duration]);
writer.Uint64(sequenceReport.GetDuration().count());
// Result
writer.Key(SequenceReportFields::Keys[SequenceReportFields::Result]);
writer.String(TestSequenceResultAsString(sequenceReport.GetResult()).c_str());
// Total number of test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumTestRuns());
// Total number of passing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumPassingTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumPassingTestRuns());
// Total number of failing test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumFailingTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumFailingTestRuns());
// Total number of test runs that failed to execute
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumExecutionFailureTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumExecutionFailureTestRuns());
// Total number of timed out test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumTimedOutTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumTimedOutTestRuns());
// Total number of unexecuted test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumUnexecutedTestRuns]);
writer.Uint64(sequenceReport.GetTotalNumUnexecutedTestRuns());
// Total number of passing tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumPassingTests]);
writer.Uint64(sequenceReport.GetTotalNumPassingTests());
// Total number of failing tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumFailingTests]);
writer.Uint64(sequenceReport.GetTotalNumFailingTests());
// Total number of disabled tests
writer.Key(SequenceReportFields::Keys[SequenceReportFields::TotalNumDisabledTests]);
writer.Uint64(sequenceReport.GetTotalNumDisabledTests());
}
template<typename PolicyStateType>
void SerializeDraftingSequenceReportMembers(
const Client::DraftingSequenceReportBase<PolicyStateType>& sequenceReport, rapidjson::PrettyWriter<rapidjson::StringBuffer>& writer)
{
SerializeSequenceReportBaseMembers(sequenceReport, writer);
// Drafted test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DraftedTestRuns]);
writer.StartArray();
for (const auto& testRun : sequenceReport.GetDraftedTestRuns())
{
writer.String(testRun.c_str());
}
writer.EndArray(); // Drafted test runs
// Drafted test run report
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DraftedTestRunReport]);
SerializeTestRunReport(sequenceReport.GetDraftedTestRunReport(), writer);
}
} // namespace
AZStd::string SerializeSequenceReport(const Client::RegularSequenceReport& sequenceReport)
{
rapidjson::StringBuffer stringBuffer;
rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(stringBuffer);
writer.StartObject();
{
SerializeSequenceReportBaseMembers(sequenceReport, writer);
}
writer.EndObject();
return stringBuffer.GetString();
}
AZStd::string SerializeSequenceReport(const Client::SeedSequenceReport& sequenceReport)
{
rapidjson::StringBuffer stringBuffer;
rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(stringBuffer);
writer.StartObject();
{
SerializeSequenceReportBaseMembers(sequenceReport, writer);
}
writer.EndObject();
return stringBuffer.GetString();
}
AZStd::string SerializeSequenceReport(const Client::ImpactAnalysisSequenceReport& sequenceReport)
{
rapidjson::StringBuffer stringBuffer;
rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(stringBuffer);
writer.StartObject();
{
SerializeDraftingSequenceReportMembers(sequenceReport, writer);
// Discarded test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRuns]);
writer.StartArray();
for (const auto& testRun : sequenceReport.GetDiscardedTestRuns())
{
writer.String(testRun.c_str());
}
writer.EndArray(); // Discarded test runs
}
writer.EndObject();
return stringBuffer.GetString();
}
AZStd::string SerializeSequenceReport(const Client::SafeImpactAnalysisSequenceReport& sequenceReport)
{
rapidjson::StringBuffer stringBuffer;
rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(stringBuffer);
writer.StartObject();
{
SerializeDraftingSequenceReportMembers(sequenceReport, writer);
// Discarded test runs
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRuns]);
SerializeTestSelection(sequenceReport.GetDiscardedTestRuns(), writer);
// Discarded test run report
writer.Key(SequenceReportFields::Keys[SequenceReportFields::DiscardedTestRunReport]);
SerializeTestRunReport(sequenceReport.GetDiscardedTestRunReport(), writer);
}
writer.EndObject();
return stringBuffer.GetString();
}
} // namespace TestImpact
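// Illustrative usage sketch (editorial addition, not part of this change), assuming a completed
// RegularSequenceReport named "report"; persisting the string to the path supplied via the new
// sequence report option is handled elsewhere in the framework and is elided here:
//
//   const AZStd::string json = TestImpact::SerializeSequenceReport(report);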

@ -8,11 +8,13 @@
#include <TestImpactFramework/TestImpactClientTestRun.h>
#include <AzCore/std/tuple.h>
namespace TestImpact
{
namespace Client
{
TestRun::TestRun(
TestRunBase::TestRunBase(
const AZStd::string& name,
const AZStd::string& commandString,
AZStd::chrono::high_resolution_clock::time_point startTime,
@ -26,107 +28,145 @@ namespace TestImpact
{
}
const AZStd::string& TestRun::GetTargetName() const
const AZStd::string& TestRunBase::GetTargetName() const
{
return m_targetName;
}
const AZStd::string& TestRun::GetCommandString() const
const AZStd::string& TestRunBase::GetCommandString() const
{
return m_commandString;
}
AZStd::chrono::high_resolution_clock::time_point TestRun::GetStartTime() const
AZStd::chrono::high_resolution_clock::time_point TestRunBase::GetStartTime() const
{
return m_startTime;
}
AZStd::chrono::high_resolution_clock::time_point TestRun::GetEndTime() const
AZStd::chrono::high_resolution_clock::time_point TestRunBase::GetEndTime() const
{
return m_startTime + m_duration;
}
AZStd::chrono::milliseconds TestRun::GetDuration() const
AZStd::chrono::milliseconds TestRunBase::GetDuration() const
{
return m_duration;
}
TestRunResult TestRun::GetResult() const
TestRunResult TestRunBase::GetResult() const
{
return m_result;
}
TestFailure::TestFailure(const AZStd::string& testName, const AZStd::string& errorMessage)
: m_name(testName)
, m_errorMessage(errorMessage)
TestRunWithExecutionFailure::TestRunWithExecutionFailure(TestRunBase&& testRun)
: TestRunBase(AZStd::move(testRun))
{
}
const AZStd::string& TestFailure::GetName() const
TimedOutTestRun::TimedOutTestRun(TestRunBase&& testRun)
: TestRunBase(AZStd::move(testRun))
{
return m_name;
}
const AZStd::string& TestFailure::GetErrorMessage() const
UnexecutedTestRun::UnexecutedTestRun(TestRunBase&& testRun)
: TestRunBase(AZStd::move(testRun))
{
return m_errorMessage;
}
TestCaseFailure::TestCaseFailure(const AZStd::string& testCaseName, AZStd::vector<TestFailure>&& testFailures)
: m_name(testCaseName)
, m_testFailures(AZStd::move(testFailures))
Test::Test(const AZStd::string& testName, TestResult result)
: m_name(testName)
, m_result(result)
{
}
const AZStd::string& TestCaseFailure::GetName() const
const AZStd::string& Test::GetName() const
{
return m_name;
}
const AZStd::vector<TestFailure>& TestCaseFailure::GetTestFailures() const
TestResult Test::GetResult() const
{
return m_testFailures;
return m_result;
}
static size_t CalculateNumTestRunFailures(const AZStd::vector<TestCaseFailure>& testFailures)
AZStd::tuple<size_t, size_t, size_t> CalculateTestCaseMetrics(const AZStd::vector<Test>& tests)
{
size_t numTestFailures = 0;
for (const auto& testCase : testFailures)
size_t totalNumPassingTests = 0;
size_t totalNumFailingTests = 0;
size_t totalNumDisabledTests = 0;
for (const auto& test : tests)
{
numTestFailures += testCase.GetTestFailures().size();
if (test.GetResult() == Client::TestResult::Passed)
{
totalNumPassingTests++;
}
else if (test.GetResult() == Client::TestResult::Failed)
{
totalNumFailingTests++;
}
else
{
totalNumDisabledTests++;
}
}
return numTestFailures;
return { totalNumPassingTests, totalNumFailingTests, totalNumDisabledTests };
}
TestRunWithTestFailures::TestRunWithTestFailures(
CompletedTestRun::CompletedTestRun(
const AZStd::string& name,
const AZStd::string& commandString,
AZStd::chrono::high_resolution_clock::time_point startTime,
AZStd::chrono::milliseconds duration,
TestRunResult result,
AZStd::vector<TestCaseFailure>&& testFailures)
: TestRun(name, commandString, startTime, duration, result)
, m_testCaseFailures(AZStd::move(testFailures))
AZStd::vector<Test>&& tests)
: TestRunBase(name, commandString, startTime, duration, result)
, m_tests(AZStd::move(tests))
{
AZStd::tie(m_totalNumPassingTests, m_totalNumFailingTests, m_totalNumDisabledTests) = CalculateTestCaseMetrics(m_tests);
}
CompletedTestRun::CompletedTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests)
: TestRunBase(AZStd::move(testRun))
, m_tests(AZStd::move(tests))
{
AZStd::tie(m_totalNumPassingTests, m_totalNumFailingTests, m_totalNumDisabledTests) = CalculateTestCaseMetrics(m_tests);
}
size_t CompletedTestRun::GetTotalNumTests() const
{
return m_tests.size();
}
size_t CompletedTestRun::GetTotalNumPassingTests() const
{
return m_totalNumPassingTests;
}
size_t CompletedTestRun::GetTotalNumFailingTests() const
{
return m_totalNumFailingTests;
}
size_t CompletedTestRun::GetTotalNumDisabledTests() const
{
m_numTestFailures = CalculateNumTestRunFailures(m_testCaseFailures);
return m_totalNumDisabledTests;
}
TestRunWithTestFailures::TestRunWithTestFailures(TestRun&& testRun, AZStd::vector<TestCaseFailure>&& testFailures)
: TestRun(AZStd::move(testRun))
, m_testCaseFailures(AZStd::move(testFailures))
const AZStd::vector<Test>& CompletedTestRun::GetTests() const
{
m_numTestFailures = CalculateNumTestRunFailures(m_testCaseFailures);
return m_tests;
}
size_t TestRunWithTestFailures::GetNumTestFailures() const
PassingTestRun::PassingTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests)
: CompletedTestRun(AZStd::move(testRun), AZStd::move(tests))
{
return m_numTestFailures;
}
const AZStd::vector<TestCaseFailure>& TestRunWithTestFailures::GetTestCaseFailures() const
FailingTestRun::FailingTestRun(TestRunBase&& testRun, AZStd::vector<Test>&& tests)
: CompletedTestRun(AZStd::move(testRun), AZStd::move(tests))
{
return m_testCaseFailures;
}
} // namespace Client
} // namespace TestImpact

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactFramework/TestImpactRuntime.h>
#include <TestImpactFramework/TestImpactRuntimeException.h>
@ -81,7 +81,7 @@ namespace TestImpact
{
if (m_testCompleteCallback.has_value())
{
Client::TestRun testRun(
Client::TestRunBase testRun(
testJob.GetTestTarget()->GetName(),
testJob.GetCommandString(),
testJob.GetStartTime(),
@ -110,8 +110,140 @@ namespace TestImpact
return result;
}
//! Utility structure for holding the pertinent data for test run reports.
template<typename TestJob>
struct TestRunData
{
TestSequenceResult m_result = TestSequenceResult::Success;
AZStd::vector<TestJob> m_jobs;
AZStd::chrono::high_resolution_clock::time_point m_relativeStartTime;
AZStd::chrono::milliseconds m_duration = AZStd::chrono::milliseconds{ 0 };
};
//! Wrapper for the impact analysis test sequence to handle both the updating and non-updating policies through a common pathway.
//! @tparam TestRunnerFunctor The functor for running the specified tests.
//! @tparam TestJob The test engine job type returned by the functor.
//! @param maxConcurrency The maximum concurrency being used for this sequence.
//! @param policyState The policy state being used for the sequence.
//! @param suiteType The suite type used for this sequence.
//! @param sequenceTimer The timer to use for the test run timings.
//! @param testRunner The test runner functor to use for each of the test runs.
//! @param includedSelectedTestTargets The subset of test targets that were selected to run and not also fully excluded from running.
//! @param excludedSelectedTestTargets The subset of test targets that were selected to run but were fully excluded from running.
//! @param discardedTestTargets The subset of test targets that were discarded from the test selection and will not be run.
//! @param draftedTestTargets The test targets drafted in for running due to having no coverage data in the dynamic dependency map.
//! @param testTargetTimeout The maximum duration an individual test target may run for (infinite if empty).
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param testSequenceStartCallback The client function to be called after the test targets have been selected but prior to running the
//! tests.
//! @param testSequenceEndCallback The client function to be called after the test sequence has completed.
//! @param testCompleteCallback The client function to be called after an individual test run has completed.
//! @param updateCoverage The function to call to update the dynamic dependency map with test coverage (if any).
template<typename TestRunnerFunctor, typename TestJob>
Client::ImpactAnalysisSequenceReport ImpactAnalysisTestSequenceWrapper(
size_t maxConcurrency,
const ImpactAnalysisSequencePolicyState& policyState,
SuiteType suiteType,
const Timer& sequenceTimer,
const TestRunnerFunctor& testRunner,
const AZStd::vector<const TestTarget*>& includedSelectedTestTargets,
const AZStd::vector<const TestTarget*>& excludedSelectedTestTargets,
const AZStd::vector<const TestTarget*>& discardedTestTargets,
const AZStd::vector<const TestTarget*>& draftedTestTargets,
const AZStd::optional<AZStd::chrono::milliseconds>& testTargetTimeout,
const AZStd::optional<AZStd::chrono::milliseconds>& globalTimeout,
AZStd::optional<ImpactAnalysisTestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::ImpactAnalysisSequenceReport>> testSequenceEndCallback,
AZStd::optional<TestRunCompleteCallback> testCompleteCallback,
AZStd::optional<AZStd::function<void(const AZStd::vector<TestJob>& jobs)>> updateCoverage)
{
TestRunData<TestJob> selectedTestRunData, draftedTestRunData;
AZStd::optional<AZStd::chrono::milliseconds> sequenceTimeout = globalTimeout;
// Extract the client facing representation of selected, discarded and drafted test targets
const Client::TestRunSelection selectedTests(
ExtractTestTargetNames(includedSelectedTestTargets), ExtractTestTargetNames(excludedSelectedTestTargets));
const auto discardedTests = ExtractTestTargetNames(discardedTestTargets);
const auto draftedTests = ExtractTestTargetNames(draftedTestTargets);
// Inform the client that the sequence is about to start
if (testSequenceStartCallback.has_value())
{
(*testSequenceStartCallback)(suiteType, selectedTests, discardedTests, draftedTests);
}
// We share the test run complete handler between the selected and drafted test runs so as to present them together as one
// continuous test sequence to the client rather than two discrete test runs
const size_t totalNumTestRuns = includedSelectedTestTargets.size() + draftedTestTargets.size();
TestRunCompleteCallbackHandler testRunCompleteHandler(totalNumTestRuns, testCompleteCallback);
const auto gatherTestRunData = [&sequenceTimer, &testRunner, &testRunCompleteHandler, &globalTimeout]
(const AZStd::vector<const TestTarget*>& testsTargets, TestRunData<TestJob>& testRunData)
{
const Timer testRunTimer;
testRunData.m_relativeStartTime = testRunTimer.GetStartTimePointRelative(sequenceTimer);
auto [result, jobs] = testRunner(testsTargets, testRunCompleteHandler, globalTimeout);
testRunData.m_result = result;
testRunData.m_jobs = AZStd::move(jobs);
testRunData.m_duration = testRunTimer.GetElapsedMs();
};
if (!includedSelectedTestTargets.empty())
{
// Run the selected test targets and collect the test run results
gatherTestRunData(includedSelectedTestTargets, selectedTestRunData);
// Carry the remaining global sequence time over to the drafted test run
if (globalTimeout.has_value())
{
const auto elapsed = selectedTestRunData.m_duration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
}
}
if (!draftedTestTargets.empty())
{
// Run the drafted test targets and collect the test run results
gatherTestRunData(draftedTestTargets, draftedTestRunData);
}
// Generate the sequence report for the client
const auto sequenceReport = Client::ImpactAnalysisSequenceReport(
maxConcurrency,
testTargetTimeout,
globalTimeout,
policyState,
suiteType,
selectedTests,
discardedTests,
draftedTests,
GenerateTestRunReport(
selectedTestRunData.m_result,
selectedTestRunData.m_relativeStartTime,
selectedTestRunData.m_duration,
selectedTestRunData.m_jobs),
GenerateTestRunReport(
draftedTestRunData.m_result,
draftedTestRunData.m_relativeStartTime,
draftedTestRunData.m_duration,
draftedTestRunData.m_jobs));
// Inform the client that the sequence has ended
if (testSequenceEndCallback.has_value())
{
(*testSequenceEndCallback)(sequenceReport);
}
// Update the dynamic dependency map with the latest coverage data (if any)
if (updateCoverage.has_value())
{
(*updateCoverage)(ConcatenateVectors(selectedTestRunData.m_jobs, draftedTestRunData.m_jobs));
}
return sequenceReport;
}
Runtime::Runtime(
RuntimeConfig&& config,
AZStd::optional<RepoPath> dataFile,
SuiteType suiteFilter,
Policy::ExecutionFailure executionFailurePolicy,
Policy::FailedTestCoverage failedTestCoveragePolicy,
@ -151,27 +283,22 @@ namespace TestImpact
try
{
if (dataFile.has_value())
{
m_sparTiaFile = dataFile.value().String();
}
else
{
m_sparTiaFile = m_config.m_workspace.m_active.m_sparTiaFiles[static_cast<size_t>(m_suiteFilter)].String();
}
// Populate the dynamic dependency map with the existing source coverage data (if any)
m_sparTIAFile = m_config.m_workspace.m_active.m_sparTIAFiles[static_cast<size_t>(m_suiteFilter)].String();
const auto tiaDataRaw = ReadFileContents<Exception>(m_sparTIAFile);
const auto tiaDataRaw = ReadFileContents<Exception>(m_sparTiaFile);
const auto tiaData = DeserializeSourceCoveringTestsList(tiaDataRaw);
if (tiaData.GetNumSources())
{
m_dynamicDependencyMap->ReplaceSourceCoverage(tiaData);
m_hasImpactAnalysisData = true;
// Enumerate new test targets
const auto testTargetsWithNoEnumeration = m_dynamicDependencyMap->GetNotCoveringTests();
if (!testTargetsWithNoEnumeration.empty())
{
m_testEngine->UpdateEnumerationCache(
testTargetsWithNoEnumeration,
Policy::ExecutionFailure::Ignore,
Policy::TestFailure::Continue,
AZStd::nullopt,
AZStd::nullopt,
AZStd::nullopt);
}
}
}
catch (const DependencyException& e)
@ -186,7 +313,7 @@ namespace TestImpact
AZ_Printf(
LogCallSite,
AZStd::string::format(
"No test impact analysis data found for suite '%s' at %s\n", GetSuiteTypeName(m_suiteFilter).c_str(), m_sparTIAFile.c_str()).c_str());
"No test impact analysis data found for suite '%s' at %s\n", SuiteTypeAsString(m_suiteFilter).c_str(), m_sparTiaFile.c_str()).c_str());
}
}
@ -230,7 +357,7 @@ namespace TestImpact
}
}
AZStd::pair<AZStd::vector<const TestTarget*>, AZStd::vector<const TestTarget*>> Runtime::SelectCoveringTestTargetsAndUpdateEnumerationCache(
AZStd::pair<AZStd::vector<const TestTarget*>, AZStd::vector<const TestTarget*>> Runtime::SelectCoveringTestTargets(
const ChangeList& changeList,
Policy::TestPrioritization testPrioritizationPolicy)
{
@ -243,9 +370,6 @@ namespace TestImpact
// Populate a set with the selected test targets so that we can infer the discarded test targets not selected for this change list
const AZStd::unordered_set<const TestTarget*> selectedTestTargetSet(selectedTestTargets.begin(), selectedTestTargets.end());
// Update the enumeration caches of mutated targets regardless of the current sharding policy
EnumerateMutatedTestTargets(changeDependencyList);
// The test targets in the main list not in the selected test target set are the test targets not selected for this change list
for (const auto& testTarget : m_dynamicDependencyMap->GetTestTargetList().GetTargets())
{
@ -287,7 +411,7 @@ namespace TestImpact
void Runtime::ClearDynamicDependencyMapAndRemoveExistingFile()
{
m_dynamicDependencyMap->ClearAllSourceCoverage();
DeleteFile(m_sparTIAFile);
DeleteFile(m_sparTiaFile);
}
SourceCoveringTestsList Runtime::CreateSourceCoveringTestFromTestCoverages(const AZStd::vector<TestEngineInstrumentedRun>& jobs)
@ -368,9 +492,9 @@ namespace TestImpact
}
m_dynamicDependencyMap->ReplaceSourceCoverage(sourceCoverageTestsList);
const auto sparTIA = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTIAData = SerializeSourceCoveringTestsList(sparTIA);
WriteFileContents<RuntimeException>(sparTIAData, m_sparTIAFile);
const auto sparTia = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTiaData = SerializeSourceCoveringTestsList(sparTia);
WriteFileContents<RuntimeException>(sparTiaData, m_sparTiaFile);
m_hasImpactAnalysisData = true;
}
catch(const RuntimeException& e)
@ -386,11 +510,42 @@ namespace TestImpact
}
}
Client::SequenceReport Runtime::RegularTestSequence(
PolicyStateBase Runtime::GeneratePolicyStateBase() const
{
PolicyStateBase policyState;
policyState.m_executionFailurePolicy = m_executionFailurePolicy;
policyState.m_failedTestCoveragePolicy = m_failedTestCoveragePolicy;
policyState.m_integrityFailurePolicy = m_integrationFailurePolicy;
policyState.m_targetOutputCapture = m_targetOutputCapture;
policyState.m_testFailurePolicy = m_testFailurePolicy;
policyState.m_testShardingPolicy = m_testShardingPolicy;
return policyState;
}
SequencePolicyState Runtime::GenerateSequencePolicyState() const
{
return { GeneratePolicyStateBase() };
}
SafeImpactAnalysisSequencePolicyState Runtime::GenerateSafeImpactAnalysisSequencePolicyState(
Policy::TestPrioritization testPrioritizationPolicy) const
{
return { GeneratePolicyStateBase(), testPrioritizationPolicy };
}
ImpactAnalysisSequencePolicyState Runtime::GenerateImpactAnalysisSequencePolicyState(
Policy::TestPrioritization testPrioritizationPolicy, Policy::DynamicDependencyMap dynamicDependencyMapPolicy) const
{
return { GeneratePolicyStateBase(), testPrioritizationPolicy, dynamicDependencyMapPolicy };
}
Client::RegularSequenceReport Runtime::RegularTestSequence(
AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
AZStd::optional<TestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SequenceReport>> testSequenceEndCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::RegularSequenceReport>> testSequenceEndCallback,
AZStd::optional<TestRunCompleteCallback> testCompleteCallback)
{
const Timer sequenceTimer;
@ -434,7 +589,11 @@ namespace TestImpact
const auto testRunDuration = testRunTimer.GetElapsedMs();
// Generate the sequence report for the client
const auto sequenceReport = Client::SequenceReport(
const auto sequenceReport = Client::RegularSequenceReport(
m_maxConcurrency,
testTargetTimeout,
globalTimeout,
GenerateSequencePolicyState(),
m_suiteFilter,
selectedTests,
GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));
@ -448,95 +607,6 @@ namespace TestImpact
return sequenceReport;
}
//! Wrapper for the impact analysis test sequence to handle both the updating and non-updating policies through a common pathway.
//! @tparam TestRunnerFunctor The functor for running the specified tests.
//! @tparam TestJob The test engine job type returned by the functor.
//! @param suiteType The suite type used for this sequence.
//! @param timer The timer to use for the test run timings.
//! @param testRunner The test runner functor to use for each of the test runs.
//! @param includedSelectedTestTargets The subset of test targets that were selected to run and not also fully excluded from running.
//! @param excludedSelectedTestTargets The subset of test targets that were selected to run but were fully excluded running.
//! @param discardedTestTargets The subset of test targets that were discarded from the test selection and will not be run.
//! @param globalTimeout The maximum duration the entire test sequence may run for (infinite if empty).
//! @param testSequenceStartCallback The client function to be called after the test targets have been selected but prior to running the tests.
//! @param testSequenceCompleteCallback The client function to be called after the test sequence has completed.
//! @param testRunCompleteCallback The client function to be called after an individual test run has completed.
//! @param updateCoverage The function to call to update the dynamic dependency map with test coverage (if any).
template<typename TestRunnerFunctor, typename TestJob>
Client::ImpactAnalysisSequenceReport ImpactAnalysisTestSequenceWrapper(
SuiteType suiteType,
const Timer& sequenceTimer,
const TestRunnerFunctor& testRunner,
const AZStd::vector<const TestTarget*>& includedSelectedTestTargets,
const AZStd::vector<const TestTarget*>& excludedSelectedTestTargets,
const AZStd::vector<const TestTarget*>& discardedTestTargets,
const AZStd::vector<const TestTarget*>& draftedTestTargets,
const AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
AZStd::optional<ImpactAnalysisTestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::ImpactAnalysisSequenceReport>> testSequenceEndCallback,
AZStd::optional<TestRunCompleteCallback> testCompleteCallback,
AZStd::optional<AZStd::function<void(const AZStd::vector<TestJob>& jobs)>> updateCoverage)
{
AZStd::optional<AZStd::chrono::milliseconds> sequenceTimeout = globalTimeout;
// Extract the client facing representation of selected, discarded and drafted test targets
const Client::TestRunSelection selectedTests(
ExtractTestTargetNames(includedSelectedTestTargets), ExtractTestTargetNames(excludedSelectedTestTargets));
const auto discardedTests = ExtractTestTargetNames(discardedTestTargets);
const auto draftedTests = ExtractTestTargetNames(draftedTestTargets);
// Inform the client that the sequence is about to start
if (testSequenceStartCallback.has_value())
{
(*testSequenceStartCallback)(suiteType, selectedTests, discardedTests, draftedTests);
}
// We share the test run complete handler between the selected and drafted test runs as to present them together as one
// continuous test sequence to the client rather than two discrete test runs
const size_t totalNumTestRuns = includedSelectedTestTargets.size() + draftedTestTargets.size();
TestRunCompleteCallbackHandler testRunCompleteHandler(totalNumTestRuns, testCompleteCallback);
// Run the selected test targets and collect the test run results
const Timer selectedTestRunTimer;
const auto [selectedResult, selectedTestJobs] = testRunner(includedSelectedTestTargets, testRunCompleteHandler, globalTimeout);
const auto selectedTestRunDuration = selectedTestRunTimer.GetElapsedMs();
// Carry the remaining global sequence time over to the drafted test run
if (globalTimeout.has_value())
{
const auto elapsed = selectedTestRunDuration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
}
// Run the drafted test targets and collect the test run results
Timer draftedTestRunTimer;
const auto [draftedResult, draftedTestJobs] = testRunner(draftedTestTargets, testRunCompleteHandler, globalTimeout);
const auto draftedTestRunDuration = draftedTestRunTimer.GetElapsedMs();
// Generate the sequence report for the client
const auto sequenceReport = Client::ImpactAnalysisSequenceReport(
suiteType,
selectedTests,
discardedTests,
draftedTests,
GenerateTestRunReport(selectedResult, selectedTestRunTimer.GetStartTimePointRelative(sequenceTimer), selectedTestRunDuration, selectedTestJobs),
GenerateTestRunReport(draftedResult, draftedTestRunTimer.GetStartTimePointRelative(sequenceTimer), draftedTestRunDuration, draftedTestJobs));
// Inform the client that the sequence has ended
if (testSequenceEndCallback.has_value())
{
(*testSequenceEndCallback)(sequenceReport);
}
// Update the dynamic dependency map with the latest coverage data (if any)
if (updateCoverage.has_value())
{
(*updateCoverage)(ConcatenateVectors(selectedTestJobs, draftedTestJobs));
}
return sequenceReport;
}
Client::ImpactAnalysisSequenceReport Runtime::ImpactAnalysisTestSequence(
const ChangeList& changeList,
Policy::TestPrioritization testPrioritizationPolicy,
@ -550,10 +620,30 @@ namespace TestImpact
const Timer sequenceTimer;
// Draft in the test targets that have no coverage entries in the dynamic dependency map
AZStd::vector<const TestTarget*> draftedTestTargets = m_dynamicDependencyMap->GetNotCoveringTests();
const AZStd::vector<const TestTarget*> draftedTestTargets = m_dynamicDependencyMap->GetNotCoveringTests();
// The test targets that were selected for the change list by the dynamic dependency map and the test targets that were not
auto [selectedTestTargets, discardedTestTargets] = SelectCoveringTestTargetsAndUpdateEnumerationCache(changeList, testPrioritizationPolicy);
const auto selectCoveringTestTargetsAndPruneDraftedFromDiscarded =
[this, &draftedTestTargets, &changeList, testPrioritizationPolicy]()
{
// The test targets that were selected for the change list by the dynamic dependency map and the test targets that were not
const auto [selectedTestTargets, discardedTestTargets] =
SelectCoveringTestTargets(changeList, testPrioritizationPolicy);
const AZStd::unordered_set<const TestTarget*> draftedTestTargetsSet(draftedTestTargets.begin(), draftedTestTargets.end());
AZStd::vector<const TestTarget*> discardedNotDraftedTestTargets;
for (const auto* testTarget : discardedTestTargets)
{
if (!draftedTestTargetsSet.count(testTarget))
{
discardedNotDraftedTestTargets.push_back(testTarget);
}
}
return AZStd::pair{ selectedTestTargets, discardedNotDraftedTestTargets };
};
const auto [selectedTestTargets, discardedTestTargets] = selectCoveringTestTargetsAndPruneDraftedFromDiscarded();
// The subset of selected test targets that are not on the configuration's exclude list and those that are
auto [includedSelectedTestTargets, excludedSelectedTestTargets] = SelectTestTargetsByExcludeList(selectedTestTargets);
@ -604,6 +694,8 @@ namespace TestImpact
};
return ImpactAnalysisTestSequenceWrapper(
m_maxConcurrency,
GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
m_suiteFilter,
sequenceTimer,
instrumentedTestRun,
@ -611,6 +703,7 @@ namespace TestImpact
excludedSelectedTestTargets,
discardedTestTargets,
draftedTestTargets,
testTargetTimeout,
globalTimeout,
testSequenceStartCallback,
testSequenceEndCallback,
@ -620,6 +713,8 @@ namespace TestImpact
else
{
return ImpactAnalysisTestSequenceWrapper(
m_maxConcurrency,
GenerateImpactAnalysisSequencePolicyState(testPrioritizationPolicy, dynamicDependencyMapPolicy),
m_suiteFilter,
sequenceTimer,
regularTestRun,
@ -627,6 +722,7 @@ namespace TestImpact
excludedSelectedTestTargets,
discardedTestTargets,
draftedTestTargets,
testTargetTimeout,
globalTimeout,
testSequenceStartCallback,
testSequenceEndCallback,
@ -645,13 +741,15 @@ namespace TestImpact
AZStd::optional<TestRunCompleteCallback> testCompleteCallback)
{
const Timer sequenceTimer;
auto sequenceTimeout = globalTimeout;
TestRunData<TestEngineInstrumentedRun> selectedTestRunData, draftedTestRunData;
TestRunData<TestEngineRegularRun> discardedTestRunData;
AZStd::optional<AZStd::chrono::milliseconds> sequenceTimeout = globalTimeout;
// Draft in the test targets that have no coverage entries in the dynamic dependency map
AZStd::vector<const TestTarget*> draftedTestTargets = m_dynamicDependencyMap->GetNotCoveringTests();
// The test targets that were selected for the change list by the dynamic dependency map and the test targets that were not
auto [selectedTestTargets, discardedTestTargets] = SelectCoveringTestTargetsAndUpdateEnumerationCache(changeList, testPrioritizationPolicy);
const auto [selectedTestTargets, discardedTestTargets] = SelectCoveringTestTargets(changeList, testPrioritizationPolicy);
// The subset of selected test targets that are not on the configuration's exclude list and those that are
auto [includedSelectedTestTargets, excludedSelectedTestTargets] = SelectTestTargetsByExcludeList(selectedTestTargets);
@ -675,71 +773,107 @@ namespace TestImpact
// continuous test sequence to the client rather than three discrete test runs
const size_t totalNumTestRuns = includedSelectedTestTargets.size() + draftedTestTargets.size() + includedDiscardedTestTargets.size();
TestRunCompleteCallbackHandler testRunCompleteHandler(totalNumTestRuns, testCompleteCallback);
// Functor for running instrumented test targets
const auto instrumentedTestRun =
[this, &testTargetTimeout, &sequenceTimeout, &testRunCompleteHandler](const AZStd::vector<const TestTarget*>& testsTargets)
{
return m_testEngine->InstrumentedRun(
testsTargets,
m_testShardingPolicy,
m_executionFailurePolicy,
m_integrationFailurePolicy,
m_testFailurePolicy,
m_targetOutputCapture,
testTargetTimeout,
sequenceTimeout,
AZStd::ref(testRunCompleteHandler));
};
// Run the selected test targets and collect the test run results
const Timer selectedTestRunTimer;
const auto [selectedResult, selectedTestJobs] = m_testEngine->InstrumentedRun(
includedSelectedTestTargets,
m_testShardingPolicy,
m_executionFailurePolicy,
m_integrationFailurePolicy,
m_testFailurePolicy,
m_targetOutputCapture,
testTargetTimeout,
sequenceTimeout,
AZStd::ref(testRunCompleteHandler));
const auto selectedTestRunDuration = selectedTestRunTimer.GetElapsedMs();
// Functor for running uninstrumented test targets
const auto regularTestRun =
[this, &testTargetTimeout, &sequenceTimeout, &testRunCompleteHandler](const AZStd::vector<const TestTarget*>& testsTargets)
{
return m_testEngine->RegularRun(
testsTargets,
m_testShardingPolicy,
m_executionFailurePolicy,
m_testFailurePolicy,
m_targetOutputCapture,
testTargetTimeout,
sequenceTimeout,
AZStd::ref(testRunCompleteHandler));
};
// Carry the remaining global sequence time over to the discarded test run
if (globalTimeout.has_value())
// Functor for running the specified test targets with the specified test runner and collecting the test run data
const auto gatherTestRunData = [&sequenceTimer]
(const AZStd::vector<const TestTarget*>& testsTargets, const auto& testRunner, auto& testRunData)
{
const Timer testRunTimer;
testRunData.m_relativeStartTime = testRunTimer.GetStartTimePointRelative(sequenceTimer);
auto [result, jobs] = testRunner(testsTargets);
testRunData.m_result = result;
testRunData.m_jobs = AZStd::move(jobs);
testRunData.m_duration = testRunTimer.GetElapsedMs();
};
if (!includedSelectedTestTargets.empty())
{
const auto elapsed = selectedTestRunDuration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
}
// Run the selected test targets and collect the test run results
gatherTestRunData(includedSelectedTestTargets, instrumentedTestRun, selectedTestRunData);
// Run the discarded test targets and collect the test run results
const Timer discardedTestRunTimer;
const auto [discardedResult, discardedTestJobs] = m_testEngine->RegularRun(
includedDiscardedTestTargets,
m_testShardingPolicy,
m_executionFailurePolicy,
m_testFailurePolicy,
m_targetOutputCapture,
testTargetTimeout,
sequenceTimeout,
AZStd::ref(testRunCompleteHandler));
const auto discardedTestRunDuration = discardedTestRunTimer.GetElapsedMs();
// Carry the remaining global sequence time over to the discarded test run
if (globalTimeout.has_value())
{
const auto elapsed = selectedTestRunData.m_duration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
}
}
// Carry the remaining global sequence time over to the drafted test run
if (globalTimeout.has_value())
if (!includedDiscardedTestTargets.empty())
{
const auto elapsed = selectedTestRunDuration + discardedTestRunDuration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
// Run the discarded test targets and collect the test run results
gatherTestRunData(includedDiscardedTestTargets, regularTestRun, discardedTestRunData);
// Carry the remaining global sequence time over to the drafted test run
if (globalTimeout.has_value())
{
const auto elapsed = selectedTestRunData.m_duration + discardedTestRunData.m_duration;
sequenceTimeout = elapsed < globalTimeout.value() ? globalTimeout.value() - elapsed : AZStd::chrono::milliseconds(0);
}
}
// Run the drafted test targets and collect the test run results
const Timer draftedTestRunTimer;
const auto [draftedResult, draftedTestJobs] = m_testEngine->InstrumentedRun(
draftedTestTargets,
m_testShardingPolicy,
m_executionFailurePolicy,
m_integrationFailurePolicy,
m_testFailurePolicy,
m_targetOutputCapture,
testTargetTimeout,
sequenceTimeout,
AZStd::ref(testRunCompleteHandler));
const auto draftedTestRunDuration = draftedTestRunTimer.GetElapsedMs();
if (!draftedTestTargets.empty())
{
// Run the drafted test targets and collect the test run results
gatherTestRunData(draftedTestTargets, instrumentedTestRun, draftedTestRunData);
}
// Generate the sequence report for the client
const auto sequenceReport = Client::SafeImpactAnalysisSequenceReport(
m_maxConcurrency,
testTargetTimeout,
globalTimeout,
GenerateSafeImpactAnalysisSequencePolicyState(testPrioritizationPolicy),
m_suiteFilter,
selectedTests,
discardedTests,
draftedTests,
GenerateTestRunReport(selectedResult, selectedTestRunTimer.GetStartTimePointRelative(sequenceTimer), selectedTestRunDuration, selectedTestJobs),
GenerateTestRunReport(discardedResult, discardedTestRunTimer.GetStartTimePointRelative(sequenceTimer), discardedTestRunDuration, discardedTestJobs),
GenerateTestRunReport(draftedResult, draftedTestRunTimer.GetStartTimePointRelative(sequenceTimer), draftedTestRunDuration, draftedTestJobs));
GenerateTestRunReport(
selectedTestRunData.m_result,
selectedTestRunData.m_relativeStartTime,
selectedTestRunData.m_duration,
selectedTestRunData.m_jobs),
GenerateTestRunReport(
discardedTestRunData.m_result,
discardedTestRunData.m_relativeStartTime,
discardedTestRunData.m_duration,
discardedTestRunData.m_jobs),
GenerateTestRunReport(
draftedTestRunData.m_result,
draftedTestRunData.m_relativeStartTime,
draftedTestRunData.m_duration,
draftedTestRunData.m_jobs));
// Inform the client that the sequence has ended
if (testSequenceEndCallback.has_value())
@ -747,15 +881,15 @@ namespace TestImpact
(*testSequenceEndCallback)(sequenceReport);
}
UpdateAndSerializeDynamicDependencyMap(ConcatenateVectors(selectedTestJobs, draftedTestJobs));
UpdateAndSerializeDynamicDependencyMap(ConcatenateVectors(selectedTestRunData.m_jobs, draftedTestRunData.m_jobs));
return sequenceReport;
}
Client::SequenceReport Runtime::SeededTestSequence(
Client::SeedSequenceReport Runtime::SeededTestSequence(
AZStd::optional<AZStd::chrono::milliseconds> testTargetTimeout,
AZStd::optional<AZStd::chrono::milliseconds> globalTimeout,
AZStd::optional<TestSequenceStartCallback> testSequenceStartCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SequenceReport>> testSequenceEndCallback,
AZStd::optional<TestSequenceCompleteCallback<Client::SeedSequenceReport>> testSequenceEndCallback,
AZStd::optional<TestRunCompleteCallback> testCompleteCallback)
{
const Timer sequenceTimer;
@ -799,7 +933,11 @@ namespace TestImpact
const auto testRunDuration = testRunTimer.GetElapsedMs();
// Generate the sequence report for the client
const auto sequenceReport = Client::SequenceReport(
const auto sequenceReport = Client::SeedSequenceReport(
m_maxConcurrency,
testTargetTimeout,
globalTimeout,
GenerateSequencePolicyState(),
m_suiteFilter,
selectedTests,
GenerateTestRunReport(result, testRunTimer.GetStartTimePointRelative(sequenceTimer), testRunDuration, testJobs));

@ -6,7 +6,7 @@
*
*/
#include <TestImpactFramework/TestImpactFileUtils.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <TestImpactFramework/TestImpactRuntimeException.h>
#include <TestImpactRuntimeUtils.h>

@ -38,34 +38,44 @@ namespace TestImpact
//! Extracts the name information from the specified test targets.
AZStd::vector<AZStd::string> ExtractTestTargetNames(const AZStd::vector<const TestTarget*>& testTargets);
//! Generates a test run failure report from the specified test engine job information.
//! Generates the client-facing test results from the specified test engine job information.
//! @tparam TestJob The test engine job type.
template<typename TestJob>
AZStd::vector<Client::TestCaseFailure> GenerateTestCaseFailures(const TestJob& testJob)
AZStd::vector<Client::Test> GenerateClientTests(const TestJob& testJob)
{
AZStd::vector<Client::TestCaseFailure> testCaseFailures;
AZStd::vector<Client::Test> tests;
if (testJob.GetTestRun().has_value())
{
for (const auto& testSuite : testJob.GetTestRun()->GetTestSuites())
{
AZStd::vector<Client::TestFailure> testFailures;
for (const auto& testCase : testSuite.m_tests)
{
if (testCase.m_result.value_or(TestRunResult::Passed) == TestRunResult::Failed)
auto result = Client::TestResult::NotRun;
if (testCase.m_result.has_value())
{
testFailures.push_back(Client::TestFailure(testCase.m_name, "No error message retrieved"));
if (testCase.m_result.value() == TestRunResult::Passed)
{
result = Client::TestResult::Passed;
}
else if (testCase.m_result.value() == TestRunResult::Failed)
{
result = Client::TestResult::Failed;
}
else
{
throw RuntimeException(AZStd::string::format(
"Unexpected test run result: %u", aznumeric_cast<AZ::u32>(testCase.m_result.value())));
}
}
}
if (!testFailures.empty())
{
testCaseFailures.push_back(Client::TestCaseFailure(testSuite.m_name, AZStd::move(testFailures)));
const auto name = AZStd::string::format("%s.%s", testSuite.m_name.c_str(), testCase.m_name.c_str());
tests.push_back(Client::Test(name, result));
}
}
}
return testCaseFailures;
return tests;
}
template<typename TestJob>
@ -75,11 +85,11 @@ namespace TestImpact
AZStd::chrono::milliseconds duration,
const AZStd::vector<TestJob>& testJobs)
{
AZStd::vector<Client::TestRun> passingTests;
AZStd::vector<Client::TestRunWithTestFailures> failingTests;
AZStd::vector<Client::TestRun> executionFailureTests;
AZStd::vector<Client::TestRun> timedOutTests;
AZStd::vector<Client::TestRun> unexecutedTests;
AZStd::vector<Client::PassingTestRun> passingTests;
AZStd::vector<Client::FailingTestRun> failingTests;
AZStd::vector<Client::TestRunWithExecutionFailure> executionFailureTests;
AZStd::vector<Client::TimedOutTestRun> timedOutTests;
AZStd::vector<Client::UnexecutedTestRun> unexecutedTests;
for (const auto& testJob : testJobs)
{
@ -88,7 +98,7 @@ namespace TestImpact
AZStd::chrono::high_resolution_clock::time_point() +
AZStd::chrono::duration_cast<AZStd::chrono::milliseconds>(testJob.GetStartTime() - startTime);
Client::TestRun clientTestRun(
Client::TestRunBase clientTestRun(
testJob.GetTestTarget()->GetName(), testJob.GetCommandString(), relativeStartTime, testJob.GetDuration(),
testJob.GetTestResult());
@ -96,27 +106,27 @@ namespace TestImpact
{
case Client::TestRunResult::FailedToExecute:
{
executionFailureTests.push_back(clientTestRun);
executionFailureTests.emplace_back(AZStd::move(clientTestRun));
break;
}
case Client::TestRunResult::NotRun:
{
unexecutedTests.push_back(clientTestRun);
unexecutedTests.emplace_back(AZStd::move(clientTestRun));
break;
}
case Client::TestRunResult::Timeout:
{
timedOutTests.push_back(clientTestRun);
timedOutTests.emplace_back(AZStd::move(clientTestRun));
break;
}
case Client::TestRunResult::AllTestsPass:
{
passingTests.push_back(clientTestRun);
passingTests.emplace_back(AZStd::move(clientTestRun), GenerateClientTests(testJob));
break;
}
case Client::TestRunResult::TestFailures:
{
failingTests.emplace_back(AZStd::move(clientTestRun), GenerateTestCaseFailures(testJob));
failingTests.emplace_back(AZStd::move(clientTestRun), GenerateClientTests(testJob));
break;
}
default:

@ -0,0 +1,244 @@
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include <TestImpactFramework/TestImpactException.h>
#include <TestImpactFramework/TestImpactUtils.h>
#include <AzCore/std/functional.h>
namespace TestImpact
{
//! Deletes the files that match the pattern from the specified directory.
//! @param path The path to the directory to pattern match the files for deletion.
//! @param pattern The pattern to match files for deletion.
//! @return The number of files deleted.
size_t DeleteFiles(const RepoPath& path, const AZStd::string& pattern)
{
size_t numFilesDeleted = 0;
AZ::IO::SystemFile::FindFiles(
AZStd::string::format("%s/%s", path.c_str(), pattern.c_str()).c_str(),
[&path, &numFilesDeleted](const char* file, bool isFile)
{
if (isFile)
{
AZ::IO::SystemFile::Delete(AZStd::string::format("%s/%s", path.c_str(), file).c_str());
numFilesDeleted++;
}
return true;
});
return numFilesDeleted;
}
//! Deletes the specified file.
void DeleteFile(const RepoPath& file)
{
DeleteFiles(file.ParentPath(), file.Filename().Native());
}
//! Returns the user-friendly name for the specified test suite type.
AZStd::string SuiteTypeAsString(SuiteType suiteType)
{
switch (suiteType)
{
case SuiteType::Main:
return "main";
case SuiteType::Periodic:
return "periodic";
case SuiteType::Sandbox:
return "sandbox";
default:
throw(Exception("Unexpected suite type"));
}
}
AZStd::string SequenceReportTypeAsString(Client::SequenceReportType type)
{
switch (type)
{
case Client::SequenceReportType::RegularSequence:
return "regular";
case Client::SequenceReportType::SeedSequence:
return "seed";
case Client::SequenceReportType::ImpactAnalysisSequence:
return "impact_analysis";
case Client::SequenceReportType::SafeImpactAnalysisSequence:
return "safe_impact_analysis";
default:
throw(Exception(AZStd::string::format("Unexpected sequence report type: %u", aznumeric_cast<AZ::u32>(type))));
}
}
AZStd::string TestSequenceResultAsString(TestSequenceResult result)
{
switch (result)
{
case TestSequenceResult::Failure:
return "failure";
case TestSequenceResult::Success:
return "success";
case TestSequenceResult::Timeout:
return "timeout";
default:
throw(Exception(AZStd::string::format("Unexpected test sequence result: %u", aznumeric_cast<AZ::u32>(result))));
}
}
AZStd::string TestRunResultAsString(Client::TestRunResult result)
{
switch (result)
{
case Client::TestRunResult::AllTestsPass:
return "all_tests_pass";
case Client::TestRunResult::FailedToExecute:
return "failed_to_execute";
case Client::TestRunResult::NotRun:
return "not_run";
case Client::TestRunResult::TestFailures:
return "test_failures";
case Client::TestRunResult::Timeout:
return "timeout";
default:
throw(Exception(AZStd::string::format("Unexpected test run result: %u", aznumeric_cast<AZ::u32>(result))));
}
}
AZStd::string ExecutionFailurePolicyAsString(Policy::ExecutionFailure executionFailurePolicy)
{
switch (executionFailurePolicy)
{
case Policy::ExecutionFailure::Abort:
return "abort";
case Policy::ExecutionFailure::Continue:
return "continue";
case Policy::ExecutionFailure::Ignore:
return "ignore";
default:
throw(Exception(
AZStd::string::format("Unexpected execution failure policy: %u", aznumeric_cast<AZ::u32>(executionFailurePolicy))));
}
}
AZStd::string FailedTestCoveragePolicyAsString(Policy::FailedTestCoverage failedTestCoveragePolicy)
{
switch (failedTestCoveragePolicy)
{
case Policy::FailedTestCoverage::Discard:
return "discard";
case Policy::FailedTestCoverage::Keep:
return "keep";
default:
throw(Exception(
AZStd::string::format("Unexpected failed test coverage policy: %u", aznumeric_cast<AZ::u32>(failedTestCoveragePolicy))));
}
}
AZStd::string TestPrioritizationPolicyAsString(Policy::TestPrioritization testPrioritizationPolicy)
{
switch (testPrioritizationPolicy)
{
case Policy::TestPrioritization::DependencyLocality:
return "dependency_locality";
case Policy::TestPrioritization::None:
return "none";
default:
throw(Exception(
AZStd::string::format("Unexpected test prioritization policy: %u", aznumeric_cast<AZ::u32>(testPrioritizationPolicy))));
}
}
AZStd::string TestFailurePolicyAsString(Policy::TestFailure testFailurePolicy)
{
switch (testFailurePolicy)
{
case Policy::TestFailure::Abort:
return "abort";
case Policy::TestFailure::Continue:
return "continue";
default:
throw(
Exception(AZStd::string::format("Unexpected test failure policy: %u", aznumeric_cast<AZ::u32>(testFailurePolicy))));
}
}
AZStd::string IntegrityFailurePolicyAsString(Policy::IntegrityFailure integrityFailurePolicy)
{
switch (integrityFailurePolicy)
{
case Policy::IntegrityFailure::Abort:
return "abort";
case Policy::IntegrityFailure::Continue:
return "continue";
default:
throw(Exception(
AZStd::string::format("Unexpected integration failure policy: %u", aznumeric_cast<AZ::u32>(integrityFailurePolicy))));
}
}
AZStd::string DynamicDependencyMapPolicyAsString(Policy::DynamicDependencyMap dynamicDependencyMapPolicy)
{
switch (dynamicDependencyMapPolicy)
{
case Policy::DynamicDependencyMap::Discard:
return "discard";
case Policy::DynamicDependencyMap::Update:
return "update";
default:
throw(Exception(AZStd::string::format(
"Unexpected dynamic dependency map policy: %u", aznumeric_cast<AZ::u32>(dynamicDependencyMapPolicy))));
}
}
AZStd::string TestShardingPolicyAsString(Policy::TestSharding testShardingPolicy)
{
switch (testShardingPolicy)
{
case Policy::TestSharding::Always:
return "always";
case Policy::TestSharding::Never:
return "never";
default:
throw(Exception(
AZStd::string::format("Unexpected test sharding policy: %u", aznumeric_cast<AZ::u32>(testShardingPolicy))));
}
}
AZStd::string TargetOutputCapturePolicyAsString(Policy::TargetOutputCapture targetOutputCapturePolicy)
{
switch (targetOutputCapturePolicy)
{
case Policy::TargetOutputCapture::File:
return "file";
case Policy::TargetOutputCapture::None:
return "none";
case Policy::TargetOutputCapture::StdOut:
return "stdout";
case Policy::TargetOutputCapture::StdOutAndFile:
return "stdout_file";
default:
throw(Exception(
AZStd::string::format("Unexpected target output capture policy: %u", aznumeric_cast<AZ::u32>(targetOutputCapturePolicy))));
}
}
AZStd::string ClientTestResultAsString(Client::TestResult result)
{
switch (result)
{
case Client::TestResult::Failed:
return "failed";
case Client::TestResult::NotRun:
return "not_run";
case Client::TestResult::Passed:
return "passed";
default:
throw(Exception(AZStd::string::format("Unexpected client test case result: %u", aznumeric_cast<AZ::u32>(result))));
}
}
} // namespace TestImpact

@ -16,11 +16,14 @@ set(FILES
Include/TestImpactFramework/TestImpactChangelist.h
Include/TestImpactFramework/TestImpactChangelistSerializer.h
Include/TestImpactFramework/TestImpactChangelistException.h
Include/TestImpactFramework/TestImpactPolicy.h
Include/TestImpactFramework/TestImpactTestSequence.h
Include/TestImpactFramework/TestImpactClientTestSelection.h
Include/TestImpactFramework/TestImpactClientTestRun.h
Include/TestImpactFramework/TestImpactClientSequenceReport.h
Include/TestImpactFramework/TestImpactFileUtils.h
Include/TestImpactFramework/TestImpactUtils.h
Include/TestImpactFramework/TestImpactClientSequenceReportSerializer.h
Include/TestImpactFramework/TestImpactSequenceReportException.h
Source/Artifact/TestImpactArtifactException.h
Source/Artifact/Factory/TestImpactBuildTargetDescriptorFactory.cpp
Source/Artifact/Factory/TestImpactBuildTargetDescriptorFactory.h
@ -125,5 +128,7 @@ set(FILES
Source/TestImpactClientTestRun.cpp
Source/TestImpactClientSequenceReport.cpp
Source/TestImpactChangeListSerializer.cpp
Source/TestImpactClientSequenceReportSerializer.cpp
Source/TestImpactRepoPath.cpp
Source/TestImpactUtils.cpp
)

@ -1,7 +1,8 @@
{
"meta": {
"platform": "${platform}",
"timestamp": "${timestamp}"
"timestamp": "${timestamp}",
"build_config": "${build_config}"
},
"jenkins": {
"use_test_impact_analysis": ${use_tiaf}
@ -32,8 +33,7 @@
"historic": {
"root": "${historic_dir}",
"relative_paths": {
"last_run_hash_file": "last_run.hash",
"last_build_target_list_file": "LastRunBuildTargets.json"
"data": "historic_data.json"
}
}
},

@ -22,10 +22,10 @@ set(LY_TEST_IMPACT_CONSOLE_TARGET "TestImpact.Frontend.Console")
set(LY_TEST_IMPACT_WORKING_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/TestImpactFramework")
# Directory for artifacts generated at runtime
set(LY_TEST_IMPACT_TEMP_DIR "${LY_TEST_IMPACT_WORKING_DIR}/Temp")
set(LY_TEST_IMPACT_TEMP_DIR "${LY_TEST_IMPACT_WORKING_DIR}/$<CONFIG>/Temp")
# Directory for files that persist between runtime runs
set(LY_TEST_IMPACT_PERSISTENT_DIR "${LY_TEST_IMPACT_WORKING_DIR}/Persistent")
set(LY_TEST_IMPACT_PERSISTENT_DIR "${LY_TEST_IMPACT_WORKING_DIR}/$<CONFIG>/Persistent")
# Directory for static artifacts produced as part of the build system generation process
set(LY_TEST_IMPACT_ARTIFACT_DIR "${LY_TEST_IMPACT_WORKING_DIR}/Artifact")
@ -43,7 +43,7 @@ set(LY_TEST_IMPACT_TEST_TYPE_FILE "${LY_TEST_IMPACT_ARTIFACT_DIR}/TestType/All.t
set(LY_TEST_IMPACT_GEM_TARGET_FILE "${LY_TEST_IMPACT_ARTIFACT_DIR}/BuildType/All.gems")
# Path to the config file for each build configuration
set(LY_TEST_IMPACT_CONFIG_FILE_PATH "${LY_TEST_IMPACT_PERSISTENT_DIR}/tiaf.$<CONFIG>.json")
set(LY_TEST_IMPACT_CONFIG_FILE_PATH "${LY_TEST_IMPACT_PERSISTENT_DIR}/tiaf.json")
# Preprocessor directive for the config file path
set(LY_TEST_IMPACT_CONFIG_FILE_PATH_DEFINITION "LY_TEST_IMPACT_DEFAULT_CONFIG_FILE=\"${LY_TEST_IMPACT_CONFIG_FILE_PATH}\"")
@ -379,6 +379,9 @@ function(ly_test_impact_write_config_file CONFIG_TEMPLATE_FILE BIN_DIR)
# Timestamp this config file was generated at
string(TIMESTAMP timestamp "%Y-%m-%d %H:%M:%S")
# Build configuration this config file is being generated for
set(build_config "$<CONFIG>")
# Instrumentation binary
if(NOT LY_TEST_IMPACT_INSTRUMENTATION_BIN)
# No binary specified is not an error, it just means that the test impact analysis part of the framework is disabled

@ -89,7 +89,7 @@
"CONFIGURATION": "profile",
"SCRIPT_PATH": "scripts/build/TestImpactAnalysis/tiaf_driver.py",
"SCRIPT_PARAMETERS":
"--config=\"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/persistent/tiaf.profile.json\" --suite=main --test-failure-policy=continue --src-branch=!BRANCH_NAME! --dst-branch=!CHANGE_TARGET! --pipeline=!PIPELINE_NAME! --dest-commit=!CHANGE_ID! --seeding-branches=!BUILD_SNAPSHOTS! --seeding-pipelines=default"
"--config=\"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/profile/Persistent/tiaf.json\" --src-branch=!BRANCH_NAME! --dst-branch=!BRANCH_NAME! --commit=!CHANGE_ID! --s3-bucket=!TEST_IMPACT_S3_BUCKET! --mars-index-prefix=jonawals --suite=main --test-failure-policy=continue"
}
},
"debug_vs2019": {

@ -6,34 +6,67 @@
#
#
import os
import subprocess
import git
import pathlib
# Returns True if the dst commit descends from the src commit, otherwise False
def is_descendent(src_commit_hash, dst_commit_hash):
if src_commit_hash is None or dst_commit_hash is None:
return False
result = subprocess.run(["git", "merge-base", "--is-ancestor", src_commit_hash, dst_commit_hash])
return result.returncode == 0
# Attempts to create a diff from the src and dst commits and write to the specified output file
def create_diff_file(src_commit_hash, dst_commit_hash, output_path):
if os.path.isfile(output_path):
os.remove(output_path)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# git diff will only write to the output file if both commit hashes are valid
subprocess.run(["git", "diff", "--name-status", f"--output={output_path}", src_commit_hash, dst_commit_hash])
if not os.path.isfile(output_path):
raise FileNotFoundError(f"Source commit '{src_commit_hash}' and/or destination commit '{dst_commit_hash}' are invalid")
# Basic representation of a repository
# Basic representation of a git repository
class Repo:
def __init__(self, repo_path):
self.__repo = git.Repo(repo_path)
def __init__(self, repo_path: str):
self._repo = git.Repo(repo_path)
# Returns the current branch
@property
def current_branch(self):
branch = self.__repo.active_branch
branch = self._repo.active_branch
return branch.name
def create_diff_file(self, src_commit_hash: str, dst_commit_hash: str, output_path: pathlib.Path):
"""
Attempts to create a diff from the src and dst commits and write to the specified output file.
@param src_commit_hash: The hash for the source commit.
@param dst_commit_hash: The hash for the destination commit.
@param output_path: The path to the file to write the diff to.
"""
try:
# Remove the existing file (if any) and create the parent directory
output_path.unlink(missing_ok=True)
output_path.parent.mkdir(parents=True, exist_ok=True)
except EnvironmentError as e:
raise RuntimeError(f"Could not create path for output file '{output_path}'") from e
# git diff will only write to the output file if both commit hashes are valid
subprocess.run(["git", "diff", "--name-status", f"--output={output_path}", src_commit_hash, dst_commit_hash])
if not output_path.is_file():
raise RuntimeError(f"Source commit '{src_commit_hash}' and/or destination commit '{dst_commit_hash}' are invalid")
def is_descendent(self, src_commit_hash: str, dst_commit_hash: str):
"""
Determines whether or not dst_commit is a descendent of src_commit.
@param src_commit_hash: The hash for the source commit.
@param dst_commit_hash: The hash for the destination commit.
@return: True if the dst commit descends from the src commit, otherwise False.
"""
if not src_commit_hash or not dst_commit_hash:
return False
result = subprocess.run(["git", "merge-base", "--is-ancestor", src_commit_hash, dst_commit_hash])
return result.returncode == 0
# Returns the distance between two commits
def commit_distance(self, src_commit_hash: str, dst_commit_hash: str):
"""
Determines the number of commits between src_commit and dst_commit.
@param src_commit_hash: The hash for the source commit.
@param dst_commit_hash: The hash for the destination commit.
@return: The distance between src_commit and dst_commit (if both are valid commits), otherwise None.
"""
if not src_commit_hash or not dst_commit_hash:
return None
commits = self._repo.iter_commits(src_commit_hash + '..' + dst_commit_hash)
return len(list(commits))
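# A minimal usage sketch of the Repo helper above; the clone path, commit hashes
# and output file below are illustrative assumptions rather than values from this change.
example_repo = Repo("/path/to/o3de")
print(example_repo.current_branch)
print(example_repo.is_descendent("0a1b2c3", "4d5e6f7"))
print(example_repo.commit_distance("0a1b2c3", "4d5e6f7"))
example_repo.create_diff_file("0a1b2c3", "4d5e6f7", pathlib.Path("Temp/changelist.diff"))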

@ -0,0 +1,452 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import datetime
import json
import socket
from tiaf_logger import get_logger
logger = get_logger(__file__)
MARS_JOB_KEY = "job"
SRC_COMMIT_KEY = "src_commit"
DST_COMMIT_KEY = "dst_commit"
COMMIT_DISTANCE_KEY = "commit_distance"
SRC_BRANCH_KEY = "src_branch"
DST_BRANCH_KEY = "dst_branch"
SUITE_KEY = "suite"
SOURCE_OF_TRUTH_BRANCH_KEY = "source_of_truth_branch"
IS_SOURCE_OF_TRUTH_BRANCH_KEY = "is_source_of_truth_branch"
USE_TEST_IMPACT_ANALYSIS_KEY = "use_test_impact_analysis"
HAS_CHANGE_LIST_KEY = "has_change_list"
HAS_HISTORIC_DATA_KEY = "has_historic_data"
S3_BUCKET_KEY = "s3_bucket"
DRIVER_ARGS_KEY = "driver_args"
RUNTIME_ARGS_KEY = "runtime_args"
RUNTIME_RETURN_CODE_KEY = "return_code"
NAME_KEY = "name"
RESULT_KEY = "result"
NUM_PASSING_TESTS_KEY = "num_passing_tests"
NUM_FAILING_TESTS_KEY = "num_failing_tests"
NUM_DISABLED_TESTS_KEY = "num_disabled_tests"
COMMAND_ARGS_STRING = "command_args"
NUM_PASSING_TEST_RUNS_KEY = "num_passing_test_runs"
NUM_FAILING_TEST_RUNS_KEY = "num_failing_test_runs"
NUM_EXECUTION_FAILURE_TEST_RUNS_KEY = "num_execution_failure_test_runs"
NUM_TIMED_OUT_TEST_RUNS_KEY = "num_timed_out_test_runs"
NUM_UNEXECUTED_TEST_RUNS_KEY = "num_unexecuted_test_runs"
TOTAL_NUM_PASSING_TESTS_KEY = "total_num_passing_tests"
TOTAL_NUM_FAILING_TESTS_KEY = "total_num_failing_tests"
TOTAL_NUM_DISABLED_TESTS_KEY = "total_num_disabled_tests"
START_TIME_KEY = "start_time"
END_TIME_KEY = "end_time"
DURATION_KEY = "duration"
INCLUDED_TEST_RUNS_KEY = "included_test_runs"
EXCLUDED_TEST_RUNS_KEY = "excluded_test_runs"
NUM_INCLUDED_TEST_RUNS_KEY = "num_included_test_runs"
NUM_EXCLUDED_TEST_RUNS_KEY = "num_excluded_test_runs"
TOTAL_NUM_TEST_RUNS_KEY = "total_num_test_runs"
PASSING_TEST_RUNS_KEY = "passing_test_runs"
FAILING_TEST_RUNS_KEY = "failing_test_runs"
EXECUTION_FAILURE_TEST_RUNS_KEY = "execution_failure_test_runs"
TIMED_OUT_TEST_RUNS_KEY = "timed_out_test_runs"
UNEXECUTED_TEST_RUNS_KEY = "unexecuted_test_runs"
TOTAL_NUM_PASSING_TEST_RUNS_KEY = "total_num_passing_test_runs"
TOTAL_NUM_FAILING_TEST_RUNS_KEY = "total_num_failing_test_runs"
TOTAL_NUM_EXECUTION_FAILURE_TEST_RUNS_KEY = "total_num_execution_failure_test_runs"
TOTAL_NUM_TIMED_OUT_TEST_RUNS_KEY = "total_num_timed_out_test_runs"
TOTAL_NUM_UNEXECUTED_TEST_RUNS_KEY = "total_num_unexecuted_test_runs"
SEQUENCE_TYPE_KEY = "type"
IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY = "impact_analysis"
SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY = "safe_impact_analysis"
SEED_SEQUENCE_TYPE_KEY = "seed"
TEST_TARGET_TIMEOUT_KEY = "test_target_timeout"
GLOBAL_TIMEOUT_KEY = "global_timeout"
MAX_CONCURRENCY_KEY = "max_concurrency"
SELECTED_KEY = "selected"
DRAFTED_KEY = "drafted"
DISCARDED_KEY = "discarded"
SELECTED_TEST_RUN_REPORT_KEY = "selected_test_run_report"
DISCARDED_TEST_RUN_REPORT_KEY = "discarded_test_run_report"
DRAFTED_TEST_RUN_REPORT_KEY = "drafted_test_run_report"
SELECTED_TEST_RUNS_KEY = "selected_test_runs"
DRAFTED_TEST_RUNS_KEY = "drafted_test_runs"
DISCARDED_TEST_RUNS_KEY = "discarded_test_runs"
INSTRUMENTATION_KEY = "instrumentation"
EFFICIENCY_KEY = "efficiency"
CONFIG_KEY = "config"
POLICY_KEY = "policy"
CHANGE_LIST_KEY = "change_list"
TEST_RUN_SELECTION_KEY = "test_run_selection"
DYNAMIC_DEPENDENCY_MAP_POLICY_KEY = "dynamic_dependency_map"
DYNAMIC_DEPENDENCY_MAP_POLICY_UPDATE_KEY = "update"
REPORT_KEY = "report"
class FilebeatExn(Exception):
pass
class FilebeatClient(object):
def __init__(self, host="127.0.0.1", port=9000, timeout=20):
self._filebeat_host = host
self._filebeat_port = port
self._socket_timeout = timeout
self._socket = None
self._open_socket()
def send_event(self, payload, index, timestamp=None, pipeline="filebeat"):
if not timestamp:
timestamp = datetime.datetime.utcnow().timestamp()
event = {
"index": index,
"timestamp": timestamp,
"pipeline": pipeline,
"payload": json.dumps(payload)
}
# Serialise event, add new line and encode as UTF-8 before sending to Filebeat.
data = json.dumps(event, sort_keys=True) + "\n"
data = data.encode()
#print(f"-> {data}")
self._send_data(data)
def _open_socket(self):
logger.info(f"Connecting to Filebeat on {self._filebeat_host}:{self._filebeat_port}")
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._socket_timeout)
try:
self._socket.connect((self._filebeat_host, self._filebeat_port))
except (ConnectionError, socket.timeout):
raise FilebeatExn("Failed to connect to Filebeat") from None
def _send_data(self, data):
total_sent = 0
while total_sent < len(data):
try:
sent = self._socket.send(data[total_sent:])
except BrokenPipeError:
logger.error("Filebeat socket closed by peer")
self._socket.close()
self._open_socket()
total_sent = 0
else:
total_sent = total_sent + sent
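For reference, a minimal sketch of how the Filebeat client above might be exercised on its own. It assumes a Filebeat TCP input is listening locally; the host, port, and index name are placeholder assumptions, not values mandated by the framework.

try:
    client = FilebeatClient(host="127.0.0.1", port=9000, timeout=20)
    # Send an arbitrary payload to a hypothetical index.
    client.send_event({"hello": "tiaf"}, "example.tiaf.job")
except FilebeatExn as e:
    logger.error(e)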
def format_timestamp(timestamp: float):
"""
Formats the given floating point timestamp into "yyyy-MM-dd'T'HH:mm:ss.SSSXX" format.
@param timestamp: The timestamp to format.
@return: The formatted timestamp.
"""
return datetime.datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def generate_mars_timestamp(t0_offset_milliseconds: int, t0_timestamp: float):
"""
Generates a MARS timestamp in the format "yyyy-MM-dd'T'HH:mm:ss.SSSXX" by offsetting the T0 timestamp
by the specified amount of milliseconds.
@param t0_offset_milliseconds: The amount of time to offset from T0.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The formatted timestamp offset from T0 by the specified amount of milliseconds.
"""
t0_offset_seconds = get_duration_in_seconds(t0_offset_milliseconds)
t0_offset_timestamp = t0_timestamp + t0_offset_seconds
return format_timestamp(t0_offset_timestamp)
def get_duration_in_seconds(duration_in_milliseconds: int):
"""
Converts the specified duration in milliseconds (as used by TIAF) to seconds (as used by MARS documents).
@param duration_in_milliseconds: The millisecond duration to transform into seconds.
@return: The duration in seconds.
"""
return duration_in_milliseconds * 0.001
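As a quick worked example of the three helpers above (a sketch assuming the mars_utils module shown here is importable, as the driver script does with import mars_utils): a 1500 ms offset from a T0 of 2021-01-01 00:00:00 UTC becomes a MARS timestamp 1.5 seconds later.

import datetime
from mars_utils import format_timestamp, generate_mars_timestamp, get_duration_in_seconds

t0 = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc).timestamp()
print(get_duration_in_seconds(1500))      # 1.5
print(format_timestamp(t0))               # 2021-01-01T00:00:00.000Z
print(generate_mars_timestamp(1500, t0))  # 2021-01-01T00:00:01.500Z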
def generate_mars_job(tiaf_result, driver_args):
"""
Generates a MARS job document using the job meta-data used to drive the TIAF sequence.
@param tiaf_result: The result object generated by the TIAF script.
@param driver_args: The arguments specified to the driver script.
@return: The MARS job document with the job meta-data.
"""
mars_job = {key:tiaf_result[key] for key in
[
SRC_COMMIT_KEY,
DST_COMMIT_KEY,
COMMIT_DISTANCE_KEY,
SRC_BRANCH_KEY,
DST_BRANCH_KEY,
SUITE_KEY,
SOURCE_OF_TRUTH_BRANCH_KEY,
IS_SOURCE_OF_TRUTH_BRANCH_KEY,
USE_TEST_IMPACT_ANALYSIS_KEY,
HAS_CHANGE_LIST_KEY,
HAS_HISTORIC_DATA_KEY,
S3_BUCKET_KEY,
RUNTIME_ARGS_KEY,
RUNTIME_RETURN_CODE_KEY
]}
mars_job[DRIVER_ARGS_KEY] = driver_args
return mars_job
def generate_test_run_list(test_runs):
"""
Generates a list of test run name strings from the list of TIAF test runs.
@param test_runs: The list of TIAF test runs to generate the name strings from.
@return: The list of test run name strings.
"""
test_run_list = []
for test_run in test_runs:
test_run_list.append(test_run[NAME_KEY])
return test_run_list
def generate_mars_test_run_selections(test_run_selection, test_run_report, t0_timestamp: float):
"""
Generates a MARS test run selection from a TIAF test run selection and the corresponding test run report.
@param test_run_selection: The TIAF test run selection.
@param test_run_report: The TIAF test run report.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The MARS test run selection document.
"""
mars_test_run_selection = {key:test_run_report[key] for key in
[
RESULT_KEY,
NUM_PASSING_TEST_RUNS_KEY,
NUM_FAILING_TEST_RUNS_KEY,
NUM_EXECUTION_FAILURE_TEST_RUNS_KEY,
NUM_TIMED_OUT_TEST_RUNS_KEY,
NUM_UNEXECUTED_TEST_RUNS_KEY,
TOTAL_NUM_PASSING_TESTS_KEY,
TOTAL_NUM_FAILING_TESTS_KEY,
TOTAL_NUM_DISABLED_TESTS_KEY
]}
mars_test_run_selection[START_TIME_KEY] = generate_mars_timestamp(test_run_report[START_TIME_KEY], t0_timestamp)
mars_test_run_selection[END_TIME_KEY] = generate_mars_timestamp(test_run_report[END_TIME_KEY], t0_timestamp)
mars_test_run_selection[DURATION_KEY] = get_duration_in_seconds(test_run_report[DURATION_KEY])
mars_test_run_selection[INCLUDED_TEST_RUNS_KEY] = test_run_selection[INCLUDED_TEST_RUNS_KEY]
mars_test_run_selection[EXCLUDED_TEST_RUNS_KEY] = test_run_selection[EXCLUDED_TEST_RUNS_KEY]
mars_test_run_selection[NUM_INCLUDED_TEST_RUNS_KEY] = test_run_selection[NUM_INCLUDED_TEST_RUNS_KEY]
mars_test_run_selection[NUM_EXCLUDED_TEST_RUNS_KEY] = test_run_selection[NUM_EXCLUDED_TEST_RUNS_KEY]
mars_test_run_selection[TOTAL_NUM_TEST_RUNS_KEY] = test_run_selection[TOTAL_NUM_TEST_RUNS_KEY]
mars_test_run_selection[PASSING_TEST_RUNS_KEY] = generate_test_run_list(test_run_report[PASSING_TEST_RUNS_KEY])
mars_test_run_selection[FAILING_TEST_RUNS_KEY] = generate_test_run_list(test_run_report[FAILING_TEST_RUNS_KEY])
mars_test_run_selection[EXECUTION_FAILURE_TEST_RUNS_KEY] = generate_test_run_list(test_run_report[EXECUTION_FAILURE_TEST_RUNS_KEY])
mars_test_run_selection[TIMED_OUT_TEST_RUNS_KEY] = generate_test_run_list(test_run_report[TIMED_OUT_TEST_RUNS_KEY])
mars_test_run_selection[UNEXECUTED_TEST_RUNS_KEY] = generate_test_run_list(test_run_report[UNEXECUTED_TEST_RUNS_KEY])
return mars_test_run_selection
def generate_test_runs_from_list(test_run_list: list):
"""
Generates a TIAF test run selection from a list of test target name strings.
@param test_run_list: The list of test target names.
@return: The TIAF test run selection containing every specified test target as included.
"""
test_runs = {
TOTAL_NUM_TEST_RUNS_KEY: len(test_run_list),
NUM_INCLUDED_TEST_RUNS_KEY: len(test_run_list),
NUM_EXCLUDED_TEST_RUNS_KEY: 0,
INCLUDED_TEST_RUNS_KEY: test_run_list,
EXCLUDED_TEST_RUNS_KEY: []
}
return test_runs
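For example, feeding a two-target list through the helper above yields a selection of the following shape (the key strings are the constants defined at the top of this module; the target names are placeholders):

selection = generate_test_runs_from_list(["TestTargetA", "TestTargetB"])
# selection == {
#     "total_num_test_runs": 2,
#     "num_included_test_runs": 2,
#     "num_excluded_test_runs": 0,
#     "included_test_runs": ["TestTargetA", "TestTargetB"],
#     "excluded_test_runs": []
# }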
def generate_mars_sequence(sequence_report: dict, mars_job: dict, change_list:dict, t0_timestamp: float):
"""
Generates the MARS sequence document from the specified TIAF sequence report.
@param sequence_report: The TIAF runtime sequence report.
@param mars_job: The MARS job for this sequence.
@param change_list: The change list for which the TIAF sequence was run.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The MARS sequence document for the specified TIAF sequence report.
"""
mars_sequence = {key:sequence_report[key] for key in
[
SEQUENCE_TYPE_KEY,
RESULT_KEY,
POLICY_KEY,
TOTAL_NUM_TEST_RUNS_KEY,
TOTAL_NUM_PASSING_TEST_RUNS_KEY,
TOTAL_NUM_FAILING_TEST_RUNS_KEY,
TOTAL_NUM_EXECUTION_FAILURE_TEST_RUNS_KEY,
TOTAL_NUM_TIMED_OUT_TEST_RUNS_KEY,
TOTAL_NUM_UNEXECUTED_TEST_RUNS_KEY,
TOTAL_NUM_PASSING_TESTS_KEY,
TOTAL_NUM_FAILING_TESTS_KEY,
TOTAL_NUM_DISABLED_TESTS_KEY
]}
mars_sequence[START_TIME_KEY] = generate_mars_timestamp(sequence_report[START_TIME_KEY], t0_timestamp)
mars_sequence[END_TIME_KEY] = generate_mars_timestamp(sequence_report[END_TIME_KEY], t0_timestamp)
mars_sequence[DURATION_KEY] = get_duration_in_seconds(sequence_report[DURATION_KEY])
config = {key:sequence_report[key] for key in
[
TEST_TARGET_TIMEOUT_KEY,
GLOBAL_TIMEOUT_KEY,
MAX_CONCURRENCY_KEY
]}
test_run_selection = {}
test_run_selection[SELECTED_KEY] = generate_mars_test_run_selections(sequence_report[SELECTED_TEST_RUNS_KEY], sequence_report[SELECTED_TEST_RUN_REPORT_KEY], t0_timestamp)
if sequence_report[SEQUENCE_TYPE_KEY] == IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY or sequence_report[SEQUENCE_TYPE_KEY] == SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
total_test_runs = sequence_report[TOTAL_NUM_TEST_RUNS_KEY]
if total_test_runs > 0:
test_run_selection[SELECTED_KEY][EFFICIENCY_KEY] = (1.0 - (test_run_selection[SELECTED_KEY][TOTAL_NUM_TEST_RUNS_KEY] / total_test_runs)) * 100
else:
test_run_selection[SELECTED_KEY][EFFICIENCY_KEY] = 100
test_run_selection[DRAFTED_KEY] = generate_mars_test_run_selections(generate_test_runs_from_list(sequence_report[DRAFTED_TEST_RUNS_KEY]), sequence_report[DRAFTED_TEST_RUN_REPORT_KEY], t0_timestamp)
if sequence_report[SEQUENCE_TYPE_KEY] == SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
test_run_selection[DISCARDED_KEY] = generate_mars_test_run_selections(sequence_report[DISCARDED_TEST_RUNS_KEY], sequence_report[DISCARDED_TEST_RUN_REPORT_KEY], t0_timestamp)
else:
test_run_selection[SELECTED_KEY][EFFICIENCY_KEY] = 0
mars_sequence[MARS_JOB_KEY] = mars_job
mars_sequence[CONFIG_KEY] = config
mars_sequence[TEST_RUN_SELECTION_KEY] = test_run_selection
mars_sequence[CHANGE_LIST_KEY] = change_list
return mars_sequence
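As a quick check of the efficiency figure computed above: if 20 of 100 total test runs are selected, the sequence is reported as 80% efficient (placeholder counts for illustration only).

selected_test_runs = 20   # placeholder count of selected test runs
total_test_runs = 100     # placeholder count of total test runs
efficiency = (1.0 - (selected_test_runs / total_test_runs)) * 100
print(efficiency)  # 80.0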
def extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp: float):
"""
Extracts a MARS test target from the specified TIAF test run.
@param test_run: The TIAF test run.
@param instrumentation: Flag specifying whether or not instrumentation was used for the test targets in this run.
@param mars_job: The MARS job for this test target.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The MARS test target document for the specified TIAF test run.
"""
mars_test_run = {key:test_run[key] for key in
[
NAME_KEY,
RESULT_KEY,
NUM_PASSING_TESTS_KEY,
NUM_FAILING_TESTS_KEY,
NUM_DISABLED_TESTS_KEY,
COMMAND_ARGS_STRING
]}
mars_test_run[START_TIME_KEY] = generate_mars_timestamp(test_run[START_TIME_KEY], t0_timestamp)
mars_test_run[END_TIME_KEY] = generate_mars_timestamp(test_run[END_TIME_KEY], t0_timestamp)
mars_test_run[DURATION_KEY] = get_duration_in_seconds(test_run[DURATION_KEY])
mars_test_run[MARS_JOB_KEY] = mars_job
mars_test_run[INSTRUMENTATION_KEY] = instrumentation
return mars_test_run
def extract_mars_test_targets_from_report(test_run_report, instrumentation, mars_job, t0_timestamp: float):
"""
Extracts the MARS test targets from the specified TIAF test run report.
@param test_run_report: The TIAF runtime test run report.
@param instrumentation: Flag specifying whether or not instrumentation was used for the test targets in this run.
@param mars_job: The MARS job for these test targets.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The list of all MARS test target documents for the test targets in the TIAF test run report.
"""
mars_test_targets = []
for test_run in test_run_report[PASSING_TEST_RUNS_KEY]:
mars_test_targets.append(extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp))
for test_run in test_run_report[FAILING_TEST_RUNS_KEY]:
mars_test_targets.append(extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp))
for test_run in test_run_report[EXECUTION_FAILURE_TEST_RUNS_KEY]:
mars_test_targets.append(extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp))
for test_run in test_run_report[TIMED_OUT_TEST_RUNS_KEY]:
mars_test_targets.append(extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp))
for test_run in test_run_report[UNEXECUTED_TEST_RUNS_KEY]:
mars_test_targets.append(extract_mars_test_target(test_run, instrumentation, mars_job, t0_timestamp))
return mars_test_targets
def generate_mars_test_targets(sequence_report: dict, mars_job: dict, t0_timestamp: float):
"""
Generates a MARS test target document for each test target in the TIAF sequence report.
@param sequence_report: The TIAF runtime sequence report.
@param mars_job: The MARS job for this sequence.
@param t0_timestamp: The T0 timestamp that TIAF timings will be offset from.
@return: The list of all MARS test target documents for the test targets in the TIAF sequence report.
"""
mars_test_targets = []
# Determine whether or not the test targets were executed with instrumentation
if sequence_report[SEQUENCE_TYPE_KEY] == SEED_SEQUENCE_TYPE_KEY or sequence_report[SEQUENCE_TYPE_KEY] == SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY or (sequence_report[SEQUENCE_TYPE_KEY] == IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY and sequence_report[POLICY_KEY][DYNAMIC_DEPENDENCY_MAP_POLICY_KEY] == DYNAMIC_DEPENDENCY_MAP_POLICY_UPDATE_KEY):
instrumentation = True
else:
instrumentation = False
# Extract the MARS test target documents from each of the test run reports
mars_test_targets += extract_mars_test_targets_from_report(sequence_report[SELECTED_TEST_RUN_REPORT_KEY], instrumentation, mars_job, t0_timestamp)
if sequence_report[SEQUENCE_TYPE_KEY] == IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY or sequence_report[SEQUENCE_TYPE_KEY] == SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
mars_test_targets += extract_mars_test_targets_from_report(sequence_report[DRAFTED_TEST_RUN_REPORT_KEY], instrumentation, mars_job, t0_timestamp)
if sequence_report[SEQUENCE_TYPE_KEY] == SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
mars_test_targets += extract_mars_test_targets_from_report(sequence_report[DISCARDED_TEST_RUN_REPORT_KEY], instrumentation, mars_job, t0_timestamp)
return mars_test_targets
def transmit_report_to_mars(mars_index_prefix: str, tiaf_result: dict, driver_args: list):
"""
Transforms the TIAF result into the appropriate MARS documents and transmits them to MARS.
@param mars_index_prefix: The index prefix to be used for all MARS documents.
@param tiaf_result: The result object from the TIAF script.
@param driver_args: The arguments passed to the TIAF driver script.
"""
try:
filebeat = FilebeatClient("localhost", 9000, 60)
# T0 is the current timestamp that the report timings will be offset from
t0_timestamp = datetime.datetime.now().timestamp()
# Generate and transmit the MARS job document
mars_job = generate_mars_job(tiaf_result, driver_args)
filebeat.send_event(mars_job, f"{mars_index_prefix}.tiaf.job")
if tiaf_result[REPORT_KEY]:
# Generate and transmit the MARS sequence document
mars_sequence = generate_mars_sequence(tiaf_result[REPORT_KEY], mars_job, tiaf_result[CHANGE_LIST_KEY], t0_timestamp)
filebeat.send_event(mars_sequence, f"{mars_index_prefix}.tiaf.sequence")
# Generate and transmit the MARS test target documents
mars_test_targets = generate_mars_test_targets(tiaf_result[REPORT_KEY], mars_job, t0_timestamp)
for mars_test_target in mars_test_targets:
filebeat.send_event(mars_test_target, f"{mars_index_prefix}.tiaf.test_target")
except FilebeatExn as e:
logger.error(e)
except KeyError as e:
logger.error(f"The report does not contain the key {str(e)}.")

@ -6,237 +6,301 @@
#
#
import os
import json
import subprocess
import re
import git_utils
import uuid
import pathlib
from git_utils import Repo
from enum import Enum
from tiaf_persistent_storage_local import PersistentStorageLocal
from tiaf_persistent_storage_s3 import PersistentStorageS3
from tiaf_logger import get_logger
# Returns True if the specified child path is a child of the specified parent path, otherwise False
def is_child_path(parent_path, child_path):
parent_path = os.path.abspath(parent_path)
child_path = os.path.abspath(child_path)
return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path])
logger = get_logger(__file__)
class TestImpact:
def __init__(self, config_file, dst_commit, src_branch, dst_branch, pipeline, seeding_branches, seeding_pipelines):
# Commit
self.__dst_commit = dst_commit
print(f"Commit: '{self.__dst_commit}'.")
self.__src_commit = None
self.__has_src_commit = False
# Branch
self.__src_branch = src_branch
print(f"Source branch: '{self.__src_branch}'.")
self.__dst_branch = dst_branch
print(f"Destination branch: '{self.__dst_branch}'.")
print(f"Seeding branches: '{seeding_branches}'.")
if self.__src_branch in seeding_branches:
self.__is_seeding_branch = True
else:
self.__is_seeding_branch = False
print(f"Is seeding branch: '{self.__is_seeding_branch}'.")
# Pipeline
self.__pipeline = pipeline
print(f"Pipeline: '{self.__pipeline}'.")
print(f"Seeding pipelines: '{seeding_pipelines}'.")
if self.__pipeline in seeding_pipelines:
self.__is_seeding_pipeline = True
else:
self.__is_seeding_pipeline = False
print(f"Is seeding pipeline: '{self.__is_seeding_pipeline}'.")
# Config
self.__parse_config_file(config_file)
# Sequence
if self.__is_seeding_branch and self.__is_seeding_pipeline:
self.__is_seeding = True
else:
self.__is_seeding = False
print(f"Is seeding: '{self.__is_seeding}'.")
if self.__use_test_impact_analysis and not self.__is_seeding:
self.__generate_change_list()
# Parse the configuration file and retrieve the data needed for launching the test impact analysis runtime
def __parse_config_file(self, config_file):
print(f"Attempting to parse configuration file '{config_file}'...")
with open(config_file, "r") as config_data:
config = json.load(config_data)
self.__repo_dir = config["repo"]["root"]
self.__repo = Repo(self.__repo_dir)
# TIAF
self.__use_test_impact_analysis = config["jenkins"]["use_test_impact_analysis"]
print(f"Is using test impact analysis: '{self.__use_test_impact_analysis}'.")
self.__tiaf_bin = config["repo"]["tiaf_bin"]
if self.__use_test_impact_analysis and not os.path.isfile(self.__tiaf_bin):
raise FileNotFoundError("Could not find tiaf binary")
# Workspaces
self.__active_workspace = config["workspace"]["active"]["root"]
self.__historic_workspace = config["workspace"]["historic"]["root"]
self.__temp_workspace = config["workspace"]["temp"]["root"]
# Last commit hash
last_commit_hash_path_file = config["workspace"]["historic"]["relative_paths"]["last_run_hash_file"]
self.__last_commit_hash_path = os.path.join(self.__historic_workspace, last_commit_hash_path_file)
print("The configuration file was parsed successfully.")
# Restricts change lists from checking in test impact analysis files
def __check_for_restricted_files(self, file_path):
if is_child_path(self.__active_workspace, file_path) or is_child_path(self.__historic_workspace, file_path) or is_child_path(self.__temp_workspace, file_path):
raise ValueError(f"Checking in test impact analysis framework files is illegal: '{file_path}''.")
def __read_last_run_hash(self):
self.__has_src_commit = False
if os.path.isfile(self.__last_commit_hash_path):
print(f"Previous commit hash found at '{self.__last_commit_hash_path}'.")
with open(self.__last_commit_hash_path) as file:
self.__src_commit = file.read()
self.__has_src_commit = True
def __write_last_run_hash(self, last_run_hash):
os.makedirs(self.__historic_workspace, exist_ok=True)
f = open(self.__last_commit_hash_path, "w")
f.write(last_run_hash)
f.close()
# Determines the change list between now and the last TIAF run (if any)
def __generate_change_list(self):
self.__has_change_list = False
self.__change_list_path = None
def __init__(self, config_file: str):
"""
Initializes the test impact model with the runtime configuration obtained from the specified config file.
@param config_file: The runtime config file to obtain the runtime configuration data from.
"""
self._has_change_list = False
self._parse_config_file(config_file)
def _parse_config_file(self, config_file: str):
"""
Parse the configuration file and retrieve the data needed for launching the test impact analysis runtime.
@param config_file: The runtime config file to obtain the runtime configuration data from.
"""
logger.info(f"Attempting to parse configuration file '{config_file}'...")
try:
with open(config_file, "r") as config_data:
self._config = json.load(config_data)
self._repo_dir = self._config["repo"]["root"]
self._repo = Repo(self._repo_dir)
# TIAF
self._use_test_impact_analysis = self._config["jenkins"]["use_test_impact_analysis"]
self._tiaf_bin = pathlib.Path(self._config["repo"]["tiaf_bin"])
if self._use_test_impact_analysis and not self._tiaf_bin.is_file():
logger.warning(f"Could not find TIAF binary at location {self._tiaf_bin}, TIAF will be turned off.")
self._use_test_impact_analysis = False
else:
logger.info(f"Runtime binary found at location {self._tiaf_bin}")
# Workspaces
self._active_workspace = self._config["workspace"]["active"]["root"]
self._historic_workspace = self._config["workspace"]["historic"]["root"]
self._temp_workspace = self._config["workspace"]["temp"]["root"]
logger.info("The configuration file was parsed successfully.")
except KeyError as e:
logger.error(f"The config does not contain the key {str(e)}.")
return
def _attempt_to_generate_change_list(self, last_commit_hash, instance_id: str):
"""
Attempts to determine the change list between now and the last TIAF run (if any).
@param last_commit_hash: The commit hash of the last TIAF run.
@param instance_id: The unique id to derive the change list file name from.
"""
self._has_change_list = False
self._change_list_path = None
# Check whether or not a previous commit hash exists (no hash is not a failure)
self.__read_last_run_hash()
if self.__has_src_commit == True:
if git_utils.is_descendent(self.__src_commit, self.__dst_commit) == False:
print(f"Source commit '{self.__src_commit}' and destination commit '{self.__dst_commit}' are not related.")
self._src_commit = last_commit_hash
if self._src_commit:
if self._repo.is_descendent(self._src_commit, self._dst_commit) == False:
logger.info(f"Source commit '{self._src_commit}' and destination commit '{self._dst_commit}' are not related.")
return
diff_path = os.path.join(self.__temp_workspace, "changelist.diff")
self._commit_distance = self._repo.commit_distance(self._src_commit, self._dst_commit)
diff_path = pathlib.Path(pathlib.PurePath(self._temp_workspace).joinpath(f"changelist.{instance_id}.diff"))
try:
git_utils.create_diff_file(self.__src_commit, self.__dst_commit, diff_path)
except FileNotFoundError as e:
print(e)
self._repo.create_diff_file(self._src_commit, self._dst_commit, diff_path)
except RuntimeError as e:
logger.error(e)
return
# A diff was generated, attempt to parse the diff and construct the change list
print(f"Generated diff between commits '{self.__src_commit}' and '{self.__dst_commit}': '{diff_path}'.")
change_list = {}
change_list["createdFiles"] = []
change_list["updatedFiles"] = []
change_list["deletedFiles"] = []
logger.info(f"Generated diff between commits '{self._src_commit}' and '{self._dst_commit}': '{diff_path}'.")
with open(diff_path, "r") as diff_data:
lines = diff_data.readlines()
for line in lines:
match = re.split("^R[0-9]+\\s(\\S+)\\s(\\S+)", line)
if len(match) > 1:
# File rename
self.__check_for_restricted_files(match[1])
self.__check_for_restricted_files(match[2])
# Treat renames as a deletion and an addition
change_list["deletedFiles"].append(match[1])
change_list["createdFiles"].append(match[2])
self._change_list["deletedFiles"].append(match[1])
self._change_list["createdFiles"].append(match[2])
else:
match = re.split("^[AMD]\\s(\\S+)", line)
self.__check_for_restricted_files(match[1])
if len(match) > 1:
if line[0] == 'A':
# File addition
change_list["createdFiles"].append(match[1])
self._change_list["createdFiles"].append(match[1])
elif line[0] == 'M':
# File modification
change_list["updatedFiles"].append(match[1])
self._change_list["updatedFiles"].append(match[1])
elif line[0] == 'D':
# File Deletion
change_list["deletedFiles"].append(match[1])
self._change_list["deletedFiles"].append(match[1])
# Serialize the change list to the JSON format the test impact analysis runtime expects
change_list_json = json.dumps(change_list, indent = 4)
change_list_path = os.path.join(self.__temp_workspace, "changelist.json")
change_list_json = json.dumps(self._change_list, indent = 4)
change_list_path = pathlib.PurePath(self._temp_workspace).joinpath(f"changelist.{instance_id}.json")
f = open(change_list_path, "w")
f.write(change_list_json)
f.close()
print(f"Change list constructed successfully: '{change_list_path}'.")
print(f"{len(change_list['createdFiles'])} created files, {len(change_list['updatedFiles'])} updated files and {len(change_list['deletedFiles'])} deleted files.")
logger.info(f"Change list constructed successfully: '{change_list_path}'.")
logger.info(f"{len(self._change_list['createdFiles'])} created files, {len(self._change_list['updatedFiles'])} updated files and {len(self._change_list['deletedFiles'])} deleted files.")
# Note: an empty change list generated due to no changes between last and current commit is valid
self.__has_change_list = True
self.__change_list_path = change_list_path
self._has_change_list = True
self._change_list_path = change_list_path
else:
print("No previous commit hash found, regular or seeded sequences only will be run.")
self.__has_change_list = False
logger.error("No previous commit hash found, regular or seeded sequences only will be run.")
self._has_change_list = False
return
# Runs the specified test sequence
def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeout):
def _generate_result(self, s3_bucket: str, suite: str, return_code: int, report: dict, runtime_args: list):
"""
Generates the result object from the pertinent runtime meta-data and sequence report.
@param s3_bucket: The S3 bucket used for persistent storage (None if local storage was used).
@param suite: The test suite that was run.
@param return_code: The return code of the TIAF runtime.
@param report: The sequence report generated by the TIAF runtime (None if not available).
@param runtime_args: The arguments that were passed to the TIAF runtime.
@return: The generated result object.
"""
result = {}
result["src_commit"] = self._src_commit
result["dst_commit"] = self._dst_commit
result["commit_distance"] = self._commit_distance
result["src_branch"] = self._src_branch
result["dst_branch"] = self._dst_branch
result["suite"] = suite
result["use_test_impact_analysis"] = self._use_test_impact_analysis
result["source_of_truth_branch"] = self._source_of_truth_branch
result["is_source_of_truth_branch"] = self._is_source_of_truth_branch
result["has_change_list"] = self._has_change_list
result["has_historic_data"] = self._has_historic_data
result["s3_bucket"] = s3_bucket
result["runtime_args"] = runtime_args
result["return_code"] = return_code
result["report"] = report
result["change_list"] = self._change_list
return result
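To make the shape of the result object concrete, here is a sketch of what _generate_result produces; every value below is an illustrative placeholder rather than real data.

example_result = {
    "src_commit": "abc123",
    "dst_commit": "def456",
    "commit_distance": 3,
    "src_branch": "feature/foo",
    "dst_branch": "development",
    "suite": "main",
    "use_test_impact_analysis": True,
    "source_of_truth_branch": "development",
    "is_source_of_truth_branch": False,
    "has_change_list": True,
    "has_historic_data": True,
    "s3_bucket": None,
    "runtime_args": ["--sequence=tianowrite", "--fpolicy=continue"],
    "return_code": 0,
    "report": {},  # parsed sequence report JSON (or None)
    "change_list": {"createdFiles": [], "updatedFiles": [], "deletedFiles": []}
}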
def run(self, commit: str, src_branch: str, dst_branch: str, s3_bucket: str, suite: str, test_failure_policy: str, safe_mode: bool, test_timeout: int, global_timeout: int):
"""
Determines the type of sequence to run based on the commit, source branch and destination branch before running the
sequence with the specified values.
@param commit: The commit hash of the changes to run test impact analysis on.
@param src_branch: If not equal to dst_branch, the branch that is being built.
@param dst_branch: If not equal to src_branch, the destination branch for the PR being built.
@param s3_bucket: Location of S3 bucket to use for persistent storage, otherwise local disk storage will be used.
@param suite: Test suite to run.
@param test_failure_policy: Test failure policy for regular and test impact sequences (ignored when seeding).
@param safe_mode: Flag to run impact analysis tests in safe mode (ignored when seeding).
@param test_timeout: Maximum run time (in seconds) of any test target before being terminated (unlimited if None).
@param global_timeout: Maximum run time of the sequence before being terminated (unlimited if None).
"""
args = []
seed_sequence_test_failure_policy = "continue"
# Suite
args.append(f"--suite={suite}")
print(f"Test suite is set to '{suite}'.")
# Timeouts
if test_timeout != None:
args.append(f"--ttimeout={test_timeout}")
print(f"Test target timeout is set to {test_timeout} seconds.")
if global_timeout != None:
args.append(f"--gtimeout={global_timeout}")
print(f"Global sequence timeout is set to {test_timeout} seconds.")
if self.__use_test_impact_analysis:
print("Test impact analysis is enabled.")
# Seed sequences
if self.__is_seeding:
persistent_storage = None
self._has_historic_data = False
self._change_list = {}
self._change_list["createdFiles"] = []
self._change_list["updatedFiles"] = []
self._change_list["deletedFiles"] = []
# Branches
self._src_branch = src_branch
self._dst_branch = dst_branch
logger.info(f"Src branch: '{self._src_branch}'.")
logger.info(f"Dst branch: '{self._dst_branch}'.")
# Source of truth (the branch from which the coverage data will be stored/retrieved from)
if not self._dst_branch or self._src_branch == self._dst_branch:
# Branch builds are their own source of truth and will update the coverage data for the source of truth after any instrumented sequences complete
self._is_source_of_truth_branch = True
self._source_of_truth_branch = self._src_branch
else:
# PR builds use their destination as the source of truth and never update the coverage data for the source of truth
self._is_source_of_truth_branch = False
self._source_of_truth_branch = self._dst_branch
logger.info(f"Source of truth branch: '{self._source_of_truth_branch}'.")
logger.info(f"Is source of truth branch: '{self._is_source_of_truth_branch}'.")
# Commit
self._dst_commit = commit
logger.info(f"Commit: '{self._dst_commit}'.")
self._src_commit = None
self._commit_distance = None
# Generate a unique ID to be used as part of the file name for required runtime dynamic artifacts.
instance_id = uuid.uuid4().hex
if self._use_test_impact_analysis:
logger.info("Test impact analysis is enabled.")
try:
# Persistent storage location
if s3_bucket:
persistent_storage = PersistentStorageS3(self._config, suite, s3_bucket, self._source_of_truth_branch)
else:
persistent_storage = PersistentStorageLocal(self._config, suite)
except SystemError as e:
logger.warning(f"The persistent storage encountered an irrecoverable error, test impact analysis will be disabled: '{e}'")
persistent_storage = None
if persistent_storage:
if persistent_storage.has_historic_data:
logger.info("Historic data found.")
self._attempt_to_generate_change_list(persistent_storage.last_commit_hash, instance_id)
else:
logger.info("No historic data found.")
# Sequence type
args.append("--sequence=seed")
print("Sequence type is set to 'seed'.")
# Test failure policy
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Impact analysis sequences
else:
if self.__has_change_list:
# Change list
args.append(f"--changelist={self.__change_list_path}")
print(f"Change list is set to '{self.__change_list_path}'.")
# Sequence type
args.append("--sequence=tianowrite")
print("Sequence type is set to 'tianowrite'.")
# Integrity failure policy
args.append("--ipolicy=continue")
print("Integration failure policy is set to 'continue'.")
if self._has_change_list:
if self._is_source_of_truth_branch:
# Use TIA sequence (instrumented subset of tests) for coverage updating branches so we can update the coverage data with the generated coverage
sequence_type = "tia"
else:
# Use TIA no-write sequence (regular subset of tests) for non coverage updating branches
sequence_type = "tianowrite"
# Ignore integrity failures for non coverage updating branches as our confidence in the
args.append("--ipolicy=continue")
logger.info("Integration failure policy is set to 'continue'.")
# Safe mode
if safe_mode:
args.append("--safemode=on")
print("Safe mode set to 'on'.")
logger.info("Safe mode set to 'on'.")
else:
args.append("--safemode=off")
print("Safe mode set to 'off'.")
logger.info("Safe mode set to 'off'.")
# Change list
args.append(f"--changelist={self._change_list_path}")
logger.info(f"Change list is set to '{self._change_list_path}'.")
else:
args.append("--sequence=regular")
print("Sequence type is set to 'regular'.")
# Test failure policy
args.append(f"--fpolicy={test_failure_policy}")
print(f"Test failure policy is set to '{test_failure_policy}'.")
else:
print("Test impact analysis is disabled.")
# Sequence type
args.append("--sequence=regular")
print("Sequence type is set to 'regular'.")
# Seeding job
if self.__is_seeding:
# Test failure policy
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Non seeding job
if self._is_source_of_truth_branch:
# Use seed sequence (instrumented all tests) for coverage updating branches so we can generate the coverage bed for future sequences
sequence_type = "seed"
# We always continue after test failures when seeding to ensure we capture the coverage for all test targets
test_failure_policy = "continue"
else:
# Use regular sequence (regular all tests) for non coverage updating branches as we have no coverage to use nor coverage to update
sequence_type = "regular"
# Ignore integrity failures for non coverage updating branches as our confidence in the
args.append("--ipolicy=continue")
logger.info("Integration failure policy is set to 'continue'.")
else:
# Test failure policy
args.append(f"--fpolicy={test_failure_policy}")
print(f"Test failure policy is set to '{test_failure_policy}'.")
print("Args: ", end='')
print(*args)
result = subprocess.run([self.__tiaf_bin] + args)
# Use regular sequence (regular all tests) when the persistent storage fails to avoid wasting time generating seed data that will not be preserved
sequence_type = "regular"
else:
# Use regular sequence (regular all tests) when test impact analysis is disabled
sequence_type = "regular"
args.append(f"--sequence={sequence_type}")
logger.info(f"Sequence type is set to '{sequence_type}'.")
# Test failure policy
args.append(f"--fpolicy={test_failure_policy}")
logger.info(f"Test failure policy is set to '{test_failure_policy}'.")
# Sequence report
report_file = pathlib.PurePath(self._temp_workspace).joinpath(f"report.{instance_id}.json")
args.append(f"--report={report_file}")
logger.info(f"Sequence report file is set to '{report_file}'.")
# Suite
args.append(f"--suite={suite}")
logger.info(f"Test suite is set to '{suite}'.")
# Timeouts
if test_timeout is not None:
args.append(f"--ttimeout={test_timeout}")
logger.info(f"Test target timeout is set to {test_timeout} seconds.")
if global_timeout is not None:
args.append(f"--gtimeout={global_timeout}")
logger.info(f"Global sequence timeout is set to {test_timeout} seconds.")
# Run sequence
unpacked_args = " ".join(args)
logger.info(f"Args: {unpacked_args}")
runtime_result = subprocess.run([str(self._tiaf_bin)] + args)
report = None
# If the sequence completed (with or without failures) we will update the historical meta-data
if result.returncode == 0 or result.returncode == 7:
print("Test impact analysis runtime returned successfully.")
if self.__is_seeding:
print("Writing historical meta-data...")
self.__write_last_run_hash(self.__dst_commit)
print("Complete!")
if runtime_result.returncode == 0 or runtime_result.returncode == 7:
logger.info("Test impact analysis runtime returned successfully.")
if self._is_source_of_truth_branch and persistent_storage is not None:
persistent_storage.update_and_store_historic_data(self._dst_commit)
with open(report_file) as json_file:
report = json.load(json_file)
else:
print(f"The test impact analysis runtime returned with error: '{result.returncode}'.")
return result.returncode
logger.error(f"The test impact analysis runtime returned with error: '{runtime_result.returncode}'.")
return self._generate_result(s3_bucket, suite, runtime_result.returncode, report, args)

@ -7,60 +7,138 @@
#
import argparse
import mars_utils
import sys
import pathlib
import traceback
from tiaf import TestImpact
from tiaf_logger import get_logger
import sys
import os
import datetime
import json
import socket
logger = get_logger(__file__)
def parse_args():
def file_path(value):
if os.path.isfile(value):
def valid_file_path(value):
if pathlib.Path(value).is_file():
return value
else:
raise FileNotFoundError(value)
def timout_type(value):
def valid_timout_type(value):
value = int(value)
if value <= 0:
raise ValueError("Timer values must be positive integers")
return value
def test_failure_policy(value):
def valid_test_failure_policy(value):
if value == "continue" or value == "abort" or value == "ignore":
return value
else:
raise ValueError("Test failure policy must be 'abort', 'continue' or 'ignore'")
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest="config", type=file_path, help="Path to the test impact analysis framework configuration file", required=True)
parser.add_argument('--src-branch', dest="src_branch", help="The branch that is being built", required=True)
parser.add_argument('--dst-branch', dest="dst_branch", help="For PR builds, the destination branch to be merged to, otherwise empty")
parser.add_argument('--seeding-branches', dest="seeding_branches", type=lambda arg: arg.split(','), help="Comma separated branches that seeding will occur on", required=True)
parser.add_argument('--pipeline', dest="pipeline", help="Pipeline the test impact analysis framework is running on", required=True)
parser.add_argument('--seeding-pipelines', dest="seeding_pipelines", type=lambda arg: arg.split(','), help="Comma separated pipelines that seeding will occur on", required=True)
parser.add_argument('--dest-commit', dest="dst_commit", help="Commit to run test impact analysis on (ignored when seeding)", required=True)
parser.add_argument('--suite', dest="suite", help="Test suite to run", required=True)
parser.add_argument('--test-failure-policy', dest="test_failure_policy", type=test_failure_policy, help="Test failure policy for regular and test impact sequences (ignored when seeding)", required=True)
parser.add_argument('--safeMode', dest="safe_mode", action='store_true', help="Run impact analysis tests in safe mode (ignored when seeding)")
parser.add_argument('--testTimeout', dest="test_timeout", type=timout_type, help="Maximum run time (in seconds) of any test target before being terminated", required=False)
parser.add_argument('--globalTimeout', dest="global_timeout", type=timout_type, help="Maximum run time of the sequence before being terminated", required=False)
parser.set_defaults(test_timeout=None)
parser.set_defaults(global_timeout=None)
# Configuration file path
parser.add_argument(
'--config',
type=valid_file_path,
help="Path to the test impact analysis framework configuration file",
required=True
)
# Source branch
parser.add_argument(
'--src-branch',
help="Branch that is being built",
required=True
)
# Destination branch
parser.add_argument(
'--dst-branch',
help="For PR builds, the destination branch to be merged to, otherwise empty",
required=False
)
# Commit hash
parser.add_argument(
'--commit',
help="Commit that is being built",
required=True
)
# S3 bucket
parser.add_argument(
'--s3-bucket',
help="Location of S3 bucket to use for persistent storage, otherwise local disk storage will be used",
required=False
)
# MARS index prefix
parser.add_argument(
'--mars-index-prefix',
help="Index prefix to use for MARS, otherwise no data will be tramsmitted to MARS",
required=False
)
# Test suite
parser.add_argument(
'--suite',
help="Test suite to run",
required=True
)
# Test failure policy
parser.add_argument(
'--test-failure-policy',
type=valid_test_failure_policy,
help="Test failure policy for regular and test impact sequences (ignored when seeding)",
required=True
)
# Safe mode
parser.add_argument(
'--safe-mode',
action='store_true',
help="Run impact analysis tests in safe mode (ignored when seeding)",
required=False
)
# Test timeout
parser.add_argument(
'--test-timeout',
type=valid_timout_type,
help="Maximum run time (in seconds) of any test target before being terminated",
required=False
)
# Global timeout
parser.add_argument(
'--global-timeout',
type=valid_timout_type,
help="Maximum run time of the sequence before being terminated",
required=False
)
args = parser.parse_args()
return args
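A hypothetical invocation of the driver for a PR build, shown here as a subprocess call; the script file name and all argument values are placeholder assumptions.

import subprocess
import sys

subprocess.run([
    sys.executable, "tiaf_driver.py",  # assumed driver script file name
    "--config", "tiaf_config.json",
    "--src-branch", "feature/foo",
    "--dst-branch", "development",
    "--commit", "abc123",
    "--suite", "main",
    "--test-failure-policy", "continue",
    "--safe-mode",
    "--test-timeout", "1800",
    "--global-timeout", "3600"
])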
if __name__ == "__main__":
try:
args = parse_args()
tiaf = TestImpact(args.config, args.dst_commit, args.src_branch, args.dst_branch, args.pipeline, args.seeding_branches, args.seeding_pipelines)
return_code = tiaf.run(args.suite, args.test_failure_policy, args.safe_mode, args.test_timeout, args.global_timeout)
tiaf = TestImpact(args.config)
tiaf_result = tiaf.run(args.commit, args.src_branch, args.dst_branch, args.s3_bucket, args.suite, args.test_failure_policy, args.safe_mode, args.test_timeout, args.global_timeout)
if args.mars_index_prefix:
logger.info("Transmitting report to MARS...")
mars_utils.transmit_report_to_mars(args.mars_index_prefix, tiaf_result, sys.argv)
logger.info("Complete!")
# Non-gating will be removed from this script and handled at the job level in SPEC-7413
#sys.exit(return_code)
#sys.exit(result.return_code)
sys.exit(0)
except Exception as e:
# Non-gating will be removed from this script and handled at the job level in SPEC-7413
print(f"Exception caught by TIAF driver: {e}")
logger.error(f"Exception caught by TIAF driver: '{e}'.")
traceback.print_exc()

@ -0,0 +1,20 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import logging
import sys
def get_logger(name: str):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][TIAF][%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
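A short usage sketch of the logger factory above; with the formatter shown, a call like the following emits a line of the form [<timestamp>][TIAF][INFO] <message>.

from tiaf_logger import get_logger

logger = get_logger(__file__)
logger.info("Hello from TIAF")
# Example output (timestamp will vary):
# [2021-01-01 00:00:00,000][TIAF][INFO] Hello from TIAF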

@ -0,0 +1,118 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import json
import pathlib
from abc import ABC, abstractmethod
from tiaf_logger import get_logger
logger = get_logger(__file__)
# Abstraction for the persistent storage required by TIAF to store and retrieve the branch coverage data and other meta-data
class PersistentStorage(ABC):
def __init__(self, config: dict, suite: str):
"""
Initializes the persistent storage into a state for which there is no historic data available.
@param config: The runtime configuration to obtain the data file paths from.
@param suite: The test suite for which the historic data will be obtained.
"""
# Work on the assumption that there is no historic meta-data (a valid state to be in, should none exist)
self._last_commit_hash = None
self._has_historic_data = False
try:
# The runtime expects the coverage data to be in the location specified in the config file (unless overridden with
# the --datafile command line argument, which the TIAF scripts do not do)
self._active_workspace = pathlib.Path(config["workspace"]["active"]["root"])
unpacked_coverage_data_file = config["workspace"]["active"]["relative_paths"]["test_impact_data_files"][suite]
except KeyError as e:
raise SystemError(f"The config does not contain the key {str(e)}.")
self._unpacked_coverage_data_file = self._active_workspace.joinpath(unpacked_coverage_data_file)
def _unpack_historic_data(self, historic_data_json: str):
"""
Unpacks the historic data into the appropriate memory and disk locations.
@param historic_data_json: The historic data in JSON format.
"""
self._has_historic_data = False
try:
historic_data = json.loads(historic_data_json)
self._last_commit_hash = historic_data["last_commit_hash"]
# Create the active workspace directory where the coverage data file will be placed and unpack the coverage data so
# it is accessible by the runtime
self._active_workspace.mkdir(exist_ok=True)
with open(self._unpacked_coverage_data_file, "w", newline='\n') as coverage_data:
coverage_data.write(historic_data["coverage_data"])
self._has_historic_data = True
except json.JSONDecodeError:
logger.error("The historic data does not contain valid JSON.")
except KeyError as e:
logger.error(f"The historic data does not contain the key {str(e)}.")
except EnvironmentError as e:
logger.error(f"There was a problem the coverage data file '{self._unpacked_coverage_data_file}': '{e}'.")
def _pack_historic_data(self, last_commit_hash: str):
"""
Packs the current historic data into JSON format for serialization.
@param last_commit_hash: The commit hash to associate the coverage data (and any other meta data) with.
@return: The packed historic data in JSON format.
"""
try:
# Attempt to read the existing coverage data
if self._unpacked_coverage_data_file.is_file():
with open(self._unpacked_coverage_data_file, "r") as coverage_data:
historic_data = {"last_commit_hash": last_commit_hash, "coverage_data": coverage_data.read()}
return json.dumps(historic_data)
else:
logger.info(f"No coverage data exists at location '{self._unpacked_coverage_data_file}'.")
except EnvironmentError as e:
logger.error(f"There was a problem the coverage data file '{self._unpacked_coverage_data_file}': '{e}'.")
except TypeError:
logger.error("The historic data could not be serialized to valid JSON.")
return None
@abstractmethod
def _store_historic_data(self, historic_data_json: str):
"""
Stores the historic data in the designated persistent storage location.
@param historic_data_json: The historic data (in JSON format) to be stored in persistent storage.
"""
pass
def update_and_store_historic_data(self, last_commit_hash: str):
"""
Updates the historic data and stores it in the designated persistent storage location.
@param last_commit_hash: The commit hash to associate the coverage data (and any other meta data) with.
"""
historic_data_json = self._pack_historic_data(last_commit_hash)
if historic_data_json:
self._store_historic_data(historic_data_json)
else:
logger.info("The historic data could not be successfully stored.")
@property
def has_historic_data(self):
return self._has_historic_data
@property
def last_commit_hash(self):
return self._last_commit_hash
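To illustrate the contract the abstract base class imposes, here is a minimal hypothetical subclass that keeps the packed JSON in memory (purely a sketch or test double, assuming the PersistentStorage class above is in scope; the real implementations are the local-disk and S3 classes that follow):

class PersistentStorageInMemory(PersistentStorage):
    """Hypothetical in-memory implementation, useful only as an illustration or test double."""
    def __init__(self, config: dict, suite: str):
        super().__init__(config, suite)
        self._stored_json = None
    def _store_historic_data(self, historic_data_json: str):
        # Keep the packed historic data in memory instead of writing it to persistent storage.
        self._stored_json = historic_data_json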

@ -0,0 +1,56 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import pathlib
import logging
from tiaf_persistent_storage import PersistentStorage
from tiaf_logger import get_logger
logger = get_logger(__file__)
# Implementation of local persistent storage
class PersistentStorageLocal(PersistentStorage):
def __init__(self, config: str, suite: str):
"""
Initializes the persistent storage with any local historic data available.
@param config: The runtime config file to obtain the data file paths from.
@param suite: The test suite for which the historic data will be obtained.
"""
super().__init__(config, suite)
try:
# Attempt to obtain the local persistent data location specified in the runtime config file
self._historic_workspace = pathlib.Path(config["workspace"]["historic"]["root"])
historic_data_file = pathlib.Path(config["workspace"]["historic"]["relative_paths"]["data"])
# Attempt to unpack the local historic data file
self._historic_data_file = self._historic_workspace.joinpath(historic_data_file)
if self._historic_data_file.is_file():
with open(self._historic_data_file, "r") as historic_data_raw:
historic_data_json = historic_data_raw.read()
self._unpack_historic_data(historic_data_json)
except KeyError as e:
raise SystemError(f"The config does not contain the key {str(e)}.")
except EnvironmentError as e:
raise SystemError(f"There was a problem the historic data file '{self._historic_data_file}': '{e}'.")
def _store_historic_data(self, historic_data_json: str):
"""
Stores the historic data in the historic workspace location specified in the runtime config file.
@param historic_data_json: The historic data (in JSON format) to be stored in persistent storage.
"""
try:
self._historic_workspace.mkdir(exist_ok=True)
with open(self._historic_data_file, "w") as historic_data_file:
historic_data_file.write(historic_data_json)
except EnvironmentError as e:
logger.error(f"There was a problem the historic data file '{self._historic_data_file}': '{e}'.")

@ -0,0 +1,87 @@
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import boto3
import botocore.exceptions
import zlib
import logging
from io import BytesIO
from tiaf_persistent_storage import PersistentStorage
from tiaf_logger import get_logger
logger = get_logger(__file__)
# Implementation of s3 bucket persistent storage
class PersistentStorageS3(PersistentStorage):
def __init__(self, config: dict, suite: str, s3_bucket: str, branch: str):
"""
Initializes the persistent storage with the specified s3 bucket.
@param config: The runtime config file to obtain the data file paths from.
@param suite: The test suite for which the historic data will be obtained.
@param s3_bucket: The s3 bucket to use for storing and retrieving historic data.
@param branch: The source of truth branch for which the historic data will be stored and retrieved.
"""
super().__init__(config, suite)
try:
# We store the historic data as compressed JSON
object_extension = "json.zip"
# historic_data.json.zip is the file containing the coverage and meta-data of the last TIAF sequence run
historic_data_file = f"historic_data.{object_extension}"
# The location of the data is in the form <branch>/<config> so the build config of each branch gets its own historic data
self._dir = f'{branch}/{config["meta"]["build_config"]}'
self._historic_data_key = f'{self._dir}/{historic_data_file}'
logger.info(f"Attempting to retrieve historic data for branch '{branch}' at location '{self._historic_data_key}' on bucket '{s3_bucket}'...")
self._s3 = boto3.resource("s3")
self._bucket = self._s3.Bucket(s3_bucket)
# There is only one historic_data.json.zip in the specified location
for object in self._bucket.objects.filter(Prefix=self._historic_data_key):
logger.info(f"Historic data found for branch '{branch}'.")
# Archive the existing object with the name of the existing last commit hash
archive_key = f"{self._dir}/archive/{self._last_commit_hash}.{object_extension}"
logger.info(f"Archiving existing historic data to {archive_key}...")
self._bucket.copy({"Bucket": self._bucket.name, "Key": self._historic_data_key}, archive_key)
# Decode the historic data object into raw bytes
response = object.get()
file_stream = response['Body']
# Decompress and unpack the zipped historic data JSON
historic_data_json = zlib.decompress(file_stream.read()).decode('UTF-8')
self._unpack_historic_data(historic_data_json)
return
except KeyError as e:
raise SystemError(f"The config does not contain the key {str(e)}.")
except botocore.exceptions.BotoCoreError as e:
raise SystemError(f"There was a problem with the s3 bucket: {e}")
except botocore.exceptions.ClientError as e:
raise SystemError(f"There was a problem with the s3 client: {e}")
def _store_historic_data(self, historic_data_json: str):
"""
Stores the historic data in the specified s3 bucket at the location <branch>/<build_config>/historic_data.json.zip.
@param historic_data_json: The historic data (in JSON format) to be stored in persistent storage.
"""
try:
data = BytesIO(zlib.compress(bytes(historic_data_json, "UTF-8")))
logger.info(f"Uploading historic data to location '{self._historic_data_key}'...")
self._bucket.upload_fileobj(data, self._historic_data_key)
logger.info("Upload complete.")
except botocore.exceptions.BotoCoreError as e:
logger.error(f"There was a problem with the s3 bucket: {e}")
except botocore.exceptions.ClientError as e:
logger.error(f"There was a problem with the s3 client: {e}")