Added debugger attachment utilities to the engine, fixed a crash when showing console variables, improved timeout handling, and made small cleanups (#3591)

* Added debugger attachment utilities to the engine, fixed a crash when showing console variables

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Removed unused variables

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Removed unneeded check

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Small fix for crashes/timeouts

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Removed unused variable, fixed compile error

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Fixed compile error

Signed-off-by: Garcia Ruiz <aljanru@amazon.co.uk>

* Addressed Esteban's comments

* Addressed Tom's comments

Co-authored-by: Garcia Ruiz <aljanru@amazon.co.uk>
Committed by AMZN-AlexOteiza (commit d784ff8c57, parent 9752fb009a)

@ -573,6 +573,10 @@ static CVarBlock* VarBlockFromConsoleVars()
IVariable* pVariable = nullptr;
for (int i = 0; i < cmdCount; i++)
{
if (!cmds[i].data())
{
continue;
}
ICVar* pCVar = console->GetCVar(cmds[i].data());
if (!pCVar)
{

@ -551,7 +551,9 @@ public:
{ "NSDocumentRevisionsDebugMode", nsDocumentRevisionsDebugMode},
{ "skipWelcomeScreenDialog", m_bSkipWelcomeScreenDialog},
{ "autotest_mode", m_bAutotestMode},
{ "regdumpall", dummy }
{ "regdumpall", dummy },
{ "attach-debugger", dummy }, // Attaches a debugger for the current application
{ "wait-for-debugger", dummy }, // Waits until a debugger is attached to the current application
};
QString dummyString;
@ -2290,7 +2292,7 @@ int CCryEditApp::IdleProcessing(bool bBackgroundUpdate)
int res = 0;
if (bIsAppWindow || m_bForceProcessIdle || m_bKeepEditorActive
// Automated tests must always keep the editor active, or they can get stuck
|| m_bAutotestMode)
|| m_bAutotestMode || m_bRunPythonTestScript)
{
res = 1;
bActive = true;
@ -4011,6 +4013,19 @@ extern "C" int AZ_DLL_EXPORT CryEditMain(int argc, char* argv[])
{
CryAllocatorsRAII cryAllocatorsRAII;
// Debugging utilities
for (int i = 1; i < argc; ++i)
{
if (azstricmp(argv[i], "--attach-debugger") == 0)
{
AZ::Debug::Trace::AttachDebugger();
}
else if (azstricmp(argv[i], "--wait-for-debugger") == 0)
{
AZ::Debug::Trace::WaitForDebugger();
}
}
// ensure the EditorEventsBus context gets created inside EditorLib
[[maybe_unused]] const auto& editorEventsContext = AzToolsFramework::EditorEvents::Bus::GetOrCreateContext();

@ -401,6 +401,16 @@ inline namespace Commands
{
return static_cast<int>(GetIEditor()->GetEditorConfigPlatform());
}
bool PyAttachDebugger()
{
return AZ::Debug::Trace::AttachDebugger();
}
bool PyWaitForDebugger(float timeoutSeconds = -1.f)
{
return AZ::Debug::Trace::WaitForDebugger(timeoutSeconds);
}
}
namespace AzToolsFramework
@ -448,6 +458,9 @@ namespace AzToolsFramework
addLegacyGeneral(behaviorContext->Method("start_process_detached", PyStartProcessDetached, nullptr, "Launches a detached process with an optional space separated list of arguments."));
addLegacyGeneral(behaviorContext->Method("launch_lua_editor", PyLaunchLUAEditor, nullptr, "Launches the Lua editor, may receive a list of space separate file paths, or an empty string to only open the editor."));
addLegacyGeneral(behaviorContext->Method("attach_debugger", PyAttachDebugger, nullptr, "Prompts for attaching the debugger"));
addLegacyGeneral(behaviorContext->Method("wait_for_debugger", PyWaitForDebugger, behaviorContext->MakeDefaultValues(-1.f), "Pauses this thread execution until the debugger has been attached"));
// this will put these methods into the 'azlmbr.legacy.checkout_dialog' module
auto addCheckoutDialog = [](AZ::BehaviorContext::GlobalMethodBuilder methodBuilder)
{
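As a quick aside, below is a minimal sketch of how these new bindings could be invoked from an editor Python script, assuming the legacy general methods are exposed as azlmbr.legacy.general (the timeout value is illustrative):

# Hypothetical usage inside an editor Python test script.
import azlmbr.legacy.general as general

# Prompts to attach a debugger to the running Editor process.
general.attach_debugger()

# Blocks this thread until a debugger attaches, or for up to 30 seconds (illustrative value);
# a negative timeout waits indefinitely, matching the C++ default of -1.
general.wait_for_debugger(30.0)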

@ -25,6 +25,7 @@
#include <AzCore/std/containers/unordered_set.h>
#include <AzCore/Module/Environment.h>
#include <AzCore/Console/IConsole.h>
#include <AzCore/std/chrono/chrono.h>
namespace AZ
{
@ -33,6 +34,7 @@ namespace AZ
namespace Platform
{
#if defined(AZ_ENABLE_DEBUG_TOOLS)
bool AttachDebugger();
bool IsDebuggerPresent();
void HandleExceptions(bool isEnabled);
void DebugBreak();
@ -141,6 +143,40 @@ namespace AZ
#endif
}
bool
Trace::AttachDebugger()
{
#if defined(AZ_ENABLE_DEBUG_TOOLS)
return Platform::AttachDebugger();
#else
return false;
#endif
}
bool
Trace::WaitForDebugger(float timeoutSeconds/*=-1.f*/)
{
#if defined(AZ_ENABLE_DEBUG_TOOLS)
using AZStd::chrono::system_clock;
using AZStd::chrono::time_point;
using AZStd::chrono::milliseconds;
milliseconds timeoutMs = milliseconds(aznumeric_cast<long long>(timeoutSeconds * 1000));
system_clock clock;
time_point start = clock.now();
auto hasTimedOut = [&clock, start, timeoutMs]()
{
return timeoutMs.count() >= 0 && (clock.now() - start) >= timeoutMs;
};
while (!AZ::Debug::Trace::IsDebuggerPresent() && !hasTimedOut())
{
AZStd::this_thread::sleep_for(milliseconds(1));
}
return AZ::Debug::Trace::IsDebuggerPresent();
#else
AZ_UNUSED(timeoutSeconds);
return false;
#endif
}
//=========================================================================
// HandleExceptions
// [8/3/2009]

@ -49,6 +49,8 @@ namespace AZ
*/
static const char* GetDefaultSystemWindow();
static bool IsDebuggerPresent();
static bool AttachDebugger();
static bool WaitForDebugger(float timeoutSeconds = -1.f);
/// True or false if we want to handle system exceptions.
static void HandleExceptions(bool isEnabled);

@ -56,6 +56,13 @@ namespace AZ
return ((info.kp_proc.p_flag & P_TRACED) != 0);
}
bool AttachDebugger()
{
// Not supported yet
AZ_Assert(false, "AttachDebugger() is not supported for Mac platform yet");
return false;
}
void HandleExceptions(bool)
{}

@ -60,6 +60,13 @@ namespace AZ
return s_debuggerDetected;
}
bool AttachDebugger()
{
// Not supported yet
AZ_Assert(false, "AttachDebugger() is not supported for Unix platform yet");
return false;
}
void HandleExceptions(bool)
{}

@ -49,6 +49,45 @@ namespace AZ
}
}
bool AttachDebugger()
{
if (IsDebuggerPresent())
{
return true;
}
// Launch vsjitdebugger.exe; this app is always present in the System32 folder
// when any version of Visual Studio is installed.
// It opens a dialog asking the user which debugger to attach.
STARTUPINFOW startupInfo = {0};
startupInfo.cb = sizeof(startupInfo);
PROCESS_INFORMATION processInfo = {0};
wchar_t cmdline[MAX_PATH];
swprintf_s(cmdline, L"vsjitdebugger.exe -p %lu", ::GetCurrentProcessId());
bool success = ::CreateProcessW(
NULL, // No module name (use command line)
cmdline, // Command line
NULL, // Process handle not inheritable
NULL, // Thread handle not inheritable
FALSE, // No handle inheritance
0, // No creation flags
NULL, // Use parent's environment block
NULL, // Use parent's starting directory
&startupInfo, // Pointer to STARTUPINFO structure
&processInfo); // Pointer to PROCESS_INFORMATION structure
if (success)
{
::WaitForSingleObject(processInfo.hProcess, INFINITE);
::CloseHandle(processInfo.hProcess);
::CloseHandle(processInfo.hThread);
return true;
}
return false;
}
void DebugBreak()
{
__debugbreak();

@ -59,6 +59,11 @@ class EditorTestBase(ABC):
timeout = 180
# Test file that this test will run
test_module = None
# Attach a debugger when running the test, useful for debugging crashes. This should never be True in production.
# It's also recommended to switch to EditorSingleTest for debugging in isolation
attach_debugger = False
# Wait until a debugger is attached at the start of the test; this is another way of debugging.
wait_for_debugger = False
# Test that will be run alone in one editor
class EditorSingleTest(EditorTestBase):
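For illustration, a hedged sketch of how a test could opt into the new debugging flags; the class name is hypothetical and test_module is omitted for brevity:

# Hypothetical single-editor test that prompts for a debugger at startup.
class MyCrashRepro(EditorSingleTest):
    # test_module would normally reference the editor script to run; omitted here.
    attach_debugger = True      # forwards --attach-debugger to the Editor process
    wait_for_debugger = False   # set to True to forward --wait-for-debugger instead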
@ -117,8 +122,9 @@ class Result:
class Pass(Base):
@classmethod
def create(cls, output : str, editor_log : str):
def create(cls, test_spec : EditorTestBase, output : str, editor_log : str):
r = cls()
r.test_spec = test_spec
r.output = output
r.editor_log = editor_log
return r
@ -135,8 +141,9 @@ class Result:
class Fail(Base):
@classmethod
def create(cls, output, editor_log : str):
def create(cls, test_spec : EditorTestBase, output, editor_log : str):
r = cls()
r.test_spec = test_spec
r.output = output
r.editor_log = editor_log
return r
@ -157,9 +164,10 @@ class Result:
class Crash(Base):
@classmethod
def create(cls, output : str, ret_code : int, stacktrace : str, editor_log : str):
def create(cls, test_spec : EditorTestBase, output : str, ret_code : int, stacktrace : str, editor_log : str):
r = cls()
r.output = output
r.test_spec = test_spec
r.ret_code = ret_code
r.stacktrace = stacktrace
r.editor_log = editor_log
@ -187,9 +195,10 @@ class Result:
class Timeout(Base):
@classmethod
def create(cls, output : str, time_secs : float, editor_log : str):
def create(cls, test_spec : EditorTestBase, output : str, time_secs : float, editor_log : str):
r = cls()
r.output = output
r.test_spec = test_spec
r.time_secs = time_secs
r.editor_log = editor_log
return r
@ -210,9 +219,10 @@ class Result:
class Unknown(Base):
@classmethod
def create(cls, output : str, extra_info : str, editor_log : str):
def create(cls, test_spec : EditorTestBase, output : str, extra_info : str, editor_log : str):
r = cls()
r.output = output
r.test_spec = test_spec
r.editor_log = editor_log
r.extra_info = extra_info
return r
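For context, the signature change means every Result now carries the spec of the test that produced it; a small, hypothetical illustration:

# SomeEditorTest is a hypothetical EditorTestBase subclass.
result = Result.Pass.create(SomeEditorTest, "editor stdout", "Editor.log contents")
assert result.test_spec is SomeEditorTest   # reporting code can now name the originating test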
@ -255,7 +265,7 @@ class EditorTestSuite():
def editor_test_data(self, request):
class TestData():
def __init__(self):
self.results = {}
self.results = {} # Dict of str(test_spec.__name__) -> Result
self.asset_processor = None
test_data = TestData()
@ -548,7 +558,7 @@ class EditorTestSuite():
for test_spec in test_spec_list:
name = editor_utils.get_module_filename(test_spec.test_module)
if name not in found_jsons.keys():
results[test_spec.__name__] = Result.Unknown.create(output, "Couldn't find any test run information on stdout", editor_log_content)
results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content)
else:
result = None
json_result = found_jsons[name]
@ -564,9 +574,9 @@ class EditorTestSuite():
log_start = end
if json_result["success"]:
result = Result.Pass.create(json_output, cur_log)
result = Result.Pass.create(test_spec, json_output, cur_log)
else:
result = Result.Fail.create(json_output, cur_log)
result = Result.Fail.create(test_spec, json_output, cur_log)
results[test_spec.__name__] = result
return results
@ -587,8 +597,13 @@ class EditorTestSuite():
test_spec : EditorTestBase, cmdline_args : List[str] = []):
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
if test_spec.use_null_renderer or (test_spec.use_null_renderer is None and self.use_null_renderer):
test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
test_cmdline_args += ["-rhi=null"]
if test_spec.attach_debugger:
test_cmdline_args += ["--attach-debugger"]
if test_spec.wait_for_debugger:
test_cmdline_args += ["--wait-for-debugger"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
@ -610,18 +625,18 @@ class EditorTestSuite():
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
if return_code == 0:
test_result = Result.Pass.create(output, editor_log_content)
test_result = Result.Pass.create(test_spec, output, editor_log_content)
else:
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
test_result = Result.Crash.create(output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
editor_utils.cycle_crash_report(run_id, workspace)
else:
test_result = Result.Fail.create(output, editor_log_content)
test_result = Result.Fail.create(test_spec, output, editor_log_content)
except WaitTimeoutError:
editor.kill()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
test_result = Result.Timeout.create(output, test_spec.timeout, editor_log_content)
test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content)
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
results = self._get_results_using_output([test_spec], output, editor_log_content)
@ -629,13 +644,17 @@ class EditorTestSuite():
return results
# Starts an editor executable with a list of tests and returns a dict of the result of every test run within that editor
# instance. In case of failure this function also parses the editor output to find out what specific tests that failed
# instance. In case of failure this function also parses the editor output to find out what specific tests failed
def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str,
test_spec_list : List[EditorTestBase], cmdline_args=[]):
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
if self.use_null_renderer:
test_cmdline_args += ["-rhi=null"]
if any([t.attach_debugger for t in test_spec_list]):
test_cmdline_args += ["--attach-debugger"]
if any([t.wait_for_debugger for t in test_spec_list]):
test_cmdline_args += ["--wait-for-debugger"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
@ -661,35 +680,65 @@ class EditorTestSuite():
if return_code == 0:
# No need to scrape the output, as all the tests have passed
for test_spec in test_spec_list:
results[test_spec.__name__] = Result.Pass.create(output, editor_log_content)
results[test_spec.__name__] = Result.Pass.create(test_spec, output, editor_log_content)
else:
# Scrape the output to attempt to find out which tests failed.
# This function always populates the result list; any test it can't find gets an "Unknown" result
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results doesn't match the number of tests that ran"
# If the editor crashed, find out in which test it happened and update the results
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
crashed_test = None
for key, result in results.items():
crashed_result = None
for test_spec_name, result in results.items():
if isinstance(result, Result.Unknown):
if not crashed_test:
if not crashed_result:
# The first test with "Unknown" result (no data in output) is likely the one that crashed
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
results[key] = Result.Crash.create(output, return_code, crash_error, result.editor_log)
crashed_test = results[key]
results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
crashed_result = result
else:
results[key] = Result.Unknown.create(output, f"This test has unknown result, test '{crashed_test.__name__}' crashed before this test could be executed", result.editor_log)
# If there are remaining "Unknown" results, they couldn't execute because of the crash; update them with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed"
except WaitTimeoutError:
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# If all the tests ran, the last test is the one that caused the crash
if not crashed_result:
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
except WaitTimeoutError:
editor.kill()
for key, result in results.items():
output = editor.get_output()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# The editor timed out when running the tests, get the data from the output to find out which ones ran
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results doesn't match the number of tests that ran"
# Similar logic to the crash handling: the first test that has no result is the one that timed out
timed_out_result = None
for test_spec_name, result in results.items():
if isinstance(result, Result.Unknown):
results[key] = Result.Timeout.create(result.output, total_timeout, result.editor_log)
# FIX-ME
if not timed_out_result:
results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log)
timed_out_result = result
else:
# If there are remaining "Unknown" results, they couldn't execute because of the timeout; update them with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test '{timed_out_result.test_spec.__name__}' timed out before this test could be executed"
# If all the tests ran, the last test is the one that caused the timeout, as it didn't close the editor
if not timed_out_result:
results[test_spec_name] = Result.Timeout.create(result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log)
return results
# Runs a single test with the given specs, used by the collector to register the test
def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec : EditorTestBase):
# Runs a single test (one editor, one test) with the given specs
def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec : EditorSingleTest):
self._setup_editor_test(editor, workspace, editor_test_data)
extra_cmdline_args = []
if hasattr(test_spec, "extra_cmdline_args"):
@ -700,8 +749,8 @@ class EditorTestSuite():
test_name, test_result = next(iter(results.items()))
self._report_result(test_name, test_result)
# Runs a batch of tests in one single editor with the given spec list
def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
# Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
if not test_spec_list:
return
@ -710,8 +759,8 @@ class EditorTestSuite():
assert results is not None
editor_test_data.results.update(results)
# Runs multiple editors with one test on each editor
def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
# Runs multiple editors with one test on each editor (multiple editor, one test each)
def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
if not test_spec_list:
return
@ -747,8 +796,8 @@ class EditorTestSuite():
for result in results_per_thread:
editor_test_data.results.update(result)
# Runs multiple editors with a batch of tests for each editor
def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
# Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each)
def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list : List[EditorSharedTest], extra_cmdline_args=[]):
if not test_spec_list:
return
