@@ -59,6 +59,11 @@ class EditorTestBase(ABC):
     timeout = 180
     # Test file that this test will run
     test_module = None
+    # Attach debugger when running the test, useful for debugging crashes. This should never be True on production.
+    # It's also recommended to switch to EditorSingleTest for debugging in isolation
+    attach_debugger = False
+    # Wait until a debugger is attached at the startup of the test, this is another way of debugging.
+    wait_for_debugger = False

 # Test that will be run alone in one editor
 class EditorSingleTest(EditorTestBase):
@@ -117,8 +122,9 @@ class Result:
     class Pass(Base):

         @classmethod
-        def create(cls, output: str, editor_log: str):
+        def create(cls, test_spec: EditorTestBase, output: str, editor_log: str):
             r = cls()
+            r.test_spec = test_spec
             r.output = output
             r.editor_log = editor_log
             return r
@@ -135,8 +141,9 @@ class Result:
     class Fail(Base):

         @classmethod
-        def create(cls, output, editor_log: str):
+        def create(cls, test_spec: EditorTestBase, output, editor_log: str):
             r = cls()
+            r.test_spec = test_spec
             r.output = output
             r.editor_log = editor_log
             return r
@@ -157,9 +164,10 @@ class Result:
     class Crash(Base):

         @classmethod
-        def create(cls, output: str, ret_code: int, stacktrace: str, editor_log: str):
+        def create(cls, test_spec: EditorTestBase, output: str, ret_code: int, stacktrace: str, editor_log: str):
             r = cls()
             r.output = output
+            r.test_spec = test_spec
             r.ret_code = ret_code
             r.stacktrace = stacktrace
             r.editor_log = editor_log
@@ -187,9 +195,10 @@ class Result:
     class Timeout(Base):

         @classmethod
-        def create(cls, output: str, time_secs: float, editor_log: str):
+        def create(cls, test_spec: EditorTestBase, output: str, time_secs: float, editor_log: str):
             r = cls()
             r.output = output
+            r.test_spec = test_spec
             r.time_secs = time_secs
             r.editor_log = editor_log
             return r
@@ -210,9 +219,10 @@ class Result:
     class Unknown(Base):

         @classmethod
-        def create(cls, output: str, extra_info: str, editor_log: str):
+        def create(cls, test_spec: EditorTestBase, output: str, extra_info: str, editor_log: str):
             r = cls()
             r.output = output
+            r.test_spec = test_spec
             r.editor_log = editor_log
             r.extra_info = extra_info
             return r
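
To make the pattern in these `Result.*.create()` hunks concrete, here is a minimal, self-contained sketch (the `DemoTest`/`DemoResult` names are hypothetical, not the O3DE classes): each factory now records the test spec that produced the result, so reporting code can reach back to the spec later.

```python
# Simplified illustration of threading the test spec through a result factory.
class DemoTest:
    """Stand-in for an EditorTestBase subclass."""
    timeout = 180

class DemoResult:
    @classmethod
    def create(cls, test_spec, output, editor_log):
        r = cls()
        r.test_spec = test_spec   # the spec now travels with the result
        r.output = output
        r.editor_log = editor_log
        return r

result = DemoResult.create(DemoTest, "stdout text", "editor.log text")
print(result.test_spec.__name__, result.test_spec.timeout)  # DemoTest 180
```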
@@ -255,7 +265,7 @@ class EditorTestSuite():
     def editor_test_data(self, request):
         class TestData():
             def __init__(self):
-                self.results = {}
+                self.results = {}  # Dict of str(test_spec.__name__) -> Result
                 self.asset_processor = None

         test_data = TestData()
@@ -548,7 +558,7 @@ class EditorTestSuite():
         for test_spec in test_spec_list:
             name = editor_utils.get_module_filename(test_spec.test_module)
             if name not in found_jsons.keys():
-                results[test_spec.__name__] = Result.Unknown.create(output, "Couldn't find any test run information on stdout", editor_log_content)
+                results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content)
             else:
                 result = None
                 json_result = found_jsons[name]
@@ -564,9 +574,9 @@ class EditorTestSuite():
                 log_start = end
                 if json_result["success"]:
-                    result = Result.Pass.create(json_output, cur_log)
+                    result = Result.Pass.create(test_spec, json_output, cur_log)
                 else:
-                    result = Result.Fail.create(json_output, cur_log)
+                    result = Result.Fail.create(test_spec, json_output, cur_log)
                 results[test_spec.__name__] = result
         return results
@@ -587,8 +597,13 @@ class EditorTestSuite():
                           test_spec: EditorTestBase, cmdline_args: List[str] = []):
         test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
-        if test_spec.use_null_renderer or (test_spec.use_null_renderer is None and self.use_null_renderer):
+        test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
+        if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
             test_cmdline_args += ["-rhi=null"]
+        if test_spec.attach_debugger:
+            test_cmdline_args += ["--attach-debugger"]
+        if test_spec.wait_for_debugger:
+            test_cmdline_args += ["--wait-for-debugger"]

         # Cycle any old crash report in case it wasn't cycled properly
         editor_utils.cycle_crash_report(run_id, workspace)
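
The `getattr()` change above implements a tri-state override: a test can force the null renderer on, force it off, or leave the attribute unset or `None` to defer to the suite default. A small standalone sketch of that logic (illustrative names only, not the ly_test_tools API):

```python
# Tri-state per-test override falling back to a suite-wide default.
class Suite:
    use_null_renderer = True  # suite default

def wants_null_renderer(test_spec, suite):
    test_value = getattr(test_spec, "use_null_renderer", None)
    # True forces it on, False forces it off, missing/None defers to the suite.
    return bool(test_value) or (test_value is None and suite.use_null_renderer)

class DefersToSuite: pass                    # no attribute -> use suite default (True)
class ForcesOff: use_null_renderer = False   # explicit False -> no -rhi=null

print(wants_null_renderer(DefersToSuite, Suite))  # True
print(wants_null_renderer(ForcesOff, Suite))      # False
```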
@@ -610,18 +625,18 @@ class EditorTestSuite():
             editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)

             if return_code == 0:
-                test_result = Result.Pass.create(output, editor_log_content)
+                test_result = Result.Pass.create(test_spec, output, editor_log_content)
             else:
                 has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
                 if has_crashed:
-                    test_result = Result.Crash.create(output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
+                    test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
                     editor_utils.cycle_crash_report(run_id, workspace)
                 else:
-                    test_result = Result.Fail.create(output, editor_log_content)
+                    test_result = Result.Fail.create(test_spec, output, editor_log_content)
         except WaitTimeoutError:
             editor.kill()
             editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
-            test_result = Result.Timeout.create(output, test_spec.timeout, editor_log_content)
+            test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content)

         editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
         results = self._get_results_using_output([test_spec], output, editor_log_content)
@@ -629,13 +644,17 @@ class EditorTestSuite():
         return results

     # Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that editor
-    # instance. In case of failure this function also parses the editor output to find out what specific tests that failed
+    # instance. In case of failure this function also parses the editor output to find out what specific tests failed
     def _exec_editor_multitest(self, request, workspace, editor, run_id: int, log_name: str,
                                test_spec_list: List[EditorTestBase], cmdline_args=[]):
         test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
         if self.use_null_renderer:
             test_cmdline_args += ["-rhi=null"]
+        if any([t.attach_debugger for t in test_spec_list]):
+            test_cmdline_args += ["--attach-debugger"]
+        if any([t.wait_for_debugger for t in test_spec_list]):
+            test_cmdline_args += ["--wait-for-debugger"]

         # Cycle any old crash report in case it wasn't cycled properly
         editor_utils.cycle_crash_report(run_id, workspace)
@@ -661,35 +680,65 @@ class EditorTestSuite():
             if return_code == 0:
                 # No need to scrap the output, as all the tests have passed
                 for test_spec in test_spec_list:
-                    results[test_spec.__name__] = Result.Pass.create(output, editor_log_content)
+                    results[test_spec.__name__] = Result.Pass.create(test_spec, output, editor_log_content)
             else:
+                # Scrap the output to attempt to find out which tests failed.
+                # This function should always populate the result list, if it didn't find it, it will have "Unknown" type of result
                 results = self._get_results_using_output(test_spec_list, output, editor_log_content)
+                assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
+
+                # If the editor crashed, find out in which test it happened and update the results
                 has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
                 if has_crashed:
-                    crashed_test = None
-                    for key, result in results.items():
+                    crashed_result = None
+                    for test_spec_name, result in results.items():
                         if isinstance(result, Result.Unknown):
-                            if not crashed_test:
+                            if not crashed_result:
+                                # The first test with "Unknown" result (no data in output) is likely the one that crashed
                                 crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
                                 editor_utils.cycle_crash_report(run_id, workspace)
-                                results[key] = Result.Crash.create(output, return_code, crash_error, result.editor_log)
-                                crashed_test = results[key]
+                                results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
+                                crashed_result = result
                             else:
-                                results[key] = Result.Unknown.create(output, f"This test has unknown result, test '{crashed_test.__name__}' crashed before this test could be executed", result.editor_log)
+                                # If there are remaining "Unknown" results, these couldn't execute because of the crash, update with info about the offender
+                                results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed"
+                    # if all the tests ran, the one that has caused the crash is the last test
+                    if not crashed_result:
+                        editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+                        crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
+                        editor_utils.cycle_crash_report(run_id, workspace)
+                        results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, crash_error, crashed_result.editor_log)
         except WaitTimeoutError:
-            results = self._get_results_using_output(test_spec_list, output, editor_log_content)
             editor.kill()
-            for key, result in results.items():
-                if isinstance(result, Result.Unknown):
-                    results[key] = Result.Timeout.create(result.output, total_timeout, result.editor_log)
+            output = editor.get_output()
+            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+
+            # The editor timed out when running the tests, get the data from the output to find out which ones ran
+            results = self._get_results_using_output(test_spec_list, output, editor_log_content)
+            assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
+
+            # Similar logic here as crashes, the first test that has no result is the one that timed out
+            timed_out_result = None
+            for test_spec_name, result in results.items():
+                if isinstance(result, Result.Unknown):
+                    if not timed_out_result:
+                        # FIX-ME
+                        results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log)
+                        timed_out_result = result
+                    else:
+                        # If there are remaining "Unknown" results, these couldn't execute because of the timeout, update with info about the offender
+                        results[test_spec_name].extra_info = f"This test has unknown result, test '{timed_out_result.test_spec.__name__}' timed out before this test could be executed"
+            # if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor
+            if not timed_out_result:
+                results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log)
         return results

-    # Runs a single test with the given specs, used by the collector to register the test
-    def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec: EditorTestBase):
+    # Runs a single test (one editor, one test) with the given specs
+    def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec: EditorSingleTest):
         self._setup_editor_test(editor, workspace, editor_test_data)
         extra_cmdline_args = []
         if hasattr(test_spec, "extra_cmdline_args"):
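
The crash-attribution loop above can be hard to read in diff form; this toy version (plain strings instead of the Result types, hypothetical names) shows the intent: the first test with no recorded output is blamed for the crash, and later result-less tests are annotated as blocked by it.

```python
# Toy model of the attribution pass in _exec_editor_multitest (not the real Result classes).
def attribute_crash(results):
    crashed = None
    for name, state in results.items():
        if state == "Unknown":           # no data found in the editor output for this test
            if crashed is None:
                results[name] = "Crash"  # first Unknown is the likely crasher
                crashed = name
            else:
                results[name] = f"Unknown (blocked by crash in '{crashed}')"
    return results

print(attribute_crash({"test_a": "Pass", "test_b": "Unknown", "test_c": "Unknown"}))
# {'test_a': 'Pass', 'test_b': 'Crash', 'test_c': "Unknown (blocked by crash in 'test_b')"}
```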
@@ -700,8 +749,8 @@ class EditorTestSuite():
         test_name, test_result = next(iter(results.items()))
         self._report_result(test_name, test_result)

-    # Runs a batch of tests in one single editor with the given spec list
-    def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorTestBase], extra_cmdline_args=[]):
+    # Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
+    def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
         if not test_spec_list:
             return
@@ -710,8 +759,8 @@ class EditorTestSuite():
         assert results is not None
         editor_test_data.results.update(results)

-    # Runs multiple editors with one test on each editor
-    def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorTestBase], extra_cmdline_args=[]):
+    # Runs multiple editors with one test on each editor (multiple editors, one test each)
+    def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
         if not test_spec_list:
             return
@@ -747,8 +796,8 @@ class EditorTestSuite():
         for result in results_per_thread:
             editor_test_data.results.update(result)

-    # Runs multiple editors with a batch of tests for each editor
-    def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorTestBase], extra_cmdline_args=[]):
+    # Runs multiple editors with a batch of tests for each editor (multiple editors, multiple tests each)
+    def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
         if not test_spec_list:
             return
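
For orientation, the four run modes named in the updated comments (one editor/one test, one editor/many tests, many editors/one test each, many editors/many tests each) come down to how the test list is chunked across editor instances. A toy split, purely illustrative and not the ly_test_tools scheduler:

```python
from math import ceil

def split_into_batches(test_names, parallel_editors):
    """Chunk the test list so each editor instance gets one batch."""
    batch_size = ceil(len(test_names) / parallel_editors)
    return [test_names[i:i + batch_size] for i in range(0, len(test_names), batch_size)]

tests = ["test_a", "test_b", "test_c", "test_d", "test_e"]
print(split_into_batches(tests, 1))  # one editor, multiple tests (batched)
print(split_into_batches(tests, 5))  # multiple editors, one test each (parallel)
print(split_into_batches(tests, 2))  # multiple editors, multiple tests each (parallel batched)
```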