@@ -25,16 +25,16 @@ import re
import ly_test_tools.environment.file_system as file_system
import ly_test_tools.environment.waiter as waiter
import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.o3de.editor_test_utils as editor_utils
from ly_test_tools.o3de.asset_processor import AssetProcessor
from ly_test_tools.launchers.exceptions import WaitTimeoutError
from . import editor_test_utils as editor_utils
# This file provides editor testing functionality to easily write automated editor tests for O3DE.
# To use these utilities, subclass your test suite from EditorTestSuite; this allows an easy way of specifying
# python test scripts that the editor will run without needing to write any boilerplate code.
# It supports out-of-the-box parallelization (running multiple editor instances at once), batching (running multiple tests in the same editor instance) and
# crash detection.
# To use these utilities, subclass your test suite from EditorTestSuite; this allows an easy way of
# specifying python test scripts that the editor will run without needing to write any boilerplate code.
# It supports out-of-the-box parallelization (running multiple editor instances at once), batching (running multiple tests
# in the same editor instance) and crash detection.
# Usage example:
#    class MyTestSuite(EditorTestSuite):
#
@@ -48,7 +48,8 @@ from . import editor_test_utils as editor_utils
#        from . import yet_another_script_to_be_run_by_editor as test_module
#
#
# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, parallelizing/batching as required
# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests,
# parallelizing/batching as required
# This file contains no tests, but with this we make sure it won't be picked up by the runner since the file ends with _test
__test__ = False
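# Illustrative sketch (not part of the original comments; module names are hypothetical): a suite can mix the test
# types defined in this file, e.g. an isolated test alongside a batched test:
#    class MyMixedTestSuite(EditorTestSuite):
#        class MyIsolatedTest(EditorSingleTest):      # runs alone in its own editor instance
#            from . import my_isolated_script as test_module
#        class MyBatchedTest(EditorBatchedTest):      # batched with other tests in a single editor instance
#            from . import my_batched_script as test_module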
@@ -109,12 +110,22 @@ class EditorBatchedTest(EditorSharedTest):
class Result:
    class Base:
        def get_output_str(self):
            # type: () -> str
            """
            Checks if the output attribute exists and returns it.
            :return: Either the output string or a no output message
            """
            if hasattr(self, "output") and self.output is not None:
                return self.output
            else:
                return "-- No output --"

        def get_editor_log_str(self):
            # type: () -> str
            """
            Checks if the editor_log attribute exists and returns it.
            :return: Either the editor_log string or a no output message
            """
            if hasattr(self, "editor_log") and self.editor_log is not None:
                return self.editor_log
            else:
@@ -122,7 +133,15 @@ class Result:
    class Pass(Base):
        @classmethod
        def create(cls, test_spec: EditorTestBase, output: str, editor_log: str):
        def create(cls, test_spec, output, editor_log):
            # type: (EditorTestBase, str, str) -> Pass
            """
            Creates a Pass object with a given test spec, output string, and editor log string.
            :test_spec: The type of EditorTestBase
            :output: The test output
            :editor_log: The editor log's output
            :return: The Pass object
            """
            r = cls()
            r.test_spec = test_spec
            r.output = output
@@ -141,7 +160,15 @@ class Result:
    class Fail(Base):
        @classmethod
        def create(cls, test_spec: EditorTestBase, output, editor_log: str):
        def create(cls, test_spec, output, editor_log):
            # type: (EditorTestBase, str, str) -> Fail
            """
            Creates a Fail object with a given test spec, output string, and editor log string.
            :test_spec: The type of EditorTestBase
            :output: The test output
            :editor_log: The editor log's output
            :return: The Fail object
            """
            r = cls()
            r.test_spec = test_spec
            r.output = output
@@ -164,7 +191,18 @@ class Result:
    class Crash(Base):
        @classmethod
        def create(cls, test_spec: EditorTestBase, output: str, ret_code: int, stacktrace: str, editor_log: str):
        def create(cls, test_spec, output, ret_code, stacktrace, editor_log):
            # type: (EditorTestBase, str, int, str, str) -> Crash
            """
            Creates a Crash object with a given test spec, output string, and editor log string. This also includes the
            return code and stacktrace.
            :test_spec: The type of EditorTestBase
            :output: The test output
            :ret_code: The test's return code
            :stacktrace: The test's stacktrace if available
            :editor_log: The editor log's output
            :return: The Crash object
            """
            r = cls()
            r.output = output
            r.test_spec = test_spec
@@ -174,7 +212,7 @@ class Result:
            return r

        def __str__(self):
            stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
            stacktrace_str = "-- No stacktrace data found --\n" if not self.stacktrace else self.stacktrace
            output = (
                f"Test CRASHED, return code {hex(self.ret_code)}\n"
                f"---------------\n"
@@ -190,12 +228,21 @@ class Result:
                f"--------------\n"
                f"{self.get_editor_log_str()}\n"
            )
            crash_str = "-- No crash information found --"
            return output

    class Timeout(Base):
        @classmethod
        def create(cls, test_spec: EditorTestBase, output: str, time_secs: float, editor_log: str):
        def create(cls, test_spec, output, time_secs, editor_log):
            # type: (EditorTestBase, str, float, str) -> Timeout
            """
            Creates a Timeout object with a given test spec, output string, and editor log string. The timeout time
            should be provided in seconds.
            :test_spec: The type of EditorTestBase
            :output: The test output
            :time_secs: The timeout duration in seconds
            :editor_log: The editor log's output
            :return: The Timeout object
            """
            r = cls()
            r.output = output
            r.test_spec = test_spec
@@ -219,14 +266,23 @@ class Result:
    class Unknown(Base):
        @classmethod
        def create(cls, test_spec: EditorTestBase, output: str, extra_info: str, editor_log: str):
        def create(cls, test_spec, output, extra_info, editor_log):
            # type: (EditorTestBase, str, str, str) -> Unknown
            """
            Creates an Unknown test result object if something goes wrong.
            :test_spec: The type of EditorTestBase
            :output: The test output
            :extra_info: Any extra information as a string
            :editor_log: The editor log's output
            :return: The Unknown object
            """
            r = cls()
            r.output = output
            r.test_spec = test_spec
            r.editor_log = editor_log
            r.extra_info = extra_info
            return r

        def __str__(self):
            output = (
                f"Unknown test result, possible cause: {self.extra_info}\n"
@@ -263,6 +319,18 @@ class EditorTestSuite():
    @pytest.fixture(scope="class")
    def editor_test_data(self, request):
        # type: (request) -> TestData
        """
        Yields a TestData object to capture the test results and an AssetProcessor object.
        :request: The pytest request
        :yield: The TestData object
        """
        self._editor_test_data(request)

    def _editor_test_data(self, request):
        """
        A wrapper function for unit testing to call directly.
        """
        class TestData():
            def __init__(self):
                self.results = {}  # Dict of str(test_spec.__name__) -> Result
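                # Illustrative example (hypothetical test names): after a run this dict might hold
                #   {"MyFirstTest": <Result.Pass instance>, "MySecondTest": <Result.Fail instance>}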
@@ -445,6 +513,10 @@ class EditorTestSuite():
    @classmethod
    def pytest_custom_modify_items(cls, session, items, config):
        # type: () -> None
        """
        Adds the runner functions and filters the tests that will be run; a runner is only added if it has any
        selected tests.
        """
        # Add the runner functions here and filter the tests that will be run.
        # The runners will be added if they have any selected tests
        new_items = []
@@ -463,23 +535,53 @@ class EditorTestSuite():
    @classmethod
    def get_single_tests(cls):
        # type: () -> list
        """
        Grabs all of the EditorSingleTest subclassed tests from the EditorTestSuite class
        Usage example:
            class MyTestSuite(EditorTestSuite):
                class MyFirstTest(EditorSingleTest):
                    from . import script_to_be_run_by_editor as test_module
        :return: The list of single tests
        """
        single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
        return single_tests

    @classmethod
    def get_shared_tests(cls):
        # type: () -> list
        """
        Grabs all of the EditorSharedTests from the EditorTestSuite
        Usage example:
            class MyTestSuite(EditorTestSuite):
                class MyFirstTest(EditorSharedTest):
                    from . import script_to_be_run_by_editor as test_module
        :return: The list of shared tests
        """
        shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
        return shared_tests

    @classmethod
    def get_session_shared_tests(cls, session):
        # type: (Session) -> list[EditorTestBase]
        """
        Filters and returns all of the shared tests in a given session.
        :session: The test session
        :return: The list of tests
        """
        shared_tests = cls.get_shared_tests()
        return cls.filter_session_shared_tests(session, shared_tests)

    @staticmethod
    def filter_session_shared_tests(session_items, shared_tests):
        # Retrieve the test sub-set that was collected
        # this can be less than the original set if tests were overridden via the -k argument or similar
        # type: (list, list) -> list[EditorTestBase]
        """
        Retrieves the test sub-set that was collected; this can be less than the original set if tests were overridden
        via the -k argument or similar.
        :session_items: The tests in a session to run
        :shared_tests: All of the shared tests
        :return: The list of filtered tests
        """
        def will_run(item):
            try:
                skipping_pytest_runtest_setup(item)
@@ -488,13 +590,20 @@ class EditorTestSuite():
                return False

        session_items_by_name = {item.originalname: item for item in session_items}
        selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and will_run(session_items_by_name[test.__name__])]
        selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and
                                 will_run(session_items_by_name[test.__name__])]
        return selected_shared_tests

    @staticmethod
    def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False):
        # Retrieve the test sub-set that was collected
        # this can be less than the original set if tests were overridden via the -k argument or similar
        # type: (list, bool, bool) -> list
        """
        Filters and returns all tests based on whether they are batchable and/or parallelizable.
        :shared_tests: All shared tests
        :is_batchable: Filter to batchable tests
        :is_parallelizable: Filter to parallelizable tests
        :return: The list of filtered tests
        """
        return [
            t for t in shared_tests if (
                getattr(t, "is_batchable", None) is is_batchable
@@ -504,9 +613,15 @@ class EditorTestSuite():
        ]

    ### Utils ###
    # Prepares the asset processor for the test
    def _prepare_asset_processor(self, workspace, editor_test_data):
        # type: (AbstractWorkspace, TestData) -> None
        """
        Prepares the asset processor for the test depending on whether or not the process is open and if the current
        test owns it.
        :workspace: The workspace object in case an AssetProcessor object needs to be created
        :editor_test_data: The test data from calling editor_test_data()
        :return: None
        """
        try:
            # Start-up an asset processor if we are not running one
            # If another AP process exists, don't kill it, as we don't own it
@@ -525,14 +640,29 @@ class EditorTestSuite():
            raise ex

    def _setup_editor_test(self, editor, workspace, editor_test_data):
        # type: (Editor, AbstractWorkspace, TestData) -> None
        """
        Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring the
        editor settings.
        :editor: The launcher Editor object
        :workspace: The test Workspace object
        :editor_test_data: The TestData from calling editor_test_data()
        :return: None
        """
        self._prepare_asset_processor(workspace, editor_test_data)
        editor_utils.kill_all_ly_processes(include_asset_processor=False)
        editor.configure_settings()

    # Utility function for parsing the output information from the editor.
    # It deserializes the JSON content printed in the output for every test and returns that information.
    @staticmethod
    def _get_results_using_output(test_spec_list, output, editor_log_content):
        # type: (list, str, str) -> dict[str, Result]
        """
        Utility function for parsing the output information from the editor. It deserializes the JSON content printed
        in the output for every test and returns that information.
        :test_spec_list: The list of EditorTests
        :output: The editor output from Editor.get_output()
        :editor_log_content: The contents of the editor log as a string
        :return: A dict of the tests and their respective Result objects
        """
        results = {}
        pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
        out_matches = pattern.finditer(output)
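        # For reference, the editor output is expected to contain blobs such as the following (illustrative values;
        # payload keys beyond "name" and "success", which are read below, are assumptions):
        #   JSON_START({"name": "my_test_case", "success": true, "output": "..."})JSON_END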
@@ -541,7 +671,8 @@ class EditorTestSuite():
            try:
                elem = json.loads(m.groups()[0])
                found_jsons[elem["name"]] = elem
            except Exception:
            except Exception as e:
                raise e
                continue  # Avoid failing if the output data is corrupt
        # Try to find the element in the log, this is used for cutting the log contents later
@@ -558,7 +689,9 @@ class EditorTestSuite():
        for test_spec in test_spec_list:
            name = editor_utils.get_module_filename(test_spec.test_module)
            if name not in found_jsons.keys():
                results[test_spec.__name__] = Result.Unknown.create(test_spec, output, "Couldn't find any test run information on stdout", editor_log_content)
                results[test_spec.__name__] = Result.Unknown.create(test_spec, output,
                                                                    "Couldn't find any test run information on stdout",
                                                                    editor_log_content)
            else:
                result = None
                json_result = found_jsons[name]
@@ -573,7 +706,7 @@ class EditorTestSuite():
                cur_log = editor_log_content[log_start:end]
                log_start = end

                if json_result["success"]:
                if "success" in json_result.keys():
                    result = Result.Pass.create(test_spec, json_output, cur_log)
                else:
                    result = Result.Fail.create(test_spec, json_output, cur_log)
@@ -581,9 +714,15 @@ class EditorTestSuite():
        return results

    # Fails the test if the test result is not a PASS, specifying the information
    @staticmethod
    def _report_result(name: str, result: Result.Base):
    def _report_result(name, result):
        # type: (str, Result) -> None
        """
        Fails the test if the test result is not a PASS, specifying the information
        :name: Name of the test
        :result: The Result object which denotes if the test passed or not
        :return: None
        """
        if isinstance(result, Result.Pass):
            output_str = f"Test {name}:\n{str(result)}"
            print(output_str)
@@ -592,10 +731,19 @@ class EditorTestSuite():
            pytest.fail(error_str)

    ### Running tests ###
    # Starts the editor with the given test and returns a result dict with a single element specifying the result
    def _exec_editor_test(self, request, workspace, editor, run_id: int, log_name: str,
                          test_spec: EditorTestBase, cmdline_args: List[str] = []):
    def _exec_editor_test(self, request, workspace, editor, run_id, log_name, test_spec, cmdline_args=[]):
        # type: (Request, AbstractWorkspace, Editor, int, str, EditorTestBase, list[str]) -> dict[str, Result]
        """
        Starts the editor with the given test and returns a result dict with a single element specifying the result
        :request: The pytest request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :run_id: The unique run id
        :log_name: The name of the editor log to retrieve
        :test_spec: The type of EditorTestBase
        :cmdline_args: Any additional command line args
        :return: A dictionary of Result objects
        """
        test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
        test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
        if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
@@ -629,12 +777,14 @@ class EditorTestSuite():
            else:
                has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
                if has_crashed:
                    test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
                    test_result = Result.Crash.create(test_spec, output, return_code, editor_utils.retrieve_crash_output
                                                      (run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
                    editor_utils.cycle_crash_report(run_id, workspace)
                else:
                    test_result = Result.Fail.create(test_spec, output, editor_log_content)
        except WaitTimeoutError:
            editor.kill()
            output = editor.get_output()
            editor.kill()
            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
            test_result = Result.Timeout.create(test_spec, output, test_spec.timeout, editor_log_content)
@@ -643,11 +793,21 @@ class EditorTestSuite():
        results[test_spec.__name__] = test_result
        return results

    # Starts an editor executable with a list of tests and returns a dict of the result of every test run within that editor
    # instance. In case of failure this function also parses the editor output to find out which specific tests failed
    def _exec_editor_multitest(self, request, workspace, editor, run_id: int, log_name: str,
                               test_spec_list: List[EditorTestBase], cmdline_args=[]):
    def _exec_editor_multitest(self, request, workspace, editor, run_id, log_name, test_spec_list, cmdline_args=[]):
        # type: (Request, AbstractWorkspace, Editor, int, str, list[EditorTestBase], list[str]) -> dict[str, Result]
        """
        Starts an editor executable with a list of tests and returns a dict of the result of every test run within that
        editor instance. In case of failure this function also parses the editor output to find out which specific tests
        failed.
        :request: The pytest request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :run_id: The unique run id
        :log_name: The name of the editor log to retrieve
        :test_spec_list: A list of EditorTestBase tests to run
        :cmdline_args: Any additional command line args
        :return: A dict of Result objects
        """
        test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
        if self.use_null_renderer:
            test_cmdline_args += ["-rhi=null"]
@@ -660,7 +820,8 @@ class EditorTestSuite():
        editor_utils.cycle_crash_report(run_id, workspace)

        results = {}
        test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list)
        test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for
                                      test_spec in test_spec_list)
        cmdline = [
            "--runpythontest", test_filenames_str,
            "-logfile", f"@log@/{log_name}",
@@ -685,7 +846,8 @@ class EditorTestSuite():
                # Scrape the output to attempt to find out which tests failed.
                # This function should always populate the result list; if a test isn't found, it will have an "Unknown" type of result
                results = self._get_results_using_output(test_spec_list, output, editor_log_content)
                assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
                assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results " \
                                                            "don't match the tests ran"

                # If the editor crashed, find out in which test it happened and update the results
                has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
@@ -695,50 +857,67 @@ class EditorTestSuite():
                        if isinstance(result, Result.Unknown):
                            if not crashed_result:
                                # The first test with "Unknown" result (no data in output) is likely the one that crashed
                                crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
                                crash_error = editor_utils.retrieve_crash_output(run_id, workspace,
                                                                                 self._TIMEOUT_CRASH_LOG)
                                editor_utils.cycle_crash_report(run_id, workspace)
                                results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code, crash_error, result.editor_log)
                                results[test_spec_name] = Result.Crash.create(result.test_spec, output, return_code,
                                                                              crash_error, result.editor_log)
                                crashed_result = result
                            else:
                                # If there are remaining "Unknown" results, these couldn't execute because of the crash, update with info about the offender
                                results[test_spec_name].extra_info = f"This test has unknown result, test '{crashed_result.test_spec.__name__}' crashed before this test could be executed"
                                # If there are remaining "Unknown" results, these couldn't execute because of the crash,
                                # update with info about the offender
                                results[test_spec_name].extra_info = f"This test has unknown result, test " \
                                                                     f"'{crashed_result.test_spec.__name__}' crashed " \
                                                                     f"before this test could be executed"

                    # if all the tests ran, the one that has caused the crash is the last test
                    if not crashed_result:
                        crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
                        editor_utils.cycle_crash_report(run_id, workspace)
                        results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code, crash_error, crashed_result.editor_log)
                        results[test_spec_name] = Result.Crash.create(crashed_result.test_spec, output, return_code,
                                                                      crash_error, crashed_result.editor_log)
        except WaitTimeoutError:
            editor.kill()
            output = editor.get_output()
            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)

            # The editor timed out when running the tests, get the data from the output to find out which ones ran
            results = self._get_results_using_output(test_spec_list, output, editor_log_content)
            assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
            assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results " \
                                                        "don't match the tests ran"

            # Similar logic here as crashes, the first test that has no result is the one that timed out
            timed_out_result = None
            for test_spec_name, result in results.items():
                if isinstance(result, Result.Unknown):
                    if not timed_out_result:
                        results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output, self.timeout_editor_shared_test, result.editor_log)
                        results[test_spec_name] = Result.Timeout.create(result.test_spec, result.output,
                                                                        self.timeout_editor_shared_test,
                                                                        result.editor_log)
                        timed_out_result = result
                    else:
                        # If there are remaining "Unknown" results, these couldn't execute because of the timeout, update with info about the offender
                        results[test_spec_name].extra_info = f"This test has unknown result, test '{timed_out_result.test_spec.__name__}' timed out before this test could be executed"
                        # If there are remaining "Unknown" results, these couldn't execute because of the timeout,
                        # update with info about the offender
                        results[test_spec_name].extra_info = f"This test has unknown result, test " \
                                                             f"'{timed_out_result.test_spec.__name__}' timed out " \
                                                             f"before this test could be executed"
            # if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor
            if not timed_out_result:
                results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec, results[test_spec_name].output, self.timeout_editor_shared_test, result.editor_log)
                results[test_spec_name] = Result.Timeout.create(timed_out_result.test_spec,
                                                                results[test_spec_name].output,
                                                                self.timeout_editor_shared_test, result.editor_log)
        return results
    # Runs a single test (one editor, one test) with the given specs
    def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec: EditorSingleTest):
    def _run_single_test(self, request, workspace, editor, editor_test_data, test_spec):
        # type: (Request, AbstractWorkspace, Editor, TestData, EditorSingleTest) -> None
        """
        Runs a single test (one editor, one test) with the given specs
        :request: The Pytest Request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :editor_test_data: The TestData from calling editor_test_data()
        :test_spec: The test class that should be a subclass of EditorSingleTest
        :return: None
        """
        self._setup_editor_test(editor, workspace, editor_test_data)
        extra_cmdline_args = []
        if hasattr(test_spec, "extra_cmdline_args"):
@@ -749,18 +928,39 @@ class EditorTestSuite():
        test_name, test_result = next(iter(results.items()))
        self._report_result(test_name, test_result)

    # Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
    def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
    def _run_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list, extra_cmdline_args=[]):
        # type: (Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None
        """
        Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
        :request: The Pytest Request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :editor_test_data: The TestData from calling editor_test_data()
        :test_spec_list: A list of EditorSharedTest tests to run
        :extra_cmdline_args: Any extra command line args in a list
        :return: None
        """
        if not test_spec_list:
            return

        self._setup_editor_test(editor, workspace, editor_test_data)
        results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args)
        results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list,
                                              extra_cmdline_args)
        assert results is not None
        editor_test_data.results.update(results)
    # Runs multiple editors with one test on each editor (multiple editors, one test each)
    def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
    def _run_parallel_tests(self, request, workspace, editor, editor_test_data, test_spec_list, extra_cmdline_args=[]):
        # type: (Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None
        """
        Runs multiple editors with one test on each editor (multiple editors, one test each)
        :request: The Pytest Request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :editor_test_data: The TestData from calling editor_test_data()
        :test_spec_list: A list of EditorSharedTest tests to run
        :extra_cmdline_args: Any extra command line args in a list
        :return: None
        """
        if not test_spec_list:
            return
@@ -778,7 +978,8 @@ class EditorTestSuite():
        for i in range(total_threads):
            def make_func(test_spec, index, my_editor):
                def run(request, workspace, extra_cmdline_args):
                    results = self._exec_editor_test(request, workspace, my_editor, index + 1, f"editor_test.log", test_spec, extra_cmdline_args)
                    results = self._exec_editor_test(request, workspace, my_editor, index + 1, f"editor_test.log",
                                                     test_spec, extra_cmdline_args)
                    assert results is not None
                    results_per_thread[index] = results
                return run
@@ -796,8 +997,19 @@ class EditorTestSuite():
        for result in results_per_thread:
            editor_test_data.results.update(result)

    # Runs multiple editors with a batch of tests for each editor (multiple editors, multiple tests each)
    def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list: List[EditorSharedTest], extra_cmdline_args=[]):
    def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_data, test_spec_list,
                                    extra_cmdline_args=[]):
        # type: (Request, AbstractWorkspace, Editor, TestData, list[EditorSharedTest], list[str]) -> None
        """
        Runs multiple editors with a batch of tests for each editor (multiple editors, multiple tests each)
        :request: The Pytest Request
        :workspace: The LyTestTools Workspace object
        :editor: The LyTestTools Editor object
        :editor_test_data: The TestData from calling editor_test_data()
        :test_spec_list: A list of EditorSharedTest tests to run
        :extra_cmdline_args: Any extra command line args in a list
        :return: None
        """
        if not test_spec_list:
            return
@@ -813,7 +1025,9 @@ class EditorTestSuite():
                def run(request, workspace, extra_cmdline_args):
                    results = None
                    if len(test_spec_list_for_editor) > 0:
                        results = self._exec_editor_multitest(request, workspace, my_editor, index + 1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args)
                        results = self._exec_editor_multitest(request, workspace, my_editor, index + 1,
                                                              f"editor_test.log", test_spec_list_for_editor,
                                                              extra_cmdline_args)
                        assert results is not None
                    else:
                        results = {}
@@ -833,8 +1047,13 @@ class EditorTestSuite():
        for result in results_per_thread:
            editor_test_data.results.update(result)

    # Retrieves the number of parallel editors to use, respecting the cmdline preference override
    def _get_number_parallel_editors(self, request):
        # type: (Request) -> int
        """
        Retrieves the number of parallel editors to use, respecting the cmdline preference override
        :request: The Pytest Request
        :return: The number of parallel editors to use
        """
        parallel_editors_value = request.config.getoption("--editors-parallel", None)
        if parallel_editors_value:
            return int(parallel_editors_value)
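        # Illustrative usage (assumed invocation): running "pytest ... --editors-parallel 8" makes this return 8;
        # otherwise, presumably, the suite's default parallel editor count is used.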