Fix broken unit tests
the-other-james committed Sep 21, 2023
1 parent 6381320 commit 9fcecbc
Showing 6 changed files with 48 additions and 91 deletions.
73 changes: 10 additions & 63 deletions test/onair/data_handling/parsers/test_csv_parser.py
@@ -20,76 +20,23 @@ def setup_teardown():
yield 'setup_teardown'

# process_data_per_data_file tests
def test_CSV_process_data_file_sets_instance_all_headers_item_data_file_to_returned_labels_item_data_file_when_returned_data_is_empty(mocker, setup_teardown):
def test_CSV_process_data_file_sets_sim_data_to_parse_csv_data_return_and_frame_index_to_zero(mocker, setup_teardown):
# Arrange
arg_data_file = MagicMock()

fake_label_data_file_item = MagicMock()
fake_labels = {arg_data_file:fake_label_data_file_item}
fake_data = []
forced_return_parse_csv_data = [fake_labels, fake_data]
forced_return_parse_csv_data = MagicMock()

mocker.patch.object(pytest.cut, "parse_csv_data", return_value=forced_return_parse_csv_data)

# OnAirDataSource initialize normally sets all headers, so unit test must set this instead
pytest.cut.all_headers = {}

# Act
pytest.cut.process_data_file(arg_data_file)

# Assert
assert pytest.cut.all_headers[arg_data_file] == fake_label_data_file_item

def test_CSV_process_data_file_sets_instance_all_headers_item_data_file_to_returned_labels_item_data_file_and_when_data_key_is_not_in_sim_data_sets_item_key_to_dict_and_key_item_data_file_item_to_data_key_item_data_file_item(mocker, setup_teardown):
# Arrange
arg_data_file = MagicMock()

fake_label_data_file_item = MagicMock()
fake_labels = {arg_data_file:fake_label_data_file_item}
fake_key_data_file_item = MagicMock()
fake_key = MagicMock()
fake_data = {fake_key:{arg_data_file:fake_key_data_file_item}}
forced_return_parse_csv_data = [fake_labels, fake_data]

mocker.patch.object(pytest.cut, "parse_csv_data", return_value=forced_return_parse_csv_data)

# OnAirDataSource initialize normally sets all headers and sim_data, so unit test must set this instead
pytest.cut.all_headers = {}
pytest.cut.sim_data = {}

# Act
pytest.cut.process_data_file(arg_data_file)

# Assert
assert pytest.cut.all_headers[arg_data_file] == fake_label_data_file_item
assert pytest.cut.sim_data[fake_key][arg_data_file] == fake_key_data_file_item

def test_CSV_process_data_file_sets_instance_all_headers_item_data_file_to_returned_labels_item_data_file_and_when_data_key_is_already_in_sim_data_sets_item_key_to_dict_and_key_item_data_file_item_to_data_key_item_data_file_item(mocker, setup_teardown):
# Arrange
arg_data_file = MagicMock()

fake_label_data_file_item = MagicMock()
fake_labels = {arg_data_file:fake_label_data_file_item}
fake_key_data_file_item = MagicMock()
fake_key = MagicMock()
fake_data = {fake_key:{arg_data_file:fake_key_data_file_item}}
forced_return_parse_csv_data = [fake_labels, fake_data]

mocker.patch.object(pytest.cut, "parse_csv_data", return_value=forced_return_parse_csv_data)

# OnAirDataSource initialize normally sets all headers and sim_data, so unit test must set this instead
pytest.cut.all_headers = {}
pytest.cut.sim_data = {fake_key:{}}

# Act
pytest.cut.process_data_file(arg_data_file)

# Assert
assert pytest.cut.all_headers[arg_data_file] == fake_label_data_file_item
assert pytest.cut.sim_data[fake_key][arg_data_file] == fake_key_data_file_item
assert pytest.cut.sim_data == forced_return_parse_csv_data
assert pytest.cut.frame_index == 0

# CSV parse_csv_data tests
def test_CSV_parse_csv_data_returns_tuple_of_empty_lists_when_parsed_dataset_from_given_dataFile_call_to_iterrows_returns_empty(mocker, setup_teardown):
def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_from_given_dataFile_call_to_iterrows_returns_empty(mocker, setup_teardown):
# Arrange
arg_dataFile = MagicMock()

@@ -104,7 +51,7 @@ def test_CSV_parse_csv_data_returns_tuple_of_empty_lists_fro
fake_second_data_set.columns = MagicMock()
fake_second_data_set.columns.values = set()

expected_result = ([], [])
expected_result = []

mocker.patch(csv_parser.__name__ + '.pd.read_csv', return_value=fake_initial_data_set)
mocker.patch.object(fake_columns_str, 'contains', return_value=forced_return_contains)
@@ -124,7 +71,7 @@ def test_CSV_parse_csv_data_returns_tuple_of_empty_lists_fro
assert fake_initial_data_set.loc.__getitem__.call_args_list[0].args[0][1] == ~forced_return_contains
assert result == expected_result

def test_CSV_parse_csv_data_returns_tuple_of_empty_list_of_headers_and_list_of_row_values_when_parsed_dataset_from_given_dataFile_call_to_iterrows_returns_iterator(mocker, setup_teardown):
def test_CSV_parse_csv_data_returns_list_of_row_values_when_parsed_dataset_from_given_dataFile_call_to_iterrows_returns_iterator(mocker, setup_teardown):
# Arrange
arg_dataFile = MagicMock()

@@ -150,7 +97,7 @@ def test_CSV_parse_csv_data_returns_tuple_of_empty_list_of_headers_and_list_of_r
expected_result_list.append(fake_row_values)
forced_return_iterrows = iter(fake_iterrows)

expected_result = ([], expected_result_list)
expected_result = expected_result_list

mocker.patch(csv_parser.__name__ + '.pd.read_csv', return_value=fake_initial_data_set)
mocker.patch.object(fake_columns_str, 'contains', return_value=forced_return_contains)
Expand All @@ -168,7 +115,7 @@ def test_CSV_parse_csv_data_returns_tuple_of_empty_list_of_headers_and_list_of_r
assert fake_columns_str.contains.call_args_list[0].args == ('^Unnamed', )
assert result == expected_result

def test_CSV_parse_csv_data_returns_tuple_of_list_of_headers_and_list_of_data_frames_call_to_iterrows_returns_iterator_and_column_names_exist(mocker, setup_teardown):
def test_CSV_parse_csv_data_returns_list_of_data_frames_call_to_iterrows_returns_iterator_and_column_names_exist(mocker, setup_teardown):
# Arrange
arg_dataFile = MagicMock()

@@ -196,7 +143,7 @@ def test_CSV_parse_csv_data_returns_tuple_of_list_of_headers_and_list_of_data_fr
expected_result_dict.append(fake_row_values)
forced_return_iterrows = iter(fake_iterrows)

expected_result = (fake_second_data_set.columns.values, expected_result_dict)
expected_result = expected_result_dict

mocker.patch(csv_parser.__name__ + '.pd.read_csv', return_value=fake_initial_data_set)
mocker.patch.object(fake_columns_str, 'contains', return_value=forced_return_contains)
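
Read as a whole, these csv_parser test changes describe a simpler contract for the class under test: process_data_file stores the return value of parse_csv_data in sim_data and resets frame_index to zero, and parse_csv_data now returns only the list of per-row values rather than a (headers, data) tuple. Below is a minimal sketch of production code that would satisfy these tests; the class name CSV, the import path, the pd.read_csv options, and the row-flattening call are assumptions, not the repository's actual implementation.

import pandas as pd

# Assumed import path; the diff only shows the test module's location.
from onair.data_handling.parsers.on_air_data_source import OnAirDataSource


class CSV(OnAirDataSource):
    def process_data_file(self, data_file):
        # Tests assert sim_data holds parse_csv_data's return and frame_index starts at 0.
        self.sim_data = self.parse_csv_data(data_file)
        self.frame_index = 0

    def parse_csv_data(self, dataFile):
        initial_data_set = pd.read_csv(dataFile)
        # Tests assert the auto-generated '^Unnamed' columns are dropped via .loc and ~contains.
        parsed_data_set = initial_data_set.loc[:, ~initial_data_set.columns.str.contains('^Unnamed')]
        all_data = []
        for index, row in parsed_data_set.iterrows():
            # Tests only check that each row's values are appended; list(row.values) is an assumption.
            all_data.append(list(row.values))
        return all_data
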
21 changes: 20 additions & 1 deletion test/onair/data_handling/parsers/test_on_air_data_source.py
@@ -22,6 +22,15 @@ def process_data_file(self, data_file):
def parse_meta_data_file(self, configFile, ss_breakdown):
super().parse_meta_data_file(configFile, ss_breakdown)

def get_next(self):
return super().get_next()

def has_more(self):
return super().has_more()

def has_data(self):
return super().has_data()

class IncompleteOnAirDataSource(OnAirDataSource):
pass

@@ -32,6 +41,15 @@ def process_data_file(self, data_file):
def parse_meta_data_file(self, configFile, ss_breakdown):
return super().parse_meta_data_file(configFile, ss_breakdown)

def get_next(self):
return super().get_next()

def has_more(self):
return super().has_more()

def has_data(self):
return super().has_data()

@pytest.fixture
def setup_teardown():
pytest.cut = FakeOnAirDataSource.__new__(FakeOnAirDataSource)
@@ -48,6 +66,7 @@ def test_OnAirDataSource__init__sets_instance_variables_as_expected_and_calls_pa
fake_configs['subsystem_assignments'] = MagicMock()
fake_configs['test_assignments'] = MagicMock()
fake_configs['description_assignments'] = MagicMock()
fake_configs['data_labels'] = MagicMock()

mocker.patch.object(pytest.cut, 'parse_meta_data_file', return_value=fake_configs)
mocker.patch.object(pytest.cut, 'process_data_file')
@@ -58,7 +77,7 @@ def test_OnAirDataSource__init__sets_instance_variables_as_expected_and_calls_pa
# Assert
assert pytest.cut.raw_data_file == arg_rawDataFile
assert pytest.cut.meta_data_file == arg_metadataFile
assert pytest.cut.all_headers == {}
assert pytest.cut.all_headers == fake_configs['data_labels']
assert pytest.cut.sim_data == {}
assert pytest.cut.parse_meta_data_file.call_count == 1
assert pytest.cut.parse_meta_data_file.call_args_list[0].args == (arg_metadataFile, arg_ss_breakdown, )
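
The new get_next/has_more/has_data overrides in both test doubles, plus the updated __init__ assertions, suggest the abstract base class now declares three additional abstract methods and takes its headers from the parsed metadata rather than starting empty. A hedged sketch of that base class follows, assuming the constructor argument names and omitting any attributes the shown hunks do not cover.

from abc import ABC, abstractmethod


class OnAirDataSource(ABC):
    def __init__(self, rawDataFile, metadataFile, ss_breakdown=False):
        self.raw_data_file = rawDataFile
        self.meta_data_file = metadataFile
        self.sim_data = {}
        configs = self.parse_meta_data_file(self.meta_data_file, ss_breakdown)
        # Updated test: all_headers now comes from the parsed configs, not an empty dict.
        self.all_headers = configs['data_labels']
        self.process_data_file(self.raw_data_file)

    @abstractmethod
    def process_data_file(self, data_file):
        pass

    @abstractmethod
    def parse_meta_data_file(self, configFile, ss_breakdown):
        pass

    # The three members IncompleteOnAirDataSource leaves unimplemented and the fakes now override:
    @abstractmethod
    def get_next(self):
        pass

    @abstractmethod
    def has_more(self):
        pass

    @abstractmethod
    def has_data(self):
        pass
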
18 changes: 15 additions & 3 deletions test/onair/data_handling/parsers/test_tlm_json_parser.py
@@ -28,6 +28,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_configs_with_empty_dicts_when_
expected_result['subsystem_assignments'] = []
expected_result['test_assignments'] = []
expected_result['description_assignments'] = []
expected_result['data_labels'] = []

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -61,6 +62,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = [fake_subsystem]
expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]]
expected_result['description_assignments'] = [fake_description]
expected_result['data_labels'] = [fake_label]

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -89,6 +91,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = [fake_subsystem]
expected_result['test_assignments'] = [[['NOOP']]]
expected_result['description_assignments'] = [['No description']]
expected_result['data_labels'] = [fake_label]

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -107,11 +110,13 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
fake_data = MagicMock()
fake_organized_data = {}
fake_subsystems = []
fake_labels = []
num_labels = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
for i in range(num_labels):
fake_label = MagicMock()
fake_subsystem = MagicMock()
fake_subsystems.append(fake_subsystem)
fake_labels.append(fake_label)
fake_organized_data[fake_label] = {'subsystem' : fake_subsystem}

mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
@@ -121,6 +126,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = fake_subsystems
expected_result['test_assignments'] = [[['NOOP']]] * num_labels
expected_result['description_assignments'] = [['No description']] * num_labels
expected_result['data_labels'] = fake_labels

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -154,6 +160,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = [fake_subsystem]
expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]]
expected_result['description_assignments'] = [fake_description]
expected_result['data_labels'] = [fake_label]

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -171,13 +178,13 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo

fake_data = MagicMock()
num_elems = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
fake_label = [MagicMock() for i in range(num_elems)]
fake_labels = [MagicMock() for i in range(num_elems)]
fake_subsystem = MagicMock()
fake_limits = MagicMock()
fake_mnemonics = MagicMock()
fake_description = MagicMock()
fake_organized_data = {}
for label in fake_label:
for label in fake_labels:
fake_organized_data[label] = {'subsystem' : fake_subsystem,
'tests' : {fake_mnemonics : fake_limits},
'description' : fake_description}
@@ -189,6 +196,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = [fake_subsystem] * num_elems
expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]] * num_elems
expected_result['description_assignments'] = [fake_description] * num_elems
expected_result['data_labels'] = fake_labels

# Act
result = tlm_json_parser.parseTlmConfJson(arg_file_path)
@@ -232,7 +240,8 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
ordered_mnemonics = [y for x, y in sorted(zip(ordering_list, fake_mnemonics))]
ordered_limits = [y for x, y in sorted(zip(ordering_list, fake_limits))]
ordered_descs = [y for x, y in sorted(zip(ordering_list, fake_description))]

ordered_labels = [y for x, y in sorted(zip(ordering_list, fake_label))]

fake_organized_data = {}
for i in range(num_elems):
fake_organized_data[fake_label[i]] = {'subsystem' : fake_subsystem[i],
@@ -246,6 +255,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = []
expected_result['test_assignments'] = []
expected_result['description_assignments'] = []
expected_result['data_labels'] = ordered_labels
for i in range(num_elems):
expected_result['subsystem_assignments'].append(ordered_subsys[i])
expected_result['test_assignments'].append([[ordered_mnemonics[i], ordered_limits[i]]])
@@ -293,6 +303,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
ordered_mnemonics = [y for x, y in sorted(zip(ordering_list, fake_mnemonics))]
ordered_limits = [y for x, y in sorted(zip(ordering_list, fake_limits))]
ordered_descs = [y for x, y in sorted(zip(ordering_list, fake_description))]
ordered_labels = [y for x, y in sorted(zip(ordering_list, fake_label))]

fake_organized_data = {}
for i in range(num_elems):
@@ -307,6 +318,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
expected_result['subsystem_assignments'] = []
expected_result['test_assignments'] = []
expected_result['description_assignments'] = []
expected_result['data_labels'] = ordered_labels
for i in range(num_elems):
expected_result['subsystem_assignments'].append(ordered_subsys[i])
expected_result['test_assignments'].append([[ordered_mnemonics[i], ordered_limits[i]]])
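
Every parseTlmConfJson test now expects a fourth key, data_labels, ordered consistently with the other assignment lists. Below is a hedged illustration of the returned shape using a hypothetical build_configs helper; the real parseTlmConfJson internals, helper names, and iteration order are not shown in this diff.

def build_configs(organized_data):
    """Hypothetical helper: the dict shape the tests expect parseTlmConfJson to return."""
    configs = {
        'subsystem_assignments': [],
        'test_assignments': [],
        'description_assignments': [],
        'data_labels': [],  # new key asserted throughout these tests
    }
    for label, entry in organized_data.items():
        configs['data_labels'].append(label)
        configs['subsystem_assignments'].append(entry['subsystem'])
        if 'tests' in entry:
            configs['test_assignments'].append([[m, lims] for m, lims in entry['tests'].items()])
            configs['description_assignments'].append(entry['description'])
        else:
            # Defaults the tests expect when no tests/description are configured.
            configs['test_assignments'].append([['NOOP']])
            configs['description_assignments'].append(['No description'])
    return configs
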
7 changes: 2 additions & 5 deletions test/onair/src/run_scripts/test_execution_engine.py
@@ -446,14 +446,12 @@ def test_ExecutionEngine_parse_data_sets_the_simDataParser_to_the_data_parser(mo
arg_subsystems_breakdown = MagicMock()

class FakeParser:
def __init__(self, data_file, metad_file, subsystems_breakdown):
def __init__(self, data_file, metadata_file, subsystems_breakdown):
pass

fake_parser = MagicMock()
fake_parser_class = FakeParser
fake_parser_class_instance = MagicMock()
fake_run_path = str(MagicMock())
fake_environ = {'RUN_PATH':fake_run_path}
fake_parsed_data = MagicMock()

cut = ExecutionEngine.__new__(ExecutionEngine)
@@ -462,7 +460,6 @@ def __init__(self, data_file, metad_file, subsystems_breakdown):

mocker.patch(execution_engine.__name__ + '.importlib.import_module', return_value=fake_parser)
mocker.patch(execution_engine.__name__ + '.getattr', return_value=fake_parser_class)
mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ)
mocker.patch.object(fake_parser_class, '__new__', return_value=fake_parser_class_instance)

# Act
@@ -475,7 +472,7 @@ def __init__(self, data_file, metad_file, subsystems_breakdown):
assert execution_engine.getattr.call_args_list[0].args == (fake_parser, arg_parser_name,)
assert cut.simDataParser == fake_parser_class_instance
assert fake_parser_class.__new__.call_count == 1
assert fake_parser_class.__new__.call_args_list[0].args == (fake_parser_class, fake_run_path + arg_dataFile, fake_run_path + arg_metadataFile, arg_subsystems_breakdown, )
assert fake_parser_class.__new__.call_args_list[0].args == (fake_parser_class, arg_dataFile, arg_metadataFile, arg_subsystems_breakdown, )

# subsystems_breakdown

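
The parse_data test no longer patches os.environ['RUN_PATH'], and the constructor-argument assertion drops the path prefixes, so the parser class is now expected to receive the data and metadata file paths exactly as given. A hedged sketch of the relevant lines follows; the method signature and the import_module argument are assumptions, and only the constructor arguments are asserted by the test.

import importlib

def parse_data(self, parser_name, parser_file_name, dataFile, metadataFile, subsystems_breakdown=False):
    # Hypothetical excerpt of ExecutionEngine.parse_data after this commit.
    parser_module = importlib.import_module(parser_file_name)
    parser_class = getattr(parser_module, parser_name)
    # Previously the paths were prefixed with os.environ['RUN_PATH']; the updated
    # test asserts they are passed through unchanged.
    self.simDataParser = parser_class(dataFile, metadataFile, subsystems_breakdown)
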
3 changes: 1 addition & 2 deletions test/onair/src/run_scripts/test_sim.py
@@ -92,8 +92,7 @@ def test_Simulator__init__creates_Vehicle_and_DataSource_from_parsed_data_and_Ag
assert cut.simulator == arg_simType
assert sim.VehicleRepresentation.call_count == 1
assert sim.VehicleRepresentation.call_args_list[0].args == (fake_vehicle_metadata[0], fake_vehicle_metadata[1], )
assert sim.DataSource.call_count == 1
assert sim.DataSource.call_args_list[0].args == (fake_sim_data, )
assert cut.simData == arg_dataParser
assert sim.Agent.call_count == 1
assert sim.Agent.call_args_list[0].args == (fake_vehicle, arg_plugin_list)
assert cut.agent == fake_agent
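
The simulator test replaces the DataSource-wrapping assertions with a direct identity check, implying Simulator.__init__ now stores the parser it is given as simData. A hedged sketch of the changed assignment only; the surrounding constructor arguments and calls are assumptions.

def __init__(self, simType, dataParser, plugin_list):
    # Hypothetical excerpt of Simulator.__init__ after this commit.
    self.simulator = simType
    # Previously a sim.DataSource was built from the parsed sim data; the updated
    # test asserts the parser itself is now used directly.
    self.simData = dataParser
    # ... VehicleRepresentation and Agent construction unchanged by this hunk ...
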
17 changes: 0 additions & 17 deletions test/onair/src/systems/test_vehicle_rep.py
@@ -68,23 +68,6 @@ def test_VehicleRepresentation__init__sets_status_to_Status_with_str_MISSION_and
assert cut.test_suite == fake_test_suite
assert cut.curr_data == ['-'] * fake_len

# NOTE: commonly each optional arg is tested, but because their sizes must be equal testing both at once
def test_VehicleRepresentation__init__default_given_headers_and_tests_are_both_empty_list(mocker):
# Arrange
cut = VehicleRepresentation.__new__(VehicleRepresentation)

mocker.patch(vehicle_rep.__name__ + '.Status')
mocker.patch(vehicle_rep.__name__ + '.TelemetryTestSuite')

# Act
cut.__init__()

# Assert
assert cut.headers == []
assert vehicle_rep.TelemetryTestSuite.call_count == 1
assert vehicle_rep.TelemetryTestSuite.call_args_list[0].args == ([], [])
assert cut.curr_data == ['-'] * 0

# update tests
def test_VehicleRepresentation_update_does_not_set_any_curr_data_when_given_frame_is_vacant_and_executes_suite_with_given_frame_and_sets_status_with_suite_status(mocker):
# Arrange
