Commit 201024b
Merge branch 'main' into NPI-3683-sp3-consistency-check-streamlining
treefern committed Jan 14, 2025
2 parents 955b41a + 08f725b commit 201024b
Showing 13 changed files with 147 additions and 92 deletions.
2 changes: 1 addition & 1 deletion gnssanalysis/filenames.py
@@ -593,7 +593,7 @@ def determine_snx_name_props(file_path: pathlib.Path) -> Dict[str, Any]:
     if blk:
         soln_df = pd.read_csv(
             io.BytesIO(blk[0]),
-            delim_whitespace=True,
+            sep="\\s+",  # delim_whitespace is deprecated
             comment="*",
             names=["CODE", "PT", "SOLN", "T", "START_EPOCH", "END_EPOCH", "MEAN_EPOCH"],
             converters={
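The recurring change in this commit, repeated across most of the files below, swaps pandas' deprecated `delim_whitespace=True` keyword for the equivalent `sep="\s+"` regex separator. A minimal before/after sketch (the sample data is invented for illustration):

```python
import io

import pandas as pd

data = "CODE PT SOLN\nALIC  A    1\nHOB2  A    2\n"

# Deprecated since pandas 2.2 (emits FutureWarning; slated for removal in 3.0):
# df = pd.read_csv(io.StringIO(data), delim_whitespace=True)

# Drop-in replacement. pandas special-cases sep="\s+" as "split on runs of
# whitespace", so it keeps the fast C parsing engine rather than falling back
# to the slower Python engine used for general regex separators.
df = pd.read_csv(io.StringIO(data), sep=r"\s+")
print(df)
```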
2 changes: 1 addition & 1 deletion gnssanalysis/gn_frame.py
@@ -17,7 +17,7 @@ def _get_core_list(core_list_path):
     # need to check if solution numbers are consistent with discontinuities selection
     core_df = _pd.read_csv(
         core_list_path,
-        delim_whitespace=True,
+        sep="\\s+",  # delim_whitespace is deprecated
         skiprows=4,
         comment="-",
         usecols=[0, 1, 2, 3],
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/blq.py
@@ -28,7 +28,7 @@ def read_blq(path, as_complex=True):
     sites = blq_file_read[:, 0].astype("<U4")
     constituents = ["M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1", "MF", "MM", "SSA"]

-    blq_df = _pd.read_csv(_BytesIO(b"\n".join(blq_file_read[:, 1:].reshape((-1)))), delim_whitespace=True, header=None)
+    blq_df = _pd.read_csv(_BytesIO(b"\n".join(blq_file_read[:, 1:].reshape((-1)))), sep="\\s+", header=None)
     if as_complex:
         # convert extracted A and P to complex phasors X + jY so the comparison of several blq files could be done
         b = blq_df.values.reshape(-1, 11 * 3)
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/clk.py
@@ -34,7 +34,7 @@ def read_clk(clk_path):

     clk_df = _pd.read_csv(
         _BytesIO(data),
-        delim_whitespace=True,
+        sep="\\s+",  # delim_whitespace is deprecated
         header=None,
         usecols=clk_cols,
         names=clk_names,  # type:ignore
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/discon.py
@@ -18,7 +18,7 @@ def _read_discontinuities(path):
     out_df = _pd.read_csv(
         filepath_or_buffer=_BytesIO(block),
         usecols=[0, 1, 2, 4, 5, 6],
-        delim_whitespace=True,
+        sep="\\s+",  # delim_whitespace is deprecated
         header=None,
         names=["CODE", "PT", "SOLN", "START", "END", "MODEL"],
         dtype={0: object, 1: object, 2: int, 4: object, 5: object, 6: MODEL_CATEGORY},
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/erp.py
@@ -260,7 +260,7 @@ def read_erp(
     data_of_interest = content[start_of_data:]  # data block
     erp_df = _pd.read_csv(
         _BytesIO(data_of_interest),
-        delim_whitespace=True,
+        sep="\\s+",  # delim_whitespace is deprecated
         names=headers,
         index_col=False,
     )
7 changes: 4 additions & 3 deletions gnssanalysis/gn_io/igslog.py
@@ -463,12 +463,13 @@ def gather_metadata(
         gather_id_loc, columns=["CODE", "DOMES_N", "CITY", "COUNTRY", "X", "Y", "Z", "LAT", "LON", "HEI", "PATH"]
     )

-    id_loc_df.CITY[id_loc_df.CITY == ""] = "N/A"
+    id_loc_df.loc[id_loc_df.CITY == "", "CITY"] = "N/A"
     id_loc_df.CITY = id_loc_df.CITY.str.rstrip().str.upper()
     id_loc_df.COUNTRY = translate_series(
         id_loc_df.COUNTRY.str.rstrip().str.upper(), _gn_io.aux_dicts.translation_country
     ).values
-    id_loc_df.DOMES_N[id_loc_df.DOMES_N == ""] = "---------"
+
+    id_loc_df.loc[id_loc_df.DOMES_N == "", "DOMES_N"] = "---------"

     xyz_array = (
         id_loc_df[["X", "Y", "Z"]].stack().str.replace(",", ".").replace({"": None}).unstack().values.astype(float)

@@ -504,7 +505,7 @@ def gather_metadata(
     ant_df.RADOME2 = ant_df.RADOME2.str.rstrip().str.upper()

     no_rad2_mask = ~ant_df.RADOME.isin(_gn_io.aux_dicts.atx_rad_tbl)
-    ant_df.RADOME[no_rad2_mask] = ant_df.RADOME2[no_rad2_mask]
+    ant_df.loc[no_rad2_mask, "RADOME"] = ant_df.RADOME2[no_rad2_mask]
     # translation_ant.index.name= None
     antennas = translate_series(ant_df.ANTENNA, _gn_io.aux_dicts.translation_ant)
     invalid_ant_mask = ~antennas.index.isin(_gn_io.aux_dicts.atx_ant_tbl)
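The igslog.py edits replace chained indexing (`df.COL[mask] = value`) with `.loc`, the form pandas requires for reliable masked assignment: the chained version assigns to a potentially temporary intermediate Series, triggers SettingWithCopyWarning, and under copy-on-write (the default from pandas 3.0) never propagates to the parent DataFrame at all. A small sketch with made-up station data:

```python
import pandas as pd

df = pd.DataFrame({"CITY": ["HOBART", "", "YARRAGADEE"]})

# Chained indexing: df.CITY[...] first materialises a Series, then assigns to
# that intermediate object; whether df itself is updated is not guaranteed.
# df.CITY[df.CITY == ""] = "N/A"

# .loc selects rows and column in a single, well-defined assignment on df:
df.loc[df.CITY == "", "CITY"] = "N/A"
print(df.CITY.tolist())  # ['HOBART', 'N/A', 'YARRAGADEE']
```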
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/pea.py
@@ -13,7 +13,7 @@ def read_pea_partials(path):
     df = _pd.read_csv(
         _BytesIO(partials[begin:]),
         header=None,
-        delim_whitespace=True,
+        sep="\\s+",  # delim_whitespace is deprecated
         usecols=[0, 1, 2, 9, 10, 11],
         names=[None, "MJD", "TOD", "X", "Y", "Z"],
     )
2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/sinex.py
@@ -461,7 +461,7 @@ def _get_snx_matrix(path_or_bytes, stypes=("APR", "EST"), verbose=True, snx_head
     else:
         return None  # not found

-    matrix_raw = _pd.read_csv(snx_buffer, delim_whitespace=True, dtype={0: _np.int16, 1: _np.int16})
+    matrix_raw = _pd.read_csv(snx_buffer, sep="\\s+", dtype={0: _np.int16, 1: _np.int16})
     # can be 4 and 5 columns; only 2 first int16

     output = []
195 changes: 119 additions & 76 deletions gnssanalysis/gn_io/sp3.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion gnssanalysis/gn_io/trace.py
@@ -168,7 +168,7 @@ def _find_trace(output_path: str) -> tuple:
     # LC_bytes = b''.join(trace_LC_list)
     # LC_bytes = LC_bytes.replace(b'=',b'')  # getting rid of '='

-    # df_LC = _pd.read_csv(_BytesIO(LC_bytes),delim_whitespace=True,header=None,usecols=[1,2,4,6,8,9,10,11,12,13]).astype(
+    # df_LC = _pd.read_csv(_BytesIO(LC_bytes),sep="\\s+",header=None,usecols=[1,2,4,6,8,9,10,11,12,13]).astype(
     #     {
     #         1: _np.int16, 2: _np.int32, 4: '<U3',
     #         6: '<U1', 8: '<U4',
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,6 +1,7 @@
 boto3
 click
 hatanaka
+jinja2
 matplotlib
 numpy
 pandas
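jinja2 is presumably added here because pandas' Styler, which the test changes below note is used by gen_sp3_content(), treats it as an optional dependency and raises ImportError at construction time when it is missing. A quick illustrative check (not code from this repo):

```python
import pandas as pd

df = pd.DataFrame({"X": [1.23456, 2.34567]})

# DataFrame.style constructs a Styler, which imports jinja2 to load its
# templates; without jinja2 installed this line raises an ImportError along
# the lines of "Missing optional dependency 'Jinja2'...".
styled = df.style.format("{:.3f}")
print(styled.to_string())
```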
18 changes: 14 additions & 4 deletions tests/test_sp3.py
@@ -154,14 +154,24 @@ def test_read_sp3_correct_svs_read_when_ev_ep_present(self, mock_file):
     # TODO Add test(s) for correctly reading header fundamentals (ACC, ORB_TYPE, etc.)
     # TODO add tests for correctly reading the actual content of the SP3 in addition to the header.
     # TODO add tests for correctly generating sp3 output content with gen_sp3_content() and gen_sp3_header()

-    @patch("builtins.open", new_callable=mock_open, read_data=input_data)
-    def test_gen_sp3_content_velocity_exception_handling(self, mock_file):
+    # These tests should include:
+    # - Correct alignment of POS, CLK, STDPOS, STDCLK (not velocity yet), FLAGS
+    # - Correct alignment of the above when nodata and infinite values are present
+    # - Inclusion of HLM orbit_type in header, after applying Helmert transformation (if not covered elsewhere?
+    #   Probably should be covered elsewhere)
+    # - Not including column names (can just test that output matches expected format)
+    # - Not including any NaN value *anywhere*
+
+    def test_gen_sp3_content_velocity_exception_handling(self):
         """
         gen_sp3_content() velocity output should raise exception (currently unsupported).\
         If asked to continue with warning, it should remove velocity columns before output.
         """
-        sp3_df = sp3.read_sp3("mock_path", pOnly=False)
+        # Input data passed as bytes here, rather than using a mock file, because the mock file setup seems to break
+        # part of Pandas Styler, which is used by gen_sp3_content(). Specifically, some part of Styler's attempt to
+        # load style config files leads to a crash, despite some style config files appearing to read successfully.
+        input_data_fresh = input_data + b""  # Lazy attempt at not passing a reference
+        sp3_df = sp3.read_sp3(bytes(input_data_fresh), pOnly=False)
         with self.assertRaises(NotImplementedError):
             generated_sp3_content = sp3.gen_sp3_content(sp3_df, continue_on_unhandled_velocity_data=False)
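On the mock-file issue described in the test comment above: patching builtins.open replaces open() for every caller in the process while the patch is active, so library-internal file reads (such as Styler loading its template and config files) receive the mocked SP3 bytes instead of their real files. A minimal sketch of the mechanism, with invented file content:

```python
from unittest.mock import mock_open, patch

fake_sp3 = b"#dV2007  4 12  0  0  0.00000000\n"  # invented SP3 header fragment

with patch("builtins.open", mock_open(read_data=fake_sp3)):
    # While patched, any open() call in the process returns the fake data,
    # regardless of path, including file reads made deep inside libraries.
    with open("/definitely/not/a/real/file", "rb") as f:
        assert f.read() == fake_sp3
```

Passing the input as bytes to read_sp3(), as the rewritten test does, sidesteps the patch entirely and leaves Styler's own file access untouched.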
