diff --git a/alphadia/calibration/property.py b/alphadia/calibration/property.py
index 040a45f4..080ef280 100644
--- a/alphadia/calibration/property.py
+++ b/alphadia/calibration/property.py
@@ -75,6 +75,7 @@ def __init__(
             float(transform_deviation) if transform_deviation is not None else None
         )
         self.is_fitted = False
+        self.metrics = None

     def __repr__(self) -> str:
         return f""
@@ -172,10 +173,12 @@ def fit(self, dataframe: pd.DataFrame, plot: bool = False, **kwargs):
             self.function.fit(input_values, target_value)
             self.is_fitted = True
         except Exception as e:
-            logging.error(f"Could not fit estimator {self.name}: {e}")
+            logging.exception(f"Could not fit estimator {self.name}: {e}")
             return

-        if plot is True:
+        self._save_metrics(dataframe)
+
+        if plot:
             self.plot(dataframe, **kwargs)

     def predict(self, dataframe, inplace=True):
@@ -200,13 +203,13 @@ def predict(self, dataframe, inplace=True):
             logging.warning(
                 f"{self.name} prediction was skipped as it has not been fitted yet"
             )
-            return
+            return None

         if not set(self.input_columns).issubset(dataframe.columns):
             logging.warning(
                 f"{self.name} calibration was skipped as input column {self.input_columns} not found in dataframe"
             )
-            return
+            return None

         input_values = dataframe[self.input_columns].values
@@ -297,6 +300,13 @@ def deviation(self, dataframe: pd.DataFrame):
             axis=1,
         )

+    def _save_metrics(self, dataframe):
+        deviation = self.deviation(dataframe)
+        self.metrics = {
+            "median_accuracy": np.median(np.abs(deviation[:, 1])),
+            "median_precision": np.median(np.abs(deviation[:, 2])),
+        }
+
     def ci(self, dataframe, ci: float = 0.95):
         """Calculate the residual deviation at the given confidence interval.
diff --git a/alphadia/data/alpharaw.py b/alphadia/data/alpharaw.py
index a1607bb4..d0866397 100644
--- a/alphadia/data/alpharaw.py
+++ b/alphadia/data/alpharaw.py
@@ -317,8 +317,6 @@ def filter_spectra(self, **kwargs):
         This function is implemented in the sub-class.
         """

-        pass
-
     def jitclass(self):
         return AlphaRawJIT(
             self.cycle,
diff --git a/alphadia/fdrexperimental.py b/alphadia/fdrexperimental.py
index c8d4cc20..c35459a9 100644
--- a/alphadia/fdrexperimental.py
+++ b/alphadia/fdrexperimental.py
@@ -8,9 +8,8 @@
 # third party imports
 import numpy as np
 import torch
-import torch.nn as nn
-import torch.optim as optim
 from sklearn import model_selection
+from torch import nn, optim
 from torchmetrics.classification import BinaryAUROC
 from tqdm import tqdm

@@ -30,7 +29,6 @@ class Classifier(ABC):
     @abstractmethod
     def fitted(self):
         """Return whether the classifier has been fitted."""
-        pass

     @abstractmethod
     def fit(self, x: np.array, y: np.array):
@@ -46,7 +44,6 @@ def fit(self, x: np.array, y: np.array):
             Target values of shape (n_samples,) or (n_samples, n_classes).

         """
-        pass

     @abstractmethod
     def predict(self, x: np.array):
@@ -65,7 +62,6 @@ def predict(self, x: np.array):
             Predicted class of shape (n_samples,).

         """
-        pass

     @abstractmethod
     def predict_proba(self, x: np.array):
@@ -84,7 +80,6 @@ def predict_proba(self, x: np.array):
             Predicted class probabilities of shape (n_samples, n_classes).

         """
-        pass

     @abstractmethod
     def to_state_dict(self):
@@ -97,7 +92,6 @@ def to_state_dict(self):
         state_dict : dict
             State dict of the classifier.
         """
-        pass

     @abstractmethod
     def from_state_dict(self, state_dict: dict):
@@ -111,7 +105,6 @@ def from_state_dict(self, state_dict: dict):
             State dict of the classifier.
""" - pass class BinaryClassifier(Classifier): diff --git a/alphadia/libtransform.py b/alphadia/libtransform.py index 2dbd5021..805b4073 100644 --- a/alphadia/libtransform.py +++ b/alphadia/libtransform.py @@ -32,7 +32,6 @@ class ProcessingStep: def __init__(self) -> None: """Base class for processing steps. Each implementation must implement the `validate` and `forward` method. Processing steps can be chained together in a ProcessingPipeline.""" - pass def __call__(self, *args: typing.Any) -> typing.Any: """Run the processing step on the input object.""" diff --git a/alphadia/numba/fft.py b/alphadia/numba/fft.py index 8c93b57e..f43897b6 100644 --- a/alphadia/numba/fft.py +++ b/alphadia/numba/fft.py @@ -47,13 +47,13 @@ def rfft2(x: np.array, s: None | tuple = None) -> np.array: @overload(rfft2, fastmath=True) def _(x, s=None): if not isinstance(x, nb.types.Array): - return + return None if x.ndim != 2: - return + return None if x.dtype != nb.types.float32: - return + return None def funcx_impl(x, s=None): s, axes = ndshape_and_axes(x, s, (-2, -1)) @@ -98,13 +98,13 @@ def irfft2(x: np.array, s: None | tuple = None) -> np.array: @overload(irfft2, fastmath=True) def _(x, s=None): if not isinstance(x, nb.types.Array): - return + return None if x.ndim != 2: - return + return None if x.dtype != nb.types.complex64: - return + return None def funcx_impl(x, s=None): s, axes = ndshape_and_axes(x, s, (-2, -1)) @@ -161,16 +161,16 @@ def convolve_fourier(dense, kernel): @overload(convolve_fourier, fastmath=True) def _(dense, kernel): if not isinstance(dense, nb.types.Array): - return + return None if not isinstance(kernel, nb.types.Array): - return + return None if kernel.ndim != 2: - return + return None if dense.ndim < 2: - return + return None if dense.ndim == 2: diff --git a/alphadia/numba/fragments.py b/alphadia/numba/fragments.py index 0d0ed3ba..5746d5fa 100644 --- a/alphadia/numba/fragments.py +++ b/alphadia/numba/fragments.py @@ -331,7 +331,9 @@ def get_ion_group_mapping( score_group_intensity = np.zeros((len(ion_mz)), dtype=np.float32) - for precursor, mz, intensity in zip(ion_precursor, ion_mz, ion_intensity): # noqa: B905 ('strict' not supported by numba yet + for precursor, mz, intensity in zip( + ion_precursor, ion_mz, ion_intensity + ): # ('strict' not supported by numba yet # score_group_idx = precursor_group[precursor] if len(grouped_mz) == 0 or np.abs(grouped_mz[-1] - mz) > EPSILON: diff --git a/alphadia/outputtransform.py b/alphadia/outputtransform.py index 504e14a4..f9d900e6 100644 --- a/alphadia/outputtransform.py +++ b/alphadia/outputtransform.py @@ -116,7 +116,7 @@ def accumulate_frag_df( raw_name, df = next(df_iterable, (None, None)) if df is None: logger.warning(f"no frag file found for {raw_name}") - return + return None df = prepare_df(df, self.psm_df, column=self.column) @@ -886,7 +886,7 @@ def build_library( if len(psm_df) == 0: logger.warning("No precursors found, skipping library building") - return + return None libbuilder = libtransform.MbrLibraryBuilder( fdr=0.01, @@ -942,6 +942,10 @@ def _build_run_stat_df( folder, peptidecentric.PeptideCentricWorkflow.OPTIMIZATION_MANAGER_PATH ) + calibration_manager_path = os.path.join( + folder, peptidecentric.PeptideCentricWorkflow.CALIBRATION_MANAGER_PATH + ) + if channels is None: channels = [0] out_df = [] @@ -956,31 +960,69 @@ def _build_run_stat_df( "proteins": channel_df["pg"].nunique(), } - if "weighted_mass_error" in channel_df.columns: - base_dict["ms1_accuracy"] = np.mean(channel_df["weighted_mass_error"]) - if 
"cycle_fwhm" in channel_df.columns: base_dict["fwhm_rt"] = np.mean(channel_df["cycle_fwhm"]) if "mobility_fwhm" in channel_df.columns: base_dict["fwhm_mobility"] = np.mean(channel_df["mobility_fwhm"]) + # collect optimization stats + base_dict["optimization.ms2_error"] = np.nan + base_dict["optimization.ms1_error"] = np.nan + base_dict["optimization.rt_error"] = np.nan + base_dict["optimization.mobility_error"] = np.nan + if os.path.exists(optimization_manager_path): optimization_manager = manager.OptimizationManager( path=optimization_manager_path ) - - base_dict["ms2_error"] = optimization_manager.ms2_error - base_dict["ms1_error"] = optimization_manager.ms1_error - base_dict["rt_error"] = optimization_manager.rt_error - base_dict["mobility_error"] = optimization_manager.mobility_error + base_dict["optimization.ms2_error"] = optimization_manager.ms2_error + base_dict["optimization.ms1_error"] = optimization_manager.ms1_error + base_dict["optimization.rt_error"] = optimization_manager.rt_error + base_dict["optimization.mobility_error"] = ( + optimization_manager.mobility_error + ) else: logger.warning(f"Error reading optimization manager for {raw_name}") - base_dict["ms2_error"] = np.nan - base_dict["ms1_error"] = np.nan - base_dict["rt_error"] = np.nan - base_dict["mobility_error"] = np.nan + + # collect calibration stats + base_dict["calibration.ms2_median_accuracy"] = np.nan + base_dict["calibration.ms2_median_precision"] = np.nan + base_dict["calibration.ms1_median_accuracy"] = np.nan + base_dict["calibration.ms1_median_precision"] = np.nan + + if os.path.exists(calibration_manager_path): + calibration_manager = manager.CalibrationManager( + path=calibration_manager_path + ) + + if ( + fragment_mz_estimator := calibration_manager.get_estimator( + "fragment", "mz" + ) + ) and (fragment_mz_metrics := fragment_mz_estimator.metrics): + base_dict["calibration.ms2_median_accuracy"] = fragment_mz_metrics[ + "median_accuracy" + ] + base_dict["calibration.ms2_median_precision"] = fragment_mz_metrics[ + "median_precision" + ] + + if ( + precursor_mz_estimator := calibration_manager.get_estimator( + "precursor", "mz" + ) + ) and (precursor_mz_metrics := precursor_mz_estimator.metrics): + base_dict["calibration.ms1_median_accuracy"] = precursor_mz_metrics[ + "median_accuracy" + ] + base_dict["calibration.ms1_median_precision"] = precursor_mz_metrics[ + "median_precision" + ] + + else: + logger.warning(f"Error reading calibration manager for {raw_name}") out_df.append(base_dict) diff --git a/alphadia/peakgroup/kernel.py b/alphadia/peakgroup/kernel.py index 8cd37e28..8b72b7bf 100644 --- a/alphadia/peakgroup/kernel.py +++ b/alphadia/peakgroup/kernel.py @@ -177,7 +177,6 @@ def get_dense_matrix(self, verbose: bool = True): mobility_resolution = np.mean(np.diff(self.dia_data.mobility_values[::-1])) if verbose: - pass logger.info( f"Duty cycle consists of {rt_datapoints} frames, {rt_resolution:.2f} seconds cycle time" ) @@ -189,7 +188,6 @@ def get_dense_matrix(self, verbose: bool = True): mobility_sigma = self.determine_mobility_sigma(mobility_resolution) if verbose: - pass logger.info( f"FWHM in RT is {self.fwhm_rt:.2f} seconds, sigma is {rt_sigma:.2f}" ) diff --git a/alphadia/peakgroup/search.py b/alphadia/peakgroup/search.py index 9438b115..ff7e692e 100644 --- a/alphadia/peakgroup/search.py +++ b/alphadia/peakgroup/search.py @@ -690,7 +690,7 @@ def build_candidates( cycle_limits_list = np.zeros((peak_cycle_list.shape[0], 2), dtype="int32") for candidate_rank, (scan_relative, cycle_relative) in 
     for candidate_rank, (scan_relative, cycle_relative) in enumerate(
-        zip(peak_scan_list, peak_cycle_list)  # noqa: B905 ('strict' not supported by numba yet)
+        zip(peak_scan_list, peak_cycle_list)  # ('strict' not supported by numba yet)
     ):
         scan_limits_relative, cycle_limits_relative = numeric.symetric_limits_2d(
             score,
@@ -740,7 +740,7 @@ def build_candidates(
         peak_score_list,
         scan_limits_list,
         cycle_limits_list,
-    ):  # noqa: B905 ('strict' not supported by numba yet)
+    ):  # ('strict' not supported by numba yet)

         # does not work anymore
         scan_limits_absolute = numeric.wrap1(
diff --git a/alphadia/peakgroup/utils.py b/alphadia/peakgroup/utils.py
index 788c8f7e..6921d192 100644
--- a/alphadia/peakgroup/utils.py
+++ b/alphadia/peakgroup/utils.py
@@ -18,16 +18,16 @@ def assemble_isotope_mz(mono_mz, charge, isotope_intensity):
 @overload(assemble_isotope_mz)
 def _(mono_mz, charge, isotope_intensity):
     if not isinstance(mono_mz, nb.types.Float):
-        return
+        return None

     if not isinstance(charge, nb.types.Integer):
-        return
+        return None

     if not isinstance(isotope_intensity, nb.types.Array):
-        return
+        return None

     if isotope_intensity.ndim != 1:
-        return
+        return None

     def funcx_impl(mono_mz, charge, isotope_intensity):
         offset = np.arange(len(isotope_intensity)) * 1.0033548350700006 / charge
diff --git a/alphadia/planning.py b/alphadia/planning.py
index ae4fd08d..f431b8aa 100644
--- a/alphadia/planning.py
+++ b/alphadia/planning.py
@@ -70,9 +70,9 @@ def __init__(
         reporting.init_logging(self.output_folder)

         logger.progress(" _ _ ___ ___ _ ")
-        logger.progress(" __ _| |_ __| |_ __ _| \_ _| /_\ ")
-        logger.progress(" / _` | | '_ \ ' \\/ _` | |) | | / _ \ ")
-        logger.progress(" \__,_|_| .__/_||_\__,_|___/___/_/ \_\\")
+        logger.progress(r" __ _| |_ __| |_ __ _| \_ _| /_\ ")
+        logger.progress(" / _` | | '_ \\ ' \\/ _` | |) | | / _ \\ ")
+        logger.progress(" \\__,_|_| .__/_||_\\__,_|___/___/_/ \\_\\")
         logger.progress(" |_| ")
         logger.progress("")
diff --git a/alphadia/transferlearning/train.py b/alphadia/transferlearning/train.py
index b9aa2426..9704d557 100644
--- a/alphadia/transferlearning/train.py
+++ b/alphadia/transferlearning/train.py
@@ -994,7 +994,7 @@ def finetune_ccs(self, psm_df: pd.DataFrame) -> pd.DataFrame:
             logger.error(
                 "Failed to finetune CCS model. PSM dataframe does not contain mobility or ccs columns."
             )
-            return
+            return None
         if "ccs" not in psm_df.columns:
             psm_df["ccs"] = mobility_to_ccs_for_df(psm_df, "mobility")
         elif "mobility" not in psm_df.columns:
diff --git a/alphadia/utils.py b/alphadia/utils.py
index 8cee9786..e14c6e05 100644
--- a/alphadia/utils.py
+++ b/alphadia/utils.py
@@ -9,13 +9,13 @@
 # alpha family imports
 import alphatims.bruker
 import alphatims.utils
-import matplotlib.patches as patches
 import numba as nb
 import numpy as np

 # third party imports
 import pandas as pd
 import torch
+from matplotlib import patches

 logger = logging.getLogger()
diff --git a/alphadia/workflow/manager.py b/alphadia/workflow/manager.py
index 6dc2e3e8..be0eb989 100644
--- a/alphadia/workflow/manager.py
+++ b/alphadia/workflow/manager.py
@@ -2,6 +2,7 @@
 import logging
 import os
 import pickle
+import traceback
 import typing
 from collections import defaultdict
 from copy import deepcopy
@@ -82,11 +83,16 @@ def save(self):
         try:
             with open(self.path, "wb") as f:
                 pickle.dump(self, f)
-        except Exception:
+        except Exception as e:
             self.reporter.log_string(
-                f"Failed to save {self.__class__.__name__} to {self.path}",
+                f"Failed to save {self.__class__.__name__} to {self.path}: {str(e)}",
                 verbosity="error",
             )
+            # Log the full traceback
+
+            self.reporter.log_string(
+                f"Traceback: {traceback.format_exc()}", verbosity="error"
+            )

     def load(self):
         """Load the state from pickle file."""
diff --git a/alphadia/workflow/optimization.py b/alphadia/workflow/optimization.py
index a446a87b..3f00b85a 100644
--- a/alphadia/workflow/optimization.py
+++ b/alphadia/workflow/optimization.py
@@ -57,12 +57,9 @@ def step(self, precursors_df: pd.DataFrame, fragments_df: pd.DataFrame):

         """
-        pass
-
     @abstractmethod
     def skip(self):
         """Record skipping of optimization. Can be overwritten with an empty method if there is no need to record skips."""
-        pass

     def proceed_with_insufficient_precursors(self, precursors_df, fragments_df):
         self.workflow.reporter.log_string(
@@ -80,7 +77,6 @@ def proceed_with_insufficient_precursors(self, precursors_df, fragments_df):
     @abstractmethod
     def plot(self):
         """Plots the progress of the optimization. Can be overwritten with an empty method if there is no need to plot the progress."""
-        pass

     @abstractmethod
     def _update_workflow():
@@ -92,7 +88,6 @@ def _update_workflow():
         and FWHM_mobility

         """
-        pass

     @abstractmethod
     def _update_history():
@@ -107,7 +102,6 @@ def _update_history():
             The filtered fragment dataframe for the search.

         """
-        pass


 class AutomaticOptimizer(BaseOptimizer):
@@ -503,7 +497,6 @@ def _get_feature_value(

         """
-        pass


 class TargetedOptimizer(BaseOptimizer):
@@ -611,11 +604,9 @@ def step(

     def skip(self):
         """See base class."""
-        pass

     def plot(self):
         """See base class"""
-        pass

     def _update_workflow(self):
         pass
diff --git a/docs/developer_guide.md b/docs/developer_guide.md
index 74424fb0..54663e06 100644
--- a/docs/developer_guide.md
+++ b/docs/developer_guide.md
@@ -16,7 +16,9 @@ This package uses a shared release process defined in the
 ## Notes for developers

 ### Debugging
-To debug e2e tests with PyCharm:
+A good start for debugging is this notebook: `nvs/debug/debug_lvl1.ipynb`
+
+#### Debug e2e tests with PyCharm
 1. Create a "Run/Debug configuration" with
    - "module": `alphadia.cli`
    - "script parameters": `--config /abs/path/to/tests/e2e_tests/basic/config.yaml`
@@ -24,6 +26,29 @@
 2. Uncomment the lines following the `uncomment for debugging` comment in `alphadia/cli.py`.
 3. Run the configuration.

+#### Debug e2e tests with VS Code
+1. Create the following debug configuration (`launch.json`, see [here](https://code.visualstudio.com/docs/editor/debugging#_launch-configurations)):
+```json
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File with Arguments",
+            "type": "debugpy",
+            "request": "launch",
+            "cwd": "/abs/path/to/tests/e2e_tests",
+            "program": "../../alphadia/cli.py",
+            "console": "integratedTerminal",
+            "args": [
+                "--config", "/abs/path/to/tests/e2e_tests/basic/config.yaml"
+            ]
+        }
+    ]
+}
+```
+2. Uncomment the lines following the `uncomment for debugging` comment in `alphadia/cli.py`.
+3. Run the configuration.
+
 ### pre-commit hooks
 It is highly recommended to use the provided pre-commit hooks, as
 the CI pipeline enforces all checks therein to
diff --git a/pyproject.toml b/pyproject.toml
index 7407154d..711b3bf1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,7 +60,7 @@ version = {attr = "alphadia.__version__"}
 alphadia = "alphadia.cli:run"

 [tool.ruff]
-extend-exclude = ["misc/.bumpversion.cfg"]
+extend-exclude = ["misc/.bumpversion.cfg", "tests"]

 [tool.ruff.lint]
 select = [
@@ -76,10 +76,39 @@ select = [
     "SIM",
     # isort
     "I",
+    #"ALL"
 ]

 ignore = [
+    "D",
+    "ANN",
+    "SLF001",  # Private member accessed TODO this needs to be fixed in alphabase
+    "E501",  # Line too long (ruff wraps code, but not docstrings)
     "B028",  # No explicit `stacklevel` keyword argument found (for warnings)
-    "B905"  # This causes problems in numba code: `zip()` without an explicit `strict=` parameter
+    "B905",  # This causes problems in numba code: `zip()` without an explicit `strict=` parameter
+    "COM812",  # may cause conflicts when used with the formatter
+    "ISC001",  # may cause conflicts when used with the formatter
+    "D211",  # no-blank-line-before-class
+    "D213",  # multi-line-summary-second-line
+    "S101",  # Use of `assert` detected
+    "INP001",  # implicit namespace package
+ "ERA001", # Found commented-out code + "D203", # 1 blank line required before class docstring + "TD002", "TD003", "FIX002", # things around TO-DO + "PT011", #pytest.raises(ValueError) is too broad + "G004", "EM102", # Logging statement uses f-string + "TRY003", # Avoid specifying long messages outside the exception class + "ANN101", # Missing type annotation for `self` in method + "ANN102", # Missing type annotation for `cls` in classmethod + "ANN002", # Missing type annotation for `*args` + "ANN003", # Missing type annotation for `**kwargs + "FA102", # Missing `from __future__ import annotations + "EM101", # Exception must not use a string literal, assign to variable first + "D104", # Missing docstring in public package + "ANN204", # Missing return type annotation for special method `__init__` + "D401", # First line of docstring should be in imperative mood + "B023", # Function definition does not bind loop variable + "PD901", # Avoid using the generic variable name `df` for DataFrames" + "TCH003", # Move standard library import into a type-checking block ] diff --git a/tests/performance_tests/1_brunner_2022_1ng_all.py b/tests/performance_tests/1_brunner_2022_1ng_all.py index 6bd9f948..b40107a1 100644 --- a/tests/performance_tests/1_brunner_2022_1ng_all.py +++ b/tests/performance_tests/1_brunner_2022_1ng_all.py @@ -22,7 +22,7 @@ try: test_dir = os.environ["TEST_DATA_DIR"] except KeyError: - logging.error("TEST_DATA_DIR environtment variable not set") + logging.exception("TEST_DATA_DIR environtment variable not set") raise KeyError from None logging.info(f"Test data directory: {test_dir}") diff --git a/tests/performance_tests/diann_psm_extraction.py b/tests/performance_tests/diann_psm_extraction.py index 29118a7d..f45280a8 100644 --- a/tests/performance_tests/diann_psm_extraction.py +++ b/tests/performance_tests/diann_psm_extraction.py @@ -25,7 +25,7 @@ try: neptune_token = os.environ["NEPTUNE_TOKEN"] except KeyError: - logging.error("NEPTUNE_TOKEN environtment variable not set") + logging.exception("NEPTUNE_TOKEN environtment variable not set") raise KeyError from None run = neptune.init_run(project="MannLabs/alphaDIA", api_token=neptune_token) @@ -42,7 +42,7 @@ try: test_dir = os.environ["TEST_DATA_DIR"] except KeyError: - logging.error("TEST_DATA_DIR environtment variable not set") + logging.exception("TEST_DATA_DIR environtment variable not set") raise KeyError from None logging.info(f"Test data directory: {test_dir}") diff --git a/tests/unit_tests/test_calibration_property.py b/tests/unit_tests/test_calibration_property.py index cadee274..cf13183d 100644 --- a/tests/unit_tests/test_calibration_property.py +++ b/tests/unit_tests/test_calibration_property.py @@ -20,6 +20,8 @@ def test_uninitialized_calibration(): with pytest.raises(ValueError): mz_calibration.fit(mz_df) + assert mz_calibration.metrics is None + def test_fit_predict_linear(): library_mz = np.linspace(100, 1000, 100) @@ -38,6 +40,8 @@ def test_fit_predict_linear(): mz_calibration.predict(mz_df) assert "calibrated_mz" in mz_df.columns + assert "median_accuracy" in mz_calibration.metrics + assert "median_precision" in mz_calibration.metrics def test_fit_predict_loess(): @@ -57,6 +61,8 @@ def test_fit_predict_loess(): mz_calibration.predict(mz_df) assert "calibrated_mz" in mz_df.columns + assert "median_accuracy" in mz_calibration.metrics + assert "median_precision" in mz_calibration.metrics def test_save_load(): @@ -86,3 +92,5 @@ def test_save_load(): mz_calibration_loaded.predict(df_loaded) assert 
     assert np.allclose(df_original["calibrated_mz"], df_loaded["calibrated_mz"])
+    assert "median_accuracy" in mz_calibration.metrics
+    assert "median_precision" in mz_calibration.metrics
diff --git a/tests/unit_tests/test_data.py b/tests/unit_tests/test_data.py
index 71ff21d0..87311c87 100644
--- a/tests/unit_tests/test_data.py
+++ b/tests/unit_tests/test_data.py
@@ -40,7 +40,7 @@ def test_cycle():
     assert cycle_start == rand_cycle_start


-@pytest.mark.slow
+@pytest.mark.slow()
 def test_raw_data():
     if pytest.test_data is None:
         pytest.skip("No test data found")
diff --git a/tests/unit_tests/test_fdr.py b/tests/unit_tests/test_fdr.py
index 8042ce7d..d2aa30db 100644
--- a/tests/unit_tests/test_fdr.py
+++ b/tests/unit_tests/test_fdr.py
@@ -159,7 +159,7 @@ def test_get_q_values():
     )


-@pytest.mark.slow
+@pytest.mark.slow()
 def test_fdr():
     matplotlib.use("Agg")
diff --git a/tests/unit_tests/test_outputtransform.py b/tests/unit_tests/test_outputtransform.py
index 23f5da92..4b68447f 100644
--- a/tests/unit_tests/test_outputtransform.py
+++ b/tests/unit_tests/test_outputtransform.py
@@ -134,8 +134,9 @@ def test_output_transform():
         os.path.join(temp_folder, f"{output.STAT_OUTPUT}.tsv"), sep="\t"
     )
     assert len(stat_df) == 3
-    assert stat_df["ms2_error"][0] == 6
-    assert stat_df["rt_error"][0] == 200
+
+    assert stat_df["optimization.ms2_error"][0] == 6
+    assert stat_df["optimization.rt_error"][0] == 200

     assert all(
         [
diff --git a/tests/unit_tests/test_planning.py b/tests/unit_tests/test_planning.py
index d9a23ce8..f46b92dc 100644
--- a/tests/unit_tests/test_planning.py
+++ b/tests/unit_tests/test_planning.py
@@ -9,7 +9,7 @@
 from alphadia.test_data_downloader import DataShareDownloader


-@pytest.mark.slow
+@pytest.mark.slow()
 def test_fasta_digest():
     # digest & predict new library
     common_contaminants = os.path.join(_const.CONST_FILE_FOLDER, "contaminants.fasta")
@@ -45,7 +45,7 @@ def test_fasta_digest():
     assert len(plan.spectral_library.fragment_df) > 0


-@pytest.mark.slow
+@pytest.mark.slow()
 def test_library_loading():
     temp_directory = tempfile.gettempdir()
diff --git a/tests/unit_tests/test_workflow.py b/tests/unit_tests/test_workflow.py
index 1e289b13..20c4cc0a 100644
--- a/tests/unit_tests/test_workflow.py
+++ b/tests/unit_tests/test_workflow.py
@@ -293,7 +293,7 @@ def test_optimization_manager_fit():
     os.remove(temp_path)


-@pytest.mark.slow
+@pytest.mark.slow()
 def test_workflow_base():
     if pytest.test_data is None:
         pytest.skip("No test data found")