diff --git a/Dockerfile b/Dockerfile index 6aa9095..040bc80 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,16 @@ # set base image (host OS) FROM python:3.11 +RUN addgroup --system nonroot \ + && adduser --system --ingroup nonroot nonroot + +USER nonroot + # set the working directory in the container WORKDIR /code # copy the dependencies file to the working directory -COPY . . +COPY ./raman-fitting ./raman-fitting # copy setup.cfg to work dir # COPY setup.cfg . @@ -25,5 +30,5 @@ RUN pip install -e ./ #COPY src/ . # command to run on container start -CMD [ "raman_fitting -M make_examples" ] +CMD [ "raman_fitting", "run", "examples" ] # CMD [ "python", "./raman_fitting/docker/run_make_examples.py" ] diff --git a/src/raman_fitting/__init__.py b/src/raman_fitting/__init__.py index e7e20e8..186796f 100644 --- a/src/raman_fitting/__init__.py +++ b/src/raman_fitting/__init__.py @@ -1,7 +1,3 @@ -# pylint: disable=W0614,W0611,W0622 -# flake8: noqa -# isort:skip_file - __author__ = "David Wallace" __docformat__ = "restructuredtext" __status__ = "Development" @@ -9,6 +5,7 @@ __current_package_name__ = "raman_fitting" __package_name__ = __current_package_name__ +import importlib.util try: from ._version import __version__ @@ -22,9 +19,9 @@ __version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir)) except ModuleNotFoundError: __version__ = "importerr_modulenotfound_version" - except Exception as e: + except Exception: __version__ = "importerr_exception_version" -except Exception as e: +except Exception: __version__ = "catch_exception_version" import sys @@ -42,7 +39,6 @@ soft_dependencies = {} missing_dependencies = [] -import importlib.util for dependency in hard_dependencies: if not importlib.util.find_spec(dependency): @@ -58,20 +54,3 @@ ) del hard_dependencies, soft_dependencies, dependency, missing_dependencies - -# Main Loop Delegator -from raman_fitting.delegating.main_delegator import MainDelegator, make_examples - -# Indexer -from raman_fitting.imports.files.file_indexer import RamanFileIndex - -# Processing -from raman_fitting.imports.spectrum.spectrum_constructor import SpectrumDataLoader -# from raman_fitting.imports.spectrum.spectra_collection import SpectrumDataCollection - - -# Modelling / fitting -from raman_fitting.models.deconvolution.init_models import InitializeModels - -# Exporting / Plotting -from raman_fitting.exports.exporter import ExportManager diff --git a/src/raman_fitting/config/logging_config.py b/src/raman_fitting/config/logging_config.py index 05a9dc1..360bfe3 100644 --- a/src/raman_fitting/config/logging_config.py +++ b/src/raman_fitting/config/logging_config.py @@ -7,7 +7,7 @@ # it is in the same Python interpreter process. 
FORMATTER = logging.Formatter( - "%(asctime)s — %(name)s — %(levelname)s —" "%(funcName)s:%(lineno)d — %(message)s" + "%(asctime)s — %(name)s — %(levelname)s —%(funcName)s:%(lineno)d — %(message)s" ) diff --git a/src/raman_fitting/config/path_settings.py b/src/raman_fitting/config/path_settings.py index 683c7f9..e589d67 100644 --- a/src/raman_fitting/config/path_settings.py +++ b/src/raman_fitting/config/path_settings.py @@ -36,11 +36,10 @@ # Storage file of the index USER_INDEX_FILE_PATH: Path = USER_HOME_PACKAGE / INDEX_FILE_NAME -TEMP_DIR = Path(tempfile.mkdtemp(prefix='raman-fitting-')) -TEMP_RESULTS_DIR: Path = TEMP_DIR / 'results' +TEMP_DIR = Path(tempfile.mkdtemp(prefix="raman-fitting-")) +TEMP_RESULTS_DIR: Path = TEMP_DIR / "results" -# TODO fix label on clean processed spectrum to simple window name -CLEAN_SPEC_WINDOW_NAME_PREFIX = "savgol_filter_raw_window_" +CLEAN_SPEC_REGION_NAME_PREFIX = "savgol_filter_raw_region_" ERROR_MSG_TEMPLATE = "{sample_group} {sampleid}: {msg}" @@ -52,7 +51,7 @@ class InternalPathSettings(BaseModel): example_fixtures: DirectoryPath = Field(INTERNAL_EXAMPLE_FIXTURES) pytest_fixtures: DirectoryPath = Field(INTERNAL_PYTEST_FIXTURES) temp_dir: DirectoryPath = Field(TEMP_RESULTS_DIR) - temp_index_file: FilePath = Field(TEMP_DIR / INDEX_FILE_NAME ) + temp_index_file: FilePath = Field(TEMP_DIR / INDEX_FILE_NAME) EXPORT_FOLDER_NAMES = { @@ -84,9 +83,7 @@ def get_run_mode_paths(run_mode: RunModes, user_package_home: Path = None): "RESULTS_DIR": user_package_home / "examples", "DATASET_DIR": INTERNAL_EXAMPLE_FIXTURES, "USER_CONFIG_FILE": INTERNAL_EXAMPLE_FIXTURES / f"{PACKAGE_NAME}.toml", - "INDEX_FILE": user_package_home - / "examples" - / f"{PACKAGE_NAME}_index.csv", + "INDEX_FILE": user_package_home / "examples" / f"{PACKAGE_NAME}_index.csv", }, RunModes.NORMAL.name: { "RESULTS_DIR": user_package_home / "results", @@ -139,12 +136,9 @@ def initialize_run_mode_paths( ) -> RunModePaths: run_mode_paths = get_run_mode_paths(run_mode, user_package_home=user_package_home) - # USER_HOME_PACKAGE = get_user_destination_dir(USER_HOME_PACKAGE) for destname, destdir in run_mode_paths.items(): destdir = Path(destdir) check_and_make_dirs(destdir) - # dest_dirs["RUN_MODE"] = run_mode - # breakpoint() return RunModePaths(RUN_MODE=run_mode, **run_mode_paths) diff --git a/src/raman_fitting/delegating/__init__.py b/src/raman_fitting/delegating/__init__.py index d76ea1d..e69de29 100644 --- a/src/raman_fitting/delegating/__init__.py +++ b/src/raman_fitting/delegating/__init__.py @@ -1 +0,0 @@ -# import main_delegator# diff --git a/src/raman_fitting/delegating/main_delegator.py b/src/raman_fitting/delegating/main_delegator.py index 921c2e2..8d5f7d6 100644 --- a/src/raman_fitting/delegating/main_delegator.py +++ b/src/raman_fitting/delegating/main_delegator.py @@ -18,7 +18,7 @@ ) from raman_fitting.models.spectrum import SpectrumData from raman_fitting.models.fit_models import SpectrumFitModel -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames from raman_fitting.exports.exporter import ExportManager from raman_fitting.imports.files.file_indexer import ( RamanFileIndex, @@ -56,11 +56,11 @@ class MainDelegator: lmfit_models: LMFitModelCollection = field( default_factory=get_models_and_peaks_from_definitions ) - fit_model_window_names: Sequence[WindowNames] = field( - default=(WindowNames.first_order, WindowNames.second_order) + fit_model_region_names: Sequence[RegionNames] = field( + default=(RegionNames.first_order, 
RegionNames.second_order) ) fit_model_specific_names: Sequence[str] | None = None - sample_IDs: Sequence[str] = field(default_factory=list) + sample_ids: Sequence[str] = field(default_factory=list) sample_groups: Sequence[str] = field(default_factory=list) index: RamanFileIndex = None @@ -92,7 +92,7 @@ def select_samples_from_index(self) -> Sequence[RamanFileInfo]: **dict( raman_files=index.raman_files, sample_groups=self.sample_groups, - sample_IDs=self.sample_IDs, + sample_ids=self.sample_ids, ) ) selection = index_selector.selection @@ -106,38 +106,38 @@ def call_export_manager(self): exports = export.export_files() return exports - # window_names:List[WindowNames], model_names: List[str] + # region_names:List[RegionNames], model_names: List[str] def select_models_from_provided_models(self) -> LMFitModelCollection: - selected_window_names = self.fit_model_window_names + selected_region_names = self.fit_model_region_names selected_model_names = self.fit_model_specific_names selected_models = {} - for window_name, all_window_models in self.lmfit_models.items(): - if window_name not in selected_window_names: + for region_name, all_region_models in self.lmfit_models.items(): + if region_name not in selected_region_names: continue if not selected_model_names: - selected_models[window_name] = all_window_models + selected_models[region_name] = all_region_models continue - selected_window_models = {} - for mod_name, mod_val in all_window_models.items(): + selected_region_models = {} + for mod_name, mod_val in all_region_models.items(): if mod_name not in selected_model_names: continue - selected_window_models[mod_name] = mod_val + selected_region_models[mod_name] = mod_val - selected_models[window_name] = selected_window_models + selected_models[region_name] = selected_region_models return selected_models def select_fitting_model( - self, window_name: WindowNames, model_name: str + self, region_name: RegionNames, model_name: str ) -> BaseLMFitModel: try: - return self.lmfit_models[window_name][model_name] + return self.lmfit_models[region_name][model_name] except KeyError as exc: - raise KeyError(f"Model {window_name} {model_name} not found.") from exc + raise KeyError(f"Model {region_name} {model_name} not found.") from exc def main_run(self): selection = self.select_samples_from_index() - if not self.fit_model_window_names: - logger.info("No model window names were selected.") + if not self.fit_model_region_names: + logger.info("No model region names were selected.") if not self.selected_models: logger.info("No fit models were selected.") @@ -157,42 +157,38 @@ def main_run(self): continue unique_positions = {i.sample.position for i in sgrp} - if not len(unique_positions) > len(sgrp): - # TODO handle edge-case, multiple source files for a single position on a sample + if len(unique_positions) < len(sgrp): + # handle edge-case, multiple source files for a single position on a sample _error_msg = f"Handle multiple source files for a single position on a sample, {group_name} {sample_id}" results[group_name][sample_id]["errors"] = _error_msg logger.debug(_error_msg) - # results[group_name][sample_id]['data_source'] = sgrp model_result = run_fit_over_selected_models(sgrp, self.selected_models) results[group_name][sample_id]["fit_results"] = model_result self.results = results - # TODO add a FitResultModel for collection all the results - # sample_result = {'group': grp, 'spec_fit': spec_fit, 'mean_spec': mean_spec} - # results[group_name][sample_id].update(sample_result) def 
run_fit_over_selected_models( raman_files: List[RamanFileInfo], models: LMFitModelCollection -) -> Dict[WindowNames, AggregatedSampleSpectrumFitResult]: +) -> Dict[RegionNames, AggregatedSampleSpectrumFitResult]: results = {} - for window_name, window_grp in models.items(): + for region_name, region_grp in models.items(): aggregated_spectrum = prepare_aggregated_spectrum_from_files( - window_name, raman_files + region_name, raman_files ) if aggregated_spectrum is None: continue fit_model_results = {} - for model_name, model in window_grp.items(): + for model_name, model in region_grp.items(): spectrum_fit = run_sample_fit_with_model( aggregated_spectrum.spectrum, model ) fit_model_results[model_name] = spectrum_fit - fit_window_results = AggregatedSampleSpectrumFitResult( - window_name=window_name, + fit_region_results = AggregatedSampleSpectrumFitResult( + region_name=region_name, aggregated_spectrum=aggregated_spectrum, fit_model_results=fit_model_results, ) - results[window_name] = fit_window_results + results[region_name] = fit_region_results return results @@ -200,12 +196,12 @@ def run_sample_fit_with_model( spectrum: SpectrumData, model: BaseLMFitModel ) -> SpectrumFitModel: name = model.name - window = model.window_name.name - spec_fit = SpectrumFitModel(spectrum=spectrum, model=model, window=window) - # TODO include optional https://lmfit.github.io/lmfit-py/model.html#saving-and-loading-modelresults + region = model.region_name.name + spec_fit = SpectrumFitModel(spectrum=spectrum, model=model, region=region) + # include optional https://lmfit.github.io/lmfit-py/model.html#saving-and-loading-modelresults spec_fit.run_fit() logger.debug( - f"Fit with model {name} on {window} success: {spec_fit.fit_result.success} in {spec_fit.elapsed_time:.2f}s." + f"Fit with model {name} on {region} success: {spec_fit.fit_result.success} in {spec_fit.elapsed_time:.2f}s." 
) # spec_fit.fit_result.plot(show_init=True) return spec_fit diff --git a/src/raman_fitting/delegating/models.py b/src/raman_fitting/delegating/models.py index fb6ae23..5e59730 100644 --- a/src/raman_fitting/delegating/models.py +++ b/src/raman_fitting/delegating/models.py @@ -7,7 +7,7 @@ from raman_fitting.models.spectrum import SpectrumData from raman_fitting.models.fit_models import SpectrumFitModel -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames from raman_fitting.imports.spectrumdata_parser import SpectrumReader from raman_fitting.processing.post_processing import SpectrumProcessor @@ -24,6 +24,6 @@ class AggregatedSampleSpectrum(BaseModel): class AggregatedSampleSpectrumFitResult(BaseModel): - window_name: WindowNames + region_name: RegionNames aggregated_spectrum: AggregatedSampleSpectrum fit_model_results: Dict[str, SpectrumFitModel] diff --git a/src/raman_fitting/delegating/pre_processing.py b/src/raman_fitting/delegating/pre_processing.py index 66770ed..b28afee 100644 --- a/src/raman_fitting/delegating/pre_processing.py +++ b/src/raman_fitting/delegating/pre_processing.py @@ -1,10 +1,7 @@ from typing import List -from raman_fitting.config.path_settings import ( - CLEAN_SPEC_WINDOW_NAME_PREFIX, -) from raman_fitting.models.spectrum import SpectrumData -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames from raman_fitting.imports.spectrumdata_parser import SpectrumReader from raman_fitting.processing.post_processing import SpectrumProcessor from raman_fitting.imports.models import RamanFileInfo @@ -16,12 +13,14 @@ import numpy as np from loguru import logger +from raman_fitting.config.path_settings import CLEAN_SPEC_REGION_NAME_PREFIX + def prepare_aggregated_spectrum_from_files( - window_name: WindowNames, raman_files: List[RamanFileInfo] + region_name: RegionNames, raman_files: List[RamanFileInfo] ) -> AggregatedSampleSpectrum | None: - selected_processed_data = f"{CLEAN_SPEC_WINDOW_NAME_PREFIX}{window_name}" - clean_data_for_window = {} + selected_processed_data = f"{CLEAN_SPEC_REGION_NAME_PREFIX}{region_name}" + clean_data_for_region = {} data_sources = [] for i in raman_files: read = SpectrumReader(i.file) @@ -33,25 +32,25 @@ selected_clean_data = processed.clean_spectrum.spec_regions[ selected_processed_data ] - clean_data_for_window[i.file] = selected_clean_data + clean_data_for_region[i.file] = selected_clean_data - if not clean_data_for_window: + if not clean_data_for_region: logger.info("prepare_aggregated_spectrum_from_files received no files.") return - # TODO wrap this in a ProcessedSpectraCollection model + # wrap this in a ProcessedSpectraCollection model mean_int = np.mean( - np.vstack([i.intensity for i in clean_data_for_window.values()]), axis=0 + np.vstack([i.intensity for i in clean_data_for_region.values()]), axis=0 ) mean_ramanshift = np.mean( - np.vstack([i.ramanshift for i in clean_data_for_window.values()]), axis=0 + np.vstack([i.ramanshift for i in clean_data_for_region.values()]), axis=0 ) - source_files = list(map(str, clean_data_for_window.keys())) + source_files = list(map(str, clean_data_for_region.keys())) mean_spec = SpectrumData( **{ "ramanshift": mean_ramanshift, "intensity": mean_int, - "label": f"clean_{window_name}_mean", - "window_name": window_name, + "label": f"clean_{region_name}_mean", + "region_name": region_name, "source": source_files, } ) diff --git 
a/src/raman_fitting/exports/file_table.py b/src/raman_fitting/exports/file_table.py index 2398805..661beb7 100644 --- a/src/raman_fitting/exports/file_table.py +++ b/src/raman_fitting/exports/file_table.py @@ -7,7 +7,7 @@ def raw_data_spectra_export(spectra: List[SpectrumData]): try: for spec in spectra: wnxl_outpath_spectra = spec.mean_info.DestRaw.unique()[0].joinpath( - f"spectra_{spec.sIDmean_col}_{spec.windowname}.xlsx" + f"spectra_{spec.sIDmean_col}_{spec.regionname}.xlsx" ) spec.mean_spec.to_excel(wnxl_outpath_spectra) diff --git a/src/raman_fitting/exports/plot_formatting.py b/src/raman_fitting/exports/plot_formatting.py index 7fbfd53..4379235 100644 --- a/src/raman_fitting/exports/plot_formatting.py +++ b/src/raman_fitting/exports/plot_formatting.py @@ -9,7 +9,7 @@ from collections import namedtuple from typing import Sequence, Tuple -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames import matplotlib.pyplot as plt from lmfit import Model as LMFitModel @@ -24,13 +24,13 @@ ModelValidation = namedtuple("ModelValidation", "valid peak_group model_inst message") -PLOT_WINDOW_AXES = { - WindowNames.full: (0, 0), - WindowNames.low: (0, 1), - WindowNames.first_order: (0, 2), - WindowNames.mid: (1, 1), - WindowNames.second_order: (1, 2), - WindowNames.normalization: (1, 0), +PLOT_REGION_AXES = { + RegionNames.full: (0, 0), + RegionNames.low: (0, 1), + RegionNames.first_order: (0, 2), + RegionNames.mid: (1, 1), + RegionNames.second_order: (1, 2), + RegionNames.normalization: (1, 0), } @@ -50,7 +50,7 @@ def get_cmap_list( length: int, cmap_options: Tuple = CMAP_OPTIONS_DEFAULT, default_color: Tuple = DEFAULT_COLOR, -) -> Tuple: +) -> Tuple | None: lst = list(range(length)) if not lst: return None @@ -58,9 +58,10 @@ # set fallback color from class if isinstance(default_color, tuple) and default_color is not None: if len(default_color) == 4: - cmap = [default_color for i in lst] + cmap = [default_color for _ in lst] + return cmap elif default_color is None: - cmap = [DEFAULT_COLOR for i in lst] + cmap = [DEFAULT_COLOR for _ in lst] else: raise ValueError(f"default color is not tuple but {type(default_color)}") diff --git a/src/raman_fitting/exports/plotting_fit_results.py b/src/raman_fitting/exports/plotting_fit_results.py index 855f4e3..c6340c9 100644 --- a/src/raman_fitting/exports/plotting_fit_results.py +++ b/src/raman_fitting/exports/plotting_fit_results.py @@ -13,34 +13,29 @@ from raman_fitting.config.path_settings import ExportPathSettings -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames from raman_fitting.delegating.models import AggregatedSampleSpectrumFitResult from loguru import logger matplotlib.rcParams.update({"font.size": 14}) -# TODO fix big spectrum plot, reduce complexity if-statements def fit_spectrum_plot( - aggregated_spectra: Dict[WindowNames, AggregatedSampleSpectrumFitResult], + aggregated_spectra: Dict[RegionNames, AggregatedSampleSpectrumFitResult], export_paths: ExportPathSettings | None = None, plot_annotation=True, plot_residuals=True, ): # pragma: no cover - first_order = aggregated_spectra[WindowNames.first_order] - second_order = aggregated_spectra[WindowNames.second_order] + first_order = aggregated_spectra[RegionNames.first_order] + second_order = aggregated_spectra[RegionNames.second_order] sources = first_order.aggregated_spectrum.sources sample = sources[0].file_info.sample - # first_model_name = "4peaks" 
second_model_name = "2nd_4peaks" - # first_model = [first_model_name] second_model = second_order.fit_model_results.get(second_model_name) - # breakpoint() for first_model_name, first_model in first_order.fit_model_results.items(): - # for second_model_name, second_model in second_order.fit_model_results.items(): prepare_combined_spectrum_fit_result_plot( first_model, second_model, @@ -64,7 +59,6 @@ ax = plt.subplot(gs[0]) ax_res = plt.subplot(gs[1]) ax.set_title(f"{sample.id}") - # breakpoint() first_model_name = first_model.model.name @@ -92,7 +86,7 @@ # set axes labels and legend set_axes_labels_and_legend(ax) - plot_special_si_components(ax, first_model, first_model_name) + plot_special_si_components(ax, first_model) if export_paths is not None: savepath = export_paths.plots.joinpath(f"Model_{first_model_name}").with_suffix( @@ -256,7 +250,7 @@ def prepare_annotate_fit_report_second(ax2nd, second_result) -> Text: def prepare_annotate_fit_report_first(ax, first_result): fit_report = first_result.fit_report() - if len(fit_report) > -1: # TODO remove - fit_report = fit_report.replace("prefix='D3_'", "prefix='D3_' \n") + fit_report = fit_report.replace("prefix='D3_'", "prefix='D3_' \n") props = dict(boxstyle="round", facecolor="wheat", alpha=0.5) @@ -272,13 +266,12 @@ return annotate_report_first -def plot_special_si_components(ax, first_model, model_name: str): +def plot_special_si_components(ax, first_model): first_result = first_model.fit_result si_components = filter(lambda x: x.prefix.startswith("Si"), first_result.components) first_eval_comps = first_model.fit_result.eval_components() for si_comp in si_components: si_result = si_comp - # TODO should be si_fit_results ax.plot( first_model.spectrum.ramanshift, first_eval_comps[si_comp.prefix], diff --git a/src/raman_fitting/exports/plotting_raw_data.py b/src/raman_fitting/exports/plotting_raw_data.py index 866a666..d53f009 100644 --- a/src/raman_fitting/exports/plotting_raw_data.py +++ b/src/raman_fitting/exports/plotting_raw_data.py @@ -12,12 +12,12 @@ import matplotlib import matplotlib.pyplot as plt -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames from raman_fitting.config.path_settings import ( - CLEAN_SPEC_WINDOW_NAME_PREFIX, + CLEAN_SPEC_REGION_NAME_PREFIX, ExportPathSettings, ) -from raman_fitting.exports.plot_formatting import PLOT_WINDOW_AXES +from raman_fitting.exports.plot_formatting import PLOT_REGION_AXES from raman_fitting.delegating.models import AggregatedSampleSpectrumFitResult from loguru import logger @@ -26,7 +26,7 @@ def raw_data_spectra_plot( - aggregated_spectra: Dict[WindowNames, AggregatedSampleSpectrumFitResult], + aggregated_spectra: Dict[RegionNames, AggregatedSampleSpectrumFitResult], export_paths: ExportPathSettings, ): # pragma: no cover if not aggregated_spectra: @@ -42,15 +42,15 @@ sources_fmt = dict(alpha=0.4, lw=2) try: - fig, ax = plt.subplots(2, 3, figsize=(18, 12)) + _, ax = plt.subplots(2, 3, figsize=(18, 12)) # noqa - for _window_name, window_agg in aggregated_spectra.items(): - mean_spec = window_agg.aggregated_spectrum.spectrum - window_name = mean_spec.window_name - ax_window = ax[PLOT_WINDOW_AXES[window_name]] - selected_processed_data = f"{CLEAN_SPEC_WINDOW_NAME_PREFIX}{window_name}" + for _region_name, region_agg in aggregated_spectra.items(): + mean_spec = region_agg.aggregated_spectrum.spectrum + 
region_name = mean_spec.region_name + ax_region = ax[PLOT_REGION_AXES[region_name]] + selected_processed_data = f"{CLEAN_SPEC_REGION_NAME_PREFIX}{region_name}" # plot the mean aggregated spectrum - ax_window.plot( + ax_region.plot( mean_spec.ramanshift, mean_spec.intensity, label=mean_spec.label, @@ -58,20 +58,20 @@ ) for spec_source in sources: - _legend = True if "full" == window_name else False + _legend = region_name == "full" spec_regions = spec_source.processed.clean_spectrum.spec_regions spec = spec_regions[selected_processed_data] # plot each of the data sources - ax_window.plot( + ax_region.plot( spec.ramanshift, spec.intensity, label=spec_source.file_info.file.stem, **sources_fmt, ) - ax_window.set_title(window_name) + ax_region.set_title(region_name) if _legend: - ax_window.legend(fontsize=10) + ax_region.legend(fontsize=10) plt.suptitle(f"Mean {sample.id}", fontsize=16) plt.savefig( @@ -86,8 +86,8 @@ raise exc from exc -def plot_despike_Z(x): - fig, ax = plt.subplots(2) +def plot_despike_z(x): + _, ax = plt.subplots(2) # noqa ax.plot(x=x, y=["Zt", "Z_t_filtered"], alpha=0.5) ax.plot(x=x, y=["input_intensity", "despiked_intensity"], alpha=0.8) plt.show() diff --git a/src/raman_fitting/imports/collector.py b/src/raman_fitting/imports/collector.py index 092d1b6..e81a8cc 100644 --- a/src/raman_fitting/imports/collector.py +++ b/src/raman_fitting/imports/collector.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import List, Collection +from typing import List, Collection, Tuple import logging from .models import RamanFileInfo @@ -7,19 +7,25 @@ logger = logging.getLogger(__name__) -def collect_raman_file_infos(raman_files: Collection[Path]) -> List[RamanFileInfo]: +def collect_raman_file_infos( + raman_files: Collection[Path], +) -> Tuple[List[RamanFileInfo], List[Path]]: pp_collection = [] - # _extra_assign_destdir_and_set_paths(index) _files = [] + _failed_files = [] for file in raman_files: _files.append(file) try: pp_res = RamanFileInfo(**{"file": file}) pp_collection.append(pp_res) except Exception as exc: - raise exc from exc logger.warning( f"{__name__} collect_raman_file_infos unexpected error for calling RamanFileInfo on\n{file}.\n{exc}" ) - # pp_collection = sorted(pp_collection) + _failed_files.append({"file": file, "error": exc}) + if _failed_files: + logger.warning( + f"{__name__} collect_raman_file_infos failed for {len(_failed_files)} files." + ) + return pp_collection, _files diff --git a/src/raman_fitting/imports/files/file_finder.py b/src/raman_fitting/imports/files/file_finder.py index 52595a1..38137a6 100644 --- a/src/raman_fitting/imports/files/file_finder.py +++ b/src/raman_fitting/imports/files/file_finder.py @@ -35,9 +35,6 @@ def find_files(directory: Path, suffixes: List[str]) -> List[Path]: logger.warning( f"find_files warning: the chosen data file dir was empty.\n{directory}\nPlease choose another directory which contains your data files." 
) - # TODO filter files somewhere else - # raman_files_raw = [i for i in raman_files if not any(k in i for k in excluded) - # "fail" not in i.stem and "Labjournal" not in str(i) logger.info( f"find_files {len(raman_files)} files were found in the chosen data dir:\n\t{directory}" ) diff --git a/src/raman_fitting/imports/files/file_indexer.py b/src/raman_fitting/imports/files/file_indexer.py index 264c885..da6aa53 100644 --- a/src/raman_fitting/imports/files/file_indexer.py +++ b/src/raman_fitting/imports/files/file_indexer.py @@ -109,14 +109,14 @@ def parse_dataset_to_index(dataset: Dataset) -> RamanFileInfoSet: class IndexSelector(BaseModel): raman_files: Sequence[RamanFileInfo] - sample_IDs: List[str] = Field(default_factory=list) + sample_ids: List[str] = Field(default_factory=list) sample_groups: List[str] = Field(default_factory=list) selection: Sequence[RamanFileInfo] = Field(default_factory=list) @model_validator(mode="after") def make_and_set_selection(self) -> "IndexSelector": rf_index = self.raman_files - if not any([self.sample_groups, self.sample_IDs]): + if not any([self.sample_groups, self.sample_ids]): self.selection = rf_index logger.debug( f"{self.__class__.__qualname__} selected {len(self.selection)} of {len(rf_index)}. " @@ -127,11 +127,11 @@ def make_and_set_selection(self) -> "IndexSelector": filter(lambda x: x.sample.group in self.sample_groups, rf_index) ) _pre_selected_samples = {i.sample.id for i in rf_index_groups} - selected_sampleIDs = filterfalse( - lambda x: x in _pre_selected_samples, self.sample_IDs + selected_sample_ids = filterfalse( + lambda x: x in _pre_selected_samples, self.sample_ids ) rf_index_samples = list( - filter(lambda x: x.sample.id in selected_sampleIDs, rf_index) + filter(lambda x: x.sample.id in selected_sample_ids, rf_index) ) rf_selection_index = rf_index_groups + rf_index_samples self.selection = rf_selection_index @@ -180,9 +180,6 @@ def collect_raman_file_index_info( raman_files: Sequence[Path] | None = None, **kwargs ) -> RamanFileInfoSet: """loops over the files and scrapes the index data from each file""" - # if not raman_files: - # raman_files = list(settings.internal_paths.example_fixtures.glob("*.txt")) - index, files = collect_raman_file_infos(raman_files, **kwargs) logger.info(f"successfully made index {len(index)} from {len(files)} files") return index @@ -215,7 +212,6 @@ def main(): except Exception as e: logger.error(f"Raman Index error: {e}") raman_index = None - # ds = cast_raman_files_to_dataset(raman_index.raman_files) return raman_index diff --git a/src/raman_fitting/imports/files/index_funcs.py b/src/raman_fitting/imports/files/index_funcs.py index 0d536a5..39bdd24 100644 --- a/src/raman_fitting/imports/files/index_funcs.py +++ b/src/raman_fitting/imports/files/index_funcs.py @@ -48,7 +48,7 @@ def load_index(index_file): logger.info( f"Succesfully imported Raman Index file from {index_file}, with len({len(index)})" ) - if not len(index) == len(index): + if len(index) != len(index): logger.error( f"""'Error in load_index from {index_file}, \nlength of loaded index not same as number of raman files @@ -102,17 +102,17 @@ def index_selection(index, **kwargs): logger.warning("index selection index arg empty") return - if default_selection: - if default_selection == "all": - index_selection = index.copy() + if default_selection == "all": + index_selection = index.copy() if "samplegroups" in kwargs: - if kwargs["samplegroups"]: - index = list( - filter(lambda x: x.sample.group in kwargs["samplegroups"], index) - ) + index = 
list( + filter(lambda x: x.sample.group in kwargs.get("samplegroups", []), index) + ) if "sampleIDs" in kwargs: - index = list(filter(lambda x: x.sample.id in kwargs["sampleIDs"], index)) + index = list( + filter(lambda x: x.sample.id in kwargs.get("sampleIDs", []), index) + ) if "extra" in kwargs: runq = kwargs.get("run") diff --git a/src/raman_fitting/imports/files/index_helpers.py b/src/raman_fitting/imports/files/index_helpers.py index 50759fc..7ffa5dd 100644 --- a/src/raman_fitting/imports/files/index_helpers.py +++ b/src/raman_fitting/imports/files/index_helpers.py @@ -20,5 +20,5 @@ def get_filename_id_from_path(path: Path) -> str: _parent_suffix_hash = hashlib.sha512( (str(path.parent) + path.suffix).encode("utf-8") ).hexdigest() - fnID = f"{_parent_suffix_hash}_{path.stem}" - return fnID + filename_id = f"{_parent_suffix_hash}_{path.stem}" + return filename_id diff --git a/src/raman_fitting/imports/spectrum/datafile_parsers.py b/src/raman_fitting/imports/spectrum/datafile_parsers.py index 76fbe20..ec8da5f 100644 --- a/src/raman_fitting/imports/spectrum/datafile_parsers.py +++ b/src/raman_fitting/imports/spectrum/datafile_parsers.py @@ -27,7 +27,7 @@ def filter_data_for_numeric(data: Dataset): def load_dataset_from_file(filepath, **kwargs) -> Dataset: with open(filepath, "r") as fh: - imported_data = Dataset().load(fh) + imported_data = Dataset(**kwargs).load(fh) return imported_data @@ -88,7 +88,7 @@ def use_np_loadtxt(filepath, usecols=(0, 1), **kwargs) -> np.array: def cast_array_into_spectrum_frame(array, keys: List[str] = None) -> pd.DataFrame: """cast array into spectrum frame""" - if not array.ndim == len(keys): + if array.ndim != len(keys): raise ValueError( f"Array dimension {array.ndim} does not match the number of keys {len(keys)}" ) diff --git a/src/raman_fitting/imports/spectrum/spectra_collection.py b/src/raman_fitting/imports/spectrum/spectra_collection.py index d79854a..3324a5b 100644 --- a/src/raman_fitting/imports/spectrum/spectra_collection.py +++ b/src/raman_fitting/imports/spectrum/spectra_collection.py @@ -50,8 +50,6 @@ def get_mean_spectra_info(spectra: List[SpectrumDataLoader]) -> Dict: mean_spec_info = { k: list(val)[0] for k, val in _all_spec_info_sets if len(val) == 1 } - # logger.warning(f"get_mean_spectra_info failed for spectra {spectra}") - # mean_spec_info = {} mean_spec_info.update({"mean_spectrum": True}) return mean_spec_info @@ -64,8 +62,8 @@ def calculate_mean_spectrum_from_spectra( try: spectra_regions = [i.clean_spectrum.spec_regions for i in spectra] mean_spec_regions = {} - for window_name in spectra_regions[0].keys(): - regions_specs = [i[window_name] for i in spectra_regions] + for region_name in spectra_regions[0].keys(): + regions_specs = [i[region_name] for i in spectra_regions] ramanshift_mean = np.mean([i.ramanshift for i in regions_specs], axis=0) intensity_mean = np.mean([i.intensity for i in regions_specs], axis=0) @@ -73,10 +71,10 @@ def calculate_mean_spectrum_from_spectra( "ramanshift": ramanshift_mean, "intensity": intensity_mean, "label": regions_specs[0].label + "_mean", - "window_name": window_name + "_mean", + "region_name": region_name + "_mean", } mean_spec = SpectrumData(**_data) - mean_spec_regions[window_name] = mean_spec + mean_spec_regions[region_name] = mean_spec except Exception: logger.warning(f"get_mean_spectra_prep_data failed for spectra {spectra}") @@ -95,6 +93,5 @@ def get_best_guess_spectra_length(spectra: List[SpectrumDataLoader]) -> List: length_counts = [(i, lengths.count(i)) for i in set_lengths] 
best_guess_length = max(length_counts, key=itemgetter(1))[0] print(f"Spectra not same length {length_counts} took {best_guess_length}") - # self._raw_spectra = self._spectra spectra = [spec for spec in spectra if spec.spectrum_length == best_guess_length] return spectra diff --git a/src/raman_fitting/imports/spectrum/spectrum_constructor.py b/src/raman_fitting/imports/spectrum/spectrum_constructor.py index 2f3ccf7..5a58aae 100644 --- a/src/raman_fitting/imports/spectrum/spectrum_constructor.py +++ b/src/raman_fitting/imports/spectrum/spectrum_constructor.py @@ -6,11 +6,12 @@ import pandas as pd from raman_fitting.imports.spectrumdata_parser import SpectrumReader -from raman_fitting.models.splitter import SplittedSpectrum from pydantic import BaseModel from raman_fitting.processing.post_processing import SpectrumProcessor +from raman_fitting.models.splitter import SplitSpectrum + logger = logging.getLogger(__name__) SPECTRUM_KEYS = ("ramanshift", "intensity") @@ -33,7 +34,7 @@ class SpectrumDataLoader: # ovv: pd.DataFrame = field(default_factory=pd.DataFrame, repr=False) run_kwargs: Dict = field(default_factory=dict, repr=False) spectrum_length: int = field(default=0, init=False) - clean_spectrum: SplittedSpectrum = field(default=None, init=False) + clean_spectrum: SplitSpectrum = field(default=None, init=False) def __post_init__(self): self._qcnm = self.__class__.__qualname__ @@ -44,12 +45,11 @@ def validate_info_with_filepath(self): if not self.info: self.info = {"FilePath": self.file} return - FP_from_info = self.info.get("FilePath", None) - if FP_from_info: - if Path(FP_from_info) != self.file: - raise ValueError( - f"Mismatch in value for FilePath: {self.file} != {FP_from_info}" - ) + filepath_ = self.info.get("FilePath", None) + if filepath_ and Path(filepath_) != self.file: + raise ValueError( + f"Mismatch in value for FilePath: {self.file} != {filepath_}" + ) def load_data_delegator(self): """calls the SpectrumReader class""" diff --git a/src/raman_fitting/imports/spectrum/validators.py b/src/raman_fitting/imports/spectrum/validators.py index 4f493c0..68621ef 100644 --- a/src/raman_fitting/imports/spectrum/validators.py +++ b/src/raman_fitting/imports/spectrum/validators.py @@ -37,7 +37,7 @@ def validate(self, spectrum_data: pd.DataFrame): def validate_spectrum_keys_expected_values( spectrum_data: Dataset, expected_values: ValidateSpectrumValues ): - if not expected_values.spectrum_key not in spectrum_data.columns: + if expected_values.spectrum_key not in spectrum_data.columns: logger.error( f"The expected value type {expected_values.spectrum_key} is not in the columns {spectrum_data.columns}" ) diff --git a/src/raman_fitting/imports/spectrumdata_parser.py b/src/raman_fitting/imports/spectrumdata_parser.py index 68d7308..527681e 100644 --- a/src/raman_fitting/imports/spectrumdata_parser.py +++ b/src/raman_fitting/imports/spectrumdata_parser.py @@ -3,6 +3,7 @@ @author: DW """ + from dataclasses import dataclass, field import hashlib @@ -60,7 +61,7 @@ class SpectrumReader: spectrum: SpectrumData = field(default=None) label: str = "raw" - window_name: str = "full" + region_name: str = "full" spectrum_hash: str = field(default=None, repr=False) spectrum_length: int = field(default=0, init=False) @@ -85,7 +86,7 @@ def __post_init__(self, **kwargs): logger.warning( f"The values of {spectrum_key} of this spectrum are invalid. 
{validator}" ) - spec_init = {"label": self.label, "window_name": self.window_name} + spec_init = {"label": self.label, "region_name": self.region_name} _parsed_spec_dict = { k: parsed_spectrum[k] for k in spectrum_keys_expected_values.keys() } diff --git a/src/raman_fitting/models/deconvolution/base_model.py b/src/raman_fitting/models/deconvolution/base_model.py index 5fc8a09..ddda3fe 100644 --- a/src/raman_fitting/models/deconvolution/base_model.py +++ b/src/raman_fitting/models/deconvolution/base_model.py @@ -20,7 +20,7 @@ from raman_fitting.models.deconvolution.lmfit_parameter import ( construct_lmfit_model_from_components, ) -from raman_fitting.models.splitter import WindowNames +from raman_fitting.models.splitter import RegionNames logger = logging.getLogger(__name__) @@ -58,7 +58,7 @@ class BaseLMFitModel(BaseModel): repr=False, ) lmfit_model: LMFitModel = Field(None, init_var=False, repr=False) - window_name: WindowNames + region_name: RegionNames @property def has_substrate(self): @@ -142,16 +142,16 @@ def get_models_and_peaks_from_definitions( if "models" in val } all_models = {} - for window_name, window_model_settings in models_settings.items(): - if window_model_settings is None: + for region_name, region_model_settings in models_settings.items(): + if region_model_settings is None: continue - all_models[window_name] = {} - for model_name, model_peaks in window_model_settings.items(): - all_models[window_name][model_name] = BaseLMFitModel( + all_models[region_name] = {} + for model_name, model_peaks in region_model_settings.items(): + all_models[region_name][model_name] = BaseLMFitModel( name=model_name, peaks=model_peaks, peak_collection=peak_collection, - window_name=window_name, + region_name=region_name, ) return all_models diff --git a/src/raman_fitting/models/deconvolution/base_peak.py b/src/raman_fitting/models/deconvolution/base_peak.py index 82f51c4..76b3c4c 100644 --- a/src/raman_fitting/models/deconvolution/base_peak.py +++ b/src/raman_fitting/models/deconvolution/base_peak.py @@ -218,7 +218,9 @@ def get_peaks_from_peak_definitions( ) -> Dict[str, BasePeak]: if peak_definitions is None: peak_definitions = load_config_from_toml_files() - peak_settings = {k: val.get("peaks") for k, val in peak_definitions.items() if "peaks" in val} + peak_settings = { + k: val.get("peaks") for k, val in peak_definitions.items() if "peaks" in val + } peak_models = {} for peak_type, peak_type_defs in peak_settings.items(): if peak_type_defs is None: @@ -231,7 +233,6 @@ def get_peaks_from_peak_definitions( def _main(): model_definitions = load_config_from_toml_files() print(model_definitions["first_order"]["models"]) - # PARAMETER_ARGS = inspect.signature(Parameter).parameters.keys() peaks = {} peak_items = { **model_definitions["first_order"]["peaks"], @@ -240,8 +241,8 @@ def _main(): for k, v in peak_items: peaks.update({k: BasePeak(**v)}) - D_peak = BasePeak(**model_definitions["first_order"]["peaks"]["D"]) - print(D_peak) + peak_d = BasePeak(**model_definitions["first_order"]["peaks"]["D"]) + print(peak_d) model_items = { **model_definitions["first_order"]["models"], **model_definitions["second_order"]["models"], diff --git a/src/raman_fitting/models/deconvolution/init_models.py b/src/raman_fitting/models/deconvolution/init_models.py index cb1f7d2..6a0e7e6 100644 --- a/src/raman_fitting/models/deconvolution/init_models.py +++ b/src/raman_fitting/models/deconvolution/init_models.py @@ -21,7 +21,7 @@ class InitializeModels: model_definitions: dict = field(default_factory=dict) peaks: 
dict = field(default_factory=dict) - lmfit_models: Dict[str, Dict[str, BaseLMFitModel]] = None + lmfit_models: Dict[str, Dict[str, BaseLMFitModel]] | None = None def __post_init__(self): self.model_definitions = self.model_definitions or {} diff --git a/src/raman_fitting/models/deconvolution/lmfit_parameter.py b/src/raman_fitting/models/deconvolution/lmfit_parameter.py index 0d57aba..eab314c 100644 --- a/src/raman_fitting/models/deconvolution/lmfit_parameter.py +++ b/src/raman_fitting/models/deconvolution/lmfit_parameter.py @@ -92,23 +92,23 @@ class LMFitParameterHints(BaseModel): @model_validator(mode="after") def check_min_max(self) -> "LMFitParameterHints": - min, max = self.min, self.max - if min is not None and max is not None and min > max: + min_, max_ = self.min, self.max + if min_ is not None and max_ is not None and min_ > max_: raise ValueError("Min must be less than max") return self @model_validator(mode="after") def check_value_min_max(self) -> "LMFitParameterHints": - value, min, max = self.value, self.min, self.max + value, min_, max_ = self.value, self.min, self.max if value is None: raise ValueError("Value must not be None") - if min is not None: - assert value >= min - if max is not None: - assert value <= max - if max and min: - assert min <= value <= max - assert min < max + if min_ is not None: + assert value >= min_ + if max_ is not None: + assert value <= max_ + if max_ and min_: + assert min_ <= value <= max_ + assert min_ < max_ return self @model_validator(mode="after") diff --git a/src/raman_fitting/models/deconvolution/spectrum_regions.py b/src/raman_fitting/models/deconvolution/spectrum_regions.py index d22235d..a64a8bf 100644 --- a/src/raman_fitting/models/deconvolution/spectrum_regions.py +++ b/src/raman_fitting/models/deconvolution/spectrum_regions.py @@ -12,13 +12,13 @@ def get_default_regions_from_toml_files() -> Dict[str, Dict[str, float]]: return default_regions -WindowNames = StrEnum( - "WindowNames", " ".join(get_default_regions_from_toml_files()), module=__name__ +RegionNames = StrEnum( + "RegionNames", " ".join(get_default_regions_from_toml_files()), module=__name__ ) -class SpectrumWindowLimits(BaseModel): - name: WindowNames +class SpectrumRegionLimits(BaseModel): + name: RegionNames min: int max: int extra_margin: int = 20 diff --git a/src/raman_fitting/models/fit_models.py b/src/raman_fitting/models/fit_models.py index 9a5181a..ada1b8f 100644 --- a/src/raman_fitting/models/fit_models.py +++ b/src/raman_fitting/models/fit_models.py @@ -7,7 +7,7 @@ from lmfit.model import ModelResult from raman_fitting.models.deconvolution.base_model import BaseLMFitModel -from raman_fitting.models.deconvolution.spectrum_regions import WindowNames +from raman_fitting.models.deconvolution.spectrum_regions import RegionNames from raman_fitting.models.spectrum import SpectrumData @@ -31,18 +31,18 @@ class SpectrumFitModel(BaseModel): spectrum: SpectrumData model: BaseLMFitModel - window: WindowNames + region: RegionNames fit_kwargs: Dict = Field(default_factory=dict) fit_result: ModelResult = Field(None, init_var=False) elapsed_time: float = Field(0, init_var=False, repr=False) @model_validator(mode="after") - def match_window_names(self): - model_window = self.model.window_name - spec_window = self.spectrum.window_name - if model_window != spec_window: + def match_region_names(self): + model_region = self.model.region_name + spec_region = self.spectrum.region_name + if model_region != spec_region: raise ValueError( - f"Window names do not match {model_window} and 
{spec_window}" + f"Region names do not match {model_region} and {spec_region}" ) return self @@ -58,7 +58,6 @@ def run_fit(self) -> None: self.fit_result = fit_result def process_fit_results(self): - # TODO add parameter post processing steps self.fit_result fit_attrs = { @@ -70,12 +69,12 @@ def process_fit_results(self): def run_fit( - model: LMFitModel, spectrum: SpectrumData, method="leastsq", **kws + model: LMFitModel, spectrum: SpectrumData, method="leastsq", **kwargs ) -> ModelResult: # ideas: improve fitting loop so that starting parameters from modelX and modelX+Si are shared, faster... init_params = model.make_params() x, y = spectrum.ramanshift, spectrum.intensity - out = model.fit(y, init_params, x=x, method=method) # 'leastsq' + out = model.fit(y, init_params, x=x, method=method, **kwargs) # 'leastsq' return out @@ -93,9 +92,9 @@ def run_fit( spectrum_processor = SpectrumProcessor(specread.spectrum) clean_spec_1st_order = spectrum_processor.clean_spectrum.spec_regions[ - "savgol_filter_raw_window_first_order" + "savgol_filter_raw_region_first_order" ] - clean_spec_1st_order.window_name = "first_order" + clean_spec_1st_order.region_name = "first_order" from raman_fitting.models.deconvolution.base_model import ( get_models_and_peaks_from_definitions, diff --git a/src/raman_fitting/models/spectrum.py b/src/raman_fitting/models/spectrum.py index 5fc5dba..47d3980 100644 --- a/src/raman_fitting/models/spectrum.py +++ b/src/raman_fitting/models/spectrum.py @@ -1,5 +1,4 @@ -from dataclasses import dataclass -from typing import Sequence, Tuple +from typing import Sequence import numpy as np from pydantic import ( @@ -16,7 +15,7 @@ class SpectrumData(BaseModel): ramanshift: pnd.Np1DArrayFp32 = Field(repr=False) intensity: pnd.Np1DArrayFp32 = Field(repr=False) label: str - window_name: str = None + region_name: str | None = None source: Sequence[str] | None = None @model_validator(mode="after") @@ -45,29 +44,3 @@ class SpectrumMetaData(BaseModel): sample_position: str creation_date: AwareDatetime source_file: FilePath # FileStem is derived - - -@dataclass -class NotSpectrumMetaData: - spec_name: str = "spectrum_info" - sGrp_cols: Tuple[str] = ("SampleGroup", "SampleID", "FileCreationDate") - sPos_cols: Tuple[str] = ("FileStem", "SamplePos", "FilePath") - spectrum_cols: Tuple[str] = ("ramanshift", "intensity_raw", "intensity") - spectrum_info_cols: Tuple[str] = ("spectrum_length",) - export_info_cols: Tuple[str] = ( - "DestGrpDir", - "DestFittingPlots", - "DestFittingComps", - "DestRaw", - ) - - @property - def info_cols(self): - info_cols = ( - self.sGrp_cols - + self.sPos_cols - + self.spectrum_cols - + self.spectrum_info_cols - + self.export_info_cols - ) - return info_cols diff --git a/src/raman_fitting/models/splitter.py b/src/raman_fitting/models/splitter.py index 08189e9..863c7ed 100644 --- a/src/raman_fitting/models/splitter.py +++ b/src/raman_fitting/models/splitter.py @@ -4,80 +4,80 @@ from pydantic import BaseModel, model_validator, Field from .spectrum import SpectrumData from .deconvolution.spectrum_regions import ( - SpectrumWindowLimits, - WindowNames, + SpectrumRegionLimits, + RegionNames, get_default_regions_from_toml_files, ) -class SplittedSpectrum(BaseModel): +class SplitSpectrum(BaseModel): spectrum: SpectrumData - window_limits: Dict[str, SpectrumWindowLimits] = Field(None, init_var=None) + region_limits: Dict[str, SpectrumRegionLimits] = Field(None, init_var=None) spec_regions: Dict[str, SpectrumData] = Field(None, init_var=None) info: Dict[str, Any] = 
Field(default_factory=dict) @model_validator(mode="after") - def process_spectrum(self) -> "SplittedSpectrum": - if self.window_limits is None: - window_limits = get_default_spectrum_window_limits() - self.window_limits = window_limits + def process_spectrum(self) -> "SplitSpectrum": + if self.region_limits is None: + region_limits = get_default_spectrum_region_limits() + self.region_limits = region_limits if self.spec_regions is not None: return self spec_regions = split_spectrum_data_in_regions( self.spectrum.ramanshift, self.spectrum.intensity, - spec_window_limits=self.window_limits, + spec_region_limits=self.region_limits, label=self.spectrum.label, ) self.spec_regions = spec_regions return self - def get_window(self, window_name: WindowNames): - window_name = WindowNames(window_name) - spec_window_keys = [ - i for i in self.spec_regions.keys() if window_name.name in i + def get_region(self, region_name: RegionNames): + region_name = RegionNames(region_name) + spec_region_keys = [ + i for i in self.spec_regions.keys() if region_name.name in i ] - if not len(spec_window_keys) == 1: - raise ValueError(f"Key {window_name} not in {spec_window_keys}") - spec_window_key = spec_window_keys[0] - return self.spec_regions[spec_window_key] + if len(spec_region_keys) != 1: + raise ValueError(f"Key {region_name} not in {spec_region_keys}") + spec_region_key = spec_region_keys[0] + return self.spec_regions[spec_region_key] -def get_default_spectrum_window_limits( +def get_default_spectrum_region_limits( regions_mapping: Dict = None, -) -> Dict[str, SpectrumWindowLimits]: +) -> Dict[str, SpectrumRegionLimits]: if regions_mapping is None: regions_mapping = get_default_regions_from_toml_files() regions = {} - for window_name, window_config in regions_mapping.items(): - regions[window_name] = SpectrumWindowLimits(name=window_name, **window_config) + for region_name, region_config in regions_mapping.items(): + regions[region_name] = SpectrumRegionLimits(name=region_name, **region_config) return regions def split_spectrum_data_in_regions( - ramanshift: np.array, intensity: np.array, spec_window_limits=None, label=None + ramanshift: np.array, intensity: np.array, spec_region_limits=None, label=None ) -> Dict[str, SpectrumData]: """ - For splitting of spectra into the several SpectrumWindowLimits, - the names of the regions are taken from SpectrumWindowLimits + For splitting of spectra into the several SpectrumRegionLimits, + the names of the regions are taken from SpectrumRegionLimits and set as attributes to the instance. 
""" - if spec_window_limits is None: - spec_window_limits = get_default_spectrum_window_limits() + if spec_region_limits is None: + spec_region_limits = get_default_spectrum_region_limits() spec_regions = {} - for window_name, window in spec_window_limits.items(): - # find indices of window in ramanshift array - ind = (ramanshift >= np.min(window.min)) & (ramanshift <= np.max(window.max)) - window_lbl = f"window_{window_name}" - if label is not None and label not in window_lbl: - window_lbl = f"{label}_{window_lbl}" + for region_name, region in spec_region_limits.items(): + # find indices of region in ramanshift array + ind = (ramanshift >= np.min(region.min)) & (ramanshift <= np.max(region.max)) + region_lbl = f"region_{region_name}" + if label is not None and label not in region_lbl: + region_lbl = f"{label}_{region_lbl}" _data = { "ramanshift": ramanshift[ind], "intensity": intensity[ind], - "label": window_lbl, - "window_name": window_name, + "label": region_lbl, + "region_name": region_name, } - spec_regions[window_lbl] = SpectrumData(**_data) + spec_regions[region_lbl] = SpectrumData(**_data) return spec_regions diff --git a/src/raman_fitting/processing/baseline_subtraction.py b/src/raman_fitting/processing/baseline_subtraction.py index 497e1db..241060b 100644 --- a/src/raman_fitting/processing/baseline_subtraction.py +++ b/src/raman_fitting/processing/baseline_subtraction.py @@ -3,66 +3,62 @@ import numpy as np from scipy.stats import linregress -from ..models.splitter import SplittedSpectrum +from ..models.splitter import SplitSpectrum from ..models.spectrum import SpectrumData logger = logging.getLogger(__name__) -def subtract_baseline_per_window( - spec: SpectrumData, splitted_spectrum: SplittedSpectrum -): +def subtract_baseline_per_region(spec: SpectrumData, split_spectrum: SplitSpectrum): ramanshift = spec.ramanshift intensity = spec.intensity - window_name = spec.window_name + region_name = spec.region_name label = spec.label - regions_data = splitted_spectrum.spec_regions - window_limits = splitted_spectrum.window_limits + regions_data = split_spectrum.spec_regions + region_limits = split_spectrum.region_limits selected_intensity = intensity - window_config = window_limits[window_name] - window_name_first_order = list( + region_config = region_limits[region_name] + region_name_first_order = list( filter(lambda x: "first_order" in x, regions_data.keys()) ) if ( - any((i in window_name or i in label) for i in ("full", "norm")) - and window_name_first_order + any((i in region_name or i in label) for i in ("full", "norm")) + and region_name_first_order ): - selected_intensity = regions_data[window_name_first_order[0]].intensity - window_config = window_limits["first_order"] + selected_intensity = regions_data[region_name_first_order[0]].intensity + region_config = region_limits["first_order"] bl_linear = linregress( ramanshift[[0, -1]], [ - np.mean(selected_intensity[0 : window_config.extra_margin]), - np.mean(selected_intensity[-window_config.extra_margin : :]), + np.mean(selected_intensity[0 : region_config.extra_margin]), + np.mean(selected_intensity[-region_config.extra_margin : :]), ], ) i_blcor = intensity - (bl_linear[0] * ramanshift + bl_linear[1]) - # if np.isnan(i_blcor).any(): - # raise ValueError("There are nan values in subtract_baseline_per_window") return i_blcor, bl_linear -def subtract_baseline_from_splitted_spectrum( - splitted_spectrum: SplittedSpectrum, label=None -) -> SplittedSpectrum: +def subtract_baseline_from_split_spectrum( + split_spectrum: 
SplitSpectrum, label=None ) -> SplitSpectrum: _bl_spec_regions = {} _info = {} label = "blcorr" if label is None else label - for window_name, spec in splitted_spectrum.spec_regions.items(): - blcorr_int, blcorr_lin = subtract_baseline_per_window(spec, splitted_spectrum) + for region_name, spec in split_spectrum.spec_regions.items(): + blcorr_int, blcorr_lin = subtract_baseline_per_region(spec, split_spectrum) new_label = f"{label}_{spec.label}" if label not in spec.label else spec.label spec = SpectrumData( **{ "ramanshift": spec.ramanshift, "intensity": blcorr_int, "label": new_label, - "windown_name": window_name, + "region_name": region_name, } ) - _bl_spec_regions.update(**{window_name: spec}) - _info.update(**{window_name: blcorr_lin}) - bl_corrected_spectra = splitted_spectrum.model_copy( + _bl_spec_regions.update(**{region_name: spec}) + _info.update(**{region_name: blcorr_lin}) + bl_corrected_spectra = split_spectrum.model_copy( update={"spec_regions": _bl_spec_regions, "info": _info} ) return bl_corrected_spectra @@ -70,11 +66,11 @@ def subtract_baseline( ramanshift: np.array, intensity: np.array, label: str = None -) -> SplittedSpectrum: +) -> SplitSpectrum: "Subtract a linear baseline of background intensity from a spectrum." spectrum = SpectrumData(ramanshift=ramanshift, intensity=intensity, label=label) - splitted_spectrum = SplittedSpectrum(spectrum=spectrum) - blcorrected_spectrum = subtract_baseline_from_splitted_spectrum( - splitted_spectrum, label=label + split_spectrum = SplitSpectrum(spectrum=spectrum) + blcorrected_spectrum = subtract_baseline_from_split_spectrum( + split_spectrum, label=label ) return blcorrected_spectrum diff --git a/src/raman_fitting/processing/despike.py b/src/raman_fitting/processing/despike.py index ca7af67..8c7c084 100644 --- a/src/raman_fitting/processing/despike.py +++ b/src/raman_fitting/processing/despike.py @@ -4,7 +4,7 @@ @author: dw """ -from typing import Dict, Tuple, Any +from typing import Dict, Tuple, Any, Optional import copy import logging @@ -18,9 +18,9 @@ class SpectrumDespiker(BaseModel): - spectrum: SpectrumData = None + spectrum: Optional[SpectrumData] = None threshold_z_value: int = 4 moving_window_size: int = 1 ignore_lims: Tuple[int, int] = (20, 46) info: Dict = Field(default_factory=dict) despiked_spectrum: SpectrumData = Field(None) @@ -97,18 +97,11 @@ def 
calc_z_value_intensity(intensity: np.ndarray) -> np.ndarray: diff_intensity = np.append(np.diff(intensity), 0) # dYt - # dYt = intensity.diff() median_diff_intensity = np.median(diff_intensity) # dYt_Median - # M = dYt.median() - # dYt_M = dYt-M - median_abs_deviation = np.median( abs(diff_intensity - median_diff_intensity) ) # dYt_MAD - # MAD = np.mad(diff_intensity) + median_abs_deviation = np.median(abs(diff_intensity - median_diff_intensity)) intensity_values_z = ( 0.6745 * (diff_intensity - median_diff_intensity) ) / median_abs_deviation - # intensity = blcor.assign(**{'abs_z_intensity': z_intensity.abs()}) return intensity_values_z diff --git a/src/raman_fitting/processing/filter.py b/src/raman_fitting/processing/filter.py index c19aebb..1a78d01 100644 --- a/src/raman_fitting/processing/filter.py +++ b/src/raman_fitting/processing/filter.py @@ -10,8 +10,7 @@ class IntensityProcessor(Protocol): - def process_intensity(self, intensity: np.ndarray) -> np.ndarray: - ... + def process_intensity(self, intensity: np.ndarray) -> np.ndarray: ... @dataclass @@ -55,15 +54,6 @@ def filter_spectrum( return filtered_spectrum -# def savgol_filter(intensity: np.ndarray): -# args = (13, 3) -# kwargs = dict(mode='nearest') -# func = signal.savgol_filter -# savgol_int_filter = IntensityFilter(func, filter_args=args, filter_kwargs=kwargs) -# filtered_intensity = savgol_int_filter.process_intensity() -# return filtered_intensity - - """ Parameters ---------- diff --git a/src/raman_fitting/processing/normalization.py b/src/raman_fitting/processing/normalization.py index c64b654..5b39ec0 100644 --- a/src/raman_fitting/processing/normalization.py +++ b/src/raman_fitting/processing/normalization.py @@ -2,30 +2,30 @@ import numpy as np -from ..models.splitter import SplittedSpectrum +from ..models.splitter import SplitSpectrum from ..models.spectrum import SpectrumData from ..models.fit_models import SpectrumFitModel, LMFitModel from loguru import logger -def get_simple_normalization_intensity(splitted_spectrum: SplittedSpectrum) -> float: - norm_spec = splitted_spectrum.get_window("normalization") +def get_simple_normalization_intensity(split_spectrum: SplitSpectrum) -> float: + norm_spec = split_spectrum.get_region("normalization") normalization_intensity = np.nanmax(norm_spec.intensity) return normalization_intensity def get_normalization_factor( - splitted_spectrum: SplittedSpectrum, + split_spectrum: SplitSpectrum, norm_method="simple", normalization_model: LMFitModel = None, ) -> float: - simple_norm = get_simple_normalization_intensity(splitted_spectrum) + simple_norm = get_simple_normalization_intensity(split_spectrum) normalization_intensity = simple_norm if "fit" in norm_method and normalization_model is not None: fit_norm = normalizer_fit_model( - splitted_spectrum, normalization_model=normalization_model + split_spectrum, normalization_model=normalization_model ) if fit_norm is not None: normalization_intensity = fit_norm 
diff --git a/src/raman_fitting/processing/normalization.py b/src/raman_fitting/processing/normalization.py
index c64b654..5b39ec0 100644
--- a/src/raman_fitting/processing/normalization.py
+++ b/src/raman_fitting/processing/normalization.py
@@ -2,30 +2,30 @@

 import numpy as np

-from ..models.splitter import SplittedSpectrum
+from ..models.splitter import SplitSpectrum
 from ..models.spectrum import SpectrumData
 from ..models.fit_models import SpectrumFitModel, LMFitModel

 from loguru import logger


-def get_simple_normalization_intensity(splitted_spectrum: SplittedSpectrum) -> float:
-    norm_spec = splitted_spectrum.get_window("normalization")
+def get_simple_normalization_intensity(split_spectrum: SplitSpectrum) -> float:
+    norm_spec = split_spectrum.get_region("normalization")
     normalization_intensity = np.nanmax(norm_spec.intensity)
     return normalization_intensity


 def get_normalization_factor(
-    splitted_spectrum: SplittedSpectrum,
+    split_spectrum: SplitSpectrum,
     norm_method="simple",
     normalization_model: LMFitModel = None,
 ) -> float:
-    simple_norm = get_simple_normalization_intensity(splitted_spectrum)
+    simple_norm = get_simple_normalization_intensity(split_spectrum)
     normalization_intensity = simple_norm

     if "fit" in norm_method and normalization_model is not None:
         fit_norm = normalizer_fit_model(
-            splitted_spectrum, normalization_model=normalization_model
+            split_spectrum, normalization_model=normalization_model
         )
         if fit_norm is not None:
             normalization_intensity = fit_norm
@@ -34,39 +34,39 @@ def get_normalization_factor(
     return norm_factor


-def normalize_regions_in_splitted_spectrum(
-    splitted_spectrum: SplittedSpectrum, norm_factor: float, label: Optional[str] = None
-) -> SplittedSpectrum:
+def normalize_regions_in_split_spectrum(
+    split_spectrum: SplitSpectrum, norm_factor: float, label: Optional[str] = None
+) -> SplitSpectrum:
     norm_spec_regions = {}
     norm_infos = {}
-    label = splitted_spectrum.spectrum.label if label is None else label
-    for window_name, spec in splitted_spectrum.spec_regions.items():
-        norm_label = f"{window_name}_{label}" if window_name not in label else label
+    label = split_spectrum.spectrum.label if label is None else label
+    for region_name, spec in split_spectrum.spec_regions.items():
+        norm_label = f"{region_name}_{label}" if region_name not in label else label
         norm_label = f"norm_{norm_label}" if "norm" not in norm_label else norm_label
-        # label looks like "norm_windowname_label"
+        # label looks like "norm_regionname_label"
         _data = SpectrumData(
             **{
                 "ramanshift": spec.ramanshift,
                 "intensity": spec.intensity * norm_factor,
                 "label": norm_label,
-                "window_name": window_name,
+                "region_name": region_name,
             }
         )
-        norm_spec_regions.update(**{window_name: _data})
-        norm_infos.update(**{window_name: {"normalization_factor": norm_factor}})
-    norm_spectra = splitted_spectrum.model_copy(
+        norm_spec_regions.update(**{region_name: _data})
+        norm_infos.update(**{region_name: {"normalization_factor": norm_factor}})
+    norm_spectra = split_spectrum.model_copy(
         update={"spec_regions": norm_spec_regions, "info": norm_infos}
     )
     return norm_spectra


-def normalize_splitted_spectrum(
-    splitted_spectrum: SplittedSpectrum,
-) -> SplittedSpectrum:
+def normalize_split_spectrum(
+    split_spectrum: SplitSpectrum,
+) -> SplitSpectrum:
     "Normalize the spectrum intensity according to normalization method."
-    normalization_factor = get_normalization_factor(splitted_spectrum)
-    norm_data = normalize_regions_in_splitted_spectrum(
-        splitted_spectrum, normalization_factor
+    normalization_factor = get_normalization_factor(split_spectrum)
+    norm_data = normalize_regions_in_split_spectrum(
+        split_spectrum, normalization_factor
     )
     return norm_data
@@ -82,5 +82,3 @@ def normalizer_fit_model(
         return spec_fit.fit_result.params["G_height"].value
     except KeyError as e:
         logger.error(e)
-    except Exception as e:
-        raise e from e
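The "simple" normalization path takes the highest intensity found in the dedicated "normalization" region and scales every region by the resulting factor. The conversion from that maximum to norm_factor falls between the hunks shown; presumably it is the reciprocal, so the strongest point of the normalization region maps to 1.0. A minimal sketch under that assumption:

import numpy as np


def simple_normalization_factor(norm_region_intensity: np.ndarray) -> float:
    # nanmax tolerates NaNs left behind by earlier cleaning steps.
    return 1.0 / np.nanmax(norm_region_intensity)


# Applied uniformly, every region is rescaled by the same factor:
# normalized = {name: spec.intensity * factor for name, spec in spec_regions.items()}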
diff --git a/src/raman_fitting/processing/post_processing.py b/src/raman_fitting/processing/post_processing.py
index 4f89f53..bdd036d 100644
--- a/src/raman_fitting/processing/post_processing.py
+++ b/src/raman_fitting/processing/post_processing.py
@@ -4,11 +4,11 @@

 from raman_fitting.models.spectrum import SpectrumData

-from .baseline_subtraction import subtract_baseline_from_splitted_spectrum
+from .baseline_subtraction import subtract_baseline_from_split_spectrum
 from .filter import filter_spectrum
 from .despike import SpectrumDespiker
-from ..models.splitter import SplittedSpectrum
-from .normalization import normalize_splitted_spectrum
+from ..models.splitter import SplitSpectrum
+from .normalization import normalize_split_spectrum

 logger = logging.getLogger(__name__)

@@ -17,8 +17,7 @@


 class PostProcessor(Protocol):
-    def process_spectrum(self, spectrum: SpectrumData):
-        ...
+    def process_spectrum(self, spectrum: SpectrumData): ...


 @dataclass
@@ -32,7 +31,7 @@ def __post_init__(self):
         self.clean_spectrum = processed_spectrum
         self.processed = True

-    def process_spectrum(self) -> SplittedSpectrum:
+    def process_spectrum(self) -> SplitSpectrum:
         pre_processed_spectrum = self.pre_process_intensity()
         post_processed_spectra = self.post_process_spectrum(pre_processed_spectrum)
         return post_processed_spectra
@@ -42,8 +41,8 @@ def pre_process_intensity(self) -> SpectrumData:
         despiker = SpectrumDespiker(**{"spectrum": filtered_spectrum})
         return despiker.despiked_spectrum

-    def post_process_spectrum(self, spectrum: SpectrumData) -> SplittedSpectrum:
-        split_spectrum = SplittedSpectrum(spectrum=spectrum)
-        baseline_subtracted = subtract_baseline_from_splitted_spectrum(split_spectrum)
-        normalized_spectra = normalize_splitted_spectrum(baseline_subtracted)
+    def post_process_spectrum(self, spectrum: SpectrumData) -> SplitSpectrum:
+        split_spectrum = SplitSpectrum(spectrum=spectrum)
+        baseline_subtracted = subtract_baseline_from_split_spectrum(split_spectrum)
+        normalized_spectra = normalize_split_spectrum(baseline_subtracted)
         return normalized_spectra
diff --git a/tests/deconvolution_models/test_base_model.py b/tests/deconvolution_models/test_base_model.py
index 9316449..1687fd2 100644
--- a/tests/deconvolution_models/test_base_model.py
+++ b/tests/deconvolution_models/test_base_model.py
@@ -31,24 +31,25 @@ def test_empty_base_model():
     with pytest.raises(ValidationError):
         BaseLMFitModel(peaks="A+B")
-
+
     with pytest.raises(ValidationError):
-        BaseLMFitModel(name="Test_empty", peaks="A+B",window_name="full")
+        BaseLMFitModel(name="Test_empty", peaks="A+B", region_name="full")


 def test_base_model_2peaks():
-    bm = BaseLMFitModel(name="Test_2peaks", peaks="K2+D+G", window_name="full")
+    bm = BaseLMFitModel(name="Test_2peaks", peaks="K2+D+G", region_name="full")
     assert set(helper_get_list_components(bm)) == set(["D_", "G_"])
     bm.add_substrate()
     assert set(helper_get_list_components(bm)) == set(["D_", "G_", SUBSTRATE_PREFIX])
     bm.remove_substrate()
     assert set(helper_get_list_components(bm)) == set(["D_", "G_"])

+
 def test_base_model_wrong_chars_model_name():
     bm = BaseLMFitModel(
         name="Test_wrong_chars",
         peaks="K2+---////+ +7 +K1111+1D+D2",
-        window_name="full",
+        region_name="full",
     )
     assert set(helper_get_list_components(bm)) == set(["D2_"])
     bm.add_substrate()
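The post_processing.py changes keep SpectrumProcessor's fixed chain intact: smooth, despike, split into regions, subtract a baseline, normalize. A sketch of that chain on a single intensity array, assuming scipy; the Savitzky-Golay arguments (13, 3) and mode="nearest" come from the commented-out helper removed from filter.py, and the linear baseline here is a stand-in for the package's subtract_baseline_from_split_spectrum:

import numpy as np
from scipy import signal


def process_intensity(intensity: np.ndarray) -> np.ndarray:
    # 1. Smooth with a Savitzky-Golay filter (window 13, polyorder 3).
    smoothed = signal.savgol_filter(intensity, 13, 3, mode="nearest")
    # 2. Despike with the modified z-score sketch shown after despike.py above.
    despiked = despike(smoothed)
    # 3. Subtract a baseline; a straight line between the endpoints stands in
    #    for the per-region baseline fit.
    baseline = np.linspace(despiked[0], despiked[-1], despiked.size)
    corrected = despiked - baseline
    # 4. Normalize so the strongest point equals 1.0.
    return corrected / np.nanmax(corrected)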
diff --git a/tests/deconvolution_models/test_base_peaks.py b/tests/deconvolution_models/test_base_peaks.py
index 2b01c59..55ac67a 100644
--- a/tests/deconvolution_models/test_base_peaks.py
+++ b/tests/deconvolution_models/test_base_peaks.py
@@ -40,23 +40,26 @@ def test_basepeak_initialization():
     test_peak = BasePeak(peak_name="test", peak_type="Voigt")
     assert test_peak.peak_name == "test"

+
 @pytest.mark.skip(reason="TODO: add field validations")
 def test_empty_base_class_with_kwargs_raises():
     eb = BasePeak(peak_type="Voigt", peak_name="test")
     assert eb.peak_type == "Voigt"
-    # TODO built in field validation str_length
+    # add in field validation str_length
     with pytest.raises(ValueError) as excinfo:
         eb.peak_name = 10 * "emptytest"
     assert _error_message_contains(excinfo, "value for peak_name is too long 90")
-    # TODO built in field validation for peak_type
+    # add built in field validation for peak_type
     with pytest.raises(ValueError) as excinfo:
         eb.peak_type = "VoigtLorentzian"
-    assert _error_message_contains(excinfo,
-        ''''Multiple options ['Lorentzian', 'Voigt'] for misspelled value "VoigtLorentzian"''',
-    )
+    assert _error_message_contains(
+        excinfo,
+        ''''Multiple options ['Lorentzian', 'Voigt'] for misspelled value "VoigtLorentzian"''',
+    )

+
 def test_base_class_good_with_init_extra_tests():
     td1_kwargs = dict(
@@ -80,6 +83,7 @@ def test_base_class_good_with_init_extra_tests():
     #     td1.peak_name = "R2D2"
     #     assert td1.lmfit_model.prefix == "R2D2_"

+
 def test_base_class_good_with_init():
     d1_kwargs = dict(
         peak_name="D1D1",
@@ -94,6 +98,7 @@ def test_base_class_good_with_init():
     td1 = BasePeak(**d1_kwargs)
     assert td1.peak_name == d1_kwargs["peak_name"]

+
 def test_base_class_good_with_init_added_method():
     tkwargs = dict(
         peak_type="Lorentzian",
@@ -108,6 +113,7 @@ def test_base_class_good_with_init_added_method():
     td1m = BasePeak(**tkwargs)
     assert td1m.peak_type == tkwargs["peak_type"]

+
 def test_base_class_good_with_attributes_and_init():
     tkwargs = dict(
         param_hints={
@@ -123,6 +129,7 @@ def test_base_class_good_with_attributes_and_init():
     _center_value = nca.lmfit_model.param_hints["center"]["value"]
     assert _center_value == 2435

+
 def test_base_class_good_with_attributes_no_init():
     tkwargs = dict(
         param_hints={
@@ -138,6 +145,7 @@ def test_base_class_good_with_attributes_no_init():
     assert ncni.param_hints["center"].value == 2435
     assert ncni.lmfit_model.param_hints["center"]["value"] == 2435

+
 def test_base_class_good_with_attributes_init_collision_values():
     tkwargs = dict(
         param_hints={
diff --git a/tests/deconvolution_models/test_fit_models.py b/tests/deconvolution_models/test_fit_models.py
index ef2f16a..1d95261 100644
--- a/tests/deconvolution_models/test_fit_models.py
+++ b/tests/deconvolution_models/test_fit_models.py
@@ -12,14 +12,14 @@

 @pytest.fixture
 def clean_spec(example_files) -> None:
-    file = [i for i in example_files if "_pos4" in i.stem][0] 
+    file = [i for i in example_files if "_pos4" in i.stem][0]
     specread = SpectrumReader(file)

     spectrum_processor = SpectrumProcessor(specread.spectrum)
     clean_spec_1st_order = spectrum_processor.clean_spectrum.spec_regions[
-        "savgol_filter_raw_window_first_order"
+        "savgol_filter_raw_region_first_order"
     ]
-    clean_spec_1st_order.window_name = "first_order"
+    clean_spec_1st_order.region_name = "first_order"
     return clean_spec_1st_order

@@ -31,7 +31,7 @@ def test_fit_first_order(clean_spec):
     for model_name, test_model in models["first_order"].items():
         # with subTest(model_name=model_name, test_model=test_model):
         spec_fit = SpectrumFitModel(
-            **{"spectrum": spectrum, "model": test_model, "window": "first_order"}
+            **{"spectrum": spectrum, "model": test_model, "region": "first_order"}
         )
         spec_fit.run_fit()
         for component in test_model.lmfit_model.components:
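The BasePeak and fit-model tests above reduce to ordinary lmfit usage: a peak is a prefixed lmfit model carrying param_hints, and a fit model is a sum of such peaks. A sketch with plain lmfit; the 2435 center mirrors the test fixtures, while the bounds are illustrative:

from lmfit.models import LorentzianModel

# A single prefixed peak with a param hint, as in the BasePeak tests.
peak = LorentzianModel(prefix="D2_")
peak.set_param_hint("center", value=2435, min=2300, max=2550)
params = peak.make_params()
assert params["D2_center"].value == 2435

# A composite "D+G" model; the component prefixes are what the color
# assignment in the plotting tests below iterates over.
model = LorentzianModel(prefix="D_") + LorentzianModel(prefix="G_")
assert [c.prefix for c in model.components] == ["D_", "G_"]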
diff --git a/tests/delegating/test_main_delegator.py b/tests/delegating/test_main_delegator.py
index ad1f1e8..bf9041a 100644
--- a/tests/delegating/test_main_delegator.py
+++ b/tests/delegating/test_main_delegator.py
@@ -1,29 +1,28 @@
-
 import pytest

 from raman_fitting.config.path_settings import RunModes
 from raman_fitting.delegating.main_delegator import MainDelegator


-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def delegator():
     return MainDelegator(run_mode=RunModes.PYTEST)

+
 def test_initialize_models(delegator):
-
     assert "first_order" in delegator.lmfit_models
     assert "first_order" in delegator.selected_models
     with pytest.raises(KeyError):
-        delegator.select_fitting_model("no_name","no model")
+        delegator.select_fitting_model("no_name", "no model")

-def test_index(delegator):
-    # breakpoint()
-    # index = delegator.select_samples_from_index()
+
+def test_delegator_index(delegator):
     assert delegator.index
     assert len(delegator.index.raman_files) == 5
     selection = delegator.select_samples_from_index()
     assert len(delegator.index.raman_files) == len(selection)

+
 @pytest.mark.skip(reason="enable main_run before release.")
 def test_main_run():
     delegator.main_run()
diff --git a/tests/exporting/test_plotting.py b/tests/exporting/test_plotting.py
index d764b67..a978b36 100644
--- a/tests/exporting/test_plotting.py
+++ b/tests/exporting/test_plotting.py
@@ -24,20 +24,20 @@ def initialized_models():
     return InitializeModels()

+
 def test_get_cmap_list():
     assert get_cmap_list(0) == None
     _cmap = get_cmap_list(50)
     assert _cmap == [DEFAULT_COLOR] * 50
     _cmap = get_cmap_list(5)
-    assert len(_cmap) >= 5 
-    _cmap = get_cmap_list(5, default_color=COLOR_BLACK)
-    # assert _cmap, [COLOR_BLACK] * 5
+    assert len(_cmap) >= 5
+    _cmap1 = get_cmap_list(5, default_color=COLOR_BLACK)
+    assert _cmap1 == [COLOR_BLACK] * 5

+
 def test_assign_colors_to_peaks(initialized_models):
     for order_type, model_collection in initialized_models.lmfit_models.items():
         for model_name, model in model_collection.items():
             annotated_models = assign_colors_to_peaks(model.lmfit_model.components)
             prefixes = set([i.prefix for i in model.lmfit_model.components])
-            assert prefixes == set(annotated_models.keys())
-            # print(annotated_models)
-
+            assert prefixes == set(annotated_models.keys())
diff --git a/tests/indexing/test_filename_parser.py b/tests/indexing/test_filename_parser.py
index eb4b3b2..cd6eb05 100644
--- a/tests/indexing/test_filename_parser.py
+++ b/tests/indexing/test_filename_parser.py
@@ -1,6 +1,3 @@
-
-# import importlib
-
 import pytest

 from raman_fitting.imports.models import RamanFileInfo
@@ -39,25 +36,28 @@ def path_parsers(example_files):
     path_parsers_ = []
     for fn in example_files:
         path_parsers_.append(RamanFileInfo(**{"file": fn}))
-    return path_parsers_ 
+    return path_parsers_


-def test_RamanFileInfo(path_parsers):
+def test_ramanfileinfo(path_parsers):
     assert all(isinstance(i, RamanFileInfo) for i in path_parsers)

-def test_PP_extra_from_map():
+
+def test_sample_id_name_mapper():
     for k, val in sample_id_name_mapper.items():
         _mapval = overwrite_sample_id_from_mapper(k, sample_id_name_mapper)
         assert _mapval == val

-def test_PP_extra_from_parts():
+
+def test_overwrite_sample_id_from_mapper():
     assert "TEST" == overwrite_sample_group_id_from_parts([], "TEST", sGrp_name_mapper)
     for k, val in sGrp_name_mapper.items():
-        emptymap_PP = RamanFileInfo(file=f"{k}/TEST.txt")
+        empty_path_parts = RamanFileInfo(file=f"{k}/TEST.txt")
         assert val == overwrite_sample_group_id_from_parts(
-            emptymap_PP.parts, "TEST", sGrp_name_mapper
-        )
+            empty_path_parts.parts, "TEST", sGrp_name_mapper
+        )
+

-def test_PP_parse_filepath_to_sid_and_pos():
+def test_parse_string_to_sample_id_and_position():
     for file, _expected in example_parse_fixture.items():
         assert parse_string_to_sample_id_and_position(file) == _expected
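The renamed parser tests encode the package's filename convention: a stem carries a sample id followed by a "_pos<N>" position suffix (the fixtures use files like "..._pos4"). A regex sketch of that convention, not the package's actual implementation; the fallback position of 0 and the sample stem are assumptions:

import re


def parse_sample_id_and_position(stem: str) -> tuple[str, int]:
    match = re.match(r"(?P<sample_id>.+?)_pos(?P<position>\d+)$", stem)
    if match is None:
        return stem, 0  # assumed fallback when no position suffix is present
    return match["sample_id"], int(match["position"])


assert parse_sample_id_and_position("sampleA_pos4") == ("sampleA", 4)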
diff --git a/tests/indexing/test_indexer.py b/tests/indexing/test_indexer.py
index 60f1674..3f2cb19 100644
--- a/tests/indexing/test_indexer.py
+++ b/tests/indexing/test_indexer.py
@@ -1,4 +1,3 @@
-
 import pytest

 from raman_fitting.config.path_settings import (
@@ -14,32 +13,27 @@
 run_mode = RunModes.PYTEST
 run_paths = get_run_mode_paths(run_mode)

+
 @pytest.fixture
 def index(example_files, internal_paths, tmp_raman_dir):
     pytest_fixtures_files = list(internal_paths.pytest_fixtures.rglob("*txt"))
-    index_file = internal_paths.temp_index_file 
+    index_file = internal_paths.temp_index_file
     all_test_files = example_files + pytest_fixtures_files
     index = initialize_index_from_source_files(
-        index_file=index_file,
-        files=all_test_files,
-        force_reindex=True
+        index_file=index_file, files=all_test_files, force_reindex=True
     )
     return index

-def test_RamanFileIndex_make_examples(index, example_files):
+
+def test_index_make_examples(index, example_files):
     assert isinstance(index, RamanFileIndex)
     assert isinstance(index.raman_files[0], RamanFileInfo)
     assert len(index.dataset) > 1
     assert len(index.dataset) == len(example_files)

+
 # @unittest.skip("export_index not yet implemented")
 def test_load_index(index):
     index.index_file.exists()
-    try:
-        new_index = RamanFileIndex(
-            index_file=index.index_file, force_reindex=False
-        )
-    except Exception as e:
-        raise e from e
+    new_index = RamanFileIndex(index_file=index.index_file, force_reindex=False)
     assert isinstance(new_index, RamanFileIndex)
-
diff --git a/tests/processing/test_spectrum_constructor.py b/tests/processing/test_spectrum_constructor.py
index fbedcbb..b178592 100644
--- a/tests/processing/test_spectrum_constructor.py
+++ b/tests/processing/test_spectrum_constructor.py
@@ -1,33 +1,18 @@
-# flake8: noqa
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri May 14 09:01:57 2021
-
-@author: zmg
-"""
-
-import unittest
-
 from raman_fitting.imports.spectrum.spectrum_constructor import (
     SpectrumDataLoader,
 )

-# class TestSpectrumDataLoader(unittest.TestCase):
-def test_SpectrumDataLoader_empty():
+def test_spectrum_data_loader_empty():
     spd = SpectrumDataLoader("empty.txt")
     assert spd.file == "empty.txt"
-    assert spd.clean_spectrum == None
+    assert spd.clean_spectrum is None

-def test_SpectrumDataLoader_file(example_files):
+
+def test_spectrum_data_loader_file(example_files):
     for file in example_files:
         spd = SpectrumDataLoader(
-            file, run_kwargs=dict(SampleID=file.stem, SamplePos=1)
+            file, run_kwargs=dict(sample_id=file.stem, sample_pos=1)
         )
         assert len(spd.clean_spectrum.spectrum) == 1600
         assert len(spd.clean_spectrum.spec_regions) >= 5
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tests/test_sample.py b/tests/test_sample.py
deleted file mode 100644
index c3afbfb..0000000
--- a/tests/test_sample.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# content of test_sample.py
-def inc(x):
-    return x + 1
-
-
-def test_answer():
-    assert "PyTestsNonZero" == "PyTestsNonZero"
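The indexer tests define the round-trip contract: build the index from source files once, then reload it from the written file without reindexing. A usage sketch assembled from the calls shown in the tests; the data directory, the index filename, and the import location of initialize_index_from_source_files are assumptions:

from pathlib import Path

from raman_fitting.imports.files.file_indexer import (
    RamanFileIndex,
    initialize_index_from_source_files,
)

files = sorted(Path("datafiles").glob("*.txt"))  # hypothetical data directory
index = initialize_index_from_source_files(
    index_file=Path("raman_fitting_index.csv"), files=files, force_reindex=True
)
# Reloading from the written index file skips the reindexing work.
reloaded = RamanFileIndex(index_file=index.index_file, force_reindex=False)
assert len(reloaded.dataset) == len(index.dataset)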