diff --git a/peakdet/__init__.py b/peakdet/__init__.py index ac6ee0f..324c738 100644 --- a/peakdet/__init__.py +++ b/peakdet/__init__.py @@ -16,6 +16,8 @@ "__version__", ] +from loguru import logger + from peakdet.analytics import HRV from peakdet.external import load_rtpeaks from peakdet.io import load_history, load_physio, save_history, save_physio @@ -34,3 +36,5 @@ __version__ = get_versions()["version"] del get_versions + +logger.disable("peakdet") diff --git a/peakdet/cli/run.py b/peakdet/cli/run.py index e0f6f14..b08a7ca 100644 --- a/peakdet/cli/run.py +++ b/peakdet/cli/run.py @@ -1,13 +1,11 @@ # -*- coding: utf-8 -*- +import argparse +import datetime import glob import os import sys -import warnings -import matplotlib - -matplotlib.use("WXAgg") -from gooey import Gooey, GooeyParser +from loguru import logger import peakdet @@ -41,19 +39,11 @@ } -@Gooey( - program_name="Physio pipeline", - program_description="Physiological processing pipeline", - default_size=(800, 600), - target=TARGET, -) def get_parser(): """Parser for GUI and command-line arguments""" - parser = GooeyParser() + parser = argparse.ArgumentParser() parser.add_argument( "file_template", - metavar="Filename template", - widget="FileChooser", help="Select a representative file and replace all " 'subject-specific information with a "?" symbol.' 
"\nFor example, subject_001_data.txt should " @@ -67,28 +57,24 @@ def get_parser(): ) inp_group.add_argument( "--modality", - metavar="Modality", default="ECG", choices=list(MODALITIES.keys()), help="Modality of input data.", ) inp_group.add_argument( "--fs", - metavar="Sampling rate", default=1000.0, type=float, help="Sampling rate of input data.", ) inp_group.add_argument( "--source", - metavar="Source", default="rtpeaks", choices=list(LOADERS.keys()), help="Program used to collect the data.", ) inp_group.add_argument( "--channel", - metavar="Channel", default=1, type=int, help="Which channel of data to read from data " @@ -102,7 +88,6 @@ def get_parser(): out_group.add_argument( "-o", "--output", - metavar="Filename", default="peakdet.csv", help="Output filename for generated measurements.", ) @@ -111,16 +96,13 @@ def get_parser(): "--measurements", metavar="Measurements", nargs="+", - widget="Listbox", choices=list(ATTR_CONV.keys()), default=["Average NN intervals", "Standard deviation of NN intervals"], - help="Desired physiological measurements.\nChoose " - "multiple with shift+click or ctrl+click.", + help="Desired physiological measurements", ) out_group.add_argument( "-s", "--savehistory", - metavar="Save history", action="store_true", help="Whether to save history of data processing " "for each file.", ) @@ -132,22 +114,43 @@ def get_parser(): edit_group.add_argument( "-n", "--noedit", - metavar="Editing", action="store_true", help="Turn off interactive editing.", ) edit_group.add_argument( "-t", "--thresh", - metavar="Threshold", default=0.2, type=float, help="Threshold for peak detection algorithm.", ) + log_style_group = parser.add_argument_group( + "Logging style arguments (optional and mutually exclusive)", + "Options to specify the logging style", + ) + log_style_group_exclusive = log_style_group.add_mutually_exclusive_group() + log_style_group_exclusive.add_argument( + "-debug", + "--debug", + dest="debug", + action="store_true", + help="Print 
additional debugging info and error diagnostics to log file. Default is False.", +        default=False, +    ) +    log_style_group_exclusive.add_argument( +        "-quiet", +        "--quiet", +        dest="quiet", +        action="store_true", +        help="Only print warnings to log file. Default is False.", +        default=False, +    ) +    return parser +@logger.catch def workflow( *, file_template, @@ -159,7 +162,9 @@ def workflow( savehistory=True, noedit=False, thresh=0.2, -    measurements=ATTR_CONV.keys() +    measurements=ATTR_CONV.keys(), +    debug=False, +    quiet=False, ): """ Basic workflow for physiological data @@ -190,17 +195,74 @@ def workflow( measurements : list, optional Which HRV-related measurements to save from data. See ``peakdet.HRV`` for available measurements. Default: all available measurements. +    debug, quiet : bool, optional +        Logging style flags: debug adds diagnostics to the logs, quiet limits them to warnings. Default: False """ +    outdir = os.path.dirname(output) +    logger.info(f"Current path is {outdir}") + +    # Create logfile name +    basename = "peakdet" +    extension = "log" +    isotime = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S") +    logname = os.path.join(outdir, (basename + isotime + "."
+ extension)) + + logger.remove(0) + if quiet: + logger.add( + sys.stderr, + level="WARNING", + colorize=True, + backtrace=False, + diagnose=False, + ) + logger.add( + logname, + level="WARNING", + colorize=False, + backtrace=False, + diagnose=False, + ) + elif debug: + logger.add( + sys.stderr, + level="DEBUG", + colorize=True, + backtrace=True, + diagnose=True, + ) + logger.add( + logname, + level="DEBUG", + colorize=False, + backtrace=True, + diagnose=True, + ) + else: + logger.add( + sys.stderr, + level="INFO", + colorize=True, + backtrace=True, + diagnose=False, + ) + logger.add( + logname, + level="INFO", + colorize=False, + backtrace=True, + diagnose=False, + ) # output file - print("OUTPUT FILE:\t\t{}\n".format(output)) + logger.info("OUTPUT FILE:\t\t{}".format(output)) # grab files from file template - print("FILE TEMPLATE:\t{}\n".format(file_template)) + logger.info("FILE TEMPLATE:\t{}".format(file_template)) files = glob.glob(file_template, recursive=True) # convert measurements to peakdet.HRV attribute friendly names try: - print("REQUESTED MEASUREMENTS: {}\n".format(", ".join(measurements))) + logger.info("REQUESTED MEASUREMENTS: {}\n".format(", ".join(measurements))) except TypeError: raise TypeError( "It looks like you didn't select any of the options " @@ -221,7 +283,7 @@ def workflow( # requested on command line, warn and use existing measurements so # as not to totally fork up existing file if eheader != head: - warnings.warn( + logger.warning( "Desired output file already exists and requested " "measurements do not match with measurements in " "existing output file. 
Using the pre-existing " @@ -238,7 +300,7 @@ def workflow( # iterate through all files and do peak detection with manual editing for fname in files: fname = os.path.relpath(fname) - print("Currently processing {}".format(fname)) + logger.info("Currently processing {}".format(fname)) # if we want to save history, this is the output name it would take outname = os.path.join( @@ -281,6 +343,7 @@ def workflow( def main(): + logger.enable("") opts = get_parser().parse_args() workflow(**vars(opts)) diff --git a/peakdet/editor.py b/peakdet/editor.py index d1f735e..c723ae4 100644 --- a/peakdet/editor.py +++ b/peakdet/editor.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np +from loguru import logger from matplotlib.widgets import SpanSelector from peakdet import operations, utils @@ -142,6 +143,7 @@ def on_edit(self, xmin, xmax, *, method): method accepts 'insert', 'reject', 'delete' """ + logger.debug("Edited peaks with action: {}", method) if method not in ["insert", "reject", "delete"]: raise ValueError(f'Action "{method}" not supported.') @@ -184,6 +186,7 @@ def undo(self): # pop off last edit and delete func, peaks = self.data._history.pop() + logger.debug(f"Undo previous action: {func}") if func == "reject_peaks": self.data._metadata["reject"] = np.setdiff1d( diff --git a/peakdet/external.py b/peakdet/external.py index ec2e731..26dab1b 100644 --- a/peakdet/external.py +++ b/peakdet/external.py @@ -3,9 +3,8 @@ Functions for interacting with physiological data acquired by external packages """ -import warnings - import numpy as np +from loguru import logger from peakdet import physio, utils @@ -40,7 +39,7 @@ def load_rtpeaks(fname, channel, fs): """ if fname.startswith("/"): - warnings.warn( + logger.warning( "Provided file seems to be an absolute path. In order " "to ensure full reproducibility it is recommended that " "a relative path is provided." 
diff --git a/peakdet/io.py b/peakdet/io.py index 1e87238..697f130 100644 --- a/peakdet/io.py +++ b/peakdet/io.py @@ -5,9 +5,9 @@ import json import os.path as op -import warnings import numpy as np +from loguru import logger from peakdet import physio, utils @@ -61,11 +61,13 @@ def load_physio(data, *, fs=None, dtype=None, history=None, allow_pickle=False): inp["history"] = list(map(tuple, inp["history"])) except (IOError, OSError, ValueError): inp = dict(data=np.loadtxt(data), history=[utils._get_call(exclude=[])]) + logger.debug("Instantiating Physio object from a file") phys = physio.Physio(**inp) # if we got a numpy array, load that into a Physio object elif isinstance(data, np.ndarray): + logger.debug("Instantiating Physio object from numpy array") if history is None: - warnings.warn( + logger.warning( "Loading data from a numpy array without providing a" "history will render reproducibility functions " "useless! Continuing anyways." @@ -73,6 +75,9 @@ def load_physio(data, *, fs=None, dtype=None, history=None, allow_pickle=False): phys = physio.Physio(np.asarray(data, dtype=dtype), fs=fs, history=history) # create a new Physio object out of a provided Physio object elif isinstance(data, physio.Physio): + logger.debug( + "Instantiating a new Physio object from the provided Physio object" + ) phys = utils.new_physio_like(data, data.data, fs=fs, dtype=dtype) phys._history += [utils._get_call()] else: @@ -81,7 +86,7 @@ def load_physio(data, *, fs=None, dtype=None, history=None, allow_pickle=False): # reset sampling rate, as requested if fs is not None and fs != phys.fs: if not np.isnan(phys.fs): - warnings.warn( + logger.warning( "Provided sampling rate does not match loaded rate. 
" "Resetting loaded sampling rate {} to provided {}".format(phys.fs, fs) ) @@ -119,6 +124,7 @@ def save_physio(fname, data): np.savez_compressed( dest, data=data.data, fs=data.fs, history=hist, metadata=data._metadata ) + logger.info(f"Saved {data} in {fname}") return fname @@ -149,10 +155,11 @@ def load_history(file, verbose=False): history = json.load(src) # replay history from beginning and return resultant Physio object + logger.info(f"Replaying history from {file}") data = None for func, kwargs in history: if verbose: - print("Rerunning {}".format(func)) + logger.info("Rerunning {}".format(func)) # loading functions don't have `data` input because it should be the # first thing in `history` (when the data was originally loaded!). # for safety, check if `data` is None; someone could have potentially @@ -203,7 +210,7 @@ def save_history(file, data): data = check_physio(data) if len(data.history) == 0: - warnings.warn( + logger.warning( "History of provided Physio object is empty. Saving " "anyway, but reloading this file will result in an " "error." 
@@ -211,5 +218,6 @@ def save_history(file, data): file += ".json" if not file.endswith(".json") else "" with open(file, "w") as dest: json.dump(data.history, dest, indent=4) + logger.info(f"Saved {data} history in {file}") return file diff --git a/peakdet/operations.py b/peakdet/operations.py index f5a2e7f..480b0c0 100644 --- a/peakdet/operations.py +++ b/peakdet/operations.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np +from loguru import logger from scipy import interpolate, signal from peakdet import editor, utils @@ -58,6 +59,15 @@ def filter_physio(data, cutoffs, method, *, order=3): "frequency for input data with sampling rate {}.".format(cutoffs, data.fs) ) + if method in ["lowpass", "highpass"]: + logger.info( + f"Applying a {method} filter (order: {order}) to the signal, with cutoff frequency at {cutoffs} Hz" + ) + elif method in ["bandpass", "bandstop"]: + logger.info( + f"Applying a {method} filter (order: {order}) to the signal, with cutoff frequencies at {cutoffs[0]} and {cutoffs[1]} Hz" + ) + b, a = signal.butter(int(order), nyq_cutoff, btype=method) filtered = utils.new_physio_like(data, signal.filtfilt(b, a, data)) @@ -99,6 +109,10 @@ def interpolate_physio(data, target_fs, *, kind="cubic"): suppinterp = None else: suppinterp = interpolate.interp1d(t_orig, data.suppdata, kind=kind)(t_new) + + logger.info( + f"Interpolating the signal at {target_fs} Hz (Interpolation ratio: {factor})." 
+ ) interp = utils.new_physio_like(data, interp, fs=target_fs, suppdata=suppinterp) return interp @@ -128,19 +142,27 @@ def peakfind_physio(data, *, thresh=0.2, dist=None): ensure_fs = True if dist is None else False data = utils.check_physio(data, ensure_fs=ensure_fs, copy=True) - # first pass peak detection to get approximate distance between peaks cdist = data.fs // 4 if dist is None else dist thresh = np.squeeze(np.diff(np.percentile(data, [5, 95]))) * thresh locs, heights = signal.find_peaks(data[:], distance=cdist, height=thresh) + logger.debug( + f"First peak detection iteration. Acquiring approximate distance between peaks (Number of peaks: {len(locs)})" + ) # second, more thorough peak detection cdist = np.diff(locs).mean() // 2 heights = np.percentile(heights["peak_heights"], 1) locs, heights = signal.find_peaks(data[:], distance=cdist, height=heights) data._metadata["peaks"] = locs + logger.debug( + f"Second peak detection iteration. Acquiring more precise peak locations (Number of peaks: {len(locs)})" + ) # perform trough detection based on detected peaks data._metadata["troughs"] = utils.check_troughs(data, data.peaks) + logger.debug( + f"Trough detection based on detected peaks (Number of troughs: {len(data.troughs)})" + ) return data @@ -159,7 +181,6 @@ def delete_peaks(data, remove): ------- data : Physio_like """ - data = utils.check_physio(data, ensure_fs=False, copy=True) data._metadata["peaks"] = np.setdiff1d(data._metadata["peaks"], remove) data._metadata["troughs"] = utils.check_troughs(data, data.peaks, data.troughs) @@ -181,7 +202,6 @@ def reject_peaks(data, remove): ------- data : Physio_like """ - data = utils.check_physio(data, ensure_fs=False, copy=True) data._metadata["reject"] = np.append(data._metadata["reject"], remove) data._metadata["troughs"] = utils.check_troughs(data, data.peaks, data.troughs) @@ -203,7 +223,6 @@ def add_peaks(data, add): ------- data : Physio_like """ - data = utils.check_physio(data, ensure_fs=False, 
copy=True) idx = np.searchsorted(data._metadata["peaks"], add) data._metadata["peaks"] = np.insert(data._metadata["peaks"], idx, add) @@ -234,6 +253,7 @@ def edit_physio(data): return # perform manual editing + logger.info("Opening interactive peak editor") edits = editor._PhysioEditor(data) plt.show(block=True) @@ -265,7 +285,7 @@ def plot_physio(data, *, ax=None): ax : :class:`matplotlib.axes.Axes` Axis with plotted `data` """ - + logger.debug(f"Plotting {data}") # generate x-axis time series fs = 1 if np.isnan(data.fs) else data.fs time = np.arange(0, len(data) / fs, 1 / fs) diff --git a/peakdet/physio.py b/peakdet/physio.py index eb62468..8b87baf 100644 --- a/peakdet/physio.py +++ b/peakdet/physio.py @@ -4,6 +4,7 @@ """ import numpy as np +from loguru import logger class Physio: @@ -41,6 +42,7 @@ class Physio: """ def __init__(self, data, fs=None, history=None, metadata=None, suppdata=None): + logger.debug("Initializing new Physio object") self._data = np.asarray(data).squeeze() if self.data.ndim > 1: raise ValueError( diff --git a/peakdet/tests/__init__.py b/peakdet/tests/__init__.py index bed8839..44aec0d 100644 --- a/peakdet/tests/__init__.py +++ b/peakdet/tests/__init__.py @@ -1,3 +1,6 @@ from peakdet.tests.utils import get_test_data_path +from peakdet.utils import enable_logger __all__ = ["get_test_data_path"] + +enable_logger("DEBUG", True, True) diff --git a/peakdet/tests/conftest.py b/peakdet/tests/conftest.py new file mode 100644 index 0000000..273030e --- /dev/null +++ b/peakdet/tests/conftest.py @@ -0,0 +1,16 @@ +import pytest +from _pytest.logging import LogCaptureFixture +from loguru import logger + + +@pytest.fixture +def caplog(caplog: LogCaptureFixture): + handler_id = logger.add( + caplog.handler, + format="{message}", + level=20, + filter=lambda record: record["level"].no >= caplog.handler.level, + enqueue=False, + ) + yield caplog + logger.remove(handler_id) diff --git a/peakdet/tests/test_external.py b/peakdet/tests/test_external.py index 
23c695e..4eb5efe 100644 --- a/peakdet/tests/test_external.py +++ b/peakdet/tests/test_external.py @@ -8,14 +8,14 @@ DATA = testutils.get_test_data_path("rtpeaks.csv") -def test_load_rtpeaks(): +def test_load_rtpeaks(caplog): for channel in [1, 2, 9]: - with pytest.warns(UserWarning): - hist = dict(fname=DATA, channel=channel, fs=1000.0) - phys = external.load_rtpeaks(DATA, channel=channel, fs=1000.0) - assert phys.history == [("load_rtpeaks", hist)] - assert phys.fs == 1000.0 - with pytest.raises(ValueError): - external.load_rtpeaks( - testutils.get_test_data_path("ECG.csv"), channel=channel, fs=1000.0 - ) + hist = dict(fname=DATA, channel=channel, fs=1000.0) + phys = external.load_rtpeaks(DATA, channel=channel, fs=1000.0) + assert phys.history == [("load_rtpeaks", hist)] + assert phys.fs == 1000.0 + with pytest.raises(ValueError): + external.load_rtpeaks( + testutils.get_test_data_path("ECG.csv"), channel=channel, fs=1000.0 + ) + assert caplog.text.count("WARNING") > 1 diff --git a/peakdet/tests/test_io.py b/peakdet/tests/test_io.py index 8e032ad..0daa79a 100644 --- a/peakdet/tests/test_io.py +++ b/peakdet/tests/test_io.py @@ -10,16 +10,14 @@ from peakdet.tests.utils import get_test_data_path -def test_load_physio(): +def test_load_physio(caplog): # try loading pickle file (from io.save_physio) pckl = io.load_physio(get_test_data_path("ECG.phys"), allow_pickle=True) assert isinstance(pckl, physio.Physio) assert pckl.data.size == 44611 assert pckl.fs == 1000.0 - with pytest.warns(UserWarning): - pckl = io.load_physio( - get_test_data_path("ECG.phys"), fs=500.0, allow_pickle=True - ) + pckl = io.load_physio(get_test_data_path("ECG.phys"), fs=500.0, allow_pickle=True) + assert caplog.text.count("WARNING") == 1 assert pckl.fs == 500.0 # try loading CSV file @@ -30,8 +28,8 @@ def test_load_physio(): assert csv.history[0][0] == "load_physio" # try loading array - with pytest.warns(UserWarning): - arr = io.load_physio(np.loadtxt(get_test_data_path("ECG.csv"))) + arr = 
io.load_physio(np.loadtxt(get_test_data_path("ECG.csv"))) + assert caplog.text.count("WARNING") == 2 assert isinstance(arr, physio.Physio) arr = io.load_physio( np.loadtxt(get_test_data_path("ECG.csv")), @@ -74,7 +72,7 @@ def test_load_history(tmpdir): assert filt.fs == replayed.fs -def test_save_history(tmpdir): +def test_save_history(tmpdir, caplog): # get paths of data, original history, new history fname = get_test_data_path("ECG.csv") orig_history = get_test_data_path("history.json") @@ -82,8 +80,8 @@ def test_save_history(tmpdir): # make physio object and perform some operations phys = physio.Physio(np.loadtxt(fname), fs=1000.0) - with pytest.warns(UserWarning): # no history = warning - io.save_history(temp_history, phys) + io.save_history(temp_history, phys) + assert caplog.text.count("WARNING") == 1 # no history = warning filt = operations.filter_physio(phys, [5.0, 15.0], "bandpass") path = io.save_history(temp_history, filt) # dump history= diff --git a/peakdet/utils.py b/peakdet/utils.py index 86740be..b3041cf 100644 --- a/peakdet/utils.py +++ b/peakdet/utils.py @@ -5,9 +5,11 @@ """ import inspect +import sys from functools import wraps import numpy as np +from loguru import logger from peakdet import physio @@ -157,7 +159,7 @@ def new_physio_like( dtype=None, copy_history=True, copy_metadata=True, - copy_suppdata=True + copy_suppdata=True, ): """ Makes `data` into physio object like `ref_data` @@ -240,3 +242,87 @@ def check_troughs(data, peaks, troughs=None): all_troughs[f] = idx return all_troughs + + +def enable_logger(loglevel="INFO", diagnose=True, backtrace=True): + """ + Toggles the use of the module's logger and configures it + + Parameters + ---------- + loglevel : {'INFO', 'DEBUG', 'WARNING', 'ERROR'} + Logger log level. 
Default: "INFO" + """ + _valid_loglevels = ["INFO", "DEBUG", "WARNING", "ERROR"] + + if loglevel not in _valid_loglevels: + raise ValueError( + "Provided log level {} is not permitted; must be in {}.".format( + loglevel, _valid_loglevels + ) + ) + logger.enable("peakdet") + try: + logger.remove(0) + except ValueError: + logger.warning( + "The logger has been already enabled. If you want to" + "change the log level of an existing logger, please" + "refer to the change_loglevel() function. (Note: You can" + "find the log_handle either from the initial call of this" + "function, or the console logs)" + ) + return + log_handle = logger.add( + sys.stderr, level=loglevel, backtrace=backtrace, diagnose=diagnose + ) + logger.debug(f"Enabling logger with handle_id: {log_handle}") + return log_handle + + +def change_loglevel(log_handle, loglevel, diagnose=True, backtrace=True): + """ + Change the loguru logger's log level. The logger needs to + be already enabled by `enable_logger()` + + Parameters + ---------- + log_handle : Enabled logger's handle, returned by `enable_logger()` + loglevel : {'INFO', 'DEBUG', 'WARNING', 'ERROR'} + """ + _valid_loglevels = ["INFO", "DEBUG", "WARNING", "ERROR"] + + if loglevel not in _valid_loglevels: + raise ValueError( + "Provided log level {} is not permitted; must be in {}.".format( + loglevel, _valid_loglevels + ) + ) + logger.remove(log_handle) + new_log_handle = logger.add( + sys.stderr, level=loglevel, backtrace=backtrace, diagnose=diagnose + ) + logger.info( + f'Changing the logger log level to "{loglevel}" (New logger handle_id: {new_log_handle})' + ) + return new_log_handle + + +def disable_logger(log_handle=None): + """ + Change the loguru logger's log level. 
The logger needs to + be already enabled by `enable_logger()` + + Parameters + ---------- + log_handle : Enabled logger's handle, returned by `enable_logger()` + Default: None + If left as None, this function will disable all logger instances + """ + if log_handle is None: + logger.info("Disabling all logger instances") + logger.remove() + else: + logger.info(f"Disabling logger with handle_id: {log_handle}") + logger.remove(log_handle) + logger.disable("peakdet") diff --git a/requirements.txt b/requirements.txt index 74fa65e..0e8bceb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ matplotlib numpy scipy +loguru diff --git a/setup.cfg b/setup.cfg index df99dd1..16898b4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,7 @@ install_requires = matplotlib >=3.1.1, <=3.6.3, !=3.3.0rc1 numpy >=1.9.3 scipy + loguru tests_require = pytest >=5.3 pytest-cov