From 4254aad241bb2f0be4c4a6f17e1846f4350c92cb Mon Sep 17 00:00:00 2001 From: Dominique Makowski Date: Tue, 21 May 2024 14:45:13 +0100 Subject: [PATCH 01/49] bump version --- neurokit2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neurokit2/__init__.py b/neurokit2/__init__.py index 00df5089ec..9fe63da09a 100644 --- a/neurokit2/__init__.py +++ b/neurokit2/__init__.py @@ -33,7 +33,7 @@ from .video import * # Info -__version__ = "0.2.9" +__version__ = "0.2.10" # Maintainer info From 5393ffc994cc2989beefeda64bbbc8ab4cacc26d Mon Sep 17 00:00:00 2001 From: S-N-2019 <56240762+S-N-2019@users.noreply.github.com> Date: Tue, 21 May 2024 12:11:15 -0400 Subject: [PATCH 02/49] fix location of misplaced double quotes in docstring of ecg_quality.py --- neurokit2/ecg/ecg_quality.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neurokit2/ecg/ecg_quality.py b/neurokit2/ecg/ecg_quality.py index 3c50058483..620409429f 100644 --- a/neurokit2/ecg/ecg_quality.py +++ b/neurokit2/ecg/ecg_quality.py @@ -56,7 +56,7 @@ def ecg_quality(ecg_cleaned, rpeaks=None, sampling_rate=1000, method="averageQRS array or str Vector containing the quality index ranging from 0 to 1 for ``"averageQRS"`` method, returns string classification (``Unacceptable``, ``Barely Acceptable`` or ``Excellent``) - of the signal for ``"zhao2018 method"``. + of the signal for ``"zhao2018"`` method. See Also -------- From 9caba86a702465511ff41051654753d35be283bb Mon Sep 17 00:00:00 2001 From: Dominique Makowski Date: Thu, 23 May 2024 13:58:59 +0100 Subject: [PATCH 03/49] Fix EDA recovery time computation (now, in seconds) --- neurokit2/eda/eda_eventrelated.py | 38 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/neurokit2/eda/eda_eventrelated.py b/neurokit2/eda/eda_eventrelated.py index 3fd8a2a428..732088672a 100644 --- a/neurokit2/eda/eda_eventrelated.py +++ b/neurokit2/eda/eda_eventrelated.py @@ -3,9 +3,11 @@ import numpy as np -from ..epochs.eventrelated_utils import (_eventrelated_addinfo, - _eventrelated_sanitizeinput, - _eventrelated_sanitizeoutput) +from ..epochs.eventrelated_utils import ( + _eventrelated_addinfo, + _eventrelated_sanitizeinput, + _eventrelated_sanitizeoutput, +) from ..misc import NeuroKitWarning @@ -134,7 +136,7 @@ def _eda_eventrelated_eda(epoch, output={}): warn( "Input does not have an `EDA_Phasic` column." " Will skip computation of maximum amplitude of phasic EDA component.", - category=NeuroKitWarning + category=NeuroKitWarning, ) return output @@ -149,7 +151,7 @@ def _eda_eventrelated_scr(epoch, output={}): warn( "Input does not have an `SCR_Amplitude` column." " Will skip computation of SCR peak amplitude.", - category=NeuroKitWarning + category=NeuroKitWarning, ) return output @@ -157,7 +159,7 @@ def _eda_eventrelated_scr(epoch, output={}): warn( "Input does not have an `SCR_RecoveryTime` column." " Will skip computation of SCR half-recovery times.", - category=NeuroKitWarning + category=NeuroKitWarning, ) return output @@ -165,23 +167,23 @@ def _eda_eventrelated_scr(epoch, output={}): warn( "Input does not have an `SCR_RiseTime` column." 
" Will skip computation of SCR rise times.", - category=NeuroKitWarning + category=NeuroKitWarning, ) return output - # Peak amplitude and Time of peak - first_activation = np.where(epoch["SCR_Amplitude"][epoch.index > 0] != 0)[0][0] - peak_amplitude = epoch["SCR_Amplitude"][epoch.index > 0].iloc[first_activation] - output["SCR_Peak_Amplitude"] = peak_amplitude - output["SCR_Peak_Amplitude_Time"] = epoch["SCR_Amplitude"][epoch.index > 0].index[first_activation] - # Rise Time - rise_time = epoch["SCR_RiseTime"][epoch.index > 0].iloc[first_activation] - output["SCR_RiseTime"] = rise_time + epoch_postevent = epoch[epoch.index > 0] + # Peak amplitude + first_peak = np.where(epoch_postevent["SCR_Amplitude"] != 0)[0][0] + output["SCR_Peak_Amplitude"] = epoch_postevent["SCR_Amplitude"].iloc[first_peak] + # Time of peak (Raw, from epoch onset) + output["SCR_Peak_Amplitude_Time"] = epoch_postevent.index[first_peak] + # Rise Time (From the onset of the peak) + output["SCR_RiseTime"] = epoch_postevent["SCR_RiseTime"].iloc[first_peak] - # Recovery Time + # Recovery Time (from peak to half recovery time) if any(epoch["SCR_RecoveryTime"][epoch.index > 0] != 0): - recovery_time = np.where(epoch["SCR_RecoveryTime"][epoch.index > 0] != 0)[0][0] - output["SCR_RecoveryTime"] = recovery_time + recov_t = np.where(epoch_postevent["SCR_RecoveryTime"] != 0)[0][0] + output["SCR_RecoveryTime"] = epoch_postevent["SCR_RecoveryTime"].iloc[recov_t] else: output["SCR_RecoveryTime"] = np.nan From 132eb7818ebb1b8ddb3a779fdebc0d5caa611df6 Mon Sep 17 00:00:00 2001 From: Marc Balle Date: Wed, 12 Jun 2024 10:25:00 +0200 Subject: [PATCH 04/49] return frequency as scalar --- neurokit2/signal/signal_filter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neurokit2/signal/signal_filter.py b/neurokit2/signal/signal_filter.py index 94065c9d6f..aecb222924 100644 --- a/neurokit2/signal/signal_filter.py +++ b/neurokit2/signal/signal_filter.py @@ -338,10 +338,10 @@ def _signal_filter_sanitize(lowcut=None, highcut=None, sampling_rate=1000, norma # pass frequencies in order of lowest to highest to the scipy filter freqs = list(np.sort([lowcut, highcut])) elif lowcut is not None: - freqs = [lowcut] + freqs = lowcut filter_type = "highpass" elif highcut is not None: - freqs = [highcut] + freqs = highcut filter_type = "lowpass" # Normalize frequency to Nyquist Frequency (Fs/2). From 7df75adb356ea23648b7149371fd3baae6b90a90 Mon Sep 17 00:00:00 2001 From: Dominique Makowski Date: Thu, 20 Jun 2024 17:02:05 +0100 Subject: [PATCH 05/49] fix download instruction from LUDB database --- data/ludb/download_ludb.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/data/ludb/download_ludb.py b/data/ludb/download_ludb.py index 3e71284e74..c8468b28a2 100644 --- a/data/ludb/download_ludb.py +++ b/data/ludb/download_ludb.py @@ -4,10 +4,8 @@ The database consists of 200 10-second 12-lead ECG signal records representing different morphologies of the ECG signal. The ECGs were collected from healthy volunteers and patients, which had various cardiovascular diseases. The boundaries of P, T waves and QRS complexes were manually annotated by cardiologists for all 200 records. Steps: - 1. In the command line, run 'pip install gsutil' - 2. Then, 'gsutil -m cp -r gs://ludb-1.0.0.physionet.org D:/YOURPATH/NeuroKit/data/ludb' - This will download all the files in a folder named 'ludb-1.0.0.physionet.org' at the - destination you entered. + 1. 
Download zipped data base from https://physionet.org/content/ludb/1.0.1/ + 2. Unzip the folder so that you have a `lobachevsky-university-electrocardiography-database-1.0.1/` folder' 3. Run this script. """ import pandas as pd @@ -16,8 +14,6 @@ import os -files = os.listdir("./ludb-1.0.0.physionet.org/") - dfs_ecg = [] dfs_rpeaks = [] @@ -25,22 +21,26 @@ for participant in range(200): filename = str(participant + 1) - data, info = wfdb.rdsamp("./ludb-1.0.0.physionet.org/" + filename) + data, info = wfdb.rdsamp( + "./lobachevsky-university-electrocardiography-database-1.0.1/data/" + filename + ) # Get signal data = pd.DataFrame(data, columns=info["sig_name"]) data = data[["i"]].rename(columns={"i": "ECG"}) - data["Participant"] = "LUDB_%.2i" %(participant + 1) + data["Participant"] = "LUDB_%.2i" % (participant + 1) data["Sample"] = range(len(data)) - data["Sampling_Rate"] = info['fs'] + data["Sampling_Rate"] = info["fs"] data["Database"] = "LUDB" # Get annotations - anno = wfdb.rdann("./ludb-1.0.0.physionet.org/" + filename, 'atr_i') + anno = wfdb.rdann( + "./lobachevsky-university-electrocardiography-database-1.0.1/data/" + filename, "i" + ) anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]] anno = pd.DataFrame({"Rpeaks": anno}) - anno["Participant"] = "LUDB_%.2i" %(participant + 1) - anno["Sampling_Rate"] = info['fs'] + anno["Participant"] = "LUDB_%.2i" % (participant + 1) + anno["Sampling_Rate"] = info["fs"] anno["Database"] = "LUDB" # Store with the rest From 4c1aaeb07275e03f66867ee040ec690ada6bf0ab Mon Sep 17 00:00:00 2001 From: atpage Date: Wed, 3 Jul 2024 12:34:53 -0400 Subject: [PATCH 06/49] Fix typo in ecg_quality docstring --- neurokit2/ecg/ecg_quality.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neurokit2/ecg/ecg_quality.py b/neurokit2/ecg/ecg_quality.py index 5b7c362dbe..14b1976d4d 100644 --- a/neurokit2/ecg/ecg_quality.py +++ b/neurokit2/ecg/ecg_quality.py @@ -24,7 +24,7 @@ def ecg_quality( interpolating the distance of each QRS segment from the average QRS segment present in the * data. This index is therefore relative: 1 corresponds to heartbeats that are the closest to the average sample and 0 corresponds to the most distant heartbeat from that average sample. - Note that 1 does not necessarily means "good": if the majority of samples are bad, than being + Note that 1 does not necessarily means "good": if the majority of samples are bad, then being close to the average will likely mean bad as well. Use this index with care and plot it alongside your ECG signal to see if it makes sense. From feaf6efc825d40ad9e3a520ccd66aa0eb247ac71 Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Wed, 10 Jul 2024 10:42:58 -0400 Subject: [PATCH 07/49] Return an empty array rather than throwing an exception if no QRS data is found. --- neurokit2/ecg/ecg_findpeaks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/neurokit2/ecg/ecg_findpeaks.py b/neurokit2/ecg/ecg_findpeaks.py index 6a1f34e61b..b1dfd65c19 100644 --- a/neurokit2/ecg/ecg_findpeaks.py +++ b/neurokit2/ecg/ecg_findpeaks.py @@ -274,6 +274,9 @@ def _ecg_findpeaks_neurokit( qrs = smoothgrad > gradthreshold beg_qrs = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0] end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0] + + if len(beg_qrs) == 0: + return [] # Throw out QRS-ends that precede first QRS-start. 
end_qrs = end_qrs[end_qrs > beg_qrs[0]] From 847bb1994be5402497c067a67d60beac98487feb Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Fri, 12 Jul 2024 09:18:34 -0400 Subject: [PATCH 08/49] add tests for all peak detection methods handling empty input --- tests/tests_ecg_findpeaks.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/tests_ecg_findpeaks.py b/tests/tests_ecg_findpeaks.py index c2ca3f4a1c..e6169ccd0b 100644 --- a/tests/tests_ecg_findpeaks.py +++ b/tests/tests_ecg_findpeaks.py @@ -13,6 +13,7 @@ _ecg_findpeaks_MWA, _ecg_findpeaks_peakdetect, _ecg_findpeaks_hamilton, + _ecg_findpeaks_findmethod, ) @@ -23,6 +24,23 @@ def _read_csv_column(csv_name, column): csv_data = pd.read_csv(csv_path, header=None) return csv_data[column].to_numpy() +def test_ecg_findpeaks_all_methods_handle_empty_input(): + METHODS = ["neurokit", "pantompkins", "nabian", "gamboa", + "slopesumfunction", "wqrs", "hamilton", "christov", + "engzee", "manikandan", "elgendi", "kalidas", + "martinez", "rodrigues", "vgraph"] + + failed_methods = [] + for method in METHODS: + try: + method_func = _ecg_findpeaks_findmethod(method) + _ = method_func(np.zeros(12*240), sampling_rate=240) + except Exception: + failed_methods.append(method) + continue + + np.testing.assert_equal(failed_methods, []) + def test_ecg_findpeaks_MWA(): np.testing.assert_array_equal( From 8738072ecc33a8aaa1bcfdf5a25e9ffb992e8154 Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Fri, 12 Jul 2024 09:40:58 -0400 Subject: [PATCH 09/49] fix empty input errors in other findpeak methods --- neurokit2/ecg/ecg_findpeaks.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/neurokit2/ecg/ecg_findpeaks.py b/neurokit2/ecg/ecg_findpeaks.py index b1dfd65c19..df3af656c6 100644 --- a/neurokit2/ecg/ecg_findpeaks.py +++ b/neurokit2/ecg/ecg_findpeaks.py @@ -276,7 +276,8 @@ def _ecg_findpeaks_neurokit( end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0] if len(beg_qrs) == 0: - return [] + return np.array([]) + # Throw out QRS-ends that precede first QRS-start. end_qrs = end_qrs[end_qrs > beg_qrs[0]] @@ -503,6 +504,14 @@ def _ecg_findpeaks_zong(signal, sampling_rate=1000, cutoff=16, window=0.13, **kw ret = np.pad(clt, (window_size - 1, 0), "constant", constant_values=(0, 0)) ret = np.convolve(ret, np.ones(window_size), "valid") + # Check that ret is at least as large as the window + if len(ret) < window_size: + warn( + f"The signal must be at least {window_size} samples long for peak detection with the Zong method. ", + category=NeuroKitWarning, + ) + return np.array([]) + for i in range(1, window_size): ret[i - 1] = ret[i - 1] / i ret[window_size - 1 :] = ret[window_size - 1 :] / window_size @@ -638,7 +647,8 @@ def _ecg_findpeaks_christov(signal, sampling_rate=1000, **kwargs): if len(RR) > 5: RR.pop(0) Rm = int(np.mean(RR)) - + if len(QRS) == 0: + return np.array([]) QRS.pop(0) QRS = np.array(QRS, dtype="int") return QRS @@ -919,6 +929,9 @@ def _ecg_findpeaks_engzee(signal, sampling_rate=1000, **kwargs): thi = False thf = False + if len(r_peaks) == 0: + return np.array([]) + r_peaks.pop( 0 ) # removing the 1st detection as it 1st needs the QRS complex amplitude for the threshold @@ -959,6 +972,12 @@ def running_mean(x, N): # Eq. 1: First-order differencing difference dn = np.append(filtered[1:], 0) - filtered + + # If the signal is flat then return an empty array rather than error out + # with a divide by zero error. + if np.max(abs(dn)) == 0: + return np.array([]) + # Eq. 
2 dtn = dn / (np.max(abs(dn))) From 3cce0cf80528fce4c253a3cbef8c38b87bbd12b4 Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Fri, 12 Jul 2024 10:49:06 -0400 Subject: [PATCH 10/49] add better error message to the test, remove vgraph from testing set --- tests/tests_ecg_findpeaks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/tests_ecg_findpeaks.py b/tests/tests_ecg_findpeaks.py index e6169ccd0b..8d7a5aa1ec 100644 --- a/tests/tests_ecg_findpeaks.py +++ b/tests/tests_ecg_findpeaks.py @@ -28,7 +28,7 @@ def test_ecg_findpeaks_all_methods_handle_empty_input(): METHODS = ["neurokit", "pantompkins", "nabian", "gamboa", "slopesumfunction", "wqrs", "hamilton", "christov", "engzee", "manikandan", "elgendi", "kalidas", - "martinez", "rodrigues", "vgraph"] + "martinez", "rodrigues"] failed_methods = [] for method in METHODS: @@ -38,8 +38,8 @@ def test_ecg_findpeaks_all_methods_handle_empty_input(): except Exception: failed_methods.append(method) continue - - np.testing.assert_equal(failed_methods, []) + if failed_methods: + raise Exception(f"Failed methods: {failed_methods}") def test_ecg_findpeaks_MWA(): From f0c35dcf7b1ef49ae01839ad477410b39e4fab90 Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Fri, 12 Jul 2024 12:37:02 -0400 Subject: [PATCH 11/49] Refactor the test to use parameterized testing. --- tests/tests_ecg_findpeaks.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/tests/tests_ecg_findpeaks.py b/tests/tests_ecg_findpeaks.py index 8d7a5aa1ec..f80ee0c2be 100644 --- a/tests/tests_ecg_findpeaks.py +++ b/tests/tests_ecg_findpeaks.py @@ -3,6 +3,7 @@ import numpy as np import pandas as pd +import pytest # Trick to directly access internal functions for unit testing. # @@ -24,22 +25,16 @@ def _read_csv_column(csv_name, column): csv_data = pd.read_csv(csv_path, header=None) return csv_data[column].to_numpy() -def test_ecg_findpeaks_all_methods_handle_empty_input(): - METHODS = ["neurokit", "pantompkins", "nabian", "gamboa", +#vgraph is not included because it currently causes CI to fail (issue 1007) +@pytest.mark.parametrize("method",["neurokit", "pantompkins", "nabian", "gamboa", "slopesumfunction", "wqrs", "hamilton", "christov", "engzee", "manikandan", "elgendi", "kalidas", - "martinez", "rodrigues"] - - failed_methods = [] - for method in METHODS: - try: - method_func = _ecg_findpeaks_findmethod(method) - _ = method_func(np.zeros(12*240), sampling_rate=240) - except Exception: - failed_methods.append(method) - continue - if failed_methods: - raise Exception(f"Failed methods: {failed_methods}") + "martinez", "rodrigues",]) +def test_ecg_findpeaks_all_methods_handle_empty_input(method): + method_func = _ecg_findpeaks_findmethod(method) + # The test here is implicit: no exceptions means that it passed, + # even if the output is nonsense. 
+ _ = method_func(np.zeros(12*240), sampling_rate=240) def test_ecg_findpeaks_MWA(): From 3dc069bc8353fa195510a3ff187604b99dbfa663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 13 Jul 2024 19:49:28 +0200 Subject: [PATCH 12/49] Fix Documentation Dependency Issue - Added pickleshare as dependency in the docs-build workflow --- .github/workflows/docs-build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml index fd1dcb74ef..ac71ba73f8 100644 --- a/.github/workflows/docs-build.yml +++ b/.github/workflows/docs-build.yml @@ -44,6 +44,7 @@ jobs: pip install EMD-signal pip install cvxopt pip install ts2vg + pip install pickleshare pip install https://github.com/neuropsychology/neurokit/zipball/dev - name: Build documentation 📜 From ad6e6620cbc5822aad30953f4edaca5e254ad37a Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Sat, 13 Jul 2024 20:45:47 -0500 Subject: [PATCH 13/49] now that 1007 is closed, add vgraph to the test suite (which now fails) --- tests/tests_ecg_findpeaks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/tests_ecg_findpeaks.py b/tests/tests_ecg_findpeaks.py index f80ee0c2be..7049fabbcc 100644 --- a/tests/tests_ecg_findpeaks.py +++ b/tests/tests_ecg_findpeaks.py @@ -25,11 +25,11 @@ def _read_csv_column(csv_name, column): csv_data = pd.read_csv(csv_path, header=None) return csv_data[column].to_numpy() -#vgraph is not included because it currently causes CI to fail (issue 1007) + @pytest.mark.parametrize("method",["neurokit", "pantompkins", "nabian", "gamboa", "slopesumfunction", "wqrs", "hamilton", "christov", "engzee", "manikandan", "elgendi", "kalidas", - "martinez", "rodrigues",]) + "martinez", "rodrigues", "vgraph"]) def test_ecg_findpeaks_all_methods_handle_empty_input(method): method_func = _ecg_findpeaks_findmethod(method) # The test here is implicit: no exceptions means that it passed, From d38318feb331d29b2577162971e5453020f26133 Mon Sep 17 00:00:00 2001 From: Andrew Barros Date: Sat, 13 Jul 2024 20:48:35 -0500 Subject: [PATCH 14/49] add flat input checking to _ecg_findpeaks_visiblitygraph --- neurokit2/ecg/ecg_findpeaks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/neurokit2/ecg/ecg_findpeaks.py b/neurokit2/ecg/ecg_findpeaks.py index df3af656c6..9a0695eac8 100644 --- a/neurokit2/ecg/ecg_findpeaks.py +++ b/neurokit2/ecg/ecg_findpeaks.py @@ -1245,6 +1245,10 @@ def _ecg_findpeaks_visibilitygraph( weights = np.zeros(N) # Empty array to store the weights BETA = 0.55 # Target number of nonzero elements in the resulting weight vector + # if the input signal is flat, return an empty array, otherwise the visiblity graph will fail + if np.max(signal) == np.min(signal): + return np.array([]) + # If input length is smaller than window, compute only one segment of this length if N < M: M, R = N, N From c7070888d30365b4fb0085f0bc162029ff19b83e Mon Sep 17 00:00:00 2001 From: danibene <34680344+danibene@users.noreply.github.com> Date: Fri, 19 Jul 2024 16:05:31 -0400 Subject: [PATCH 15/49] revise pNN50 and 20 definitions, with references --- neurokit2/hrv/hrv_time.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/neurokit2/hrv/hrv_time.py b/neurokit2/hrv/hrv_time.py index aa977dbbcc..2ec3158b94 100644 --- a/neurokit2/hrv/hrv_time.py +++ b/neurokit2/hrv/hrv_time.py @@ -74,10 +74,10 @@ def hrv_time(peaks, sampling_rate=1000, show=False, **kwargs): Frequency (LF/HF) Ratio (Sollers et al., 2007). 
* **Prc20NN**: The 20th percentile of the RR intervals (Han, 2017; Hovsepian, 2015). * **Prc80NN**: The 80th percentile of the RR intervals (Han, 2017; Hovsepian, 2015). - * **pNN50**: The proportion of RR intervals greater than 50ms, out of the total number of - RR intervals. - * **pNN20**: The proportion of RR intervals greater than 20ms, out of the total number of - RR intervals. + * **pNN50**: The percentage of absolute differences in successive RR intervals greater than + 50 ms (Bigger et al., 1988; Mietus et al., 2002). + * **pNN20**: The percentage of absolute differences in successive RR intervals greater than + 20 ms (Mietus et al., 2002). * **MinNN**: The minimum of the RR intervals (Parent, 2019; Subramaniam, 2022). * **MaxNN**: The maximum of the RR intervals (Parent, 2019; Subramaniam, 2022). * **TINN**: A geometrical parameter of the HRV, or more specifically, the baseline width of @@ -111,6 +111,9 @@ def hrv_time(peaks, sampling_rate=1000, show=False, **kwargs): References ---------- + * Bigger Jr, J. T., Kleiger, R. E., Fleiss, J. L., Rolnitzky, L. M., Steinman, R. C., & Miller, + J. P. (1988). Components of heart rate variability measured during healing of acute myocardial + infarction. The American journal of cardiology, 61(4), 208-215. * Pham, T., Lau, Z. J., Chen, S. H. A., & Makowski, D. (2021). Heart Rate Variability in Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998. https://doi.org/10.3390/s21123998 @@ -123,6 +126,8 @@ def hrv_time(peaks, sampling_rate=1000, show=False, **kwargs): towards a gold standard for continuous stress assessment in the mobile environment. In Proceedings of the 2015 ACM international joint conference on pervasive and ubiquitous computing (pp. 493-504). + * Mietus, J. E., Peng, C. K., Henry, I., Goldsmith, R. L., & Goldberger, A. L. (2002). The pNNx + files: re-examining a widely used heart rate variability measure. Heart, 88(4), 378-380. * Parent, M., Tiwari, A., Albuquerque, I., Gagnon, J. F., Lafond, D., Tremblay, S., & Falk, T. H. (2019). A multimodal approach to improve the robustness of physiological stress prediction during physical activity. In 2019 IEEE International Conference on Systems, Man and From ed728a82d686eb5315f70612b00e76b7e8c4c9b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:07:50 +0200 Subject: [PATCH 16/49] conf.py: remove redundant options --- docs/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index eedfdb48fe..635cf94de6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -131,7 +131,7 @@ def find_version(): "use_issues_button": True, "path_to_docs": "docs/", "use_edit_page_button": True, - "logo_only": True, + # "logo_only": True, "show_toc_level": 1, "navigation_with_keys": False, } @@ -140,4 +140,4 @@ def find_version(): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
-html_static_path = ["_static"] +# html_static_path = ["_static"] From dbcb0cfaff49528207498e2e4981ad61b430c2ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:18:22 +0200 Subject: [PATCH 17/49] make.bat: Remove duplicate = sign in set function --- docs/make.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/make.bat b/docs/make.bat index 8ac7e9f77c..56b87e4910 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -7,7 +7,7 @@ REM Command file for Sphinx documentation @REM SPHINXBUILD="D:\Python3\Scripts\sphinx-build.exe" if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD==python -m sphinx + set SPHINXBUILD=python -m sphinx ) set SOURCEDIR="." set BUILDDIR="_build" @@ -34,4 +34,4 @@ goto end %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end -popd \ No newline at end of file +popd From e58bb655594669f08f1825eed66b580ab97404e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:19:22 +0200 Subject: [PATCH 18/49] eeg_microstates.ipynb: replaced doi with actual link --- docs/examples/eeg_microstates/eeg_microstates.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/eeg_microstates/eeg_microstates.ipynb b/docs/examples/eeg_microstates/eeg_microstates.ipynb index 004e6699f9..3140724f3b 100644 --- a/docs/examples/eeg_microstates/eeg_microstates.ipynb +++ b/docs/examples/eeg_microstates/eeg_microstates.ipynb @@ -589,7 +589,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Several different clustering algorithms can be used to segment your EEG recordings into microstates. These algorithms mainly differ in how they define cluster membership and the cost functionals to be optimized ([Xu & Tian, 2015](10.1007/s40745-015-0040-1)). The method to use hence depends on your data and the underlying assumptions of the methods (e.g., some methods ignore polarity). There is no one true method that gives the best results but you can refer to [Poulsen et al., 2018](https://www.researchgate.net/publication/331367421_Microstate_EEGlab_toolbox_An_introductory_guide#pf6) if you would like a more detailed review of the different clustering methods." + "Several different clustering algorithms can be used to segment your EEG recordings into microstates. These algorithms mainly differ in how they define cluster membership and the cost functionals to be optimized ([Xu & Tian, 2015](https://doi.org/10.1007/s40745-015-0040-1)). The method to use hence depends on your data and the underlying assumptions of the methods (e.g., some methods ignore polarity). There is no one true method that gives the best results but you can refer to [Poulsen et al., 2018](https://www.researchgate.net/publication/331367421_Microstate_EEGlab_toolbox_An_introductory_guide#pf6) if you would like a more detailed review of the different clustering methods." ] }, { From 62c9cf67188b46b0b8bbaf18dcf2958703c02800 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:20:41 +0200 Subject: [PATCH 19/49] studies: added unique headers to create more informative links - Duplicate headers were always replaced by id1, id2 etc. 
- Unique names give more semantic meanings when linking directly to a heading --- studies/complexity_eeg/README.md | 4 +- studies/ecg_benchmark/README.md | 148 ++++++++++++++-------------- studies/eog_blinktemplate/README.md | 14 +-- 3 files changed, 83 insertions(+), 83 deletions(-) diff --git a/studies/complexity_eeg/README.md b/studies/complexity_eeg/README.md index 91bd2749de..b4f2a2de3a 100644 --- a/studies/complexity_eeg/README.md +++ b/studies/complexity_eeg/README.md @@ -71,7 +71,7 @@ data_delay <- read.csv("data_delay.csv") |> # facet_wrap(~Metric, scales = "free_y") ``` -#### Per Channel +#### Per Channel Delay Optimization ``` r delay_perchannel <- function(data_delay, dataset="Lemon") { @@ -247,7 +247,7 @@ data_dim <- read.csv("data_dimension.csv") |> Area = fct_relevel(Area, c("F", "C", "T", "P", "O"))) ``` -#### Per Channel +#### Per Channel Dimension Optimization ``` r dim_perchannel <- function(data_dim, dataset="Lemon") { diff --git a/studies/ecg_benchmark/README.md b/studies/ecg_benchmark/README.md index 05111f9456..1589093a1a 100644 --- a/studies/ecg_benchmark/README.md +++ b/studies/ecg_benchmark/README.md @@ -93,9 +93,9 @@ rpeaks = [pd.read_csv("../../data/gudb/Rpeaks.csv"), ## Study 1: Comparing Different R-Peaks Detection Algorithms -### Procedure +### Algorithm Comparison Procedure -#### Setup Functions +#### Algorithm Comparison Setup Functions ``` python import neurokit2 as nk @@ -107,19 +107,19 @@ def neurokit(ecg, sampling_rate): def pantompkins1985(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="pantompkins1985") return info["ECG_R_Peaks"] - + def hamilton2002(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="hamilton2002") return info["ECG_R_Peaks"] - + def martinez2003(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="martinez2003") return info["ECG_R_Peaks"] - + def christov2004(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="christov2004") return info["ECG_R_Peaks"] - + def gamboa2008(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="gamboa2008") return info["ECG_R_Peaks"] @@ -127,27 +127,27 @@ def gamboa2008(ecg, sampling_rate): def elgendi2010(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="elgendi2010") return info["ECG_R_Peaks"] - + def engzeemod2012(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="engzeemod2012") return info["ECG_R_Peaks"] - + def kalidas2017(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="kalidas2017") return info["ECG_R_Peaks"] - + def rodrigues2020(ecg, sampling_rate): signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="rodrigues2020") return info["ECG_R_Peaks"] ``` -#### Run the Benchmarking +#### Run the Algorithm Comparison Benchmarks *Note: This takes a long time (several hours).* ``` python results = [] -for method in [neurokit, pantompkins1985, hamilton2002, martinez2003, christov2004, +for method in [neurokit, pantompkins1985, hamilton2002, martinez2003, christov2004, gamboa2008, elgendi2010, engzeemod2012, kalidas2017, rodrigues2020]: for i in range(len(rpeaks)): data_ecg = pd.read_csv(ecgs[i]) @@ -159,33 +159,33 @@ results = pd.concat(results).reset_index(drop=True) results.to_csv("data_detectors.csv", index=False) ``` -### Results +### Algorithm Comparison Results ``` r 
library(tidyverse) library(easystats) library(lme4) -data <- read.csv("data_detectors.csv", stringsAsFactors = FALSE) %>% +data <- read.csv("data_detectors.csv", stringsAsFactors = FALSE) %>% mutate(Method = fct_relevel(Method, "neurokit", "pantompkins1985", "hamilton2002", "martinez2003", "christov2004", "gamboa2008", "elgendi2010", "engzeemod2012", "kalidas2017", "rodrigues2020")) -colors <- c("neurokit"="#E91E63", "pantompkins1985"="#f44336", "hamilton2002"="#FF5722", "martinez2003"="#FF9800", "christov2004"="#FFC107", "gamboa2008"="#4CAF50", "elgendi2010"="#009688", "engzeemod2012"="#2196F3", "kalidas2017"="#3F51B5", "rodrigues2020"="#9C27B0") +colors <- c("neurokit"="#E91E63", "pantompkins1985"="#f44336", "hamilton2002"="#FF5722", "martinez2003"="#FF9800", "christov2004"="#FFC107", "gamboa2008"="#4CAF50", "elgendi2010"="#009688", "engzeemod2012"="#2196F3", "kalidas2017"="#3F51B5", "rodrigues2020"="#9C27B0") ``` #### Errors and bugs ``` r -data %>% +data %>% mutate(Error = case_when( Error == "index -1 is out of bounds for axis 0 with size 0" ~ "index -1 out of bounds", Error == "index 0 is out of bounds for axis 0 with size 0" ~ "index 0 out of bounds", - TRUE ~ Error)) %>% - group_by(Database, Method) %>% - mutate(n = n()) %>% - group_by(Database, Method, Error) %>% - summarise(Percentage = n() / unique(n)) %>% - ungroup() %>% - mutate(Error = fct_relevel(Error, "None")) %>% + TRUE ~ Error)) %>% + group_by(Database, Method) %>% + mutate(n = n()) %>% + group_by(Database, Method, Error) %>% + summarise(Percentage = n() / unique(n)) %>% + ungroup() %>% + mutate(Error = fct_relevel(Error, "None")) %>% ggplot(aes(x=Error, y=Percentage, fill=Method)) + geom_bar(stat="identity", position = position_dodge2(preserve = "single")) + facet_wrap(~Database, nrow=5) + @@ -208,14 +208,14 @@ data <- filter(data, !is.na(Score)) #### Computation Time -##### Descriptive Statistics +##### Duration Descriptive Statistics ``` r # Normalize duration -data <- data %>% - mutate(Duration = (Duration) / (Recording_Length * Sampling_Rate)) +data <- data %>% + mutate(Duration = (Duration) / (Recording_Length * Sampling_Rate)) -data %>% +data %>% ggplot(aes(x=Method, y=Duration, fill=Method)) + geom_jitter2(aes(color=Method, group=Database), size=3, alpha=0.2, position=position_jitterdodge()) + geom_boxplot(aes(alpha=Database), outlier.alpha = 0) + @@ -244,7 +244,7 @@ data %>% -##### Statistical Modelling +##### Duration Statistical Modelling ``` r model <- lmer(Duration ~ Method + (1|Database) + (1|Participant), data=data) @@ -253,7 +253,7 @@ means <- modelbased::estimate_means(model) arrange(means, Mean) ## Estimated Marginal Means -## +## ## Method | Mean | SE | 95% CI ## ---------------------------------------------------- ## gamboa2008 | 2.90e-05 | 1.18e-05 | [0.00, 0.00] @@ -266,10 +266,10 @@ arrange(means, Mean) ## pantompkins1985 | 5.64e-04 | 1.17e-05 | [0.00, 0.00] ## elgendi2010 | 9.80e-04 | 1.18e-05 | [0.00, 0.00] ## christov2004 | 1.25e-03 | 1.17e-05 | [0.00, 0.00] -## +## ## Marginal means estimated at Method -means %>% +means %>% ggplot(aes(x=Method, y=Mean, color=Method)) + geom_line(aes(group=1), size=1) + geom_pointrange(aes(ymin=CI_low, ymax=CI_high), size=1) + @@ -293,14 +293,14 @@ substantially slower. original “true” R-peaks location. As such, the closest to zero, the better the accuracy. 
-##### Descriptive Statistics +##### Performance Descriptive Statistics ``` r -data <- data %>% - mutate(Outlier = performance::check_outliers(Score, threshold = list(zscore = stats::qnorm(p = 1 - 0.000001)))) %>% +data <- data %>% + mutate(Outlier = performance::check_outliers(Score, threshold = list(zscore = stats::qnorm(p = 1 - 0.000001)))) %>% filter(Outlier == 0) -data %>% +data %>% ggplot(aes(x=Database, y=Score)) + geom_boxplot(aes(fill=Method), outlier.alpha = 0, alpha=1) + geom_jitter2(aes(color=Method, group=Method), size=3, alpha=0.2, position=position_jitterdodge()) + @@ -310,12 +310,12 @@ data %>% scale_color_manual(values=colors) + scale_fill_manual(values=colors) + scale_y_sqrt() + - ylab("Amount of Error") + ylab("Amount of Error") ``` ![](../../studies/ecg_benchmark/figures/unnamed-chunk-10-1.png) -##### Statistical Modelling +##### Performance Statistical Modelling ``` r model <- lmer(Score ~ Method + (1|Database) + (1|Participant), data=data) @@ -324,7 +324,7 @@ means <- modelbased::estimate_means(model) arrange(means, abs(Mean)) ## Estimated Marginal Means -## +## ## Method | Mean | SE | 95% CI ## ------------------------------------------------ ## neurokit | 0.01 | 4.89e-03 | [0.00, 0.02] @@ -337,10 +337,10 @@ arrange(means, abs(Mean)) ## hamilton2002 | 0.08 | 5.18e-03 | [0.07, 0.09] ## elgendi2010 | 0.09 | 5.13e-03 | [0.08, 0.10] ## gamboa2008 | 0.22 | 8.02e-03 | [0.20, 0.24] -## +## ## Marginal means estimated at Method -means %>% +means %>% ggplot(aes(x=Method, y=Mean, color=Method)) + geom_line(aes(group=1), size=1) + geom_pointrange(aes(ymin=CI_low, ymax=CI_high), size=1) + @@ -348,7 +348,7 @@ means %>% theme_modern() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_color_manual(values=colors) + - ylab("Amount of Error") + ylab("Amount of Error") ``` ![](../../studies/ecg_benchmark/figures/unnamed-chunk-11-1.png) @@ -362,7 +362,7 @@ Discrepancies could be due to the differences in data and analysis, as here we used more databases and modelled them by respecting their hierarchical structure using mixed models. 
-### Conclusion +### Algorithm Comparison Conclusion Based on the accuracy / execution time criterion, it seems like `neurokit` is the best R-peak detection method, followed by @@ -370,9 +370,9 @@ Based on the accuracy / execution time criterion, it seems like ## Study 2: Normalization -### Procedure +### Normalization Procedure -#### Setup Functions +#### Normalization Setup Functions ``` python import neurokit2 as nk @@ -385,14 +385,14 @@ def mean_detrend(ecg, sampling_rate): ecg = nk.signal_detrend(ecg, order=0) signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] - + def standardize(ecg, sampling_rate): ecg = nk.standardize(ecg) signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] ``` -#### Run the Benchmarking +#### Run the Normalization Benchmarks *Note: This takes a long time (several hours).* @@ -409,33 +409,33 @@ results = pd.concat(results).reset_index(drop=True) results.to_csv("data_normalization.csv", index=False) ``` -### Results +### Normalization Results ``` r library(tidyverse) library(easystats) library(lme4) -data <- read.csv("data_normalization.csv", stringsAsFactors = FALSE) %>% +data <- read.csv("data_normalization.csv", stringsAsFactors = FALSE) %>% mutate(Database = ifelse(str_detect(Database, "GUDB"), paste0(str_replace(Database, "GUDB_", "GUDB ("), ")"), Database), Method = fct_relevel(Method, "none", "mean_removal", "standardization"), - Participant = paste0(Database, Participant)) %>% - filter(Error == "None") %>% + Participant = paste0(Database, Participant)) %>% + filter(Error == "None") %>% filter(!is.na(Score)) -colors <- c("none"="#607D8B", "mean_removal"="#673AB7", "standardization"="#00BCD4") +colors <- c("none"="#607D8B", "mean_removal"="#673AB7", "standardization"="#00BCD4") ``` -#### Accuracy +#### Normalized Accuracy -##### Descriptive Statistics +##### Normalized Performance Descriptive Statistics ``` r -data <- data %>% - mutate(Outlier = performance::check_outliers(Score, threshold = list(zscore = stats::qnorm(p = 1 - 0.000001)))) %>% +data <- data %>% + mutate(Outlier = performance::check_outliers(Score, threshold = list(zscore = stats::qnorm(p = 1 - 0.000001)))) %>% filter(Outlier == 0) -data %>% +data %>% ggplot(aes(x=Database, y=Score)) + geom_boxplot(aes(fill=Method), outlier.alpha = 0, alpha=1) + geom_jitter2(aes(color=Method, group=Method), size=3, alpha=0.2, position=position_jitterdodge()) + @@ -445,62 +445,62 @@ data %>% scale_color_manual(values=colors) + scale_fill_manual(values=colors) + scale_y_sqrt() + - ylab("Amount of Error") + ylab("Amount of Error") ``` ![](../../studies/ecg_benchmark/figures/unnamed-chunk-15-1.png) -##### Statistical Modelling +##### Normalized Performance Statistical Modelling ``` r model <- lmer(Score ~ Method + (1|Database) + (1|Participant), data=data) -modelbased::estimate_contrasts(model) +modelbased::estimate_contrasts(model) ## Marginal Contrasts Analysis -## +## ## Level1 | Level2 | Difference | 95% CI | SE | t(553.00) | p ## ------------------------------------------------------------------------------------------ ## mean_removal | standardization | -1.01e-07 | [ 0.00, 0.00] | 1.29e-07 | -0.78 | 0.716 ## none | mean_removal | -8.72e-08 | [ 0.00, 0.00] | 1.29e-07 | -0.68 | 0.777 ## none | standardization | -1.88e-07 | [ 0.00, 0.00] | 1.28e-07 | -1.47 | 0.308 -## +## ## Marginal contrasts estimated at Method ## p-value adjustment method: Holm (1979) means <- modelbased::estimate_means(model) 
arrange(means, abs(Mean)) ## Estimated Marginal Means -## +## ## Method | Mean | SE | 95% CI ## ---------------------------------------------------- ## none | 5.23e-03 | 5.14e-04 | [0.00, 0.01] ## mean_removal | 5.23e-03 | 5.14e-04 | [0.00, 0.01] ## standardization | 5.23e-03 | 5.14e-04 | [0.00, 0.01] -## +## ## Marginal means estimated at Method -means %>% +means %>% ggplot(aes(x=Method, y=Mean, color=Method)) + geom_line(aes(group=1), size=1) + geom_pointrange(aes(ymin=CI_low, ymax=CI_high), size=1) + theme_modern() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_color_manual(values=colors) + - ylab("Amount of Error") + ylab("Amount of Error") ``` ![](../../studies/ecg_benchmark/figures/unnamed-chunk-16-1.png) -### Conclusion +### Normalization Conclusion No significant benefits added by normalization for the `neurokit` method. ## Study 3: Low Frequency Trends Removal -### Procedure +### Trend Removal Procedure -#### Setup Functions +#### Trend Removal Setup Functions ``` python import neurokit2 as nk @@ -515,29 +515,29 @@ def polylength(ecg, sampling_rate): ecg = nk.signal_detrend(ecg, method="polynomial", order=int(length / 2)) signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] - + def tarvainen(ecg, sampling_rate): ecg = nk.signal_detrend(ecg, method="tarvainen2002") signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] - + def locreg(ecg, sampling_rate): - ecg = nk.signal_detrend(ecg, - method="locreg", + ecg = nk.signal_detrend(ecg, + method="locreg", window=1/0.5, stepsize=0.02) signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] - + def rollingz(ecg, sampling_rate): ecg = nk.standardize(ecg, window=sampling_rate*2) signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit") return info["ECG_R_Peaks"] - + # Filtering-based ``` -#### Run the Benchmarking +#### Run the Trend Removal Benchmarks *Note: This takes a very long time (several hours).* diff --git a/studies/eog_blinktemplate/README.md b/studies/eog_blinktemplate/README.md index 15284b5c98..f3ddfc0ab2 100644 --- a/studies/eog_blinktemplate/README.md +++ b/studies/eog_blinktemplate/README.md @@ -33,7 +33,7 @@ the functions parameters on this cleaner subset of events. ## Study 1: Initial Estimation -### Methods +### Initial Estimation Methods #### Define Functions @@ -61,7 +61,7 @@ def fit_scr(x, time_peak, rise, decay1, decay2): ft = ft[0 : len(x)] y = ft / np.max(ft) return y - + # Starting parameters plt.plot(fit_gamma(np.arange(100), 3, 3, 0.5), linewidth=2, linestyle='-', color="#4CAF50", label='Gamma') plt.plot(fit_scr(np.arange(100), 3.5, 0.5, 1, 1), linewidth=2, linestyle='-', color="#9C27B0", label='SCR') @@ -121,7 +121,7 @@ for i in range(4): params_scr = pd.concat([params_scr, p_scr], axis=0) ``` -### Results +### Initial Estimation Results Visualize the optimal templates for one task. @@ -151,7 +151,7 @@ plt.clf() ## Study 2: Difference between Template and EOG Events -### Methods +### Template vs. EOG Event Methods ``` python data_rmse = pd.DataFrame(columns=["RMSE", "Index", "Participant", "Task", "Function"]) @@ -191,7 +191,7 @@ for i in range(4): data_rmse = pd.concat([data_rmse, rmse], axis=0) ``` -### Results +### Template vs. 
EOG Event Results ``` python p = data_rmse.pivot(index='Index', columns='Function', values='RMSE').plot.kde() @@ -205,7 +205,7 @@ plt.clf() ## Study 3: Optimize the Parameters -### Methods +### Parameter Optimization Methods ``` python optimal_gamma = np.nanmedian(params_gamma.iloc[:, [0, 1, 2]], axis=0) @@ -308,7 +308,7 @@ print(df.median(axis=0)) ## dtype: float64 ``` -### Results +### Parameter Optimization Results ``` python data = pd.read_csv("../../data/eogdb/eogdb_task3.csv") From 3f56b8b752ff10993faa574e8fd76c0072d202e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:26:16 +0200 Subject: [PATCH 20/49] eda_intervalrelated: fix bulletpoints under return docstring --- neurokit2/eda/eda_intervalrelated.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/neurokit2/eda/eda_intervalrelated.py b/neurokit2/eda/eda_intervalrelated.py index 3e27101761..7cdebabd4a 100644 --- a/neurokit2/eda/eda_intervalrelated.py +++ b/neurokit2/eda/eda_intervalrelated.py @@ -17,6 +17,7 @@ def eda_intervalrelated(data, sampling_rate=1000, **kwargs): Parameters ---------- + data : Union[dict, pd.DataFrame] A DataFrame containing the different processed signal(s) as different columns, typically generated by :func:`eda_process` or :func:`bio_process`. Can also take a dict containing @@ -31,6 +32,7 @@ def eda_intervalrelated(data, sampling_rate=1000, **kwargs): DataFrame A dataframe containing the analyzed EDA features. The analyzed features consist of the following: + * ``"SCR_Peaks_N"``: the number of occurrences of Skin Conductance Response (SCR). * ``"SCR_Peaks_Amplitude_Mean"``: the mean amplitude of the SCR peak occurrences. * ``"EDA_Tonic_SD"``: the mean amplitude of the SCR peak occurrences. @@ -44,22 +46,22 @@ def eda_intervalrelated(data, sampling_rate=1000, **kwargs): .bio_process, eda_eventrelated Examples - ---------- + -------- .. 
ipython:: python - import neurokit2 as nk + import neurokit2 as nk - # Download data - data = nk.data("bio_resting_8min_100hz") + # Download data + data = nk.data("bio_resting_8min_100hz") - # Process the data - df, info = nk.eda_process(data["EDA"], sampling_rate=100) + # Process the data + df, info = nk.eda_process(data["EDA"], sampling_rate=100) - # Single dataframe is passed - nk.eda_intervalrelated(df, sampling_rate=100) + # Single dataframe is passed + nk.eda_intervalrelated(df, sampling_rate=100) - epochs = nk.epochs_create(df, events=[0, 25300], sampling_rate=100, epochs_end=20) - nk.eda_intervalrelated(epochs, sampling_rate=100) + epochs = nk.epochs_create(df, events=[0, 25300], sampling_rate=100, epochs_end=20) + nk.eda_intervalrelated(epochs, sampling_rate=100) """ From df1341ffda527e3a89329489ff2270db78ae0c45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:27:09 +0200 Subject: [PATCH 21/49] mne_to_df: shortened output to a summary of the results - Previously this was printing the entire result which took up 80% of the eeg doc page --- neurokit2/eeg/mne_to_df.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/neurokit2/eeg/mne_to_df.py b/neurokit2/eeg/mne_to_df.py index a4ed1a6c08..40299bcaea 100644 --- a/neurokit2/eeg/mne_to_df.py +++ b/neurokit2/eeg/mne_to_df.py @@ -83,15 +83,26 @@ def mne_to_dict(eeg): # Raw objects eeg = nk.mne_data("filt-0-40_raw") - nk.mne_to_dict(eeg) + eeg_dict = nk.mne_to_dict(eeg) + + # Print function result summary + eeg_dict_view = {k: f"Signal with length: {len(v)}" for k, v in eeg_dict.items()} + eeg_dict_view + # Epochs objects eeg = nk.mne_data("epochs") - nk.mne_to_dict(eeg) + eeg_epoch_dict = nk.mne_to_dict(eeg) + + # Print function result summary + list(eeg_epoch_dict.items())[:2] # Evoked objects eeg = nk.mne_data("evoked") - nk.mne_to_dict(eeg) + eeg_evoked_dict = nk.mne_to_dict(eeg) + + # Print function result summary + eeg_evoked_dict """ return _mne_convert(eeg, to_what="dict") From 2cd2d23cdc9d310332c98c649f2d96e9bb0f7ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:28:58 +0200 Subject: [PATCH 22/49] events_find: Add empty line at the start of ipython directive - Without this blank line, the directive does not execute --- neurokit2/events/events_find.py | 1 + 1 file changed, 1 insertion(+) diff --git a/neurokit2/events/events_find.py b/neurokit2/events/events_find.py index 9e5c207a5f..cb9ea0f185 100644 --- a/neurokit2/events/events_find.py +++ b/neurokit2/events/events_find.py @@ -137,6 +137,7 @@ def events_find( Convert the event condition results its human readable representation .. 
ipython:: python + value_to_condition = {1: "Stimulus 1", 2: "Stimulus 2", 3: "Stimulus 3"} events["condition"] = [value_to_condition[id] for id in events["condition"]] events From d90be5990552120683af5ab0f885656dc123cd18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:29:59 +0200 Subject: [PATCH 23/49] progress_bar: fix execution of example - This directive did not execute due to a missing space in the directive call --- neurokit2/misc/progress_bar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neurokit2/misc/progress_bar.py b/neurokit2/misc/progress_bar.py index 95da41900b..6cd9c89694 100644 --- a/neurokit2/misc/progress_bar.py +++ b/neurokit2/misc/progress_bar.py @@ -19,7 +19,7 @@ def progress_bar(it, prefix="", size=40, verbose=True): Examples -------- - ..ipython:: python + .. ipython:: python import neurokit2 as nk From 97437aea0ed4c6c23034d7ce0067c696a98f202a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 16:31:11 +0200 Subject: [PATCH 24/49] rsp_process: fix bullet point list and indentation - Bullet point list lacked an empty line - One bullet point had the incorrect indentation --- neurokit2/rsp/rsp_process.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neurokit2/rsp/rsp_process.py b/neurokit2/rsp/rsp_process.py index 61acd3ac58..9e32036fee 100644 --- a/neurokit2/rsp/rsp_process.py +++ b/neurokit2/rsp/rsp_process.py @@ -68,7 +68,8 @@ def rsp_process( * ``"RSP_Phase"``: breathing phase, marked by "1" for inspiration and "0" for expiration. * ``"RSP_Phase_Completion"``: breathing phase completion, expressed in percentage (from 0 to 1), representing the stage of the current respiratory phase. - * ``"RSP_RVT"``: respiratory volume per time (RVT). + * ``"RSP_RVT"``: respiratory volume per time (RVT). + info : dict A dictionary containing the samples at which inhalation peaks and exhalation troughs occur, accessible with the keys ``"RSP_Peaks"``, and ``"RSP_Troughs"`` respectively, as well as the From 5b667ee15409479465c288f116844548d66dc8a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 19:28:50 +0200 Subject: [PATCH 25/49] signal_(interpolate|detrend): added missing spaces - interpolate: missing space before quadratic to make the highlighting work - interpolate: added more precise linking - detrend: entire docstring was misaligned by missing some empty lines --- neurokit2/signal/signal_detrend.py | 2 ++ neurokit2/signal/signal_interpolate.py | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/neurokit2/signal/signal_detrend.py b/neurokit2/signal/signal_detrend.py index 14ac8f96fb..8c506c5581 100644 --- a/neurokit2/signal/signal_detrend.py +++ b/neurokit2/signal/signal_detrend.py @@ -18,10 +18,12 @@ def signal_detrend( sampling_rate=1000, ): """**Signal Detrending** + Apply a baseline (order = 0), linear (order = 1), or polynomial (order > 1) detrending to the signal (i.e., removing a general trend). One can also use other methods, such as smoothness priors approach described by Tarvainen (2002) or LOESS regression, but these scale badly for long signals. 
+ Parameters ---------- signal : Union[list, np.array, pd.Series] diff --git a/neurokit2/signal/signal_interpolate.py b/neurokit2/signal/signal_interpolate.py index 8110cc2b6e..c44e8053c1 100644 --- a/neurokit2/signal/signal_interpolate.py +++ b/neurokit2/signal/signal_interpolate.py @@ -32,13 +32,13 @@ def signal_interpolate( method : str Method of interpolation. Can be ``"linear"``, ``"nearest"``, ``"zero"``, ``"slinear"``, ``"quadratic"``, ``"cubic"``, ``"previous"``, ``"next"``, ``"monotone_cubic"``, or ``"akima"``. - The methods ``"zero"``, ``"slinear"``,``"quadratic"`` and ``"cubic"`` refer to a spline + The methods ``"zero"``, ``"slinear"``, ``"quadratic"`` and ``"cubic"`` refer to a spline interpolation of zeroth, first, second or third order; whereas ``"previous"`` and ``"next"`` simply return the previous or next value of the point. An integer specifying the order of the spline interpolator to use. - See `here `_ for details on the ``"monotone_cubic"`` method. - See `here `_ for details on the ``"akima"`` method. fill_value : float or tuple or str If a ndarray (or float), this value will be used to fill in for @@ -87,6 +87,7 @@ def signal_interpolate( plt.scatter(x_values, signal, label="original datapoints", zorder=3) @suppress plt.close() + """ # Sanity checks if x_values is None: From 840fbe3e6dfbece20d829c312651b497d97eee39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 19:34:42 +0200 Subject: [PATCH 26/49] docs-check workflow: added missing pickleshare dependency --- .github/workflows/docs-check.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml index f479083e4a..79b6f5891f 100644 --- a/.github/workflows/docs-check.yml +++ b/.github/workflows/docs-check.yml @@ -38,6 +38,7 @@ jobs: pip install EMD-signal pip install cvxopt pip install ts2vg + pip install pickleshare pip install https://github.com/neuropsychology/neurokit/zipball/dev - name: Build documentation 📜 From a235631b65aa23278a7e792aa9030af86915abf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 21 Jul 2024 22:20:30 +0200 Subject: [PATCH 27/49] added @DerAndereJohannes as a contributor --- AUTHORS.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.rst b/AUTHORS.rst index 50cd43a3d4..31ebdffec2 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -52,6 +52,7 @@ Contributors * `Jannik Gut `_ * `Nattapong Thammasan `_ *(OnePlanet, Netherlands)* * `Marek Sokol `_ *(Faculty of Biomedical Engineering of the CTU in Prague, Czech Republic)* +* `Johannes Herforth `_ *(University of Luxembourg, Luxembourg)* Thanks also to `Chuan-Peng Hu `_, `@ucohen `_, `Anthony Gatti `_, `Julien Lamour `_, `@renatosc `_, `Nicolas Beaudoin-Gagnon `_ and `@rubinovitz `_ for their contribution in `NeuroKit 1 `_. 
From 530da0b113c701a781057b1419f46f1e14cac8b7 Mon Sep 17 00:00:00 2001 From: danibene <34680344+danibene@users.noreply.github.com> Date: Thu, 25 Jul 2024 16:30:44 -0400 Subject: [PATCH 28/49] use underscore in database name for annotations to be consistent with database name for ECG data --- data/gudb/download_gudb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/gudb/download_gudb.py b/data/gudb/download_gudb.py index cae0e02d5f..630e65f253 100644 --- a/data/gudb/download_gudb.py +++ b/data/gudb/download_gudb.py @@ -41,7 +41,7 @@ anno = pd.DataFrame({"Rpeaks": ecg_class.anno_cs}) anno["Participant"] = "GUDB_%.2i" %(participant) anno["Sampling_Rate"] = 250 - anno["Database"] = "GUDB (" + experiment + ")" + anno["Database"] = "GUDB_" + experiment # Store with the rest dfs_ecg.append(data) From 19c140650377a0b24cadbf09690178675fd57a93 Mon Sep 17 00:00:00 2001 From: danibene <34680344+danibene@users.noreply.github.com> Date: Thu, 25 Jul 2024 16:31:00 -0400 Subject: [PATCH 29/49] fix typo in comment --- data/gudb/download_gudb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/gudb/download_gudb.py b/data/gudb/download_gudb.py index 630e65f253..b56f8f1a08 100644 --- a/data/gudb/download_gudb.py +++ b/data/gudb/download_gudb.py @@ -28,7 +28,7 @@ # creating class which loads the experiment ecg_class = ecg_gudb_database.GUDb(participant, experiment) - # Chest Strap Data - only donwload if R-peaks annotations are available + # Chest Strap Data - only download if R-peaks annotations are available if ecg_class.anno_cs_exists: data = pd.DataFrame({"ECG": ecg_class.cs_V2_V1}) From 93a26cf64f1d57a355bf7c2f93fc4c128109e9fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 27 Jul 2024 14:47:52 +0200 Subject: [PATCH 30/49] Added codebookadd directive to sphinx documentation --- docs/_static/neurokit_codebook.csv | 0 docs/conf.py | 5 +- docs/directives/csv_codebook_directive.py | 58 +++++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 docs/_static/neurokit_codebook.csv create mode 100644 docs/directives/csv_codebook_directive.py diff --git a/docs/_static/neurokit_codebook.csv b/docs/_static/neurokit_codebook.csv new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/conf.py b/docs/conf.py index 635cf94de6..1580ffe22b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -21,7 +21,7 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath("../")) @@ -69,6 +69,7 @@ def find_version(): "sphinxemoji.sphinxemoji", "sphinx_copybutton", "myst_nb", + "directives.csv_codebook_directive", ] # Add any paths that contain templates here, relative to this directory. @@ -140,4 +141,4 @@ def find_version(): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
-# html_static_path = ["_static"] +html_static_path = ["_static"] diff --git a/docs/directives/csv_codebook_directive.py b/docs/directives/csv_codebook_directive.py new file mode 100644 index 0000000000..fe6ef4d6de --- /dev/null +++ b/docs/directives/csv_codebook_directive.py @@ -0,0 +1,58 @@ +import csv +import os +from docutils import nodes +from docutils.parsers.rst import Directive + +class CSVDocDirective(Directive): + has_content = True + + def run(self): + # Codebook path + csv_file_path = os.path.join(os.path.abspath('.'), "_static", "neurokit_codebook.csv") + + # Check if the file exists and whether it is empty + file_empty = not os.path.exists(csv_file_path) or os.stat(csv_file_path).st_size == 0 + + # List to hold bullet list nodes + bullet_list = nodes.bullet_list() + + # Open the CSV file and append the content + with open(csv_file_path, 'a', newline='', encoding='utf-8') as csvfile: + writer = csv.writer(csvfile) + # writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL) + + # Write header if file is newly created or empty + if file_empty: + header = ['Field Name', 'Field Description'] + writer.writerow(header) + + # Iterate through rows: add them to the codebook and add them to the page + for line in self.content: + fields = line.split(',') + writer.writerow([field.strip() for field in fields]) + + if len(fields) >= 2: + paragraph = nodes.paragraph() + + # Create backtick formatting around the field name + field1 = nodes.literal('', '', nodes.Text(fields[0].strip())) + + # Add the remainder of the line + colon_space = nodes.Text(': ') + field2 = nodes.Text(fields[1].strip()) + + # Add all the parts to the paragraph + paragraph += field1 + paragraph += colon_space + paragraph += field2 + + # Add to the bullet point list + list_item = nodes.list_item() + list_item += paragraph + bullet_list += list_item + + return [bullet_list] + + +def setup(app): + app.add_directive("codebookadd", CSVDocDirective) From 834a95d884dfa4ae9331e5521f5870f92af90fb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 27 Jul 2024 14:49:04 +0200 Subject: [PATCH 31/49] added initial codebook page --- docs/codebook.rst | 51 +++++++++++++++++++++++++++++++++++++++++++++++ docs/index.rst | 1 + 2 files changed, 52 insertions(+) create mode 100644 docs/codebook.rst diff --git a/docs/codebook.rst b/docs/codebook.rst new file mode 100644 index 0000000000..97589e3486 --- /dev/null +++ b/docs/codebook.rst @@ -0,0 +1,51 @@ +Codebook +======== + +Here you can download the complete codebook which details the structure of data used throughout this documentation. + +.. raw:: html + + + +This codebook contains detailed descriptions of all variables, their possible values, and additional metadata. + +.. raw:: html + + + +
+ +
+
+ + diff --git a/docs/index.rst b/docs/index.rst index 801447140a..3c9218c68b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -35,6 +35,7 @@ You can navigate to the different sections using the left panel. We recommend ch installation authors cite_us + codebook examples/index functions/index resources/index From 2298813ad9e4c1a760fb9b2d743b5e5535a55e22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 27 Jul 2024 17:29:00 +0200 Subject: [PATCH 32/49] altered codebook.rst csv parsing - now handles quoted fields --- docs/codebook.rst | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/docs/codebook.rst b/docs/codebook.rst index 97589e3486..1ae5e8abec 100644 --- a/docs/codebook.rst +++ b/docs/codebook.rst @@ -33,19 +33,54 @@ This codebook contains detailed descriptions of all variables, their possible va
+ From 70e48cb174396beafe09bebf80507cc21e490649 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 27 Jul 2024 19:04:23 +0200 Subject: [PATCH 33/49] Change the directive to use | as delimiter --- docs/directives/csv_codebook_directive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/directives/csv_codebook_directive.py b/docs/directives/csv_codebook_directive.py index fe6ef4d6de..a12e83727a 100644 --- a/docs/directives/csv_codebook_directive.py +++ b/docs/directives/csv_codebook_directive.py @@ -28,7 +28,7 @@ def run(self): # Iterate through rows: add them to the codebook and add them to the page for line in self.content: - fields = line.split(',') + fields = line.split('|') writer.writerow([field.strip() for field in fields]) if len(fields) >= 2: From 685768fe49566ce86897b462f8d0163df3b99dcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sat, 27 Jul 2024 23:18:07 +0200 Subject: [PATCH 34/49] updated the *_process files with the new codebook directive --- neurokit2/ecg/ecg_process.py | 43 ++++++++++++++++++------------------ neurokit2/eda/eda_process.py | 35 +++++++++-------------------- neurokit2/emg/emg_process.py | 14 ++++++------ neurokit2/eog/eog_process.py | 9 ++++---- neurokit2/ppg/ppg_process.py | 9 ++++---- neurokit2/rsp/rsp_process.py | 22 +++++++++--------- 6 files changed, 59 insertions(+), 73 deletions(-) diff --git a/neurokit2/ecg/ecg_process.py b/neurokit2/ecg/ecg_process.py index 39a6274a0a..23357a28e1 100644 --- a/neurokit2/ecg/ecg_process.py +++ b/neurokit2/ecg/ecg_process.py @@ -39,28 +39,25 @@ def ecg_process(ecg_signal, sampling_rate=1000, method="neurokit"): signals : DataFrame A DataFrame of the same length as the ``ecg_signal`` containing the following columns: - * ``"ECG_Raw"``: the raw signal. - * ``"ECG_Clean"``: the cleaned signal. - * ``"ECG_Rate"``: heart rate interpolated between R-peaks. - * ``"ECG_Quality"``: the quality of the cleaned signal - * ``"ECG_R_Peaks"``: the R-peaks marked as "1" in a list of zeros. - * ``"ECG_P_Peaks"``: the P-peaks marked as "1" in a list of zeros - * ``"ECG_P_Onsets"``: the P-onsets marked as "1" in a list of zeros. - * ``"ECG_P_Offsets"``: the P-offsets marked as "1" in a list of zeros. - * ``"ECG_Q_Peaks"``: the Q-peaks marked as "1" in a list of zeros . - * ``"ECG_R_Onsets"``: the R-onsets marked as "1" in a list of zeros. - * ``"ECG_R_Offsets"``: the R-offsets marked as "1" in a list of zeros. - * ``"ECG_S_Peaks"``: the S-peaks marked as "1" in a list of zeros. - * ``"ECG_T_Peaks"``: the T-peaks marked as "1" in a list of zeros. - * ``"ECG_T_Onsets"``: the T-onsets marked as "1" in a list of zeros. - * ``"ECG_T_Offsets"``: the T-offsets marked as "1" in a list of zeros. - * ``"ECG_Phase_Atrial"``: cardiac phase, marked by "1" for systole and "0" for diastole. - * ``"ECG_Phase_Completion_Atrial"``: cardiac phase (atrial) completion, expressed in - percentage (from 0 to 1), representing the stage of the current cardiac phase. - * ``"ECG_Phase_Ventricular"``: cardiac phase, marked by "1" for systole and "0" for - diastole. - * ``"ECG_Phase_Completion_Ventricular"``: cardiac phase (ventricular) completion, expressed - in percentage (from 0 to 1), representing the stage of the current cardiac phase. + .. codebookadd:: + ECG_Raw|The raw signal. + ECG_Clean|The cleaned signal. + ECG_Rate|Heart rate interpolated between R-peaks. + ECG_Quality|The quality of the cleaned signal. 
+ ECG_R_Peaks|The R-peaks marked as "1" in a list of zeros. + ECG_R_Onsets|The R-onsets marked as "1" in a list of zeros. + ECG_R_Offsets|The R-offsets marked as "1" in a list of zeros. + ECG_P_Peaks|The P-peaks marked as "1" in a list of zeros. + ECG_P_Onsets|The P-onsets marked as "1" in a list of zeros. + ECG_P_Offsets|The P-offsets marked as "1" in a list of zeros. + ECG_Q_Peaks|The Q-peaks marked as "1" in a list of zeros. + ECG_S_Peaks|The S-peaks marked as "1" in a list of zeros. + ECG_T_Peaks|The T-peaks marked as "1" in a list of zeros. + ECG_T_Onsets|The T-onsets marked as "1" in a list of zeros. + ECG_T_Offsets|The T-offsets marked as "1" in a list of zeros. + ECG_Phase_Atrial|Cardiac phase, marked by "1" for systole and "0" for diastole. + ECG_Phase_Completion_Atrial|Cardiac phase (atrial) completion, expressed in percentage (from 0 to 1), representing the stage of the current cardiac phase. + ECG_Phase_Completion_Ventricular|Cardiac phase (ventricular) completion, expressed in percentage (from 0 to 1), representing the stage of the current cardiac phase. rpeaks : dict A dictionary containing the samples at which the R-peaks occur, accessible with the key @@ -88,6 +85,8 @@ def ecg_process(ecg_signal, sampling_rate=1000, method="neurokit"): @suppress plt.close() + + """ # Sanitize and clean input diff --git a/neurokit2/eda/eda_process.py b/neurokit2/eda/eda_process.py index 862dd5cd2d..b162eb2104 100644 --- a/neurokit2/eda/eda_process.py +++ b/neurokit2/eda/eda_process.py @@ -40,31 +40,18 @@ def eda_process( A DataFrame of same length as ``"eda_signal"`` containing the following columns: - * ``"EDA_Raw"``: the raw signal. + .. codebookadd:: + EDA_Raw|The raw signal. + EDA_Clean|The cleaned signal. + EDA_Tonic|The tonic component of the signal, or the Tonic Skin Conductance Level (SCL). + EDA_Phasic|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). + SCR_Onsets|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. + SCR_Peaks|The samples at which the peaks occur, marked as "1" in a list of zeros. + SCR_Height|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. + SCR_Amplitude|The SCR amplitude of the signal excluding the Tonic component. + SCR_RiseTime|The SCR amplitude of the signal excluding the Tonic component. + SCR_Recovery|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. - * ``"EDA_Clean"``: the cleaned signal. - - * ``"EDA_Tonic"``: the tonic component of the signal, or the Tonic Skin Conductance Level - (SCL). - - * ``"EDA_Phasic"``: the phasic component of the signal, or the Phasic Skin Conductance - Response (SCR). - - * ``"SCR_Onsets"``: the samples at which the onsets of the peaks occur, marked as "1" in a - list of zeros. - - * ``"SCR_Peaks"``: the samples at which the peaks occur, marked as "1" in a list of zeros. - - * ``"SCR_Height"``: the SCR amplitude of the signal including the Tonic component. Note that - cumulative effects of close-occurring SCRs might lead to an underestimation of the - amplitude. - - * ``"SCR_Amplitude"``: the SCR amplitude of the signal excluding the Tonic component. - - * ``"SCR_RiseTime"``: the time taken for SCR onset to reach peak amplitude within the SCR. - - * ``"SCR_Recovery"``: the samples at which SCR peaks recover (decline) to half amplitude, - marked as "1" in a list of zeros. 
info : dict A dictionary containing the information of each SCR peak (see :func:`eda_findpeaks`), as well as the signals' sampling rate. diff --git a/neurokit2/emg/emg_process.py b/neurokit2/emg/emg_process.py index 91229ae890..2f08de6f67 100644 --- a/neurokit2/emg/emg_process.py +++ b/neurokit2/emg/emg_process.py @@ -35,13 +35,13 @@ def emg_process(emg_signal, sampling_rate=1000, report=None, **kwargs): signals : DataFrame A DataFrame of same length as ``emg_signal`` containing the following columns: - * ``"EMG_Raw"``: the raw signal. - * ``"EMG_Clean"``: the cleaned signal. - * ``"EMG_Amplitude"``: the signal amplitude, or the activation level of the signal. - * ``"EMG_Activity"``: the activity of the signal for which amplitude exceeds the threshold - specified,marked as "1" in a list of zeros. - * ``"EMG_Onsets"``: the onsets of the amplitude, marked as "1" in a list of zeros. - * ``"EMG_Offsets"``: the offsets of the amplitude, marked as "1" in a list of zeros. + .. codebookadd:: + EMG_Raw|The raw EMG signal. + EMG_Clean|The cleaned EMG signal. + EMG_Amplitude|The signal amplitude, or the activation of the signal. + EMG_Activity|The activity of the signal for which amplitude exceeds the threshold specified,marked as "1" in a list of zeros. + EMG_Onsets|The onsets of the amplitude, marked as "1" in a list of zeros. + EMG_Offsets|The offsets of the amplitude, marked as "1" in a list of zeros. info : dict A dictionary containing the information of each amplitude onset, offset, and peak activity diff --git a/neurokit2/eog/eog_process.py b/neurokit2/eog/eog_process.py index 13ec77a858..1cf1823c7b 100644 --- a/neurokit2/eog/eog_process.py +++ b/neurokit2/eog/eog_process.py @@ -30,10 +30,11 @@ def eog_process(veog_signal, sampling_rate=1000, **kwargs): signals : DataFrame A DataFrame of the same length as the :func:`.eog_signal` containing the following columns: - * ``"EOG_Raw"``: the raw signal. - * ``"EOG_Clean"``: the cleaned signal. - * ``"EOG_Blinks"``: the blinks marked as "1" in a list of zeros. - * ``"EOG_Rate"``: eye blinks rate interpolated between blinks. + .. codebookadd:: + EOG_Raw|The raw signal. + EOG_Clean|The cleaned signal. + EOG_Blinks|The blinks marked as "1" in a list of zeros. + EOG_Rate|Eye blink rate interpolated between blinks info : dict A dictionary containing the samples at which the eye blinks occur, accessible with the key diff --git a/neurokit2/ppg/ppg_process.py b/neurokit2/ppg/ppg_process.py index 21a94993ba..31302aca2c 100644 --- a/neurokit2/ppg/ppg_process.py +++ b/neurokit2/ppg/ppg_process.py @@ -44,10 +44,11 @@ def ppg_process( signals : DataFrame A DataFrame of same length as :func:`.ppg_signal` containing the following columns: - * ``"PPG_Raw"``: the raw signal. - * ``"PPG_Clean"``: the cleaned signal. - * ``"PPG_Rate"``: the heart rate as measured based on PPG peaks. - * ``"PPG_Peaks"``: the PPG peaks marked as "1" in a list of zeros. + .. codebookadd:: + PPG_Raw|The raw signal. + PPG_Clean|The cleaned signal. + PPG_Rate|The heart rate as measured based on PPG peaks. + PPG_Peaks|The PPG peaks marked as "1" in a list of zeros. info : dict A dictionary containing the information of peaks and the signals' sampling rate. 
diff --git a/neurokit2/rsp/rsp_process.py b/neurokit2/rsp/rsp_process.py
index 9e32036fee..c7c3074a8c 100644
--- a/neurokit2/rsp/rsp_process.py
+++ b/neurokit2/rsp/rsp_process.py
@@ -57,18 +57,16 @@ def rsp_process(
     signals : DataFrame
         A DataFrame of same length as :func:`.rsp_signal` containing the following columns:
 
-        * ``"RSP_Raw"``: the raw signal.
-        * ``"RSP_Clean"``: the cleaned signal.
-        * ``"RSP_Peaks"``: the respiratory peaks (exhalation onsets) marked as "1" in a list of
-          zeros.
-        * ``"RSP_Troughs"``: the respiratory troughs (inhalation onsets) marked as "1" in a list of
-          zeros.
-        * ``"RSP_Rate"``: breathing rate interpolated between inhalation peaks.
-        * ``"RSP_Amplitude"``: breathing amplitude interpolated between inhalation peaks.
-        * ``"RSP_Phase"``: breathing phase, marked by "1" for inspiration and "0" for expiration.
-        * ``"RSP_Phase_Completion"``: breathing phase completion, expressed in percentage (from 0 to
-          1), representing the stage of the current respiratory phase.
-        * ``"RSP_RVT"``: respiratory volume per time (RVT).
+        .. codebookadd::
+            RSP_Raw|The raw signal.
+            RSP_Clean|The cleaned signal.
+            RSP_Peaks|The respiratory peaks (exhalation onsets) marked as "1" in a list of zeros.
+            RSP_Troughs|The respiratory troughs (inhalation onsets) marked as "1" in a list of zeros.
+            RSP_Rate|The breathing rate interpolated between inhalation peaks.
+            RSP_Amplitude|The breathing amplitude interpolated between inhalation peaks.
+            RSP_Phase|The breathing phase, marked by "1" for inspiration and "0" for expiration.
+            RSP_Phase_Completion|The breathing phase completion, expressed in percentage (from 0 to 1), representing the stage of the current respiratory phase.
+            RSP_RVT|Respiratory volume per time (RVT).
 
     info : dict
         A dictionary containing the samples at which inhalation peaks and exhalation troughs occur,

From dfa7c6533f125a2688120dedd49d090a1f762caf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?=
Date: Sat, 27 Jul 2024 23:40:55 +0200
Subject: [PATCH 35/49] added codebookadd directive to the eda components

---
 neurokit2/eda/eda_eventrelated.py    | 23 +++++++----------------
 neurokit2/eda/eda_intervalrelated.py |  8 +++++---
 neurokit2/eda/eda_process.py         | 20 ++++++++++----------
 neurokit2/eda/eda_sympathetic.py     |  4 ++++
 4 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/neurokit2/eda/eda_eventrelated.py b/neurokit2/eda/eda_eventrelated.py
index 732088672a..b843d6eee5 100644
--- a/neurokit2/eda/eda_eventrelated.py
+++ b/neurokit2/eda/eda_eventrelated.py
@@ -30,22 +30,13 @@ def eda_eventrelated(epochs, silent=False):
     by the `Label` column (if not present, by the `Index` column). The analyzed features consist
     the following:
 
-    * ``"EDA_SCR"``: indication of whether Skin Conductance Response (SCR) occurs following the event
-      (1 if an SCR onset is present and 0 if absent) and if so, its corresponding peak amplitude,
-      time of peak, rise and recovery time. If there is no occurrence of SCR, nans are displayed
-      for the below features.
-
-    * ``"EDA_Peak_Amplitude"``: the maximum amplitude of the phasic component of the signal.
-
-    * ``"SCR_Peak_Amplitude"``: the peak amplitude of the first SCR in each epoch.
-
-    * ``"SCR_Peak_Amplitude_Time"``: the timepoint of each first SCR peak amplitude.
-
-    * ``"SCR_RiseTime"``: the risetime of each first SCR i.e., the time it takes for SCR to
-      reach peak amplitude from onset.
-
-    * ``"SCR_RecoveryTime"``: the half-recovery time of each first SCR i.e., the time it takes
-      for SCR to decrease to half amplitude.
+ .. codebookadd:: + EDA_SCR|Electrodermal activity|indication of whether Skin Conductance Response (SCR) occurs following the event (1 if an SCR onset is present and 0 if absent) and if so, its corresponding peak amplitude, time of peak, rise and recovery time. If there is no occurrence of SCR, nans are displayed for the below features. + EDA_Peak_Amplitude|Electrodermal activity|The maximum amplitude of the phasic component of the signal. + SCR_Peak_Amplitude|Electrodermal activity|The peak amplitude of the first SCR in each epoch. + SCR_Peak_Amplitude_Time|Electrodermal activity|The timepoint of each first SCR peak amplitude. + SCR_RiseTime|Electrodermal activity|The risetime of each first SCR i.e., the time it takes for SCR to reach peak amplitude from onset. + SCR_RecoveryTime|Electrodermal activity|The half-recovery time of each first SCR i.e., the time it takes for SCR to decrease to half amplitude. See Also -------- diff --git a/neurokit2/eda/eda_intervalrelated.py b/neurokit2/eda/eda_intervalrelated.py index 7cdebabd4a..13bbf6f25a 100644 --- a/neurokit2/eda/eda_intervalrelated.py +++ b/neurokit2/eda/eda_intervalrelated.py @@ -33,9 +33,11 @@ def eda_intervalrelated(data, sampling_rate=1000, **kwargs): A dataframe containing the analyzed EDA features. The analyzed features consist of the following: - * ``"SCR_Peaks_N"``: the number of occurrences of Skin Conductance Response (SCR). - * ``"SCR_Peaks_Amplitude_Mean"``: the mean amplitude of the SCR peak occurrences. - * ``"EDA_Tonic_SD"``: the mean amplitude of the SCR peak occurrences. + .. codebookadd:: + SCR_Peaks_N|Electrodermal activity|The number of occurrences of Skin Conductance Response (SCR). + SCR_Peaks_Amplitude_Mean|Electrodermal activity|The mean amplitude of the SCR peak occurrences. + EDA_Tonic_SD|Electrodermal activity|The mean amplitude of the SCR peak occurrences. + * ``"EDA_Sympathetic"``: see :func:`eda_sympathetic` (only computed if signal duration > 64 sec). * ``"EDA_Autocorrelation"``: see :func:`eda_autocor` (only computed if signal duration diff --git a/neurokit2/eda/eda_process.py b/neurokit2/eda/eda_process.py index b162eb2104..dd42439de6 100644 --- a/neurokit2/eda/eda_process.py +++ b/neurokit2/eda/eda_process.py @@ -41,16 +41,16 @@ def eda_process( columns: .. codebookadd:: - EDA_Raw|The raw signal. - EDA_Clean|The cleaned signal. - EDA_Tonic|The tonic component of the signal, or the Tonic Skin Conductance Level (SCL). - EDA_Phasic|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). - SCR_Onsets|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. - SCR_Peaks|The samples at which the peaks occur, marked as "1" in a list of zeros. - SCR_Height|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. - SCR_Amplitude|The SCR amplitude of the signal excluding the Tonic component. - SCR_RiseTime|The SCR amplitude of the signal excluding the Tonic component. - SCR_Recovery|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. + EDA_Raw|Electrodermal activity|The raw signal. + EDA_Clean|Electrodermal activity|The cleaned signal. + EDA_Tonic|Electrodermal activity|The tonic component of the signal, or the Tonic Skin Conductance Level (SCL). + EDA_Phasic|Electrodermal activity|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). 
+ SCR_Onsets|Electrodermal activity|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. + SCR_Peaks|Electrodermal activity|The samples at which the peaks occur, marked as "1" in a list of zeros. + SCR_Height|Electrodermal activity|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. + SCR_Amplitude|Electrodermal activity|The SCR amplitude of the signal excluding the Tonic component. + SCR_RiseTime|Electrodermal activity|The SCR amplitude of the signal excluding the Tonic component. + SCR_Recovery|Electrodermal activity|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. info : dict A dictionary containing the information of each SCR peak (see :func:`eda_findpeaks`), diff --git a/neurokit2/eda/eda_sympathetic.py b/neurokit2/eda/eda_sympathetic.py index 3a9f311d0a..dc17a9823a 100644 --- a/neurokit2/eda/eda_sympathetic.py +++ b/neurokit2/eda/eda_sympathetic.py @@ -47,6 +47,10 @@ def eda_sympathetic( ``"EDA_Sympathetic"`` and ``"EDA_SympatheticN"`` (normalized, obtained by dividing EDA_Symp by total power). + .. codebookadd:: + EDA_Sympathetic|Electrodermal activity|Derived from Posada-Quintero et al. (2016), who argue that dynamics of the sympathetic component of EDA signal is represented in the frequency band of 0.045-0.25Hz. + EDA_SympatheticN|Electrodermal activity|normalized version of "EDA_Sympathetic" obtained by dividing EDA_Sympathetic by total power + Examples -------- .. ipython:: python From 373d14bf93b541db9f85c143a8c06540e94e595b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 00:01:04 +0200 Subject: [PATCH 36/49] added codebookadd directive to the eda components --- neurokit2/ecg/ecg_eventrelated.py | 33 +++++++++++++--------------- neurokit2/ecg/ecg_intervalrelated.py | 4 +++- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/neurokit2/ecg/ecg_eventrelated.py b/neurokit2/ecg/ecg_eventrelated.py index c3ea51f229..bb3cea31cc 100644 --- a/neurokit2/ecg/ecg_eventrelated.py +++ b/neurokit2/ecg/ecg_eventrelated.py @@ -30,28 +30,25 @@ def ecg_eventrelated(epochs, silent=False): by the `Label` column (if not present, by the `Index` column). The analyzed features consist of the following: - * ``ECG_Rate_Max``: the maximum heart rate after stimulus onset. - * ``ECG_Rate_Min``: the minimum heart rate after stimulus onset. - * ``ECG_Rate_Mean``: the mean heart rate after stimulus onset. - * ``ECG_Rate_SD``: the standard deviation of the heart rate after stimulus onset. - * ``ECG_Rate_Max_Time``: the time at which maximum heart rate occurs. - * ``ECG_Rate_Min_Time``: the time at which minimum heart rate occurs. - * ``ECG_Phase_Atrial``: indication of whether the onset of the event concurs with - respiratory systole (1) or diastole (0). - * ``ECG_Phase_Ventricular``: indication of whether the onset of the event concurs with - respiratory systole (1) or diastole (0). - * ``ECG_Phase_Atrial_Completion``: indication of the stage of the current cardiac (atrial) - phase (0 to 1) at the onset of the event. - * ``ECG_Phase_Ventricular_Completion``: indication of the stage of the current cardiac - (ventricular) phase (0 to 1) at the onset of the event. + .. codebookadd:: + ECG_Rate_Max|The maximum heart rate after stimulus onset. + ECG_Rate_Min|The minimum heart rate after stimulus onset. 
+ ECG_Rate_Mean|The mean heart rate after stimulus onset. + ECG_Rate_SD|The standard deviation of the heart rate after stimulus onset. + ECG_Rate_Max_Time|The time at which maximum heart rate occurs. + ECG_Rate_Min_Time|The time at which minimum heart rate occurs. + ECG_Phase_Atrial|Indication of whether the onset of the event concurs with respiratory systole (1) or diastole (0). + ECG_Phase_Ventricular|Indication of whether the onset of the event concurs with respiratory systole (1) or diastole (0). + ECG_Phase_Atrial_Completion|Indication of the stage of the current cardiac (atrial) phase (0 to 1) at the onset of the event. + ECG_Phase_Ventricular_Completion|Indication of the stage of the current cardiac (ventricular) phase (0 to 1) at the onset of the event. We also include the following *experimental* features related to the parameters of a quadratic model: - * ``ECG_Rate_Trend_Linear``: The parameter corresponding to the linear trend. - * ``ECG_Rate_Trend_Quadratic``: The parameter corresponding to the curvature. - * ``ECG_Rate_Trend_R2``: the quality of the quadratic model. If too low, the parameters - might not be reliable or meaningful. + .. codebookadd:: + ECG_Rate_Trend_Linear|The parameter corresponding to the linear trend. + ECG_Rate_Trend_Quadratic|The parameter corresponding to the curvature. + ECG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters might not be reliable or meaningful. See Also -------- diff --git a/neurokit2/ecg/ecg_intervalrelated.py b/neurokit2/ecg/ecg_intervalrelated.py index d735b2869e..5e223cb665 100644 --- a/neurokit2/ecg/ecg_intervalrelated.py +++ b/neurokit2/ecg/ecg_intervalrelated.py @@ -25,7 +25,9 @@ def ecg_intervalrelated(data, sampling_rate=1000): DataFrame A dataframe containing the analyzed ECG features. The analyzed features consist of the following: - * ``ECG_Rate_Mean``: the mean heart rate. + .. codebookadd:: + ECG_Rate_Mean|The mean heart rate. + * ``ECG_HRV``: the different heart rate variability metrices. See :func:`.hrv_summary()` docstrings for details. From fff66d7930e981f698cbe40eefec728f13826631 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 00:05:42 +0200 Subject: [PATCH 37/49] added codebookadd directive to the emg components --- neurokit2/emg/emg_eventrelated.py | 16 +++++++--------- neurokit2/emg/emg_intervalrelated.py | 6 ++++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/neurokit2/emg/emg_eventrelated.py b/neurokit2/emg/emg_eventrelated.py index ee2336a5c6..6b8bf3eb36 100644 --- a/neurokit2/emg/emg_eventrelated.py +++ b/neurokit2/emg/emg_eventrelated.py @@ -31,15 +31,13 @@ def emg_eventrelated(epochs, silent=False): by the `Label` column (if not present, by the `Index` column). The analyzed features consist of the following: - * ``"EMG_Activation*``: indication of whether there is muscular activation following - the onset of the event (1 if present, 0 if absent) and if so, its corresponding - amplitude features and the number of activations in each epoch. If there is no - activation, nans are displayed for the below features. - * ``"EMG_Amplitude_Mean*``: the mean amplitude of the activity. - * ``"EMG_Amplitude_Max*``: the maximum amplitude of the activity. - * ``"EMG_Amplitude_SD*``: the standard deviation of the activity amplitude. - * ``"EMG_Amplitude_Max_Time*``: the time of maximum amplitude. - * ``"EMG_Bursts*``: the number of activations, or bursts of activity, within each epoch. + .. 
codebookadd::
+        EMG_Activation|Indication of whether there is muscular activation following the onset of the event (1 if present, 0 if absent) and if so, its corresponding amplitude features and the number of activations in each epoch. If there is no activation, nans are displayed for the below features.
+        EMG_Amplitude_Mean|The mean amplitude of the activity.
+        EMG_Amplitude_Max|The maximum amplitude of the activity.
+        EMG_Amplitude_SD|The standard deviation of the activity amplitude.
+        EMG_Amplitude_Max_Time|The time of maximum amplitude.
+        EMG_Bursts|The number of activations, or bursts of activity, within each epoch.
 
     See Also
     --------
diff --git a/neurokit2/emg/emg_intervalrelated.py b/neurokit2/emg/emg_intervalrelated.py
index 8eac5392dc..13af79e791 100644
--- a/neurokit2/emg/emg_intervalrelated.py
+++ b/neurokit2/emg/emg_intervalrelated.py
@@ -19,8 +19,10 @@ def emg_intervalrelated(data):
     -------
     DataFrame
         A dataframe containing the analyzed EMG features. The analyzed features consist of the following:
-        * ``"EMG_Activation_N"``: the number of bursts of muscular activity.
-        * ``"EMG_Amplitude_Mean"``: the mean amplitude of the muscular activity.
+
+        .. codebookadd::
+            EMG_Activation_N|The number of bursts of muscular activity.
+            EMG_Amplitude_Mean|The mean amplitude of the muscular activity.
 
     See Also
     --------

From 4e8fc20443cb8144135ec1d0cbbded461770020c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?=
Date: Sun, 28 Jul 2024 00:11:01 +0200
Subject: [PATCH 38/49] added codebookadd directive to the eog components

---
 neurokit2/eog/eog_eventrelated.py    | 24 +++++++++---------------
 neurokit2/eog/eog_intervalrelated.py |  6 +++---
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/neurokit2/eog/eog_eventrelated.py b/neurokit2/eog/eog_eventrelated.py
index ce5b377508..ac8a56beda 100644
--- a/neurokit2/eog/eog_eventrelated.py
+++ b/neurokit2/eog/eog_eventrelated.py
@@ -31,21 +31,15 @@ def eog_eventrelated(epochs, silent=False):
     by the `Label` column (if not present, by the `Index` column). The analyzed features consist
     of the following:
 
-    * ``"EOG_Rate_Baseline"``: the baseline EOG rate before stimulus onset.
-
-    * ``"EOG_Rate_Max"``: the maximum EOG rate after stimulus onset.
-
-    * ``"EOG_Rate_Min"``: the minimum EOG rate after stimulus onset.
-
-    * ``"EOG_Rate_Mean"``: the mean EOG rate after stimulus onset.
-
-    * ``"EOG_Rate_SD"``: the standard deviation of the EOG rate after stimulus onset.
-
-    * ``"EOG_Rate_Max_Time"``: the time at which maximum EOG rate occurs.
-
-    * ``"EOG_Rate_Min_Time"``: the time at which minimum EOG rate occurs.
-
-    * ``"EOG_Blinks_Presence"``: marked with '1' if a blink occurs in the epoch, and '0' if not.
+    .. codebookadd::
+        EOG_Rate_Baseline|The baseline EOG rate before stimulus onset.
+        EOG_Rate_Max|The maximum EOG rate after stimulus onset.
+        EOG_Rate_Min|The minimum EOG rate after stimulus onset.
+        EOG_Rate_Mean|The mean EOG rate after stimulus onset.
+        EOG_Rate_SD|The standard deviation of the EOG rate after stimulus onset.
+        EOG_Rate_Max_Time|The time at which maximum EOG rate occurs.
+        EOG_Rate_Min_Time|The time at which minimum EOG rate occurs.
+        EOG_Blinks_Presence|Marked with '1' if a blink occurs in the epoch, and '0' if not.
 
     See Also
     --------
diff --git a/neurokit2/eog/eog_intervalrelated.py b/neurokit2/eog/eog_intervalrelated.py
index 71abf08ca6..ff2c26689b 100644
--- a/neurokit2/eog/eog_intervalrelated.py
+++ b/neurokit2/eog/eog_intervalrelated.py
@@ -22,9 +22,9 @@ def eog_intervalrelated(data):
         A dataframe containing the analyzed EOG features. The analyzed features consist of the
        following:
 
-        * ``"EOG_Rate_Mean"``: the mean heart rate.
-
-        * ``"EOG_Peaks_N"``: the number of blink peak occurrences.
+        .. 
codebookadd:: + EOG_Rate_Mean|The mean EOG value. + EOG_Peaks_N|The number of blink peak occurrences. See Also -------- From 0013624d5efc8f014464a297209703673c65af97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 00:16:19 +0200 Subject: [PATCH 39/49] added codebookadd directive to the ppg components --- neurokit2/ppg/ppg_eventrelated.py | 31 +++++++++++----------------- neurokit2/ppg/ppg_intervalrelated.py | 3 ++- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/neurokit2/ppg/ppg_eventrelated.py b/neurokit2/ppg/ppg_eventrelated.py index a6e8aa72d5..3daa6b8097 100644 --- a/neurokit2/ppg/ppg_eventrelated.py +++ b/neurokit2/ppg/ppg_eventrelated.py @@ -26,29 +26,22 @@ def ppg_eventrelated(epochs, silent=False): by the `Label` column (if not present, by the `Index` column). The analyzed features consist of the following: - * ``"PPG_Rate_Baseline"``: the baseline heart rate (at stimulus onset). - - * ``"PPG_Rate_Max"``: the maximum heart rate after stimulus onset. - - * ``"PPG_Rate_Min"``: the minimum heart rate after stimulus onset. - - * ``"PPG_Rate_Mean"``: the mean heart rate after stimulus onset. - - * ``"PPG_Rate_SD"``: the standard deviation of the heart rate after stimulus onset. - - * ``"PPG_Rate_Max_Time"``: the time at which maximum heart rate occurs. - - * ``"PPG_Rate_Min_Time"``: the time at which minimum heart rate occurs. + .. codebookadd:: + PPG_Rate_Baseline|The baseline heart rate (at stimulus onset). + PPG_Rate_Max|The maximum heart rate after stimulus onset. + PPG_Rate_Min|The minimum heart rate after stimulus onset. + PPG_Rate_Mean|The mean heart rate after stimulus onset. + PPG_Rate_SD|The standard deviation of the heart rate after stimulus onset. + PPG_Rate_Max_Time|The time at which maximum heart rate occurs. + PPG_Rate_Min_Time|The time at which minimum heart rate occurs. We also include the following *experimental* features related to the parameters of a quadratic model: - * ``"PPG_Rate_Trend_Linear"``: The parameter corresponding to the linear trend. - - * ``"PPG_Rate_Trend_Quadratic"``: The parameter corresponding to the curvature. - - * ``"PPG_Rate_Trend_R2"``: the quality of the quadratic model. If too low, the parameters - might not be reliable or meaningful. + .. codebookadd:: + PPG_Rate_Trend_Linear|The parameter corresponding to the linear trend. + PPG_Rate_Trend_Quadratic|The parameter corresponding to the curvature. + PPG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters might not be reliable or meaningful. See Also -------- diff --git a/neurokit2/ppg/ppg_intervalrelated.py b/neurokit2/ppg/ppg_intervalrelated.py index fdee4fd9d8..4f46ba0c03 100644 --- a/neurokit2/ppg/ppg_intervalrelated.py +++ b/neurokit2/ppg/ppg_intervalrelated.py @@ -23,7 +23,8 @@ def ppg_intervalrelated(data, sampling_rate=1000): DataFrame A dataframe containing the analyzed PPG features. The analyzed features consist of the following: - * ``"PPG_Rate_Mean"``: the mean heart rate. + .. codebookadd:: + PPG_Rate_Mean|The mean PPG rate. * ``"HRV"``: the different heart rate variability metrices. 
From 77d826ca1aac1f8aaf1a45aaef5bb6d55257d42d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 00:35:49 +0200 Subject: [PATCH 40/49] added codebookadd directive to the rsp components --- neurokit2/rsp/rsp_eventrelated.py | 33 +++++++--------- neurokit2/rsp/rsp_intervalrelated.py | 12 +++--- neurokit2/rsp/rsp_rrv.py | 57 ++++++++++------------------ 3 files changed, 41 insertions(+), 61 deletions(-) diff --git a/neurokit2/rsp/rsp_eventrelated.py b/neurokit2/rsp/rsp_eventrelated.py index 321fc645e0..8750b690b5 100644 --- a/neurokit2/rsp/rsp_eventrelated.py +++ b/neurokit2/rsp/rsp_eventrelated.py @@ -31,25 +31,20 @@ def rsp_eventrelated(epochs, silent=False): by the `Label` column (if not present, by the `Index` column). The analyzed features consist of the following: - * ``"RSP_Rate_Max"``: the maximum respiratory rate after stimulus onset. - * ``"RSP_Rate_Min"``: the minimum respiratory rate after stimulus onset. - * ``"RSP_Rate_Mean"``: the mean respiratory rate after stimulus onset. - * ``"RSP_Rate_SD"``: the standard deviation of the respiratory rate after stimulus onset. - * ``"RSP_Rate_Max_Time"``: the time at which maximum respiratory rate occurs. - * ``"RSP_Rate_Min_Time"``: the time at which minimum respiratory rate occurs. - * ``"RSP_Amplitude_Baseline"``: the respiratory amplitude at stimulus onset. - * ``"RSP_Amplitude_Max"``: the change in maximum respiratory amplitude from before stimulus - onset. - * ``"RSP_Amplitude_Min"``: the change in minimum respiratory amplitude from before stimulus - onset. - * ``"RSP_Amplitude_Mean"``: the change in mean respiratory amplitude from before stimulus - onset. - * ``"RSP_Amplitude_SD"``: the standard deviation of the respiratory amplitude after - stimulus onset. - * ``"RSP_Phase"``: indication of whether the onset of the event concurs with respiratory - inspiration (1) or expiration (0). - * ``"RSP_PhaseCompletion"``: indication of the stage of the current respiration phase (0 to - 1) at the onset of the event. + .. codebookadd:: + RSP_Rate_Max|The maximum respiratory rate after stimulus onset. + RSP_Rate_Min|The minimum respiratory rate after stimulus onset. + RSP_Rate_Mean|The mean respiratory rate after stimulus onset. + RSP_Rate_SD|The standard deviation of the respiratory rate after stimulus onset. + RSP_Rate_Max_Time|The time at which maximum respiratory rate occurs. + RSP_Rate_Min_Time|The time at which minimum respiratory rate occurs. + RSP_Amplitude_Baseline|The respiratory amplitude at stimulus onset. + RSP_Amplitude_Max|The change in maximum respiratory amplitude from before stimulus onset. + RSP_Amplitude_Min|The change in minimum respiratory amplitude from before stimulus onset. + RSP_Amplitude_Mean|The change in mean respiratory amplitude from before stimulus onset. + RSP_Amplitude_SD|The standard deviation of the respiratory amplitude after stimulus onset. + RSP_Phase|Indication of whether the onset of the event concurs with respiratory inspiration (1) or expiration (0). + RSP_PhaseCompletion|Indication of the stage of the current respiration phase (0 to 1) at the onset of the event. See Also -------- diff --git a/neurokit2/rsp/rsp_intervalrelated.py b/neurokit2/rsp/rsp_intervalrelated.py index 10b7342b6b..738083a027 100644 --- a/neurokit2/rsp/rsp_intervalrelated.py +++ b/neurokit2/rsp/rsp_intervalrelated.py @@ -28,13 +28,15 @@ def rsp_intervalrelated(data, sampling_rate=1000): A dataframe containing the analyzed RSP features. 
The analyzed features consist of the following: - * ``"RSP_Rate_Mean"``: the mean respiratory rate. - * ``"RSP_Amplitude_Mean"``: the mean respiratory amplitude. + .. codebookadd:: + RSP_Rate_Mean|The mean respiratory rate. + RSP_Amplitude_Mean|The mean respiratory amplitude. + RSP_Phase_Duration_Inspiration|The average inspiration duration. + RSP_Phase_Duration_Expiration|The average expiration duration. + RSP_Phase_Duration_Ratio|The inspiration-to-expiratory time ratio (I/E). + * ``"RSP_RRV"``: the different respiratory rate variability metrices. See :func:`.rsp_rrv` docstrings for details. - * ``"RSP_Phase_Duration_Inspiration"``: the average inspiratory duration. - * ``"RSP_Phase_Duration_Expiration"``: the average expiratory duration. - * ``"RSP_Phase_Duration_Ratio "``: the inspiratory-to-expiratory time ratio (I/E). See Also -------- diff --git a/neurokit2/rsp/rsp_rrv.py b/neurokit2/rsp/rsp_rrv.py index 3516911a11..4f0d41cca9 100644 --- a/neurokit2/rsp/rsp_rrv.py +++ b/neurokit2/rsp/rsp_rrv.py @@ -41,43 +41,26 @@ def rsp_rrv(rsp_rate, troughs=None, sampling_rate=1000, show=False, silent=True) DataFrame DataFrame consisting of the computed RRV metrics, which includes: - * ``"RRV_SDBB"``: the standard deviation of the breath-to-breath intervals. - * ``"RRV_RMSSD"``: the root mean square of successive differences of the breath-to-breath - intervals. - * ``"RRV_SDSD"``: the standard deviation of the successive differences between adjacent - breath-to-breath intervals. - * ``"RRV_BBx"``: the number of successive interval differences that are greater than x - seconds. - * ``"RRV-pBBx"``: the proportion of breath-to-breath intervals that are greater than x - seconds, - out of the total number of intervals. - * ``"RRV_VLF"``: spectral power density pertaining to very low frequency band (i.e., 0 to . - 04 Hz) by default. - * ``"RRV_LF"``: spectral power density pertaining to low frequency band (i.e., .04 to .15 - Hz) by default. - * ``"RRV_HF"``: spectral power density pertaining to high frequency band (i.e., .15 to .4 - Hz) by default. - * ``"RRV_LFHF"``: the ratio of low frequency power to high frequency power. - * ``"RRV_LFn"``: the normalized low frequency, obtained by dividing the low frequency - power by the total power. - * ``"RRV_HFn"``: the normalized high frequency, obtained by dividing the low frequency - power by total power. - * ``"RRV_SD1"``: SD1 is a measure of the spread of breath-to-breath intervals on the - Poincaré plot perpendicular to the line of identity. It is an index of short-term - variability. - * ``"RRV_SD2"``: SD2 is a measure of the spread of breath-to-breath intervals on the - Poincaré plot along the line of identity. It is an index of long-term variability. - * ``"RRV_SD2SD1"``: the ratio between short and long term fluctuations of the - breath-to-breath intervals (SD2 divided by SD1). - * ``"RRV_ApEn"``: the approximate entropy of RRV, calculated - by :func:`.entropy_approximate`. - * ``"RRV_SampEn"``: the sample entropy of RRV, calculated by :func:`.entropy_sample`. - * ``"RRV_DFA_alpha1"``: the "short-term" fluctuation value generated from Detrended - Fluctuation Analysis i.e. the root mean square deviation from the fitted trend of the - breath-to-breath intervals. Will only be computed if mora than 160 breath cycles in the - signal. - * ``"RRV_DFA_alpha2"``: the long-term fluctuation value. Will only be computed if mora - than 640 breath cycles in the signal. + .. codebookadd:: + RRV_SDBB|The standard deviation of the breath-to-breath intervals. 
+            RRV_RMSSD|The root mean square of successive differences of the breath-to-breath intervals.
+            RRV_SDSD|The standard deviation of the successive differences between adjacent breath-to-breath intervals.
+            RRV_BBx|The number of successive interval differences that are greater than x seconds.
+            RRV_pBBx|The proportion of breath-to-breath intervals that are greater than x seconds, out of the total number of intervals.
+            RRV_VLF|Spectral power density pertaining to very low frequency band (i.e., 0 to .04 Hz) by default.
+            RRV_LF|Spectral power density pertaining to low frequency band (i.e., .04 to .15 Hz) by default.
+            RRV_HF|Spectral power density pertaining to high frequency band (i.e., .15 to .4 Hz) by default.
+            RRV_LFHF|The ratio of low frequency power to high frequency power.
+            RRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by the total power.
+            RRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by total power.
+            RRV_SD1|SD1 is a measure of the spread of breath-to-breath intervals on the Poincaré plot perpendicular to the line of identity. It is an index of short-term variability.
+            RRV_SD2|SD2 is a measure of the spread of breath-to-breath intervals on the Poincaré plot along the line of identity. It is an index of long-term variability.
+            RRV_SD2SD1|The ratio between short and long term fluctuations of the breath-to-breath intervals (SD2 divided by SD1).
+            RRV_DFA_alpha1|The "short-term" fluctuation value generated from Detrended Fluctuation Analysis i.e. the root mean square deviation from the fitted trend of the breath-to-breath intervals. Will only be computed if there are more than 160 breath cycles in the signal.
+            RRV_DFA_alpha2|The long-term fluctuation value. Will only be computed if there are more than 640 breath cycles in the signal.
+            RRV_ApEn|The approximate entropy of RRV, calculated by :func:`.entropy_approximate`.
+            RRV_SampEn|The sample entropy of RRV, calculated by :func:`.entropy_sample`.
+
 
     * **MFDFA indices**: Indices related to the :func:`multifractal spectrum <.fractal_dfa()>`.
 

From 240815e557d0ecdc6da9e9b50690e5b1416d84f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?=
Date: Sun, 28 Jul 2024 01:06:49 +0200
Subject: [PATCH 41/49] added codebookadd directive to the hrv components

---
 neurokit2/hrv/hrv_frequency.py | 14 +++++++++++++-
 neurokit2/hrv/hrv_nonlinear.py | 32 +++++++++++++++++++++++++++++++-
 neurokit2/hrv/hrv_rsa.py       | 17 +++++++-----------
 3 files changed, 51 insertions(+), 12 deletions(-)

diff --git a/neurokit2/hrv/hrv_frequency.py b/neurokit2/hrv/hrv_frequency.py
index 5b61ad6286..04c4bed539 100644
--- a/neurokit2/hrv/hrv_frequency.py
+++ b/neurokit2/hrv/hrv_frequency.py
@@ -101,7 +101,19 @@ def hrv_frequency(
     Returns
     -------
     DataFrame
-        Contains frequency domain HRV metrics.
+        DataFrame consisting of the computed HRV frequency metrics, which includes:
+
+        .. codebookadd::
+            HRV_ULF|The spectral power of ultra low frequencies (by default, .0 to .0033 Hz). Very long signals are required for this index to be extracted, otherwise, will return NaN.
+            HRV_VLF|The spectral power of very low frequencies (by default, .0033 to .04 Hz).
+            HRV_LF|The spectral power of low frequencies (by default, .04 to .15 Hz).
+            HRV_HF|The spectral power of high frequencies (by default, .15 to .4 Hz).
+            HRV_VHF|The spectral power of very high frequencies (by default, .4 to .5 Hz).
+            HRV_TP|The total spectral power.
+ HRV_LFHF|The ratio obtained by dividing the low frequency power by the high frequency power. + HRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by the total power. + HRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by the total power. + HRV_LnHF|The log transformed HF. See Also -------- diff --git a/neurokit2/hrv/hrv_nonlinear.py b/neurokit2/hrv/hrv_nonlinear.py index cf7c6058a6..33d0819e6e 100644 --- a/neurokit2/hrv/hrv_nonlinear.py +++ b/neurokit2/hrv/hrv_nonlinear.py @@ -144,7 +144,37 @@ def hrv_nonlinear(peaks, sampling_rate=1000, show=False, **kwargs): Returns ------- DataFrame - Contains non-linear HRV metrics. + DataFrame consisting of the computed non-linear HRV metrics, which includes: + + .. codebookadd:: + HRV_SD1|Standard deviation perpendicular to the line of identity. It is an index of short-term RR interval fluctuations, i.e., beat-to-beat variability. It is equivalent (although on another scale) to RMSSD, and therefore it is redundant to report correlation with both. + HRV_SD2|Standard deviation along the identity line. Index of long-term HRV changes. + HRV_SD1SD2|Ratio of SD1 to SD2. Describes the ratio of short term to long term variations in HRV. + HRV_S|Area of ellipse described by *SD1* and *SD2* (``pi * SD1 * SD2``). It is proportional to *SD1SD2*. + HRV_CSI|The Cardiac Sympathetic Index (Toichi, 1997) is a measure of cardiac sympathetic function independent of vagal activity, calculated by dividing the longitudinal variability of the Poincaré plot (``4*SD2``) by its transverse variability (``4*SD1``). + HRV_CVI|The Cardiac Vagal Index (Toichi, 1997) is an index of cardiac parasympathetic function (vagal activity unaffected by sympathetic activity), and is equal equal to the logarithm of the product of longitudinal (``4*SD2``) and transverse variability (``4*SD1``). + HRV_CSI_Modified|The modified CSI (Jeppesen, 2014) obtained by dividing the square of the longitudinal variability by its transverse variability. + HRV_GI|Guzik's Index, defined as the distance of points above line of identity (LI) to LI divided by the distance of all points in Poincaré plot to LI except those that are located on LI. + HRV_SI|Slope Index, defined as the phase angle of points above LI divided by the phase angle of all points in Poincaré plot except those that are located on LI. + HRV_AI|Area Index, defined as the cumulative area of the sectors corresponding to the points that are located above LI divided by the cumulative area of sectors corresponding to all points in the Poincaré plot except those that are located on LI. + HRV_PI|Porta's Index, defined as the number of points below LI divided by the total number of points in Poincaré plot except those that are located on LI. + HRV_SD1a|Short-term variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). + HRV_SD1d|Short-term variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). + HRV_C1a|The contributions of heart rate accelerations to short-term HRV, (Piskorski, 2011). + HRV_C1d|The contributions of heart rate decelerations to short-term HRV, (Piskorski, 2011). + HRV_SD2a|Long-term variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). + HRV_SD2d|Long-term variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). + HRV_C2a|The contributions of heart rate accelerations to long-term HRV, (Piskorski, 2011). 
+ HRV_C2d|The contributions of heart rate decelerations to long-term HRV, (Piskorski, 2011). + HRV_SDNNa|Total variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). + HRV_SDNNd|Total variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). + HRV_Ca|The total contributions of heart rate accelerations to HRV. + HRV_Cd|The total contributions of heart rate decelerations to HRV. + HRV_PIP|Percentage of inflection points of the RR intervals series. + HRV_IALS|Inverse of the average length of the acceleration/deceleration segments. + HRV_PSS|Percentage of short segments. + HRV_PAS|Percentage of NN intervals in alternation segments. + See Also -------- diff --git a/neurokit2/hrv/hrv_rsa.py b/neurokit2/hrv/hrv_rsa.py index 2970212fbc..204b670083 100644 --- a/neurokit2/hrv/hrv_rsa.py +++ b/neurokit2/hrv/hrv_rsa.py @@ -95,16 +95,13 @@ def hrv_rsa( rsa : dict A dictionary containing the RSA features, which includes: - * ``"RSA_P2T_Values"``: the estimate of RSA during each breath cycle, produced by - subtracting the shortest heart period (or RR interval) from the longest heart period in - ms. - * ``"RSA_P2T_Mean"``: the mean peak-to-trough across all cycles in ms - * ``"RSA_P2T_Mean_log"``: the logarithm of the mean of RSA estimates. - * ``"RSA_P2T_SD"``: the standard deviation of all RSA estimates. - * ``"RSA_P2T_NoRSA"``: the number of breath cycles - from which RSA could not be calculated. - * ``"RSA_PorgesBohrer"``: the Porges-Bohrer estimate of RSA, optimal - when the signal to noise ratio is low, in ``ln(ms^2)``. + .. codebookadd:: + RSA_P2T_Values|The estimate of RSA during each breath cycle, produced by subtracting the shortest heart period (or RR interval) from the longest heart period in ms. + RSA_P2T_Mean|The mean peak-to-trough across all cycles in ms. + RSA_P2T_Mean_log|The logarithm of the mean of RSA estimates. + RSA_P2T_SD|The standard deviation of all RSA estimates. + RSA_P2T_NoRSA|The number of breath cycles from which RSA could not be calculated. + RSA_PorgesBohrer|The Porges-Bohrer estimate of RSA, optimal when the signal to noise ratio is low, in ln(ms^2). Example ---------- From e67787e7465f2a0385c8f09dc5db72a0f1a17064 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 02:07:03 +0200 Subject: [PATCH 42/49] Fixed codebookadd directive in the eda components --- neurokit2/eda/eda_eventrelated.py | 12 ++++++------ neurokit2/eda/eda_intervalrelated.py | 6 +++--- neurokit2/eda/eda_process.py | 20 ++++++++++---------- neurokit2/eda/eda_sympathetic.py | 4 ++-- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/neurokit2/eda/eda_eventrelated.py b/neurokit2/eda/eda_eventrelated.py index b843d6eee5..09e917ec52 100644 --- a/neurokit2/eda/eda_eventrelated.py +++ b/neurokit2/eda/eda_eventrelated.py @@ -31,12 +31,12 @@ def eda_eventrelated(epochs, silent=False): the following: .. codebookadd:: - EDA_SCR|Electrodermal activity|indication of whether Skin Conductance Response (SCR) occurs following the event (1 if an SCR onset is present and 0 if absent) and if so, its corresponding peak amplitude, time of peak, rise and recovery time. If there is no occurrence of SCR, nans are displayed for the below features. - EDA_Peak_Amplitude|Electrodermal activity|The maximum amplitude of the phasic component of the signal. - SCR_Peak_Amplitude|Electrodermal activity|The peak amplitude of the first SCR in each epoch. 
- SCR_Peak_Amplitude_Time|Electrodermal activity|The timepoint of each first SCR peak amplitude. - SCR_RiseTime|Electrodermal activity|The risetime of each first SCR i.e., the time it takes for SCR to reach peak amplitude from onset. - SCR_RecoveryTime|Electrodermal activity|The half-recovery time of each first SCR i.e., the time it takes for SCR to decrease to half amplitude. + EDA_SCR|indication of whether Skin Conductance Response (SCR) occurs following the event (1 if an SCR onset is present and 0 if absent) and if so, its corresponding peak amplitude, time of peak, rise and recovery time. If there is no occurrence of SCR, nans are displayed for the below features. + EDA_Peak_Amplitude|The maximum amplitude of the phasic component of the signal. + SCR_Peak_Amplitude|The peak amplitude of the first SCR in each epoch. + SCR_Peak_Amplitude_Time|The timepoint of each first SCR peak amplitude. + SCR_RiseTime|The risetime of each first SCR i.e., the time it takes for SCR to reach peak amplitude from onset. + SCR_RecoveryTime|The half-recovery time of each first SCR i.e., the time it takes for SCR to decrease to half amplitude. See Also -------- diff --git a/neurokit2/eda/eda_intervalrelated.py b/neurokit2/eda/eda_intervalrelated.py index 13bbf6f25a..f480ec7569 100644 --- a/neurokit2/eda/eda_intervalrelated.py +++ b/neurokit2/eda/eda_intervalrelated.py @@ -34,9 +34,9 @@ def eda_intervalrelated(data, sampling_rate=1000, **kwargs): features consist of the following: .. codebookadd:: - SCR_Peaks_N|Electrodermal activity|The number of occurrences of Skin Conductance Response (SCR). - SCR_Peaks_Amplitude_Mean|Electrodermal activity|The mean amplitude of the SCR peak occurrences. - EDA_Tonic_SD|Electrodermal activity|The mean amplitude of the SCR peak occurrences. + SCR_Peaks_N|The number of occurrences of Skin Conductance Response (SCR). + SCR_Peaks_Amplitude_Mean|The mean amplitude of the SCR peak occurrences. + EDA_Tonic_SD|The mean amplitude of the SCR peak occurrences. * ``"EDA_Sympathetic"``: see :func:`eda_sympathetic` (only computed if signal duration > 64 sec). diff --git a/neurokit2/eda/eda_process.py b/neurokit2/eda/eda_process.py index dd42439de6..b162eb2104 100644 --- a/neurokit2/eda/eda_process.py +++ b/neurokit2/eda/eda_process.py @@ -41,16 +41,16 @@ def eda_process( columns: .. codebookadd:: - EDA_Raw|Electrodermal activity|The raw signal. - EDA_Clean|Electrodermal activity|The cleaned signal. - EDA_Tonic|Electrodermal activity|The tonic component of the signal, or the Tonic Skin Conductance Level (SCL). - EDA_Phasic|Electrodermal activity|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). - SCR_Onsets|Electrodermal activity|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. - SCR_Peaks|Electrodermal activity|The samples at which the peaks occur, marked as "1" in a list of zeros. - SCR_Height|Electrodermal activity|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. - SCR_Amplitude|Electrodermal activity|The SCR amplitude of the signal excluding the Tonic component. - SCR_RiseTime|Electrodermal activity|The SCR amplitude of the signal excluding the Tonic component. - SCR_Recovery|Electrodermal activity|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. + EDA_Raw|The raw signal. + EDA_Clean|The cleaned signal. 
+ EDA_Tonic|The tonic component of the signal, or the Tonic Skin Conductance Level (SCL). + EDA_Phasic|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). + SCR_Onsets|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. + SCR_Peaks|The samples at which the peaks occur, marked as "1" in a list of zeros. + SCR_Height|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. + SCR_Amplitude|The SCR amplitude of the signal excluding the Tonic component. + SCR_RiseTime|The SCR amplitude of the signal excluding the Tonic component. + SCR_Recovery|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. info : dict A dictionary containing the information of each SCR peak (see :func:`eda_findpeaks`), diff --git a/neurokit2/eda/eda_sympathetic.py b/neurokit2/eda/eda_sympathetic.py index dc17a9823a..537f4f40c2 100644 --- a/neurokit2/eda/eda_sympathetic.py +++ b/neurokit2/eda/eda_sympathetic.py @@ -48,8 +48,8 @@ def eda_sympathetic( by total power). .. codebookadd:: - EDA_Sympathetic|Electrodermal activity|Derived from Posada-Quintero et al. (2016), who argue that dynamics of the sympathetic component of EDA signal is represented in the frequency band of 0.045-0.25Hz. - EDA_SympatheticN|Electrodermal activity|normalized version of "EDA_Sympathetic" obtained by dividing EDA_Sympathetic by total power + EDA_Sympathetic|Derived from Posada-Quintero et al. (2016), who argue that dynamics of the sympathetic component of EDA signal is represented in the frequency band of 0.045-0.25Hz. + EDA_SympatheticN|normalized version of "EDA_Sympathetic" obtained by dividing EDA_Sympathetic by total power Examples -------- From 7ca5c57e4c16946db4725391bfc9a573d4d7304a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 02:08:46 +0200 Subject: [PATCH 43/49] updated codebook directive with two new columns - added column "Field Category" - added column "Source File Name" --- docs/directives/csv_codebook_directive.py | 25 +++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/docs/directives/csv_codebook_directive.py b/docs/directives/csv_codebook_directive.py index a12e83727a..1ed730ded8 100644 --- a/docs/directives/csv_codebook_directive.py +++ b/docs/directives/csv_codebook_directive.py @@ -3,6 +3,17 @@ from docutils import nodes from docutils.parsers.rst import Directive +abrv_to_sensor = { + "ecg": "Electrocardiography", + "eda": "Electrodermal Activity", + "rsp": "Respiration", + "ppg": "Photoplethysmography", + "eeg": "Electroencephalography", + "emg": "Electromyography", + "eog": "Electrooculography", + "hrv": "Heart Rate Variability", + } + class CSVDocDirective(Directive): has_content = True @@ -16,19 +27,29 @@ def run(self): # List to hold bullet list nodes bullet_list = nodes.bullet_list() + doc_source_name = self.state.document.settings.env.temp_data.get('object')[0] + + maybe_sensor = doc_source_name.split("_") + doc_sensor = "N/A" + + if len(maybe_sensor) > 0 and maybe_sensor[0] in abrv_to_sensor: + doc_sensor = abrv_to_sensor[maybe_sensor[0]] + # Open the CSV file and append the content with open(csv_file_path, 'a', newline='', encoding='utf-8') as csvfile: writer = csv.writer(csvfile) - # writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL) # Write header if file is newly created or empty if file_empty: - 
header = ['Field Name', 'Field Description'] + header = ['Field Name', 'Field Description', 'Field Category', 'Source File Name'] writer.writerow(header) # Iterate through rows: add them to the codebook and add them to the page for line in self.content: fields = line.split('|') + fields.append(doc_sensor) + fields.append(f"{doc_source_name}.py") + writer.writerow([field.strip() for field in fields]) if len(fields) >= 2: From f051655a1cd0284f5d1ce258fe11fc980e3c749a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 02:22:46 +0200 Subject: [PATCH 44/49] added header to the codebook table --- docs/codebook.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/codebook.rst b/docs/codebook.rst index 1ae5e8abec..1a33073fc3 100644 --- a/docs/codebook.rst +++ b/docs/codebook.rst @@ -1,7 +1,7 @@ Codebook ======== -Here you can download the complete codebook which details the structure of data used throughout this documentation. +Here you can download the complete codebook which details the variables that you can compute using the NeuroKit package. .. raw:: html @@ -11,7 +11,11 @@ Here you can download the complete codebook which details the structure of data -This codebook contains detailed descriptions of all variables, their possible values, and additional metadata. +This codebook contains detailed descriptions of all variables, their descriptions, and additional metadata. + + +Codebook Table +============== .. raw:: html From 086336e162a3971bfeb1f6cdc18ceb18d3559b2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 13:16:25 +0200 Subject: [PATCH 45/49] adjusted csv_codebook_directive to handle newlines --- docs/directives/csv_codebook_directive.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/directives/csv_codebook_directive.py b/docs/directives/csv_codebook_directive.py index 1ed730ded8..f1b839f6a7 100644 --- a/docs/directives/csv_codebook_directive.py +++ b/docs/directives/csv_codebook_directive.py @@ -46,7 +46,13 @@ def run(self): # Iterate through rows: add them to the codebook and add them to the page for line in self.content: + fields = line.split('|') + + # Remove multi line long space sequences + for fid in range(len(fields)): + fields[fid] = " ".join(fields[fid].split()) + fields.append(doc_sensor) fields.append(f"{doc_source_name}.py") From b37098f2eeb43e10f63488b2869e2a7eeab7f9ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=BCnter=20Herforth?= Date: Sun, 28 Jul 2024 13:17:06 +0200 Subject: [PATCH 46/49] added newline separators to all codebookadd directives --- docs/directives/csv_codebook_directive.py | 4 ++ neurokit2/ecg/ecg_eventrelated.py | 15 ++++-- neurokit2/ecg/ecg_process.py | 6 ++- neurokit2/eda/eda_eventrelated.py | 11 ++-- neurokit2/eda/eda_process.py | 6 ++- neurokit2/eda/eda_sympathetic.py | 6 ++- neurokit2/emg/emg_process.py | 3 +- neurokit2/hrv/hrv_frequency.py | 13 +++-- neurokit2/hrv/hrv_nonlinear.py | 61 ++++++++++++++++------- neurokit2/hrv/hrv_rsa.py | 6 ++- neurokit2/ppg/ppg_eventrelated.py | 3 +- neurokit2/rsp/rsp_process.py | 6 ++- neurokit2/rsp/rsp_rrv.py | 38 +++++++++----- 13 files changed, 124 insertions(+), 54 deletions(-) diff --git a/docs/directives/csv_codebook_directive.py b/docs/directives/csv_codebook_directive.py index f1b839f6a7..7f48514758 100644 --- a/docs/directives/csv_codebook_directive.py +++ b/docs/directives/csv_codebook_directive.py @@ -53,11 +53,15 @@ def run(self): for fid in 
range(len(fields)): fields[fid] = " ".join(fields[fid].split()) + # Append last fields fields.append(doc_sensor) fields.append(f"{doc_source_name}.py") + # Write to CSV writer.writerow([field.strip() for field in fields]) + + # Prepare the documentation stylization if len(fields) >= 2: paragraph = nodes.paragraph() diff --git a/neurokit2/ecg/ecg_eventrelated.py b/neurokit2/ecg/ecg_eventrelated.py index bb3cea31cc..893c2e7887 100644 --- a/neurokit2/ecg/ecg_eventrelated.py +++ b/neurokit2/ecg/ecg_eventrelated.py @@ -37,10 +37,14 @@ def ecg_eventrelated(epochs, silent=False): ECG_Rate_SD|The standard deviation of the heart rate after stimulus onset. ECG_Rate_Max_Time|The time at which maximum heart rate occurs. ECG_Rate_Min_Time|The time at which minimum heart rate occurs. - ECG_Phase_Atrial|Indication of whether the onset of the event concurs with respiratory systole (1) or diastole (0). - ECG_Phase_Ventricular|Indication of whether the onset of the event concurs with respiratory systole (1) or diastole (0). - ECG_Phase_Atrial_Completion|Indication of the stage of the current cardiac (atrial) phase (0 to 1) at the onset of the event. - ECG_Phase_Ventricular_Completion|Indication of the stage of the current cardiac (ventricular) phase (0 to 1) at the onset of the event. + ECG_Phase_Atrial|Indication of whether the onset of the event concurs with \ + respiratory systole (1) or diastole (0). + ECG_Phase_Ventricular|Indication of whether the onset of the event concurs with \ + respiratory systole (1) or diastole (0). + ECG_Phase_Atrial_Completion|Indication of the stage of the current cardiac (atrial) \ + phase (0 to 1) at the onset of the event. + ECG_Phase_Ventricular_Completion|Indication of the stage of the current cardiac \ + (ventricular) phase (0 to 1) at the onset of the event. We also include the following *experimental* features related to the parameters of a quadratic model: @@ -48,7 +52,8 @@ def ecg_eventrelated(epochs, silent=False): .. codebookadd:: ECG_Rate_Trend_Linear|The parameter corresponding to the linear trend. ECG_Rate_Trend_Quadratic|The parameter corresponding to the curvature. - ECG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters might not be reliable or meaningful. + ECG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters \ + might not be reliable or meaningful. See Also -------- diff --git a/neurokit2/ecg/ecg_process.py b/neurokit2/ecg/ecg_process.py index 23357a28e1..584843bc7c 100644 --- a/neurokit2/ecg/ecg_process.py +++ b/neurokit2/ecg/ecg_process.py @@ -56,8 +56,10 @@ def ecg_process(ecg_signal, sampling_rate=1000, method="neurokit"): ECG_T_Onsets|The T-onsets marked as "1" in a list of zeros. ECG_T_Offsets|The T-offsets marked as "1" in a list of zeros. ECG_Phase_Atrial|Cardiac phase, marked by "1" for systole and "0" for diastole. - ECG_Phase_Completion_Atrial|Cardiac phase (atrial) completion, expressed in percentage (from 0 to 1), representing the stage of the current cardiac phase. - ECG_Phase_Completion_Ventricular|Cardiac phase (ventricular) completion, expressed in percentage (from 0 to 1), representing the stage of the current cardiac phase. + ECG_Phase_Completion_Atrial|Cardiac phase (atrial) completion, expressed in \ + percentage (from 0 to 1), representing the stage of the current cardiac phase. + ECG_Phase_Completion_Ventricular|Cardiac phase (ventricular) completion, expressed \ + in percentage (from 0 to 1), representing the stage of the current cardiac phase. 
rpeaks : dict A dictionary containing the samples at which the R-peaks occur, accessible with the key diff --git a/neurokit2/eda/eda_eventrelated.py b/neurokit2/eda/eda_eventrelated.py index 09e917ec52..d12b58ecab 100644 --- a/neurokit2/eda/eda_eventrelated.py +++ b/neurokit2/eda/eda_eventrelated.py @@ -31,12 +31,17 @@ def eda_eventrelated(epochs, silent=False): the following: .. codebookadd:: - EDA_SCR|indication of whether Skin Conductance Response (SCR) occurs following the event (1 if an SCR onset is present and 0 if absent) and if so, its corresponding peak amplitude, time of peak, rise and recovery time. If there is no occurrence of SCR, nans are displayed for the below features. + EDA_SCR|indication of whether Skin Conductance Response (SCR) occurs following the \ + event (1 if an SCR onset is present and 0 if absent) and if so, its corresponding \ + peak amplitude, time of peak, rise and recovery time. If there is no occurrence \ + of SCR, nans are displayed for the below features. EDA_Peak_Amplitude|The maximum amplitude of the phasic component of the signal. SCR_Peak_Amplitude|The peak amplitude of the first SCR in each epoch. SCR_Peak_Amplitude_Time|The timepoint of each first SCR peak amplitude. - SCR_RiseTime|The risetime of each first SCR i.e., the time it takes for SCR to reach peak amplitude from onset. - SCR_RecoveryTime|The half-recovery time of each first SCR i.e., the time it takes for SCR to decrease to half amplitude. + SCR_RiseTime|The risetime of each first SCR i.e., the time it takes for SCR to \ + reach peak amplitude from onset. + SCR_RecoveryTime|The half-recovery time of each first SCR i.e., the time it takes \ + for SCR to decrease to half amplitude. See Also -------- diff --git a/neurokit2/eda/eda_process.py b/neurokit2/eda/eda_process.py index b162eb2104..c93361eaa3 100644 --- a/neurokit2/eda/eda_process.py +++ b/neurokit2/eda/eda_process.py @@ -47,10 +47,12 @@ def eda_process( EDA_Phasic|The phasic component of the signal, or the Phasic Skin Conductance Response (SCR). SCR_Onsets|The samples at which the onsets of the peaks occur, marked as "1" in a list of zeros. SCR_Peaks|The samples at which the peaks occur, marked as "1" in a list of zeros. - SCR_Height|The SCR amplitude of the signal including the Tonic component. Note that cumulative effects of close-occurring SCRs might lead to an underestimation of the amplitude. + SCR_Height|The SCR amplitude of the signal including the Tonic component. Note that cumulative \ + effects of close-occurring SCRs might lead to an underestimation of the amplitude. SCR_Amplitude|The SCR amplitude of the signal excluding the Tonic component. SCR_RiseTime|The SCR amplitude of the signal excluding the Tonic component. - SCR_Recovery|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" in a list of zeros. + SCR_Recovery|The samples at which SCR peaks recover (decline) to half amplitude, marked as "1" \ + in a list of zeros. info : dict A dictionary containing the information of each SCR peak (see :func:`eda_findpeaks`), diff --git a/neurokit2/eda/eda_sympathetic.py b/neurokit2/eda/eda_sympathetic.py index 537f4f40c2..bd7c93816b 100644 --- a/neurokit2/eda/eda_sympathetic.py +++ b/neurokit2/eda/eda_sympathetic.py @@ -48,8 +48,10 @@ def eda_sympathetic( by total power). .. codebookadd:: - EDA_Sympathetic|Derived from Posada-Quintero et al. (2016), who argue that dynamics of the sympathetic component of EDA signal is represented in the frequency band of 0.045-0.25Hz. 
- EDA_SympatheticN|normalized version of "EDA_Sympathetic" obtained by dividing EDA_Sympathetic by total power + EDA_Sympathetic|Derived from Posada-Quintero et al. (2016), who argue that dynamics of \ + the sympathetic component of EDA signal is represented in the frequency band of 0.045-0.25Hz. + EDA_SympatheticN|normalized version of "EDA_Sympathetic" obtained by dividing \ + EDA_Sympathetic by total power Examples -------- diff --git a/neurokit2/emg/emg_process.py b/neurokit2/emg/emg_process.py index 2f08de6f67..20b2cb12ba 100644 --- a/neurokit2/emg/emg_process.py +++ b/neurokit2/emg/emg_process.py @@ -39,7 +39,8 @@ def emg_process(emg_signal, sampling_rate=1000, report=None, **kwargs): EMG_Raw|The raw EMG signal. EMG_Clean|The cleaned EMG signal. EMG_Amplitude|The signal amplitude, or the activation of the signal. - EMG_Activity|The activity of the signal for which amplitude exceeds the threshold specified,marked as "1" in a list of zeros. + EMG_Activity|The activity of the signal for which amplitude exceeds the threshold \ + specified,marked as "1" in a list of zeros. EMG_Onsets|The onsets of the amplitude, marked as "1" in a list of zeros. EMG_Offsets|The offsets of the amplitude, marked as "1" in a list of zeros. diff --git a/neurokit2/hrv/hrv_frequency.py b/neurokit2/hrv/hrv_frequency.py index 04c4bed539..1fb7031f76 100644 --- a/neurokit2/hrv/hrv_frequency.py +++ b/neurokit2/hrv/hrv_frequency.py @@ -104,15 +104,20 @@ def hrv_frequency( DataFrame consisting of the computed HRV frequency metrics, which includes: .. codebookadd:: - HRV_ULF|The spectral power of ultra low frequencies (by default, .0 to .0033 Hz). Very long signals are required for this to index to be extracted, otherwise, will return NaN. + HRV_ULF|The spectral power of ultra low frequencies (by default, .0 to .0033 Hz). \ + Very long signals are required for this to index to be extracted, otherwise, \ + will return NaN. HRV_VLF|The spectral power of very low frequencies (by default, .0033 to .04 Hz). HRV_LF|The spectral power of low frequencies (by default, .04 to .15 Hz). HRV_HF|The spectral power of high frequencies (by default, .15 to .4 Hz). HRV_VHF|The spectral power of very high frequencies (by default, .4 to .5 Hz). HRV_TP|The total spectral power. - HRV_LFHF|The ratio obtained by dividing the low frequency power by the high frequency power. - HRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by the total power. - HRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by the total power. + HRV_LFHF|The ratio obtained by dividing the low frequency power by the high frequency \ + power. + HRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by \ + the total power. + HRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by \ + the total power. HRV_LnHF|The log transformed HF. See Also diff --git a/neurokit2/hrv/hrv_nonlinear.py b/neurokit2/hrv/hrv_nonlinear.py index 33d0819e6e..cb699b8be2 100644 --- a/neurokit2/hrv/hrv_nonlinear.py +++ b/neurokit2/hrv/hrv_nonlinear.py @@ -147,27 +147,52 @@ def hrv_nonlinear(peaks, sampling_rate=1000, show=False, **kwargs): DataFrame consisting of the computed non-linear HRV metrics, which includes: .. codebookadd:: - HRV_SD1|Standard deviation perpendicular to the line of identity. It is an index of short-term RR interval fluctuations, i.e., beat-to-beat variability. 
It is equivalent (although on another scale) to RMSSD, and therefore it is redundant to report correlation with both. + HRV_SD1|Standard deviation perpendicular to the line of identity. It is an index of \ + short-term RR interval fluctuations, i.e., beat-to-beat variability. It is \ + equivalent (although on another scale) to RMSSD, and therefore it is redundant to \ + report correlation with both. HRV_SD2|Standard deviation along the identity line. Index of long-term HRV changes. - HRV_SD1SD2|Ratio of SD1 to SD2. Describes the ratio of short term to long term variations in HRV. - HRV_S|Area of ellipse described by *SD1* and *SD2* (``pi * SD1 * SD2``). It is proportional to *SD1SD2*. - HRV_CSI|The Cardiac Sympathetic Index (Toichi, 1997) is a measure of cardiac sympathetic function independent of vagal activity, calculated by dividing the longitudinal variability of the Poincaré plot (``4*SD2``) by its transverse variability (``4*SD1``). - HRV_CVI|The Cardiac Vagal Index (Toichi, 1997) is an index of cardiac parasympathetic function (vagal activity unaffected by sympathetic activity), and is equal equal to the logarithm of the product of longitudinal (``4*SD2``) and transverse variability (``4*SD1``). - HRV_CSI_Modified|The modified CSI (Jeppesen, 2014) obtained by dividing the square of the longitudinal variability by its transverse variability. - HRV_GI|Guzik's Index, defined as the distance of points above line of identity (LI) to LI divided by the distance of all points in Poincaré plot to LI except those that are located on LI. - HRV_SI|Slope Index, defined as the phase angle of points above LI divided by the phase angle of all points in Poincaré plot except those that are located on LI. - HRV_AI|Area Index, defined as the cumulative area of the sectors corresponding to the points that are located above LI divided by the cumulative area of sectors corresponding to all points in the Poincaré plot except those that are located on LI. - HRV_PI|Porta's Index, defined as the number of points below LI divided by the total number of points in Poincaré plot except those that are located on LI. - HRV_SD1a|Short-term variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). - HRV_SD1d|Short-term variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). - HRV_C1a|The contributions of heart rate accelerations to short-term HRV, (Piskorski, 2011). - HRV_C1d|The contributions of heart rate decelerations to short-term HRV, (Piskorski, 2011). - HRV_SD2a|Long-term variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). - HRV_SD2d|Long-term variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). + HRV_SD1SD2|Ratio of SD1 to SD2. Describes the ratio of short term to long term \ + variations in HRV. + HRV_S|Area of ellipse described by *SD1* and *SD2* (``pi * SD1 * SD2``). It is \ + proportional to *SD1SD2*. + HRV_CSI|The Cardiac Sympathetic Index (Toichi, 1997) is a measure of cardiac \ + sympathetic function independent of vagal activity, calculated by dividing the \ + longitudinal variability of the Poincaré plot (``4*SD2``) by its transverse \ + variability (``4*SD1``). + HRV_CVI|The Cardiac Vagal Index (Toichi, 1997) is an index of cardiac parasympathetic \ + function (vagal activity unaffected by sympathetic activity), and is equal equal \ + to the logarithm of the product of longitudinal (``4*SD2``) and transverse \ + variability (``4*SD1``). 
+ HRV_CSI_Modified|The modified CSI (Jeppesen, 2014) obtained by dividing the square of \ + the longitudinal variability by its transverse variability. + HRV_GI|Guzik's Index, defined as the distance of points above line of identity (LI) to \ + LI divided by the distance of all points in Poincaré plot to LI except those that \ + are located on LI. + HRV_SI|Slope Index, defined as the phase angle of points above LI divided by the phase \ + angle of all points in Poincaré plot except those that are located on LI. + HRV_AI|Area Index, defined as the cumulative area of the sectors corresponding to the \ + points that are located above LI divided by the cumulative area of sectors \ + corresponding to all points in the Poincaré plot except those that are located \ + on LI. + HRV_PI|Porta's Index, defined as the number of points below LI divided by the total \ + number of points in Poincaré plot except those that are located on LI. + HRV_SD1a|Short-term variance of contributions of decelerations (prolongations of RR \ + intervals), (Piskorski, 2011). + HRV_SD1d|Short-term variance of contributions of accelerations (shortenings of RR \ + intervals), (Piskorski, 2011). + HRV_C1a|The contributions of heart rate accelerations to short-term HRV, (Piskorski, 2011). + HRV_C1d|The contributions of heart rate decelerations to short-term HRV, (Piskorski, 2011). + HRV_SD2a|Long-term variance of contributions of accelerations (shortenings of RR \ + intervals), (Piskorski, 2011). + HRV_SD2d|Long-term variance of contributions of decelerations (prolongations of RR \ + intervals), (Piskorski, 2011). HRV_C2a|The contributions of heart rate accelerations to long-term HRV, (Piskorski, 2011). HRV_C2d|The contributions of heart rate decelerations to long-term HRV, (Piskorski, 2011). - HRV_SDNNa|Total variance of contributions of accelerations (shortenings of RR intervals), (Piskorski, 2011). - HRV_SDNNd|Total variance of contributions of decelerations (prolongations of RR intervals), (Piskorski, 2011). + HRV_SDNNa|Total variance of contributions of accelerations (shortenings of RR \ + intervals), (Piskorski, 2011). + HRV_SDNNd|Total variance of contributions of decelerations (prolongations of RR \ + intervals), (Piskorski, 2011). HRV_Ca|The total contributions of heart rate accelerations to HRV. HRV_Cd|The total contributions of heart rate decelerations to HRV. HRV_PIP|Percentage of inflection points of the RR intervals series. diff --git a/neurokit2/hrv/hrv_rsa.py b/neurokit2/hrv/hrv_rsa.py index 204b670083..9aee3fcfe7 100644 --- a/neurokit2/hrv/hrv_rsa.py +++ b/neurokit2/hrv/hrv_rsa.py @@ -96,12 +96,14 @@ def hrv_rsa( A dictionary containing the RSA features, which includes: .. codebookadd:: - RSA_P2T_Values|The estimate of RSA during each breath cycle, produced by subtracting the shortest heart period (or RR interval) from the longest heart period in ms. + RSA_P2T_Values|The estimate of RSA during each breath cycle, produced by subtracting \ + the shortest heart period (or RR interval) from the longest heart period in ms. RSA_P2T_Mean|The mean peak-to-trough across all cycles in ms. RSA_P2T_Mean_log|The logarithm of the mean of RSA estimates. RSA_P2T_SD|The standard deviation of all RSA estimates. RSA_P2T_NoRSA|The number of breath cycles from which RSA could not be calculated. - RSA_PorgesBohrer|The Porges-Bohrer estimate of RSA, optimal when the signal to noise ratio is low, in ln(ms^2). + RSA_PorgesBohrer|The Porges-Bohrer estimate of RSA, optimal when the signal to noise \ + ratio is low, in ln(ms^2). 
Example ---------- diff --git a/neurokit2/ppg/ppg_eventrelated.py b/neurokit2/ppg/ppg_eventrelated.py index 3daa6b8097..59518f0afc 100644 --- a/neurokit2/ppg/ppg_eventrelated.py +++ b/neurokit2/ppg/ppg_eventrelated.py @@ -41,7 +41,8 @@ def ppg_eventrelated(epochs, silent=False): .. codebookadd:: PPG_Rate_Trend_Linear|The parameter corresponding to the linear trend. PPG_Rate_Trend_Quadratic|The parameter corresponding to the curvature. - PPG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters might not be reliable or meaningful. + PPG_Rate_Trend_R2|The quality of the quadratic model. If too low, the parameters \ + might not be reliable or meaningful. See Also -------- diff --git a/neurokit2/rsp/rsp_process.py b/neurokit2/rsp/rsp_process.py index c7c3074a8c..3dd153157e 100644 --- a/neurokit2/rsp/rsp_process.py +++ b/neurokit2/rsp/rsp_process.py @@ -61,11 +61,13 @@ def rsp_process( RSP_Raw|The raw signal. RSP_Clean|The raw signal. RSP_Peaks|The respiratory peaks (exhalation onsets) marked as "1" in a list of zeros. - RSP_Troughs|The respiratory troughs (inhalation onsets) marked as "1" in a list of zeros. + RSP_Troughs|The respiratory troughs (inhalation onsets) marked as "1" in a list \ + of zeros. RSP_Rate|The breathing rate interpolated between inhalation peaks. RSP_Amplitude|The breathing amplitude interpolated between inhalation peaks. RSP_Phase|The breathing phase, marked by "1" for inspiration and "0" for expiration. - RSP_Phase_Completion|The breathing phase completion, expressed in percentage (from 0 to 1), representing the stage of the current respiratory phase. + RSP_Phase_Completion|The breathing phase completion, expressed in percentage \ + (from 0 to 1), representing the stage of the current respiratory phase. RSP_RVT|Respiratory volume per time (RVT). info : dict diff --git a/neurokit2/rsp/rsp_rrv.py b/neurokit2/rsp/rsp_rrv.py index 4f0d41cca9..7bc114dc1c 100644 --- a/neurokit2/rsp/rsp_rrv.py +++ b/neurokit2/rsp/rsp_rrv.py @@ -44,20 +44,34 @@ def rsp_rrv(rsp_rate, troughs=None, sampling_rate=1000, show=False, silent=True) .. codebookadd:: RRV_SDBB|The standard deviation of the breath-to-breath intervals. RRV_RMSSD|The root mean square of successive differences of the breath-to-breath intervals. - RRV_SDSD|The standard deviation of the successive differences between adjacent breath-to-breath intervals. + RRV_SDSD|The standard deviation of the successive differences between adjacent \ + breath-to-breath intervals. RRV_BBx|The number of successive interval differences that are greater than x seconds. - RRV_pBBx|the proportion of breath-to-breath intervals that are greater than x seconds, out of the total number of intervals. - RRV_VLF|Spectral power density pertaining to very low frequency band (i.e., 0 to .04 Hz) by default. - RRV_LF|Spectral power density pertaining to low frequency band (i.e., .04 to .15 Hz) by default. - RRV_HF|Spectral power density pertaining to high frequency band (i.e., .15 to .4 Hz) by default. + RRV_pBBx|the proportion of breath-to-breath intervals that are greater than x seconds, \ + out of the total number of intervals. + RRV_VLF|Spectral power density pertaining to very low frequency band (i.e., 0 to\ + .04 Hz) by default. + RRV_LF|Spectral power density pertaining to low frequency band (i.e., .04 to \ + .15 Hz) by default. + RRV_HF|Spectral power density pertaining to high frequency band (i.e., .15 to \ + .4 Hz) by default. RRV_LFHF|The ratio of low frequency power to high frequency power. 
- RRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by the total power. - RRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by total power. - RRV_SD1|SD1 is a measure of the spread of breath-to-breath intervals on the Poincaré plot perpendicular to the line of identity. It is an index of short-term variability. - RRV_SD2|SD2 is a measure of the spread of breath-to-breath intervals on the Poincaré plot along the line of identity. It is an index of long-term variability. - RRV_SD2SD1|The ratio between short and long term fluctuations of the breath-to-breath intervals (SD2 divided by SD1). - RRV_DFA_alpha1|The "short-term" fluctuation value generated from Detrended Fluctuation Analysis i.e. the root mean square deviation from the fitted trend of the breath-to-breath intervals. Will only be computed if mora than 160 breath cycles in the signal. - RRV_DFA_alpha2|The long-term fluctuation value. Will only be computed if mora than 640 breath cycles in the signal. + RRV_LFn|The normalized low frequency, obtained by dividing the low frequency power by \ + the total power. + RRV_HFn|The normalized high frequency, obtained by dividing the low frequency power by \ + total power. + RRV_SD1|SD1 is a measure of the spread of breath-to-breath intervals on the Poincaré \ + plot perpendicular to the line of identity. It is an index of short-term variability. + RRV_SD2|SD2 is a measure of the spread of breath-to-breath intervals on the Poincaré \ + plot along the line of identity. It is an index of long-term variability. + RRV_SD2SD1|The ratio between short and long term fluctuations of the breath-to-breath \ + intervals (SD2 divided by SD1). + RRV_DFA_alpha1|The "short-term" fluctuation value generated from Detrended Fluctuation \ + Analysis i.e. the root mean square deviation from the fitted trend of the \ + breath-to-breath intervals. Will only be computed if mora than 160 breath cycles \ + in the signal. + RRV_DFA_alpha2|The long-term fluctuation value. Will only be computed if mora than \ + 640 breath cycles in the signal. RRV_ApEn|The approximate entropy of RRV, calculated by :func:`.entropy_approximate`. RRV_SampEn|The sample entropy of RRV, calculated by :func:`.entropy_sample`. From 5d3e044d202621d5e4668f2d6db98289ad177b01 Mon Sep 17 00:00:00 2001 From: Peter H Charlton Date: Mon, 29 Jul 2024 13:36:29 +0100 Subject: [PATCH 47/49] Add MSPTDfast algorithm Added MSPTDfast algorithm (v.1.1) for PPG beat detection. --- neurokit2/ppg/ppg_findpeaks.py | 246 ++++++++++++++++++++++++++++++++- 1 file changed, 245 insertions(+), 1 deletion(-) diff --git a/neurokit2/ppg/ppg_findpeaks.py b/neurokit2/ppg/ppg_findpeaks.py index e0750fb523..4fdaf4b7e6 100644 --- a/neurokit2/ppg/ppg_findpeaks.py +++ b/neurokit2/ppg/ppg_findpeaks.py @@ -68,6 +68,8 @@ def ppg_findpeaks( * Bishop, S. M., & Ercole, A. (2018). Multi-scale peak and trough detection optimised for periodic and quasi-periodic neuroscience data. In Intracranial Pressure & Neuromonitoring XVI (pp. 189-195). Springer International Publishing. + * Charlton, P. H. et al. (2024). MSPTDfast: An Efficient Photoplethysmography Beat Detection + Algorithm. Proc CinC. 
""" method = method.lower() @@ -75,13 +77,17 @@ def ppg_findpeaks( peaks = _ppg_findpeaks_elgendi(ppg_cleaned, sampling_rate, show=show, **kwargs) elif method in ["msptd", "bishop2018", "bishop"]: peaks, _ = _ppg_findpeaks_bishop(ppg_cleaned, show=show, **kwargs) + elif method in ["msptdfast", "msptdfastv1", "charlton2024", "charlton"]: + peaks, onsets = _ppg_findpeaks_charlton(ppg_cleaned, sampling_rate, show=show, **kwargs) else: raise ValueError( - "`method` not found. Must be one of the following: 'elgendi', 'bishop'." + "`method` not found. Must be one of the following: 'elgendi', 'bishop', 'charlton'." ) # Prepare output. info = {"PPG_Peaks": peaks} + if 'onsets' in locals(): + info["PPG_Onsets"] = onsets return info @@ -239,3 +245,241 @@ def _ppg_findpeaks_bishop( ax0.set_title("PPG Peaks (Method by Bishop et al., 2018)") return peaks, onsets + + +def _ppg_findpeaks_charlton( + signal, + sampling_rate=1000, + show=False, +): + """Implementation of Charlton et al (2024) MSPTDfast: An Efficient Photoplethysmography + Beat Detection Algorithm. 2024 Computing in Cardiology (CinC), Karlsruhe, Germany, + doi:10.1101/2024.07.18.24310627. + """ + + # Inner functions + + def find_m_max(x, N, max_scale, m_max): + """Find local maxima scalogram for peaks + """ + + for k in range(1, max_scale + 1): # scalogram scales + for i in range(k + 2, N - k + 2): + if x[i - 2] > x[i - k - 2] and x[i - 2] > x[i + k - 2]: + m_max[k - 1, i - 2] = True + + return m_max + + def find_m_min(x, N, max_scale, m_min): + """Find local minima scalogram for onsets + """ + + for k in range(1, max_scale + 1): # scalogram scales + for i in range(k + 2, N - k + 2): + if x[i - 2] < x[i - k - 2] and x[i - 2] < x[i + k - 2]: + m_min[k - 1, i - 2] = True + + return m_min + + def find_lms_using_msptd_approach(max_scale, x, options): + """Find local maxima (or minima) scalogram(s) using the + MSPTD approach + """ + + # Setup + N = len(x) + + # Find local maxima scalogram (if required) + if options["find_pks"]: + m_max = np.full((max_scale, N), False) # matrix for maxima + m_max = find_m_max(x, N, max_scale, m_max) + else: + m_max = None + + # Find local minima scalogram (if required) + if options["find_trs"]: + m_min = np.full((max_scale, N), False) # matrix for minima + m_min = find_m_min(x, N, max_scale, m_min) + else: + m_min = None + + return m_max, m_min + + def downsample(win_sig, ds_factor): + """Downsamples signal by picking out every nth sample, where n is + specified by ds_factor + """ + + return win_sig[::ds_factor] + + def detect_peaks_and_onsets_using_msptd(signal, fs, options): + """Detect peaks and onsets in a PPG signal using a modified MSPTD approach + (where the modifications are those specified in Charlton et al. 
2024) + """ + + # Setup + N = len(signal) + L = int(np.ceil(N / 2) - 1) + + # Step 0: Don't calculate scales outside the range of plausible HRs + + plaus_hr_hz = np.array(options['plaus_hr_bpm']) / 60 # in Hz + init_scales = np.arange(1, L + 1) + durn_signal = len(signal) / fs + init_scales_fs = (L / init_scales) / durn_signal + if options['use_reduced_lms_scales']: + init_scales_inc_log = init_scales_fs >= plaus_hr_hz[0] + else: + init_scales_inc_log = np.ones_like(init_scales_fs, dtype=bool) # DIDN"T FULLY UNDERSTAND + + max_scale_index = np.where(init_scales_inc_log)[0] # DIDN"T FULLY UNDERSTAND THIS AND NEXT FEW LINES + if max_scale_index.size > 0: + max_scale = max_scale_index[-1] + 1 # Add 1 to convert from 0-based to 1-based index + else: + max_scale = None # Or handle the case where no scales are valid + + # Step 1: calculate local maxima and local minima scalograms + + # - detrend + x = scipy.signal.detrend(signal, type="linear") + + # - populate LMS matrices + [m_max, m_min] = find_lms_using_msptd_approach(max_scale, x, options) + + # Step 2: find the scale with the most local maxima (or local minima) + + # - row-wise summation (i.e. sum each row) + if options["find_pks"]: + gamma_max = np.sum(m_max, axis=1) # the "axis=1" option makes it row-wise + if options["find_trs"]: + gamma_min = np.sum(m_min, axis=1) + # - find scale with the most local maxima (or local minima) + if options["find_pks"]: + lambda_max = np.argmax(gamma_max) + if options["find_trs"]: + lambda_min = np.argmax(gamma_min) + + # Step 3: Use lambda to remove all elements of m for which k>lambda + first_scale_to_include = np.argmax(init_scales_inc_log) + if options["find_pks"]: + m_max = m_max[first_scale_to_include:lambda_max + 1, :] + if options["find_trs"]: + m_min = m_min[first_scale_to_include:lambda_min + 1, :] + + # Step 4: Find peaks (and onsets) + # - column-wise summation + if options["find_pks"]: + m_max_sum = np.sum(m_max == False, axis=0) + peaks = np.where(m_max_sum == 0)[0].astype(int) + else: + peaks = [] + + if options["find_trs"]: + m_min_sum = np.sum(m_min == False, axis=0) + onsets = np.where(m_min_sum == 0)[0].astype(int) + else: + onsets = [] + + return peaks, onsets + + # ~~~ Main function ~~~ + + # Specify settings + # - version: optimal selection (CinC 2024) + options = { + 'find_trs': True, # whether or not to find onsets + 'find_pks': True, # whether or not to find peaks + 'do_ds': True, # whether or not to do downsampling + 'ds_freq': 20, # the target downsampling frequency + 'use_reduced_lms_scales': True, # whether or not to reduce the number of scales (default 30 bpm) + 'win_len': 8, # duration of individual windows for analysis + 'win_overlap': 0.2, # proportion of window overlap + 'plaus_hr_bpm': [30, 200] # range of plausible HRs (only the lower bound is used) + } + + # Split into overlapping windows + no_samps_in_win = options["win_len"] * sampling_rate + if len(signal) <= no_samps_in_win: + win_starts = 0 + win_ends = len(signal) - 1 + else: + win_offset = round(no_samps_in_win * (1 - options["win_overlap"])) + win_starts = list(range(0, len(signal) - no_samps_in_win + 1, win_offset)) + win_ends = [start + 1 + no_samps_in_win for start in win_starts] + if win_ends[-1] < len(signal): + win_starts.append(len(signal) - 1 - no_samps_in_win) + win_ends.append(len(signal)) + # this ensures that the windows include the entire signal duration + + # Set up downsampling if the sampling frequency is particularly high + if options["do_ds"]: + min_fs = options["ds_freq"] + if sampling_rate > 
min_fs: + ds_factor = int(np.floor(sampling_rate / min_fs)) + ds_fs = sampling_rate / np.floor(sampling_rate / min_fs) + else: + options["do_ds"] = False + + # detect peaks and onsets in each window + peaks = [] + onsets = [] + + # cycle through each window + for win_no in range(len(win_starts)): + # Extract this window's data + win_sig = signal[win_starts[win_no]:win_ends[win_no]] + + # Downsample signal + if options['do_ds']: + rel_sig = downsample(win_sig, ds_factor) + rel_fs = ds_fs + else: + rel_sig = win_sig + rel_fs = sampling_rate + + # Detect peaks and onsets + p, t = detect_peaks_and_onsets_using_msptd(rel_sig, rel_fs, options) + + # Resample peaks + if options['do_ds']: + p = [peak * ds_factor for peak in p] + t = [onset * ds_factor for onset in t] + + # Correct peak indices by finding highest point within tolerance either side of detected peaks + tol_durn = 0.05 + if rel_fs < 10: + tol_durn = 0.2 + elif rel_fs < 20: + tol_durn = 0.1 + tol = int(np.ceil(rel_fs * tol_durn)) + + for pk_no in range(len(p)): + segment = win_sig[(p[pk_no] - tol):(p[pk_no] + tol + 1)] + temp = np.argmax(segment) + p[pk_no] = p[pk_no] - tol + temp + + # Correct onset indices by finding highest point within tolerance either side of detected onsets + for onset_no in range(len(t)): + segment = win_sig[(t[onset_no] - tol):(t[onset_no] + tol + 1)] + temp = np.argmin(segment) + t[onset_no] = t[onset_no] - tol + temp + + # Store peaks and onsets + win_peaks = [peak + win_starts[win_no] for peak in p] + peaks.extend(win_peaks) + win_onsets = [onset + win_starts[win_no] for onset in t] + onsets.extend(win_onsets) + + # Tidy up detected peaks and onsets (by ordering them and only retaining unique ones) + peaks = sorted(set(peaks)) + onsets = sorted(set(onsets)) + + # Plot results (optional) + if show: + _, ax0 = plt.subplots(nrows=1, ncols=1, sharex=True) + ax0.plot(signal, label="signal") + ax0.scatter(peaks, signal[peaks], c="r") + ax0.scatter(onsets, signal[onsets], c="b") + ax0.set_title("PPG Onsets (Method by Charlton et al., 2024)") + + return peaks, onsets From 581fbda11b6426ef25050032da04523332cc96ba Mon Sep 17 00:00:00 2001 From: Peter H Charlton Date: Tue, 6 Aug 2024 09:20:40 +0100 Subject: [PATCH 48/49] Update ecg_peaks.py Brief update to description of the 'neurokit' ecg_peaks method, to include the publication validating this algorithm. --- neurokit2/ecg/ecg_peaks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neurokit2/ecg/ecg_peaks.py b/neurokit2/ecg/ecg_peaks.py index e3d884bcdc..ddd135ab19 100644 --- a/neurokit2/ecg/ecg_peaks.py +++ b/neurokit2/ecg/ecg_peaks.py @@ -17,7 +17,9 @@ def ecg_peaks(ecg_cleaned, sampling_rate=1000, method="neurokit", correct_artifa * **neurokit** (default): QRS complexes are detected based on the steepness of the absolute gradient of the ECG signal. Subsequently, R-peaks are detected as local maxima in - the QRS complexes. Unpublished, but see https://github.com/neuropsychology/NeuroKit/issues/476 + the QRS complexes. The method is unpublished, but see: (i) https://github.com/neuropsychology/NeuroKit/issues/476 + for discussion of this algorithm; and (ii) https://doi.org/10.21105/joss.02621 for the original validation of + this algorithm. * **pantompkins1985**: Algorithm by Pan & Tompkins (1985). * **hamilton2002**: Algorithm by Hamilton (2002). * **zong2003**: Algorithm by Zong et al. (2003). 
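
A minimal usage sketch (illustrative only, not part of the diffs) for the MSPTDfast beat detector introduced in PATCH 47/49. It assumes the patched `neurokit2` is importable as `nk`; `ppg_simulate` and `ppg_clean` are existing NeuroKit2 functions, and the method string "charlton2024" (aliases "msptdfast", "charlton") is the dispatch value added to `ppg_findpeaks.py` above:

    import neurokit2 as nk

    # Simulate and clean a short PPG segment (existing NeuroKit2 functions).
    ppg = nk.ppg_simulate(duration=30, sampling_rate=100, heart_rate=70)
    ppg_cleaned = nk.ppg_clean(ppg, sampling_rate=100)

    # With PATCH 47/49 applied, MSPTDfast is selected via method="charlton2024".
    # The returned info dict then contains "PPG_Onsets" in addition to "PPG_Peaks".
    info = nk.ppg_findpeaks(ppg_cleaned, sampling_rate=100, method="charlton2024", show=False)
    print(info["PPG_Peaks"][:5], info["PPG_Onsets"][:5])
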
From 124dc0434884b9884c41a30b4c9db602afc8428c Mon Sep 17 00:00:00 2001 From: visserle <99926564+visserle@users.noreply.github.com> Date: Fri, 16 Aug 2024 11:18:27 +0200 Subject: [PATCH 49/49] >`np.Inf` was removed in the NumPy 2.0 release. Use `np.inf` instead. --- neurokit2/eda/eda_phasic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neurokit2/eda/eda_phasic.py b/neurokit2/eda/eda_phasic.py index f19361a676..ad740012e5 100644 --- a/neurokit2/eda/eda_phasic.py +++ b/neurokit2/eda/eda_phasic.py @@ -584,7 +584,7 @@ def lasso(R, s, sampling_rate, maxIters, epsilon): ) ATv = np.matmul(R.transpose(), v).flatten() - gammaI = np.Inf + gammaI = np.inf removeIndices = [] inactiveSet = np.arange(0, W) @@ -603,7 +603,7 @@ def lasso(R, s, sampling_rate, maxIters, epsilon): epsilon = 1e-12 gammaArr = (lmbda - c[inactiveSet]) / (1 - ATv[inactiveSet] + epsilon) - gammaArr[gammaArr < zeroTol] = np.Inf + gammaArr[gammaArr < zeroTol] = np.inf gammaIc = np.min(gammaArr) # Imin = np.argmin(gammaArr) newIndices = inactiveSet[(np.abs(gammaArr - gammaIc) < zeroTol)]
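
A small compatibility sketch (illustrative only) of the rationale behind PATCH 49/49: under NumPy >= 2.0 the `np.Inf` alias no longer exists and accessing it raises AttributeError, whereas `np.inf` works on both old and new NumPy versions:

    import numpy as np

    x = np.array([0.2, -1.0, 3.5])

    try:
        x[x < 0.5] = np.Inf   # pre-2.0 alias; raises AttributeError under NumPy >= 2.0
    except AttributeError:
        x[x < 0.5] = np.inf   # portable spelling used by this patch

    print(x)  # [inf inf 3.5]
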