diff --git a/delta/data_models/channels_2d.py b/delta/data_models/channels_2d.py index cc7c699..9245f5d 100644 --- a/delta/data_models/channels_2d.py +++ b/delta/data_models/channels_2d.py @@ -1,28 +1,27 @@ # -*- Encoding: UTF-8 -*- -""" -Author: Ralph Kube - -Abstractions for channels and channel ranges in 2d images. - -""" - -import itertools +#import itertools class channel_2d: - """Abstraction of a channel in 2d array""" + """Abstraction of a channel in 2d array + + """ def __init__(self, ch_v, ch_h, chnum_v, chnum_h, order): - """ - Parameters - ---------- - ch_v, ch_h : int - Horizontal and Vertical channel number - chnum_v, chnum_h : int - Total count of vertical and horizontal channels - order : string, either 'horizontal' or 'vertical' - Denotes whether horizontal or vertical channels are - arranged consecutively + """Initializes channel_2d class + + Args: + ch_v (int): + Horizontal channel number + ch_h (int): + Vertical channel number + chnum_v (int): + Total count of vertical channels + chnum_h (int): + Total count of vertical channels + order (string): + Either 'horizontal' or 'vertical'. Denotes whether horizontal or + vertical channels are arranged consecutively """ # horizontal channels assert((ch_v > 0) & (ch_v <= chnum_v )) @@ -51,7 +50,12 @@ def get_num(self): def get_idx(self): - """Returns the linear, ZERO-BASED, index corresponding to ch_h and ch_v""" + """Returns the linear, ZERO-BASED, index corresponding to ch_h and ch_v + + Returns: + index (int): + Linear, zero-based index corresponding to ch_h and ch_v in the 2d array. + """ return self.idx_fct(self.ch_v, self.ch_h) - 1 @@ -71,6 +75,14 @@ class channel_pair: """ def __init__(self, ch1, ch2): + """Initializes channel pair with 2 channels. + Args: + ch1 (channel): + First channel + ch2 (channel): + Second channel + + """ self.ch1 = ch1 self.ch2 = ch2 @@ -114,27 +126,34 @@ def from_json(cls, str): class channel_range: - """Defines iterators over a 2d range - - v - ^ - | - 6 | oooooo - 5 | ooxxxo - 4 | ooxxxo - 3 | ooxxxo - 2 | ooxxxo - 1 | oooooo - +--------> h - 123456 - + """Defines iterators over a 2d sub-array. + + This class defines an iterator over a rectangular selection in a 2d sub-array, + as defined by vertical and horizontal initial and final position (vi, hi), and (vf, hf). + + .. line-block:: + v + ^ + | + 6 | oooooo + 5 | ooxxxo + 4 | ooxxxo + 3 | ooxxxo + 2 | ooxxxo + 1 | oooooo + +--------> h + 123456 + The rectangular selection above shows (vi,hi) = (2,3) and (vf, hf) = (5,5). Iteration over this selection with horizontal channels consecutively ordered gives the index series - (3,2), (4,2), (5,2), - (3,3), (4,3), (5,3), - (3,4), (4,4), (5,4), - (4,5), (4,5), (5,5). + + .. line-block:: + (3,2), (4,2), (5,2), + (3,3), (4,3), (5,3), + (3,4), (4,4), (5,4), + (4,5), (4,5), (5,5). + """ def __init__(self, ch_start, ch_end): @@ -207,27 +226,21 @@ def __next__(self): def length(self): - """Calculates the number of channels in the list.""" + """Returns the number of channels in the range. + + Returns: + int: Number of channels in the range + """ return(self.ch_end.get_num() - self.ch_start.get_num() + 1) class num_to_vh(): - """Returns a tuple (ch_v, ch_h) for a channel number. + """Functor that returns a tuple (ch_v, ch_h) for a channel number. Note that these are 1-based numbers. - Parameters - ---------- - ch_num: int, channel nu,ber. - - Returns: - -------- - (ch_v, ch_h): int, Vertical and horizontal channel numbers. - - Vertical channel number is between 1 and 24. 
Horizontal channel number is - between 1 and 8. - + >>> obj = num_to_vh(24, 8, "vertical") >>> ch_num_to_vh(17) (3, 1) """ @@ -235,20 +248,27 @@ class num_to_vh(): def __init__(self, chnum_v: int, chnum_h: int, order: str): """Initializes with number of vertical and horizontal views. - Parameters: - ----------- - chnum_v, chnum_h: int - Number of vertical and horizontal views in the diagnostic - order : string, either 'horizontal' or 'vertical' + Args: + chnum_v (int): Number of vertical views in the diagnostic + chnum_h (int): Number of horizontal views in the diagnostic + order (string) : Either 'horizontal' or 'vertical' denotes whether horizontal or vertical channels are ordered consecutively """ self.chnum_v = chnum_v self.chnum_h = chnum_h + self.order = order def __call__(self, ch_num): - """Converts 2d indices ch_v and ch_h to linear index.""" + """Converts 2d indices ch_v and ch_h to linear index. + + Args: + ch_num (int): Linear, one-based channel number + + Returns: + (ch_v, ch_h) (tuple): Vertical and horizontal channel view + """ assert((ch_num >= 1) & (ch_num <= self.chnum_v * self.chnum_h)) # Calculate using zero-base ch_num -= 1 @@ -258,26 +278,20 @@ def __call__(self, ch_num): class vh_to_num: - """Returns the linear channel index 1..192 for a ch_v, ch_h. - + """Returns the linear channel number for a tuple (ch_v, ch_h). - Returns: - -------- - ch_num: int, linear channel index - - >>> vh_2_num = ch_vh_to_num(24, 8, order='horizontal') - >>> vh_2_num(2, 4) + >>> obj = vh_to_num(24, 8, order='horizontal') + >>> obj(2, 4) 12 - >>> vh_2_num = ch_vh_to_num(24, 8, order='vertical') - >>> vh_2_num(2, 4) + >>> obj = vh_to_num(24, 8, order='vertical') + >>> obj(2, 4) 28 """ def __init__(self, chnum_v, chnum_h, order="horizontal"): """ - Parameters: - ----------- + Args: ch_v, ch_h: int vertical and horizontal chanel numbers order : string, either 'horizontal' or 'vertical' @@ -293,6 +307,15 @@ def __init__(self, chnum_v, chnum_h, order="horizontal"): self.order = order def __call__(self, ch_v, ch_h): + """Converts ch_v and ch_h to linear index. + + Args: + (ch_v, ch_h) (tuple): Vertical and horizontal channel view + + + Returns: + ch_num (int): Linear, one-based channel number + """ # We usually want to check that we are within the bounds. # But sometimes it is helpful to avoid this. diff --git a/delta/data_models/helpers.py b/delta/data_models/helpers.py index 2cdc637..078b332 100644 --- a/delta/data_models/helpers.py +++ b/delta/data_models/helpers.py @@ -15,8 +15,7 @@ class data_model_generator(): def __init__(self, cfg_diagnostic: dict): """Sets up data model generation. - Parameters: - =========== + Args: cfg_diagnostic: dict, Diagnostic section of the config file """ @@ -32,10 +31,8 @@ def __init__(self, cfg_diagnostic: dict): def new_chunk(self, stream_data: np.array, chunk_idx: int): """Generates a data model from new chunk of streamed data. - Parameters - ---------- - stream_data : np.array - New data chunk read from reader_gen. + Args: + stream_data (np.array): New data chunk read from :class: reader_gen. """ @@ -137,26 +134,25 @@ def __init__(self, offlev, offstd): self.sigstd = None def __call__(self, data): - """Normalizes data + """Normalizes data in-place - Parameters: - ----------- - data......: array. - dim0...-2: spatial data. 
dim -1: Time + Args: + data (twod_data): + Data that will be normalized to siglev and sigstd """ # For these asserts to hold we need to calculate offlev,offstd with keepdims=True - assert(self.offlev.shape[:-1] == data.shape[:-1]) - assert(self.offstd.shape[:-1] == data.shape[:-1]) + assert(self.offlev.shape[:-1] == data.shape[data.axis_t]) + assert(self.offstd.shape[:-1] == data.shape[data.axis_t]) assert(self.offlev.ndim == data.ndim) assert(self.offstd.ndim == data.ndim) data[:] = data - self.offlev - self.siglev = np.median(data, axis=-1, keepdims=True) - self.sigstd = data.std(axis=-1, keepdims=True) + self.siglev = np.median(data, axis=data.axis_t, keepdims=True) + self.sigstd = data.std(axis=data.axis_t, keepdims=True) - data[:] = data / data.mean(axis=-1, keepdims=True) - 1.0 + data[:] = data / data.mean(axis=data.axis_t, keepdims=True) - 1.0 return None diff --git a/delta/data_models/kstar_ecei.py b/delta/data_models/kstar_ecei.py index ea5d901..229cbf5 100644 --- a/delta/data_models/kstar_ecei.py +++ b/delta/data_models/kstar_ecei.py @@ -20,120 +20,13 @@ import numpy as np -import warnings -import h5py -import itertools import json -import time -from data_models.channels_2d import channel_2d, channel_range +#from data_models.channels_2d import channel_2d, channel_range +from .channels_2d import channel_2d, channel_range -class timebase_streaming(): - """Defines a timebase for a data chunk in the stream""" - - def __init__(self, t_start: float, t_end: float, f_sample: float, samples_per_chunk: int, chunk_idx: int): - """ - Defines a timebase for a data chunk in the stream - - Parameters: - ----------- - t_start............: float, Start time of the data stream, in seconds - t_end..............: float, End time of the data stream, in seconds - f_sample...........: float, Sampling frequency, in Hz - samples_per_chunk..: int, Number of samples per chunk - chunk_idx..........: int, Index of the chunk that this timebase is used used - """ - assert(t_start < t_end) - assert(f_sample > 0) - assert(chunk_idx >= 0) - assert(chunk_idx < (t_end - t_start) * f_sample // samples_per_chunk) - - self.t_start = t_start - self.t_end = t_end - self.f_sample = f_sample - self.dt = 1. 
/ self.f_sample - - # Total samples in the entire stream - self.total_num_samples = int((self.t_end - self.t_start) / self.dt) - self.chunk_idx = chunk_idx - # How many samples are in a chunk - self.samples_per_chunk = samples_per_chunk - - def time_to_idx(self, time: float): - """Generates an index suitable to index the current data chunk for - the time - - Parameters: - ----------- - time.......: float, Absolute time we wish to get an index for - """ - assert(time >= self.t_start) - assert(time <= self.t_end) - - # Generate the index the time would have in the entire time-series - tidx_absolute = round((time - self.t_start) / self.dt) - if tidx_absolute // self.samples_per_chunk != self.chunk_idx: - return None - tidx_rel = tidx_absolute % self.samples_per_chunk - - return tidx_rel - - - def gen_full_timebase(self): - """Generates an array of times associated with the samples in the current chunk""" - - return np.arange(self.chunk_idx * self.samples_per_chunk, - (self.chunk_idx + 1) * self.samples_per_chunk) * self.dt + self.t_start - - -class timebase(): - def __init__(self, t_start, t_end, f_sample): - """ - Defines a time base for ECEI channel data - Parameters - ---------- - t_trigger: float, - t_offset: float, - f_sample: float, sampling frequency of the ECEI data - """ - # Assume that 0 <= t_start < t_end - assert(t_end >= 0.0) - assert(t_start < t_end) - assert(f_sample >= 0.0) - self.t_start = t_start - self.t_end = t_end - self.f_sample = f_sample - self.dt = 1. / self.f_sample - - self.num_samples = int((self.t_end - self.t_start) / self.dt) - - - def time_to_idx(self, t0): - """ - Given a timestamp, returns the index where the timebase is closest. - - Parameters: - t0: float - Time stamps that we want to calculate the index for - """ - # Ensure that the time we are looking for is inside the domain - assert(t0 >= self.t_start) - assert(t0 <= self.t_end) - - fulltime = self.get_fulltime() - idx = np.argmin(np.abs(fulltime - t0)) - - return idx - - - def get_fulltime(self): - """ - Returns an array with the full timebase - - """ - return np.arange(self.t_start, self.t_end, self.dt) - def channel_range_from_str(range_str): """ @@ -176,106 +69,6 @@ def channel_range_from_str(range_str): -class ecei_view(): - """Defines the view of an ECEI. This extends ecei_channel to the entire diagnostic - - Parameters: - ----------- - datafilename: string, filename to the HDF5 file - tb: timebase - Timebase object for the raw voltages - dev: device name - t_offset: tuple (t_n0, t_n1) - Tuple that defines the time interval where a signal reference value is calculated. If None, - raw values will be used. - t_crop: tuple (t_c0, t_c1) - Defines the time interval where the data is cropped to. If None, data will not - be cropped - - - """ - - def __init__(self, datafilename, tb, dev, t_offset=(-0.099, -0.089), t_crop=(1.0, 1.1), num_v=24, num_h=8): - - # Number of vertical and horizontal channels - self.num_v = num_v - self.num_h = num_h - # Infer number of samples in the cropped interval - idx_offset = [tb.time_to_idx(t) for t in t_offset] - if idx_crop is not None: - idx_crop = [tb.time_to_idx(t) for t in t_crop] - self.num_samples = idx_crop[1] - idx_crop[0] - - # Use float32 since data is generated from 16bit integers - self.ecei_data = np.zeros([self.num_v, self.num_h, self.num_samples], dtype=np.float32) - # Marks data the we ignore for plotting etc. 
- self.bad_data = np.zeros([self.num_v, self.num_h], dtype=bool) - - # Offset level - self.offlev = np.zeros([self.num_v, self.num_h], dtype=np.float32) - # Offset standard deviation - self.offstd = np.zeros([self.num_v, self.num_h], dtype=np.float32) - # Signal level - self.siglev = np.zeros([self.num_v, self.num_h], dtype=np.float32) - # Signal standard deviation - self.sigstd = np.zeros([self.num_v, self.num_h], dtype=np.float32) - - tic = time.perf_counter() - # Load data from HDF file - with h5py.File(datafilename, "r") as df: - print("Trigger time: ", df['ECEI'].attrs['TriggerTime']) - for ch_idx in range(192): - ch_v, ch_h = np.mod(ch_idx, 24), ch_idx // 24 - ch_str = f"/ECEI/ECEI_{dev}{(ch_v + 1):02d}{(ch_h + 1):02d}/Voltage" - - # Calculate the start-of-shot offset - self.offlev[ch_v, ch_h] = np.median(df[ch_str][idx_offset[0]:idx_offset[1]]) * 1e-4 - self.offstd[ch_v, ch_h] = df[ch_str][idx_offset[0]:idx_offset[1]].std() * 1e-4 - - tmp = df[ch_str][idx_crop[0]:idx_crop[1]] * 1e-4 - self.offlev[ch_v, ch_h] - - self.siglev[ch_v, ch_h] = np.median(tmp) - self.sigstd = tmp.std() - self.ecei_data[ch_v, ch_h, :] = tmp / tmp.mean() - 1.0 - - toc = time.perf_counter() - - print(f"Loading data took {(toc - tic):4.2f}s") - - self.tb = timebase(t_crop[0], t_crop[1], tb.f_sample) - - self.mark_bad_channels(verbose=True) - - - def mark_bad_channels(self, verbose=False): - """Mark bad channels. These are channels with either - * Low signal level: std(offset) / siglev > 0.3 - * Saturated signal data(bottom saturation): std(offset) < 0.001 - * Saturated offset data(top saturation): std(signal) < 0.001 - """ - - # Check for low signal level - ref = 100. * self.offstd / self.siglev - ref[self.siglev < 0.01] = 100 - - if verbose: - for item in np.argwhere(ref > 30.0): - print(f"LOW SIGNAL: channel({item[0] + 1:d},{item[1] + 1:d}): {ref[tuple(item)]*1e2:4.2f}") - self.bad_data[ref > 30.0] = True - - # Mark bottom saturated channels - self.bad_data[self.offstd < 1e-3] = True - if verbose: - for item in np.argwhere(self.offstd < 1e-3): - os = self.offstd[tuple(item)] - ol = self.offlev[tuple(item)] - print(f"SAT offset data channel ({item[0] + 1:d}, {item[1] + 1:d}) offstd = {os} offlevel = {ol}") - - # Mark top saturated channels - self.bad_data[self.sigstd < 1e-3] = True - if verbose: - for item in np.argwhere(self.sigstd < 1e-3): - os = self.offstd[tuple(item)] - ol = self.offlev[tuple(item)] - print(f"SAT signal data channel ({item[0] + 1:d}, {item[1] + 1:d}) offstd = {os} offlevel = {ol}") - class ecei_chunk(): """Class that represents a time-chunk of ECEI data""" @@ -362,11 +155,6 @@ def data(self): return self.data_ft - - - - - class ecei_channel_2d(channel_2d): """Represents an ECEI channel. The ECEI array has 24 horizontal channels and 8 vertical channels. @@ -959,4 +747,107 @@ def channel_position(ch, ecei_cfg): return (rpos, zpos, apos) + + +# class ecei_view(): +# """Defines the view of an ECEI. This extends ecei_channel to the entire diagnostic + +# Parameters: +# ----------- +# datafilename: string, filename to the HDF5 file +# tb: timebase - Timebase object for the raw voltages +# dev: device name +# t_offset: tuple (t_n0, t_n1) - Tuple that defines the time interval where a signal reference value is calculated. If None, +# raw values will be used. +# t_crop: tuple (t_c0, t_c1) - Defines the time interval where the data is cropped to. 
If None, data will not +# be cropped + + +# """ + +# def __init__(self, datafilename, tb, dev, t_offset=(-0.099, -0.089), t_crop=(1.0, 1.1), num_v=24, num_h=8): + +# # Number of vertical and horizontal channels +# self.num_v = num_v +# self.num_h = num_h +# # Infer number of samples in the cropped interval +# idx_offset = [tb.time_to_idx(t) for t in t_offset] +# if idx_crop is not None: +# idx_crop = [tb.time_to_idx(t) for t in t_crop] +# self.num_samples = idx_crop[1] - idx_crop[0] + +# # Use float32 since data is generated from 16bit integers +# self.ecei_data = np.zeros([self.num_v, self.num_h, self.num_samples], dtype=np.float32) +# # Marks data the we ignore for plotting etc. +# self.bad_data = np.zeros([self.num_v, self.num_h], dtype=bool) + +# # Offset level +# self.offlev = np.zeros([self.num_v, self.num_h], dtype=np.float32) +# # Offset standard deviation +# self.offstd = np.zeros([self.num_v, self.num_h], dtype=np.float32) +# # Signal level +# self.siglev = np.zeros([self.num_v, self.num_h], dtype=np.float32) +# # Signal standard deviation +# self.sigstd = np.zeros([self.num_v, self.num_h], dtype=np.float32) + +# tic = time.perf_counter() +# # Load data from HDF file +# with h5py.File(datafilename, "r") as df: +# print("Trigger time: ", df['ECEI'].attrs['TriggerTime']) +# for ch_idx in range(192): +# ch_v, ch_h = np.mod(ch_idx, 24), ch_idx // 24 +# ch_str = f"/ECEI/ECEI_{dev}{(ch_v + 1):02d}{(ch_h + 1):02d}/Voltage" + +# # Calculate the start-of-shot offset +# self.offlev[ch_v, ch_h] = np.median(df[ch_str][idx_offset[0]:idx_offset[1]]) * 1e-4 +# self.offstd[ch_v, ch_h] = df[ch_str][idx_offset[0]:idx_offset[1]].std() * 1e-4 + +# tmp = df[ch_str][idx_crop[0]:idx_crop[1]] * 1e-4 - self.offlev[ch_v, ch_h] + +# self.siglev[ch_v, ch_h] = np.median(tmp) +# self.sigstd = tmp.std() +# self.ecei_data[ch_v, ch_h, :] = tmp / tmp.mean() - 1.0 + +# toc = time.perf_counter() + +# print(f"Loading data took {(toc - tic):4.2f}s") + +# self.tb = timebase(t_crop[0], t_crop[1], tb.f_sample) + +# self.mark_bad_channels(verbose=True) + + +# def mark_bad_channels(self, verbose=False): +# """Mark bad channels. These are channels with either +# * Low signal level: std(offset) / siglev > 0.3 +# * Saturated signal data(bottom saturation): std(offset) < 0.001 +# * Saturated offset data(top saturation): std(signal) < 0.001 +# """ + +# # Check for low signal level +# ref = 100. 
* self.offstd / self.siglev +# ref[self.siglev < 0.01] = 100 + +# if verbose: +# for item in np.argwhere(ref > 30.0): +# print(f"LOW SIGNAL: channel({item[0] + 1:d},{item[1] + 1:d}): {ref[tuple(item)]*1e2:4.2f}") +# self.bad_data[ref > 30.0] = True + +# # Mark bottom saturated channels +# self.bad_data[self.offstd < 1e-3] = True +# if verbose: +# for item in np.argwhere(self.offstd < 1e-3): +# os = self.offstd[tuple(item)] +# ol = self.offlev[tuple(item)] +# print(f"SAT offset data channel ({item[0] + 1:d}, {item[1] + 1:d}) offstd = {os} offlevel = {ol}") + +# # Mark top saturated channels +# self.bad_data[self.sigstd < 1e-3] = True +# if verbose: +# for item in np.argwhere(self.sigstd < 1e-3): +# os = self.offstd[tuple(item)] +# ol = self.offlev[tuple(item)] +# print(f"SAT signal data channel ({item[0] + 1:d}, {item[1] + 1:d}) offstd = {os} offlevel = {ol}") + + # End of file kstar_ecei.py diff --git a/delta/data_models/timebase.py b/delta/data_models/timebase.py new file mode 100644 index 0000000..3e54b9c --- /dev/null +++ b/delta/data_models/timebase.py @@ -0,0 +1,111 @@ +# -*- Encoding: UTF-8 -*- + + +class timebase_streaming(): + """Defines a timebase for a data chunk in the stream""" + + def __init__(self, t_start: float, t_end: float, f_sample: float, samples_per_chunk: int, chunk_idx: int): + """ + Defines a timebase for a data chunk in the stream + + Parameters: + ----------- + t_start............: float, Start time of the data stream, in seconds + t_end..............: float, End time of the data stream, in seconds + f_sample...........: float, Sampling frequency, in Hz + samples_per_chunk..: int, Number of samples per chunk + chunk_idx..........: int, Index of the chunk that this timebase is used used + """ + assert(t_start < t_end) + assert(f_sample > 0) + assert(chunk_idx >= 0) + assert(chunk_idx < (t_end - t_start) * f_sample // samples_per_chunk) + + self.t_start = t_start + self.t_end = t_end + self.f_sample = f_sample + self.dt = 1. / self.f_sample + + # Total samples in the entire stream + self.total_num_samples = int((self.t_end - self.t_start) / self.dt) + self.chunk_idx = chunk_idx + # How many samples are in a chunk + self.samples_per_chunk = samples_per_chunk + + def time_to_idx(self, time: float): + """Generates an index suitable to index the current data chunk for + the time + + Parameters: + ----------- + time.......: float, Absolute time we wish to get an index for + """ + assert(time >= self.t_start) + assert(time <= self.t_end) + + # Generate the index the time would have in the entire time-series + tidx_absolute = round((time - self.t_start) / self.dt) + if tidx_absolute // self.samples_per_chunk != self.chunk_idx: + return None + tidx_rel = tidx_absolute % self.samples_per_chunk + + return tidx_rel + + + def gen_full_timebase(self): + """Generates an array of times associated with the samples in the current chunk""" + + return np.arange(self.chunk_idx * self.samples_per_chunk, + (self.chunk_idx + 1) * self.samples_per_chunk) * self.dt + self.t_start + + +class timebase(): + def __init__(self, t_start, t_end, f_sample): + """ + Defines a time base for ECEI channel data + Parameters + ---------- + t_trigger: float, + t_offset: float, + f_sample: float, sampling frequency of the ECEI data + """ + # Assume that 0 <= t_start < t_end + assert(t_end >= 0.0) + assert(t_start < t_end) + assert(f_sample >= 0.0) + self.t_start = t_start + self.t_end = t_end + self.f_sample = f_sample + self.dt = 1. 
/ self.f_sample + + self.num_samples = int((self.t_end - self.t_start) / self.dt) + + + def time_to_idx(self, t0): + """ + Given a timestamp, returns the index where the timebase is closest. + + Parameters: + t0: float - Time stamps that we want to calculate the index for + """ + # Ensure that the time we are looking for is inside the domain + assert(t0 >= self.t_start) + assert(t0 <= self.t_end) + + fulltime = self.get_fulltime() + idx = np.argmin(np.abs(fulltime - t0)) + + return idx + + + def get_fulltime(self): + """ + Returns an array with the full timebase + + """ + return np.arange(self.t_start, self.t_end, self.dt) + + + + +# End of file timebases.py \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index b760240..3a94d57 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -10,9 +10,10 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) +import os +import sys +sys.path.insert(0, os.path.abspath('../../delta')) +sys.setrecursionlimit(1500) # -- Project information ----------------------------------------------------- @@ -30,8 +31,7 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [ -] +extensions = ["sphinx.ext.autodoc", "sphinx_rtd_theme"] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -52,7 +52,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/docs/source/index.rst b/docs/source/index.rst index 9728e18..759e501 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -27,14 +27,26 @@ how to launch distributed anaylsis workflows. Guide -^^^^^ +===== .. toctree:: :maxdepth: 2 + :caption: Running Delta + + notes/configuring + notes/launching + + +Data models +=========== + +.. toctree:: + :maxdepth: 1 + :caption: Package reference + + modules/data_models - configuring - launching diff --git a/docs/source/modules/data_models.rst b/docs/source/modules/data_models.rst new file mode 100644 index 0000000..77f8d97 --- /dev/null +++ b/docs/source/modules/data_models.rst @@ -0,0 +1,24 @@ + +delta.data_models +================= + +.. contents:: Contents + :local: + +KSTAR ECEi data +--------------- +.. automodule:: data_models.kstar_ecei + :members: + +General 2d image data +--------------------- +.. automodule:: data_models.channels_2d + :members: + :special-members: __init__ + +Helper functions +---------------- +.. automodule:: data_models.helpers + :members: + + diff --git a/docs/source/configuring.rst b/docs/source/notes/configuring.rst similarity index 100% rename from docs/source/configuring.rst rename to docs/source/notes/configuring.rst diff --git a/docs/source/launching.rst b/docs/source/notes/launching.rst similarity index 100% rename from docs/source/launching.rst rename to docs/source/notes/launching.rst
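
A minimal usage sketch for the reworked channel abstractions in delta/data_models/channels_2d.py, based on the docstrings and doctests in the diff above. The import path, the 24x8 view geometry, and the "horizontal" ordering are assumptions for illustration; the numeric results quoted in the comments are the ones given by the doctests.

    # Sketch only: assumes delta/ is on sys.path (as docs/source/conf.py now arranges)
    # and a 24x8 view with horizontally consecutive channels.
    from data_models.channels_2d import channel_2d, channel_range, num_to_vh, vh_to_num

    # A single channel at vertical position 2, horizontal position 4 of a 24x8 array.
    ch = channel_2d(2, 4, 24, 8, "horizontal")
    print(ch.get_num())   # one-based linear channel number
    print(ch.get_idx())   # zero-based linear index into the flattened 2d array

    # Functors converting between (ch_v, ch_h) tuples and linear channel numbers.
    to_num = vh_to_num(24, 8, order="horizontal")
    print(to_num(2, 4))   # 12, per the doctest above
    to_vh = num_to_vh(24, 8, "horizontal")
    print(to_vh(12))      # (2, 4), if num_to_vh inverts vh_to_num for this ordering

    # The rectangular selection (vi, hi) = (2, 3) to (vf, hf) = (5, 5) from the
    # channel_range docstring, iterated channel by channel.
    ch_start = channel_2d(2, 3, 24, 8, "horizontal")
    ch_end = channel_2d(5, 5, 24, 8, "horizontal")
    crg = channel_range(ch_start, ch_end)
    print(crg.length())   # number of channels in the selection
    for c in crg:         # yields the channels between ch_start and ch_end
        pass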
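
Similarly, a hedged sketch of the streaming timebase that this diff moves into the new delta/data_models/timebase.py. The sampling parameters are invented for illustration, and the module is assumed importable as data_models.timebase; note that the relocated code calls np.arange, so the new file presumably also needs the "import numpy as np" it previously inherited from kstar_ecei.py.

    # Sketch only: parameters are illustrative, not taken from a real shot.
    from data_models.timebase import timebase_streaming

    # 5 s of data sampled at 500 kHz, streamed in chunks of 10,000 samples; chunk 3.
    tb = timebase_streaming(t_start=-0.1, t_end=4.9, f_sample=5e5,
                            samples_per_chunk=10_000, chunk_idx=3)

    tvals = tb.gen_full_timebase()   # times of the samples in this chunk
    idx = tb.time_to_idx(tvals[42])  # index within the chunk (42 here), or None if
                                     # the requested time falls in a different chunk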