From fb0ae3d4045623576c3af9b242c4c35f5f7f4d39 Mon Sep 17 00:00:00 2001
From: Brett Morris
Date: Sat, 24 Mar 2018 12:23:54 -0700
Subject: [PATCH] Translating wiki pages to docs pages, making tests pass

---
 docs/autoreduce.rst       |  74 ++++++++++
 docs/index.rst            |  25 ++--
 docs/installation.rst     |  20 +++
 docs/stepbystep.rst       | 300 ++++++++++++++++++++++++++++++++++++++
 pydis/conftest.py         |   3 +-
 pydis/pydis.py            |  28 +++-
 pydis/tests/test_pydis.py |   3 +
 pydis/wrappers.py         |  72 ++++-----
 8 files changed, 467 insertions(+), 58 deletions(-)
 create mode 100644 docs/autoreduce.rst
 create mode 100644 docs/installation.rst
 create mode 100644 docs/stepbystep.rst
 create mode 100644 pydis/tests/test_pydis.py

diff --git a/docs/autoreduce.rst b/docs/autoreduce.rst
new file mode 100644
index 0000000..f6d62e9
--- /dev/null
+++ b/docs/autoreduce.rst
@@ -0,0 +1,74 @@
.. include:: references.txt

.. _autoreduce:

***********************
The autoreduce function
***********************

The goal is to make *pyDIS* as easy as possible to use while observing. The
`~pydis.autoreduce` function makes this possible by wrapping all the components of
the standard DIS reduction and making basic assumptions. Steps in this
reduction include:

  1. combine bias and flat images
  2. map the wavelength solution from the HeNeAr image
  3. perform simple image reduction: Data = (Raw - Bias)/Flat
  4. trace the spectral aperture
  5. extract the spectrum
  6. measure the sky along the extracted spectrum
  7. apply the flux calibration
  8. write output files

Steps (3) - (7) are performed on every target frame and on the flux standard star.

Here is an example of a script you might run over and over throughout the night
to reduce all your data:

.. code-block:: python

    # if pyDIS isn't in the current working directory, add it to the path
    import sys
    sys.path.append('/path/to/pydis/')

    # must import, of course
    import pydis

    # reduce and extract the data with the fancy autoreduce script
    wave, flux, err = pydis.autoreduce('objlist.r.txt', 'flatlist.r.txt', 'biaslist.r.txt',
                                       'HeNeAr.0005r.fits', stdstar='spec50cal/fiege34.dat')

These are the minimum arguments you need to supply to make autoreduce work.
Some notes on each one:

  - reduce the RED and BLUE channels in DIS separately!
  - the object list should *first* include the flux standard star
    (not an RV standard), then all the targets to reduce
  - the keyword arg `stdstar` should be set to the name of the standard star from `spec50cal `_
  - the flat and bias lists should just be simple lists of all the corresponding
    images to combine
  - the HeNeAr lamp image is applied to *all* images, both standard and target
    frames. This does not produce the best wavelength stability, and observers
    will probably want to take HeNeAr frames after each target or regularly
    throughout the night. Supporting multiple HeNeAr images is a future goal!

*Many* keywords are available to customize the `~pydis.autoreduce` function for
most needs. Here is the full function signature with its default parameters.

.. 
code-block:: python + + def autoreduce(speclist, flatlist, biaslist, HeNeAr_file, + stdstar='', trace1=False, ntracesteps=15, + airmass_file='kpnoextinct.dat', + flat_mode='spline', flat_order=9, flat_response=True, + apwidth=8,skysep=3,skywidth=7, skydeg=0, + HeNeAr_prev=False, HeNeAr_interac=False, + HeNeAr_tol=20, HeNeAr_order=3, displayHeNeAr=False, + trim=True, write_reduced=True, + display=True, display_final=True): + + +Note: even more args and kwargs are available for the individual functions that +autoreduce calls! Also, this might be slightly out of date. Always check out the +API for `~pydis.autoreduce` for the latest features! \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 3907898..b48bc8c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,18 +23,19 @@ DCT. **If you use PyDIS, please send me feedback!** Some background motivation on why I made this package is `given here `_ +Contents +======== -Examples --------- - -See the `examples page ` on -the Wiki for a few worked examples of reducing DIS data, or the step-by-step -`manual reduction guide `_ -for a detailed tutorial on reducing 1-d spectroscopy data with *pyDIS*. +.. toctree:: + :maxdepth: 2 + installation + stepbystep + autoreduce + api Motivation ----------- +++++++++++ Really slick tools exist for on-the-fly photometry analysis. However, no turn-key, easy to use spectra toolkit for Python (without IRAF or PyRAF) @@ -58,17 +59,11 @@ So far *pyDIS* can do a rough job of all the reduction tasks for single point so We are seeking more data to test it against, to help refine the solution and find bugs. How to Help ------------ ++++++++++++ - Check out the Issues page if you think you can help code, or want to requst a feature! - If you have some data already reduced in IRAF that you trust and would be willing to share, let us know! -.. toctree:: - :maxdepth: 1 - :caption: Contents: - - api - Indices and tables ------------------ diff --git a/docs/installation.rst b/docs/installation.rst new file mode 100644 index 0000000..632aed3 --- /dev/null +++ b/docs/installation.rst @@ -0,0 +1,20 @@ + +.. _installation: + +**************** +Installing pydis +**************** + +Clone the git repository and change directories into the repository:: + + git clone https://github.com/TheAstroFactory/pydis.git + cd pydis + +and run the following command to install pydis:: + + python setup.py install + +To test the installation, run the tests:: + + python setup.py test + diff --git a/docs/stepbystep.rst b/docs/stepbystep.rst new file mode 100644 index 0000000..dc5a8ae --- /dev/null +++ b/docs/stepbystep.rst @@ -0,0 +1,300 @@ +.. include:: references.txt + +.. _stepbystep: + +*************** +Reduction Guide +*************** + +Here is a step-by-step guide to reducing some DIS data from start to finish +with *pyDIS*. Note, this entire process is reproduced in the +`autoreduce `_ helper function. In this simple example we'll assume +you have all the needed calibration files in the working directory, that you are +on a linux/mac for a couple shell commands, and that there is only 1 science +frame to analyze. We will only do the RED chip in this case. The procedure is +identical for the BLUE chip, but due to different Signal/Noise or color of the +source some parameters may need tweaking. + + +Organizing the data to reduce ++++++++++++++++++++++++++++++ + +First we need to create some lists to group the calibration files (flats, biases). +Note, darks are currently not supported explicitly. 
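Each of these lists is just a plain text file with one FITS filename per line,
in the order the frames should be combined. For example, a hypothetical
`rbias.lis` (the filenames here are only illustrative) might contain::

    bias.0001r.fits
    bias.0002r.fits
    bias.0003r.fits
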
In your terminal you might say::

    ls flat.0*r.fits > rflat.lis
    ls bias.0*r.fits > rbias.lis


Make master calibration files
+++++++++++++++++++++++++++++

We'll switch to Python from here on. You might want to save these commands in a
script that you can run again. As in IRAF, we must combine the biases and
flats that will be applied to the science frame:

.. code-block:: python

    import pydis
    bias = pydis.biascombine('rbias.lis', trim=True)
    flat, fmask_out = pydis.flatcombine('rflat.lis', bias, trim=True,
                                        mode='spline', response=True, display=True)

Now there should be newly created files named **FLAT.fits** and **BIAS.fits**
in your directory. These default filenames can be changed for both functions
using the `output='FILE.fits'` keyword. Note all the extra keyword arguments
(aka "kwargs") for `flatcombine`, which are used for removing the flatfield
lamp's spectrum (called RESPONSE in IRAF). If you're worried this is not being
removed correctly, be sure to set `display=True` and check!

The output from `biascombine` is simply a 2-d `~numpy.ndarray` that contains the
combined bias. The outputs from `flatcombine` are 1) the combined flat as a
2-d numpy array and 2) the "flat mask", a 1-d array that defines the illuminated
portion of the chip along the y-axis (spatial dimension). You will pass this mask
to several functions later on. It is fine to *not* pass it to subsequent
functions; just be consistent about whether you use it. I suggest you do use it.

Next, let's define the wavelength solution that will be used for our science
image. This is the most tedious manual step. We'll mimic IRAF and do the
"identify" step manually. While *pyDIS* can do this automatically
(set `interac=False`), the solution is often too crude for science use. If you
have previously done the manual identify and picked out lines, you can skip the
identify step by setting `previous=True`. We'll start with a rough guess at the
`fit_order` parameter, but if `interac=True` then we'll be prompted to adjust
this interactively:

.. code-block:: python

    HeNeAr_file = 'HeNeAr.0030r.fits'
    wfit = pydis.HeNeAr_fit(HeNeAr_file, trim=True, fmask=fmask_out,
                            interac=True, mode='poly', fit_order=5)

*pyDIS* will pull the very rough wavelength solution out of the header, take a
slice through the image, and show you the 1-d emission line spectrum. Click on
prominent peaks in the image (sample arc lamp spectra for DIS are provided by
APO `here `_), then in the
Python terminal type the wavelength of the line you identified and press enter.
Each line is fit with a Gaussian to find the exact center. Repeat
this until you've identified as many lines as you can. If you mess one up,
click on it and enter a "d" for the wavelength.

After the lines are identified, you need to fit a smooth function between them.
The polynomial fit and residual will be shown. Close this window and then, in
the terminal, answer the question: should the polynomial order be changed (if
so, enter an integer) or is it OK (if so, enter a "d" for done)?

*pyDIS* will then trace these lines along the y-axis, producing a full 2-d
wavelength solution for the image. Be sure to keep this solution `wfit`
around, as we'll need to apply it to every image we want a spectrum from
(science and flux standard).
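As mentioned above, once the interactive identification has been done you do
not need to repeat it on later runs. The wrappers in this package re-use the
line list via a ``<HeNeAr file>.lines`` file passed to the `previous` keyword;
a sketch along those lines (this assumes such a file was saved by the earlier
interactive run) would look like:

.. code-block:: python

    # a sketch: re-use line identifications from a previous interactive run.
    # The '.lines' filename convention follows pydis.wrappers; adjust as needed.
    wfit = pydis.HeNeAr_fit(HeNeAr_file, trim=True, fmask=fmask_out,
                            interac=False, previous=HeNeAr_file + '.lines',
                            mode='poly', fit_order=5)
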
Reduce the science image
++++++++++++++++++++++++

This is the "reduction" step, where we actually remove the bias and flat from
the science image and divide by the exposure time. The result is an image in
units of counts/second. Almost everything is stored as a `~numpy.ndarray`, so
performing simple math on the images is trivial:

.. code-block:: python

    # the science image file name
    spec = 'object.0035r.fits'

    # open the image, get the data and a couple of important numbers
    img = pydis.OpenImg(spec, trim=True)
    raw = img.data
    exptime = img.exptime
    airmass = img.airmass

    # remove bias and flat, divide by exptime
    data = ((raw - bias) / flat) / exptime

You should also read in and reduce the flux standard star the same way:

.. code-block:: python

    # the flux standard file name
    stdspec = 'Feige34.0065r.fits'

    stdimg = pydis.OpenImg(stdspec, trim=True)
    stdraw = stdimg.data
    stdexptime = stdimg.exptime
    stdairmass = stdimg.airmass

    stddata = ((stdraw - bias) / flat) / stdexptime

If we wanted to look at the science image in Python, you might do this:

.. code-block:: python

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm

    plt.figure()
    plt.imshow(np.log10(data), origin='lower', aspect='auto', cmap=cm.Greys_r)
    plt.show()


Find and trace the spectrum
+++++++++++++++++++++++++++

Examining the image above, you should see the bright horizontal spectrum across
the chip. We want to quantify this shape, tracing the flux across the chip along
the wavelength dimension. If the target is bright and the spectrum is not too
curved, this should be pretty simple! If there are multiple objects on the slit
(multiple bright horizontal streaks in the image) then you want to select the
target manually (`interac=True`).

Set `display=True` to see the trace over-plotted on the image. Make sure it
doesn't wander. If it's not accurate enough, adjust the number of steps. For
low signal-to-noise images, sometimes you have to use small values like
`nsteps=7` to trace the spectrum.

.. code-block:: python

    # trace the science image
    trace = pydis.ap_trace(data, fmask=fmask_out, nsteps=50, interac=False, display=True)

    # trace the flux standard image
    stdtrace = pydis.ap_trace(stddata, fmask=fmask_out, nsteps=50, interac=False, display=True)


Now we extract the observed spectrum along this trace, for both the reduced
object and the flux standard star. The result is a 1-d spectrum made by summing
up the flux in each column in a range of +/- the aperture width. The sky is
determined by fitting a polynomial along the column in two regions near the
trace. Important consideration: if you choose a large sky region, or separate
it a lot from the aperture, the sky will not be a good fit. This is because
lines of constant wavelength (as the HeNeAr lines show) are bent and not
perfectly vertical on the chip. Thus it is good to choose a small sky region.
The `skydeg` parameter is the polynomial order to fit between the sky regions
in each column. The default is `skydeg=0`, which is simply a median. A sky
value at each pixel along the trace is returned. This routine also computes a
flux error at each pixel along the trace.

.. code-block:: python

    ext_spec, sky, fluxerr = pydis.ap_extract(data, trace, apwidth=5, skysep=1,
                                              skywidth=7, skydeg=0)
    ext_std, stdsky, stderr = pydis.ap_extract(stddata, stdtrace, apwidth=5,
                                               skysep=1, skywidth=7, skydeg=0)

    # subtract the sky from the 1-d spectrum
    flux_red = (ext_spec - sky)    # the reduced object
    flux_std = (ext_std - stdsky)  # the reduced flux standard

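Before calibrating anything, a quick look at the extraction itself can catch
problems early. This is not a pyDIS step, just a plain matplotlib sketch that
plots the sky-subtracted object counts and the sky level against pixel number
along the trace:

.. code-block:: python

    # a quick sanity check: the extracted object should sit well above the
    # sky level, and the sky curve should be smooth, without chunks of the
    # object spilling into it
    plt.figure()
    plt.plot(flux_red, label='object - sky')
    plt.plot(sky, label='sky')
    plt.xlabel('pixel along the trace (dispersion axis)')
    plt.ylabel('counts / s')
    plt.legend()
    plt.show()
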
You could make sure these values for the aperture and sky regions were sensible
by plotting the image with the lines overlaid:

.. code-block:: python

    # the same values passed to ap_extract above
    apwidth, skysep, skywidth = 5, 1, 7

    xbins = np.arange(data.shape[1])

    plt.figure()
    plt.imshow(np.log10(data), origin='lower', aspect='auto', cmap=cm.Greys_r)

    # the trace
    plt.plot(xbins, trace, 'b', lw=1)

    # the aperture
    plt.plot(xbins, trace - apwidth, 'r', lw=1)
    plt.plot(xbins, trace + apwidth, 'r', lw=1)

    # the sky regions
    plt.plot(xbins, trace - apwidth - skysep, 'g', lw=1)
    plt.plot(xbins, trace - apwidth - skysep - skywidth, 'g', lw=1)
    plt.plot(xbins, trace + apwidth + skysep, 'g', lw=1)
    plt.plot(xbins, trace + apwidth + skysep + skywidth, 'g', lw=1)

    plt.title('(with trace, aperture, and sky regions)')
    plt.show()


Calibrate the spectrum
++++++++++++++++++++++

We have a raw 1-d spectrum now, with flux measured at each pixel along the
x-axis of the image along the trace. However, we want to calibrate the x-axis
using the wavelength solution we created before. You probably want to use
`mode='poly'`, which is the default and thus optional to write.

The spectrum itself is currently in units of counts/second, and we need to apply
a wavelength-dependent correction for the observed airmass. We'll use the
airmass file for APO included with *pyDIS*. Note, this is different from the
file that APO provided prior to April 2015 (an error in this file was noticed
during the development of *pyDIS*).

.. code-block:: python

    # map the wavelength using the HeNeAr fit
    wfinal = pydis.mapwavelength(trace, wfit, mode='poly')
    wfinalstd = pydis.mapwavelength(stdtrace, wfit, mode='poly')

    # correct the object and flux std for airmass extinction
    flux_red_x = pydis.AirmassCor(wfinal, flux_red, airmass,
                                  airmass_file='apoextinct.dat')
    flux_std_x = pydis.AirmassCor(wfinalstd, flux_std, stdairmass,
                                  airmass_file='apoextinct.dat')

Flux calibration
++++++++++++++++

Now that we have the actual wavelength mapped to both the science target and the
flux standard, and we've corrected them for their respective airmass, it's time
for the final step: flux calibration. This is done by comparing the flux
standard observation to a library of standard stars. For *pyDIS* we have
hard-coded the IRAF library "spec50cal". Any other standard could be put in this
directory and used, or a different library could be used with a little hacking
of the code if you're brave or desperate. `DefFluxCal` will try to avoid Balmer
lines, but at present does not have a sophisticated interactive mode where you
can delete bad points. Set `display=True` in `DefFluxCal` to check that the fit
looks smooth and does not blow up at the edges.

The sensitivity function is computed for the standard star, and has units of
(erg/s/cm2/A) / (counts/s/cm2/A). The final step is to apply this sensfunc to
the science target, which simply resamples the sensfunc onto the exact
wavelengths of the target and then multiplies the observed 1-d spectrum by the
sensitivity function. Note *pyDIS* works in flux units, not the magnitude units
used by IRAF. As a reality check, you can also apply the sensfunc back to the
standard star spectrum to make sure it looks right!

.. code-block:: python

    sensfunc = pydis.DefFluxCal(wfinalstd, flux_std_x, mode='spline',
                                stdstar='spec50cal/feige34.dat')

    # final step in reduction, apply sensfunc
    ffinal, efinal = pydis.ApplyFluxCal(wfinal, flux_red_x, fluxerr,
                                        wfinalstd, sensfunc)

Our final arrays for the science target are now `(wfinal, ffinal, efinal)`:
the wavelength, flux, and flux error. Congratulations, you have fully reduced
one spectrum in Python! 
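If you want to keep the result around without re-running the reduction, one
option (not a pyDIS function, just a plain NumPy sketch with an arbitrary
output filename) is to write the three arrays to a text file:

.. code-block:: python

    # a minimal sketch for saving the reduced spectrum as whitespace-separated
    # columns of wavelength, flux, and flux error
    np.savetxt(spec + '.reduced.txt',
               np.column_stack((wfinal, ffinal, efinal)),
               header='wavelength  flux  flux_error')
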
If you'd like to view the result, you could do this: + +.. code-block:: python + + plt.figure() + # plt.plot(wfinal, ffinal) + plt.errorbar(wfinal, ffinal, yerr=efinal) + plt.xlabel('Wavelength') + plt.ylabel('Flux') + plt.title(spec) + #plot within percentile limits + plt.ylim( (np.percentile(ffinal,2), + np.percentile(ffinal,98)) ) + plt.show() + +Closing thoughts... ++++++++++++++++++++ + +This procedure replicates (sometimes poorly) IRAF functions. We are also +skipping a couple lesser-used functions, such as the illumination correction. +You should be plotting things every step of the way, always making sure it looks +sensible. Crazy things can happen if you're not careful... + +The reduction script can be applied to many science targets from the same night, +using the same calibration files and flux standards. In fact, this exact +procedure is carried out in `autoreduce`. + +I would not recommend using calibrations from another night if possible. To get +better wavelength calibrations, use a HeNeAr image taken right before or after +the science target, or even a different HeNeAr for each science image. Future +helper-scripts in *pyDIS* will accommodate automatic solutions of multiple +HeNeAr files. \ No newline at end of file diff --git a/pydis/conftest.py b/pydis/conftest.py index ebab8a1..fa813cb 100644 --- a/pydis/conftest.py +++ b/pydis/conftest.py @@ -11,7 +11,8 @@ # automatically made available when Astropy is installed. This means it's # not necessary to import them here, but we still need to import global # variables that are used for configuration. - from astropy.tests.plugins.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS + # from astropy.tests.plugins.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS + pass from astropy.tests.helper import enable_deprecations_as_exceptions diff --git a/pydis/pydis.py b/pydis/pydis.py index ae6d1a2..40dd2b8 100644 --- a/pydis/pydis.py +++ b/pydis/pydis.py @@ -246,7 +246,9 @@ def overscanbias(img, cols=(1,), rows=(1,)): ''' Generate a bias frame based on overscan region. Can work with rows or columns, pass either kwarg the limits: - >>> bias = overscanbias(imagedata, cols=(1024,1050)) + + >>> bias = overscanbias(imagedata, cols=(1024,1050)) # doctest: +SKIP + ''' bias = np.zeros_like(img) if len(cols) > 1: @@ -407,8 +409,10 @@ def ap_trace(img, fmask=(1,), nsteps=20, interac=False, img : 2d numpy array This is the image, stored as a normal numpy array. Can be read in using astropy.io.fits like so: - >>> hdu = fits.open('file.fits') - >>> img = hdu[0].data + + >>> hdu = fits.open('file.fits') # doctest: +SKIP + >>> img = hdu[0].data # doctest: +SKIP + nsteps : int, optional Keyword, number of bins in X direction to chop image into. Use fewer bins if ap_trace is having difficulty, such as with faint @@ -828,8 +832,10 @@ def ap_extract(img, trace, apwidth=8, skysep=3, skywidth=7, skydeg=0, img : 2d numpy array This is the image, stored as a normal numpy array. Can be read in using astropy.io.fits like so: - >>> hdu = fits.open('file.fits') - >>> img = hdu[0].data + + >>> hdu = fits.open('file.fits') # doctest: +SKIP + >>> img = hdu[0].data # doctest: +SKIP + trace : 1-d array The spatial positions (Y axis) corresponding to the center of the trace for every wavelength (X axis), as returned from ap_trace @@ -1398,8 +1404,8 @@ def normalize(wave, flux, mode='poly', order=5): Return a flattened, normalized spectrum. 
A model spectrum is made of the continuum by fitting either a polynomial or spline to the data, and then the data is normalized with the equation: - >>> norm = (flux - model) / model + >>> norm = (flux - model) / model # doctest: +SKIP Parameters ---------- @@ -1489,21 +1495,27 @@ def DefFluxCal(obj_wave, obj_flux, stdstar='', mode='spline', polydeg=9, ---------- obj_wave : 1-d array The 1-d wavelength array of the spectrum + obj_flux : 1-d array The 1-d flux array of the spectrum + stdstar : str Name of the standard star file to use for flux calibration. You must give the subdirectory and file name, for example: - >>> sensfunc = DefFluxCal(wave, flux, mode='spline', - >>> stdstar='spec50cal/feige34.dat') + + >>> sensfunc = DefFluxCal(wave, flux, mode='spline', stdstar='spec50cal/feige34.dat') # doctest: +SKIP + If no standard is set, or an invalid standard is selected, will return array of 1's and a warning. A list of all available subdirectories and objects is available on the wiki, or look in pydis/resources/onedstds/ + mode : str, optional either "linear", "spline", or "poly" (Default is spline) + polydeg : float, optional set the order of the polynomial to fit through (Default is 9) + display : bool, optional If True, plot the down-sampled sensfunc and fit to screen (Default is False) diff --git a/pydis/tests/test_pydis.py b/pydis/tests/test_pydis.py new file mode 100644 index 0000000..2fd68e1 --- /dev/null +++ b/pydis/tests/test_pydis.py @@ -0,0 +1,3 @@ + +def test_placebo(): + assert 1 == 1 \ No newline at end of file diff --git a/pydis/wrappers.py b/pydis/wrappers.py index 5a6fde8..4c1f67a 100644 --- a/pydis/wrappers.py +++ b/pydis/wrappers.py @@ -11,6 +11,10 @@ import matplotlib.cm as cm import datetime +from .pydis import (DefFluxCal, HeNeAr_fit, flatcombine, biascombine, OpenImg, + ap_trace, ap_extract, mapwavelength, AirmassCor, + ApplyFluxCal) + __all__ = ['autoreduce', 'CoAddFinal', 'ReduceCoAdd', 'ReduceTwo'] @@ -134,12 +138,12 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', """ if (len(biaslist) > 0): - bias = pydis.biascombine(biaslist, trim=trim, silent=silent) + bias = biascombine(biaslist, trim=trim, silent=silent) else: bias = 0.0 if (len(biaslist) > 0) and (len(flatlist) > 0): - flat,fmask_out = pydis.flatcombine(flatlist, bias, trim=trim, + flat,fmask_out = flatcombine(flatlist, bias, trim=trim, mode=flat_mode,display=False, flat_poly=flat_order, response=flat_response) else: @@ -154,7 +158,7 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', # do the HeNeAr mapping first, must apply to all science frames if (len(HeNeAr_file) > 0): - wfit = pydis.HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, + wfit = HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, interac=HeNeAr_interac, previous=prev, mode='poly', display=display_HeNeAr, tol=HeNeAr_tol, fit_order=HeNeAr_order, second_pass=HeNeAr_second_pass) @@ -171,7 +175,7 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', print("> Processing file "+spec+" ["+str(i)+"/"+str(len(specfile))+"]") # raw, exptime, airmass, wapprox = pydis.OpenImg(spec, trim=trim) - img = pydis.OpenImg(spec, trim=trim) + img = OpenImg(spec, trim=trim) raw = img.data exptime = img.exptime airmass = img.airmass @@ -189,11 +193,11 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', # with reduced data, trace the aperture if (i==0) or (trace1 is False): - trace = pydis.ap_trace(data,fmask=fmask_out, nsteps=ntracesteps, + trace = ap_trace(data,fmask=fmask_out, 
nsteps=ntracesteps, recenter=trace_recenter, interac=trace_interac) # extract the spectrum, measure sky values along trace, get flux errors - ext_spec, sky, fluxerr = pydis.ap_extract(data, trace, apwidth=apwidth, + ext_spec, sky, fluxerr = ap_extract(data, trace, apwidth=apwidth, skysep=skysep,skywidth=skywidth, skydeg=skydeg,coaddN=1) @@ -214,7 +218,7 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', if (len(HeNeAr_file) > 0): - wfinal = pydis.mapwavelength(trace, wfit, mode='poly') + wfinal = mapwavelength(trace, wfit, mode='poly') else: # if no line lamp given, use approx from the img header wfinal = wapprox @@ -228,13 +232,13 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', flux_red = (ext_spec - sky) # now correct the spectrum for airmass extinction - flux_red_x = pydis.AirmassCor(wfinal, flux_red, airmass, + flux_red_x = AirmassCor(wfinal, flux_red, airmass, airmass_file=airmass_file) # now get flux std IF stdstar is defined # !! assume first object in list is std star !! if (len(stdstar) > 0) and (i==0): - sens_flux = pydis.DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, + sens_flux = DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, mode=std_mode, polydeg=std_order, display=display_std) sens_wave = wfinal @@ -244,7 +248,7 @@ def autoreduce(speclist, flatlist='', biaslist='', HeNeAr_file='', sens_wave = wfinal # final step in reduction, apply sensfunc - ffinal,efinal = pydis.ApplyFluxCal(wfinal, flux_red_x, fluxerr, + ffinal,efinal = ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) @@ -308,14 +312,14 @@ def ReduceCoAdd(speclist, flatlist, biaslist, HeNeAr_file, """ #-- the basic crap, used for all frames - bias = pydis.biascombine(biaslist, trim=trim) - flat,fmask_out = pydis.flatcombine(flatlist, bias, trim=trim, mode=flat_mode,display=False, + bias = biascombine(biaslist, trim=trim) + flat,fmask_out = flatcombine(flatlist, bias, trim=trim, mode=flat_mode,display=False, flat_poly=flat_order, response=flat_response) if HeNeAr_prev is False: prev = '' else: prev = HeNeAr_file+'.lines' - wfit = pydis.HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, + wfit = HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, interac=HeNeAr_interac, previous=prev, mode='poly', display=displayHeNeAr, tol=HeNeAr_tol, fit_order=HeNeAr_order, second_pass=HeNeAr_second_pass) @@ -324,7 +328,7 @@ def ReduceCoAdd(speclist, flatlist, biaslist, HeNeAr_file, specfile = np.array([np.genfromtxt(speclist, dtype=np.str)]).flatten() spec = specfile[0] # raw, exptime, airmass, wapprox = pydis.OpenImg(spec, trim=trim) - img = pydis.OpenImg(spec, trim=trim) + img = OpenImg(spec, trim=trim) raw = img.data exptime = img.exptime airmass = img.airmass @@ -332,24 +336,24 @@ def ReduceCoAdd(speclist, flatlist, biaslist, HeNeAr_file, data = ((raw - bias) / flat) / exptime - trace = pydis.ap_trace(data,fmask=fmask_out, nsteps=ntracesteps) + trace = ap_trace(data,fmask=fmask_out, nsteps=ntracesteps) # extract the spectrum, measure sky values along trace, get flux errors - ext_spec, sky, fluxerr = pydis.ap_extract(data, trace, apwidth=apwidth, + ext_spec, sky, fluxerr = ap_extract(data, trace, apwidth=apwidth, skysep=skysep,skywidth=skywidth, skydeg=skydeg,coaddN=1) xbins = np.arange(data.shape[1]) - wfinal = pydis.mapwavelength(trace, wfit, mode='poly') + wfinal = mapwavelength(trace, wfit, mode='poly') flux_red_x = (ext_spec - sky) - sens_flux = pydis.DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, + sens_flux = DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, 
mode='spline',polydeg=12) sens_wave = wfinal - ffinal,efinal = pydis.ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) + ffinal,efinal = ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) #-- the target star exposures, stack and proceed for i in range(1,len(specfile)): spec = specfile[i] # raw, exptime, airmass, wapprox = pydis.OpenImg(spec, trim=trim) - img = pydis.OpenImg(spec, trim=trim) + img = OpenImg(spec, trim=trim) raw = img.data exptime = img.exptime airmass = img.airmass @@ -361,14 +365,14 @@ def ReduceCoAdd(speclist, flatlist, biaslist, HeNeAr_file, all_data = np.dstack( (all_data, data_i)) data = np.median(all_data, axis=2) # extract the spectrum, measure sky values along trace, get flux errors - ext_spec, sky, fluxerr = pydis.ap_extract(data, trace, apwidth=apwidth, + ext_spec, sky, fluxerr = ap_extract(data, trace, apwidth=apwidth, skysep=skysep,skywidth=skywidth, skydeg=skydeg,coaddN=len(specfile)) xbins = np.arange(data.shape[1]) - wfinal = pydis.mapwavelength(trace, wfit, mode='poly') + wfinal = mapwavelength(trace, wfit, mode='poly') flux_red_x = (ext_spec - sky) - ffinal,efinal = pydis.ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) + ffinal,efinal = ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) if display is True: plt.figure() @@ -427,12 +431,12 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', if (len(biaslist) > 0): - bias = pydis.biascombine(biaslist, trim=trim) + bias = biascombine(biaslist, trim=trim) else: bias = 0.0 if (len(biaslist) > 0) and (len(flatlist) > 0): - flat,fmask_out = pydis.flatcombine(flatlist, bias, trim=trim, + flat,fmask_out = flatcombine(flatlist, bias, trim=trim, mode=flat_mode,display=False, flat_poly=flat_order, response=flat_response) else: @@ -446,7 +450,7 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', # do the HeNeAr mapping first, must apply to all science frames if (len(HeNeAr_file) > 0): - wfit = pydis.HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, + wfit = HeNeAr_fit(HeNeAr_file, trim=trim, fmask=fmask_out, interac=HeNeAr_interac, previous=prev,mode='poly', display=display_HeNeAr, tol=HeNeAr_tol, fit_order=HeNeAr_order) @@ -460,7 +464,7 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', spec = specfile[i] print("> Processing file "+spec+" ["+str(i)+"/"+str(len(specfile))+"]") # raw, exptime, airmass, wapprox = pydis.OpenImg(spec, trim=trim) - img = pydis.OpenImg(spec, trim=trim) + img = OpenImg(spec, trim=trim) raw = img.data exptime = img.exptime airmass = img.airmass @@ -476,9 +480,9 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', plt.show() # with reduced data, trace BOTH apertures - trace_1 = pydis.ap_trace(data,fmask=fmask_out, nsteps=ntracesteps, + trace_1 = ap_trace(data,fmask=fmask_out, nsteps=ntracesteps, recenter=trace_recenter, interac=True) - trace_2 = pydis.ap_trace(data,fmask=fmask_out, nsteps=ntracesteps, + trace_2 = ap_trace(data,fmask=fmask_out, nsteps=ntracesteps, recenter=trace_recenter, interac=True) @@ -513,12 +517,12 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', t_indx = t_indx + 1 # extract the spectrum, measure sky values along trace, get flux errors - ext_spec, sky, fluxerr = pydis.ap_extract(data, trace, apwidth=apwidth, + ext_spec, sky, fluxerr = ap_extract(data, trace, apwidth=apwidth, skysep=skysep,skywidth=skywidth, skydeg=skydeg,coaddN=1) if (len(HeNeAr_file) > 0): - wfinal = pydis.mapwavelength(trace, wfit, mode='poly') + wfinal = 
mapwavelength(trace, wfit, mode='poly') else: # if no line lamp given, use approx from the img header wfinal = wapprox @@ -528,13 +532,13 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', flux_red = (ext_spec - sky) # now correct the spectrum for airmass extinction - flux_red_x = pydis.AirmassCor(wfinal, flux_red, airmass, + flux_red_x = AirmassCor(wfinal, flux_red, airmass, airmass_file=airmass_file) # now get flux std IF stdstar is defined # !! assume first object in list is std star !! if (len(stdstar) > 0) and (i==0): - sens_flux = pydis.DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, + sens_flux = DefFluxCal(wfinal, flux_red_x, stdstar=stdstar, mode=std_mode, polydeg=std_order, display=display_std) sens_wave = wfinal @@ -544,7 +548,7 @@ def ReduceTwo(speclist, flatlist='', biaslist='', HeNeAr_file='', sens_wave = wfinal # final step in reduction, apply sensfunc - ffinal,efinal = pydis.ApplyFluxCal(wfinal, flux_red_x, fluxerr, + ffinal,efinal = ApplyFluxCal(wfinal, flux_red_x, fluxerr, sens_wave, sens_flux) if write_reduced is True: