Merge branch 'devel' into ms_readEER
oierlauzi committed Apr 16, 2024
2 parents 7bd4d7c + 7ae40f1 commit dafbd82
Showing 11 changed files with 166 additions and 274 deletions.
1 change: 0 additions & 1 deletion xmipp3/protocols.conf
@@ -99,7 +99,6 @@ Protocols SPA = [
{"tag": "protocol_group", "text": "Classify", "openItem": "False", "children": []},
{"tag": "protocol_group", "text": "Refine", "openItem": "False", "children": [
{"tag": "protocol", "value": "XmippProtReconstructHighRes", "text": "default"},
{"tag": "protocol", "value": "XmippProtDeepGlobalAssignment", "text": "default"},
{"tag": "protocol", "value": "XmippProtProjMatch", "text": "default"},
{"tag": "protocol", "value": "XmippProtLocalCTF", "text": "default"},
{"tag": "section", "text": "more", "openItem": "False", "children": []}
17 changes: 8 additions & 9 deletions xmipp3/protocols/protocol_core_analysis.py
@@ -30,8 +30,7 @@


import pyworkflow.protocol.params as param
import pyworkflow.protocol.constants as const
from pyworkflow.utils.path import cleanPath, makePath
from pyworkflow.utils.path import makePath

import pwem.emlib.metadata as md
from pwem.protocols import ProtClassify2D
@@ -91,11 +90,11 @@ def analyzeCore(self):
inputMdName = join(fnLevel, 'level_classes.xmd')
writeSetOfClasses2D(self.inputClasses.get(), inputMdName, writeParticles=True)

args = " --dir %s --root level --computeCore %f %f"%(self._getExtraPath(),
self.thZscore, self.thPCAZscore)
args = " --dir %s --root level --computeCore %f %f" % (self._getExtraPath(),
self.thZscore, self.thPCAZscore)
self.runJob('xmipp_classify_CL2D_core_analysis', args)
self.runJob("xmipp_classify_evaluate_classes", "-i %s"%\
self._getExtraPath(join("level_00","level_classes_core.xmd")), numberOfMpi=1)
self._getExtraPath(join("level_00", "level_classes_core.xmd")), numberOfMpi=1)

#--------------------------- STEPS functions -------------------------------
def _defineFileNames(self):
@@ -104,8 +103,8 @@ def _defineFileNames(self):
myDict = {
'final_classes': self._getPath('classes2D%(sub)s.sqlite'),
'output_particles': self._getExtraPath('images.xmd'),
'level_classes' : self.levelPath + 'level_classes%(sub)s.xmd',
'level_images' : self.levelPath + 'level_images%(sub)s.xmd',
'level_classes': self.levelPath + 'level_classes%(sub)s.xmd',
'level_images': self.levelPath + 'level_images%(sub)s.xmd',
'classes_scipion': (self.levelPath + 'classes_scipion_level_'
'%(level)02d%(sub)s.sqlite'),
}
@@ -115,7 +114,7 @@ def createOutputStep(self):
""" Store the SetOfClasses2D object
resulting from the protocol execution.
"""
inputParticles = self.inputClasses.get().getImages()
inputParticles = self.inputClasses.get().getImagesPointer()
level = self._getLastLevel()
subset = CLASSES_CORE

@@ -143,7 +142,7 @@ def _methods(self):
strline ='We calculated the class cores %s. [Sorzano2014]' % self.getObjectTag('outputClasses_core')
return [strline]

#--------------------------- UTILS functions -------------------------------
# --------------------------- UTILS functions -------------------------------
def _updateParticle(self, item, row):
item.setClassId(row.getValue(md.MDL_REF))
item.setTransform(rowToAlignment(row, ALIGN_2D))
90 changes: 38 additions & 52 deletions xmipp3/protocols/protocol_deep_center.py
@@ -29,6 +29,7 @@
IntParam, BooleanParam, GPU_LIST)
from pyworkflow.protocol.constants import LEVEL_ADVANCED
from pyworkflow.utils import Message
from pyworkflow.utils.path import createLink
from pwem.protocols import ProtAlign2D
from pwem.objects import String
from xmipp3.convert import readSetOfParticles, writeSetOfParticles
@@ -39,11 +40,11 @@
from pyworkflow import BETA, UPDATED, NEW, PROD

class XmippProtDeepCenter(ProtAlign2D, xmipp3.XmippProtocol):
"""Center a set of particles with a neural network."""
_label = 'deep center'
_devStatus = BETA
"""Center a set of particles in 2D using a neural network. The particles remain the same, but their alignment
includes an approximate shift to place them in the center."""
_lastUpdateVersion = VERSION_3_0
_conda_env = 'xmipp_DLTK_v1.0'
_label = 'deep center'

def __init__(self, **args):
ProtAlign2D.__init__(self, **args)
@@ -61,32 +62,28 @@ def _defineParams(self, form):

form.addSection(label=Message.LABEL_INPUT)

form.addParam('inputImageSet', PointerParam, label="Input Image set",
form.addParam('inputParticles', PointerParam, label="Input images",
pointerClass='SetOfParticles',
help='The set of particles to center')
help='The set does not need to be centered or have alignment parameters')

form.addParam('sigma', FloatParam,
label="Shift sigma",
default=5,
expertLevel=LEVEL_ADVANCED,
help="Sigma for the training of the shift")
form.addParam('Xdim', IntParam,
label="Image size",
default=128,
expertLevel=LEVEL_ADVANCED,
help="Image size during the processing")
help="In pixels. This is used to generate artificially shifted particles.")

form.addSection('Training parameters')
form.addParam('precision', FloatParam,
label="Precision",
default=0.5,
help="In pixels.")

form.addParam('numModels', IntParam,
label="Number of models", default=5,
help="The maximum number of model available in xmipp is 5.")
form.addSection(label="Training")

form.addParam('trainSetSize', IntParam, label="Train set size", default=5000,
help='How many particles from the training')
help='How many particles to use for training. Set to -1 for all of them')

form.addParam('numEpochs', IntParam,
label="Number of epochs",
default=10,
default=100,
expertLevel=LEVEL_ADVANCED,
help="Number of epochs for training.")

@@ -104,58 +101,47 @@ def _defineParams(self, form):

# --------------------------- INSERT steps functions --------------------------------------------
def _insertAllSteps(self):
self.fnImgs = self._getTmpPath('imgs.xmd')
self.fnImgsTrain = self._getTmpPath('imgsTrain.xmd')
if self.useQueueForSteps() or self.useQueue():
myStr = os.environ["CUDA_VISIBLE_DEVICES"]
else:
myStr = self.gpuList.get()
os.environ["CUDA_VISIBLE_DEVICES"] = self.gpuList.get()
numGPU = myStr.split(',')

self._insertFunctionStep("convertInputStep", self.inputParticles.get())
self._insertFunctionStep("train", numGPU[0])
self._insertFunctionStep("predict", numGPU[0])
self._insertFunctionStep('createOutputStep')
self._insertFunctionStep("createOutputStep")

# --------------------------- STEPS functions ---------------------------------------------------
def convertInputStep(self, inputSet):
writeSetOfParticles(inputSet, self.fnImgs)
if self.trainSetSize.get()>0:
self.runJob("xmipp_metadata_utilities","-i %s --operate random_subset %d -o %s"%\
(self.fnImgs,self.trainSetSize, self.fnImgsTrain), numberOfMpi=1)
else:
createLink(self.fnImgs, self.fnImgsTrain)

def train(self, gpuId):
fnTrain = self._getTmpPath("trainingImages")
writeSetOfParticles(self.inputImageSet.get(), fnTrain+".xmd")
self.runJob("xmipp_metadata_utilities","-i %s.xmd --operate random_subset %d"%\
(fnTrain,self.trainSetSize), numberOfMpi=1)
self.runJob("xmipp_image_resize",
"-i %s.xmd -o %s.stk --save_metadata_stack %s.xmd --fourier %d" %
(fnTrain, fnTrain, fnTrain, self.Xdim),
numberOfMpi=self.numberOfThreads.get() * self.numberOfMpi.get())
args = "%s %s %f %d %d %s %d %f" %\
(fnTrain+".xmd", self._getExtraPath("model"), self.sigma,
self.numEpochs, self.batchSize, gpuId, self.numModels, self.learningRate)
args = "-i %s --omodel %s --sigma %f --maxEpochs %d --batchSize %d --gpu %s --learningRate %f --precision %f"%\
(self.fnImgsTrain, self._getExtraPath("model.h5"), self.sigma, self.numEpochs, self.batchSize, gpuId,
self.learningRate, self.precision)
self.runJob(f"xmipp_deep_center", args, numberOfMpi=1, env=self.getCondaEnv())

def predict(self, gpuId):
fnPredict = self._getExtraPath("predictImages")
fnPredictResized = self._getTmpPath("predictImages")
writeSetOfParticles(self.inputImageSet.get(), fnPredict+".xmd")
self.runJob("xmipp_image_resize",
"-i %s.xmd -o %s.stk --save_metadata_stack %s.xmd --fourier %d" %
(fnPredict, fnPredictResized, fnPredictResized, self.Xdim),
numberOfMpi=self.numberOfThreads.get() * self.numberOfMpi.get())
args = "%s %s %s %s" % (
fnPredict+".xmd", gpuId, fnPredictResized+".xmd", self._getExtraPath("model"))
fnModel = self._getExtraPath("model.h5")
args = "-i %s --gpu %s --model %s -o %s" % (self.fnImgs, gpuId, fnModel, self._getExtraPath('particles.xmd'))
self.runJob("xmipp_deep_center_predict", args, numberOfMpi=1, env=self.getCondaEnv())

def createOutputStep(self):
fnPredict = self._getExtraPath("predictImages.xmd")
fnPredict = self._getExtraPath("particles.xmd")
outputSet = self._createSetOfParticles()
readSetOfParticles(fnPredict, outputSet)
outputSet.copyInfo(self.inputImageSet.get())
outputSet.setAlignmentProj()
outputSet.copyInfo(self.inputParticles.get())
outputSet.setAlignment2D()
self._defineOutputs(outputParticles=outputSet)
self._store(outputSet)
self._defineSourceRelation(self.inputImageSet.get(), outputSet)

# --------------------------- INFO functions --------------------------------
def _methods(self):
methods = []
if hasattr(self, 'outputParticles'):
methods.append("We learned a model to center particles from %i input images (%s)." \
% (self.inputSet.get().getSize(), self.getObjectTag('inputSet')))
return methods
self._defineSourceRelation(self.inputParticles.get(), outputSet)
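
In short, the reworked deep center protocol drives two command-line programs: xmipp_deep_center is trained on a random subset of the particles (written to imgsTrain.xmd), and xmipp_deep_center_predict then writes a centered 2D alignment for the full set. The standalone sketch below is not part of the commit; it only replays how the two command lines above are assembled. The batch size and learning rate values are placeholders (their form defaults fall outside the shown hunks), while sigma=5, epochs=100 and precision=0.5 match the form defaults in the diff.

    # Rough sketch (not from the commit) of the commands built by XmippProtDeepCenter.
    # Paths are hypothetical stand-ins for the protocol's Tmp/Extra files; batch size
    # and learning rate are assumed values.
    fn_imgs = "Tmp/imgs.xmd"        # all particles, written by convertInputStep
    fn_train = "Tmp/imgsTrain.xmd"  # random training subset (or a link to fn_imgs)
    model = "Extra/model.h5"
    fn_out = "Extra/particles.xmd"
    gpu = "0"

    train_cmd = ("xmipp_deep_center -i %s --omodel %s --sigma %f --maxEpochs %d"
                 " --batchSize %d --gpu %s --learningRate %f --precision %f"
                 % (fn_train, model, 5.0, 100, 8, gpu, 0.001, 0.5))

    predict_cmd = ("xmipp_deep_center_predict -i %s --gpu %s --model %s -o %s"
                   % (fn_imgs, gpu, model, fn_out))

    print(train_cmd)
    print(predict_cmd)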


27 changes: 4 additions & 23 deletions xmipp3/protocols/protocol_movie_dose_analysis.py
@@ -27,24 +27,23 @@
import matplotlib.pyplot as plt
import numpy as np
import os
from pyworkflow import VERSION_3_0, NEW
from pyworkflow import VERSION_3_0
from pyworkflow.object import Set
from pyworkflow.protocol import STEPS_PARALLEL
from pyworkflow.protocol import Protocol
from pyworkflow.protocol.params import (PointerParam, IntParam, FloatParam, LEVEL_ADVANCED)
from pyworkflow.utils.properties import Message
import pyworkflow.protocol.constants as cons
from pwem.emlib.image import ImageHandler
from pwem.objects import SetOfMovies
from pwem.protocols import ProtProcessMovies
from xmipp3.convert import getScipionObj
import statistics as stat
from pyworkflow import BETA, UPDATED, NEW, PROD
from pyworkflow import NEW, PROD

THRESHOLD = 2
OUTPUT_ACCEPTED = 'outputMovies'
OUTPUT_DISCARDED = 'outputMoviesDiscarded'

class XmippProtMovieDoseAnalysis(ProtProcessMovies):
class XmippProtMovieDoseAnalysis(ProtProcessMovies, Protocol):
""" Protocol for the dose analysis """
# FIXME: WITH .mrcs IT DOES NOT FILL THE LABELS
_devStatus = PROD
@@ -311,24 +310,6 @@ def _checkNewOutput(self):
if outputStep and outputStep.isWaiting():
outputStep.setStatus(cons.STATUS_NEW)

def _updateOutputSet(self, outputName, outputSet, state=Set.STREAM_OPEN):
outputSet.setStreamState(state)
if self.hasAttribute(outputName):
outputSet.write() # Write to commit changes
outputAttr = getattr(self, outputName)
# Copy the properties to the object contained in the protocol
outputAttr.copy(outputSet, copyId=False)
# Persist changes
self._store(outputAttr)
else:
# Here the defineOutputs function will call the write() method
self._defineOutputs(**{outputName: outputSet})
self._store(outputSet)

# Close set databaset to avoid locking it
outputSet.close()


# ------------------------- UTILS functions --------------------------------
def getLimitIntervals(self):
""" Funtion to obtain the acceptance interval limits."""
63 changes: 53 additions & 10 deletions xmipp3/protocols/protocol_reconstruct_fourier.py
@@ -26,13 +26,14 @@

from pwem.objects import Volume
from pwem.protocols import ProtReconstruct3D
from pwem import emlib
import pyworkflow.protocol.params as params
import pyworkflow.protocol.constants as cons
from xmipp3.convert import writeSetOfParticles
from xmipp3.base import isXmippCudaPresent
from pyworkflow.utils import moveFile
import os


class XmippProtReconstructFourier(ProtReconstruct3D):
"""
Reconstruct a volume using Xmipp_reconstruct_fourier from a given set of particles.
@@ -68,6 +69,8 @@ def _defineParams(self, form):
help='Maximum resolution (in Angstrom) to consider \n'
'in Fourier space (default Nyquist).\n'
'Param *--maxres* in Xmipp.')
form.addParam('useHalves', params.BooleanParam, label='Use halves', default=False,
help='Create separate reconstructions from two random subsets. Useful for resolution measurements')
line = form.addLine('Padding factor',
expertLevel=cons.LEVEL_ADVANCED,
help='Padding of the input images. Higher number will result in more precise interpolation in Fourier '
@@ -102,21 +105,36 @@ def _createFilenameTemplates(self):
""" Centralize how files are called for iterations and references. """
myDict = {
'input_xmd': self._getExtraPath('input_particles.xmd'),
'output_volume': self._getPath('output_volume.mrc')
'half1_xmd': self._getExtraPath('input_particles000001.xmd'),
'half2_xmd': self._getExtraPath('input_particles000002.xmd'),
'output_volume': self._getPath('output_volume.mrc'),
'half1_volume': self._getPath('half1.mrc'),
'half2_volume': self._getPath('half2.mrc')
}
self._updateFilenamesDict(myDict)

def _insertAllSteps(self):
self._createFilenameTemplates()
self._insertFunctionStep('convertInputStep')
self._insertReconstructStep()
if self.useHalves.get():
self._insertFunctionStep('splitInputStep')
self._insertReconstructStep('half1')
self._insertReconstructStep('half2')
self._insertFunctionStep('averageStep')
else:
self._insertReconstructStep()
self._insertFunctionStep('createOutputStep')

def _insertReconstructStep(self):
def _insertReconstructStep(self, half=None):
#imgSet = self.inputParticles.get()

params = ' -i %s' % self._getFileName('input_xmd')
params += ' -o %s' % self._getFileName('output_volume')
if half is None:
params = ' -i %s' % self._getFileName('input_xmd')
params += ' -o %s' % self._getFileName('output_volume')
else:
params = ' -i %s' % self._getFileName(half + '_xmd')
params += ' -o %s' % self._getFileName(half + '_volume')

params += ' --sym %s' % self.symmetryGroup.get()
maxRes = self.maxRes.get()
if maxRes == -1:
@@ -163,7 +181,14 @@ def convertInputStep(self):
#TODO: This only writes metadata what about binary file
#it should
writeSetOfParticles(imgSet, particlesMd)

def splitInputStep(self):
args = []
args += ['-i', self._getFileName('input_xmd')]
args += ['-n', 2]

self.runJob('xmipp_metadata_split', args, numberOfMpi=1)

def reconstructStep(self, params):
""" Create the input file in STAR format as expected by Xmipp.
If the input particles comes from Xmipp, just link the file.
@@ -178,16 +203,34 @@ def reconstructStep(self, params):
self.runJob('xmipp_reconstruct_fourier', params)
else:
self.runJob('xmipp_reconstruct_fourier_accel', params)

self.runJob("xmipp_image_header", "-i %s --sampling_rate %f"%\
(self._getFileName('output_volume'), self.inputParticles.get().getSamplingRate()),
numberOfMpi=1)

def averageStep(self):
# Read
half1 = emlib.Image(self._getFileName('half1_volume'))
half2 = emlib.Image(self._getFileName('half2_volume'))

# Average
half1.inplaceAdd(half2)
half1.inplaceMultiply(0.5)

# Write
half1.write(self._getFileName('output_volume'))

def createOutputStep(self):
imgSet = self.inputParticles.get()

self.runJob("xmipp_image_header", "-i %s --sampling_rate %f"%\
(self._getFileName('output_volume'), imgSet.getSamplingRate()),
numberOfMpi=1)

volume = Volume()
volume.setFileName(self._getFileName('output_volume'))
volume.setSamplingRate(imgSet.getSamplingRate())
if self.useHalves.get():
volume.setHalfMaps([
self._getFileName('half1_volume'),
self._getFileName('half2_volume')
])

self._defineOutputs(outputVolume=volume)
self._defineSourceRelation(self.inputParticles, volume)
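
Taken together, the new useHalves branch is a split / reconstruct-twice / average pipeline. The sketch below is not part of the commit; it replays that sequence outside the protocol. The paths and the c1 symmetry are placeholder choices, and the split output names follow the input_particles000001/000002 pattern declared in the filename templates above.

    import subprocess
    from pwem import emlib

    # Placeholder paths; the protocol derives these through _getFileName().
    input_xmd = "input_particles.xmd"

    # 1) Split the metadata into two random halves
    #    (writes input_particles000001.xmd and input_particles000002.xmd).
    subprocess.run(["xmipp_metadata_split", "-i", input_xmd, "-n", "2"], check=True)

    # 2) Reconstruct each half independently (symmetry c1 assumed here).
    for i in (1, 2):
        subprocess.run(["xmipp_reconstruct_fourier",
                        "-i", "input_particles%06d.xmd" % i,
                        "-o", "half%d.mrc" % i,
                        "--sym", "c1"], check=True)

    # 3) Average the two half maps voxel-wise, as averageStep() does.
    half1 = emlib.Image("half1.mrc")
    half2 = emlib.Image("half2.mrc")
    half1.inplaceAdd(half2)
    half1.inplaceMultiply(0.5)
    half1.write("output_volume.mrc")

Within the protocol, the two half maps are additionally attached to the output Volume via setHalfMaps(), so downstream resolution estimation can pick them up.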
