3
dI                @   s
  d Z ddlZddlmZ ddlmZ ddlmZmZmZm	Z	 ddl
mZmZmZmZmZmZ dd	lmZ d
dl
mZmZ G dd deZG dd deZG dd deZG dd deZG dd deZG dd deZG dd deZG dd deZG dd deeZG dd deZG d d! d!eZG d"d# d#eZ G d$d% d%eZ!G d&d' d'eZ"G d(d) d)eZ#G d*d+ d+eZ$G d,d- d-eZ%G d.d/ d/eZ&G d0d1 d1eZ'G d2d3 d3eZ(G d4d5 d5eZ)e)Z*e'Z+e(Z,G d6d7 d7eZ-G d8d9 d9eZ.G d:d; d;eZ/dS )<z0Wrappers for segmentation utilities within ANTs.    N)glob   )BibTeX)split_filenamecopyfilewhichfname_presuffix   )TraitedSpecFiletraitsInputMultiPathOutputMultiPath	isdefined)CopyHeaderInterface   )ANTSCommandANTSCommandInputSpecc            
   @   sd  e Zd ZejdddddddZeeddd	dd
ZeddddZ	ejdddddddgddZ
ejejejejddZejeddejddZejddZej ZejdgdZejddZejddZejej dgdZejddZejdgdZejddZejd gdZejddZejdd!d"dd#Zejd$gdZeddd%d&Z ej Z!ejd'dd(Z"d)S )*AtroposInputSpecr   r	      z--image-dimensionality %dTzimage dimension (2, 3, or 4))argstr
usedefaultdesc)existsz--intensity-image %s...)r   	mandatoryz--mask-image %s)r   r   r   RandomZOtsuKMeansPriorProbabilityImagesPriorLabelImagez%snumber_of_tissue_classes)r   requiresr   r   )minlenzQeither a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.)r   )r   prior_weighting)r    z--likelihood-model %s)r   mrf_smoothing_factoricm_use_synchronous_updaten_iterationsz--use-random-seed %dz#use random seed value over constant)r   r   r   posterior_formulationF)r   Zgenfile
hash_fileszPOSTERIOR_%02d.nii.gz)r   N)#__name__
__module____qualname__r   Enum	dimensionr   r   intensity_images
mask_imageinitializationListZEitherIntFloatkmeans_init_centersStrprior_imager   r"   prior_probability_thresholdZlikelihood_modelr#   
mrf_radiusBoolr$   maximum_number_of_icm_terationsr%   convergence_thresholdr&   Zuse_random_seeduse_mixture_model_proportionsout_classified_image_namesave_posteriorsoutput_posteriors_name_template r?   r?   E/tmp/pip-build-7vycvbft/nipype/nipype/interfaces/ants/segmentation.pyr      s\   
r   c               @   s$   e Zd ZeddZeeddZdS )AtroposOutputSpecT)r   )existN)r(   r)   r*   r   classified_imager   
posteriorsr?   r?   r?   r@   rA   C   s   
rA   c                   s<   e Zd ZdZeZeZd Z fddZ	dd Z
dd Z  ZS )AtroposaF  
    A multivariate n-class segmentation algorithm.

    A finite mixture modeling (FMM) segmentation approach with possibilities for
    specifying prior constraints. These prior constraints include the specification
    of a prior label image, prior probability images (one for each class), and/or an
    MRF prior to enforce spatial smoothing of the labels. Similar algorithms include
    FAST and SPM.

    Examples
    --------
    >>> from nipype.interfaces.ants import Atropos
    >>> at = Atropos(
    ...     dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
    ...     number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
    ...     mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True,
    ...     maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001,
    ...     posterior_formulation='Socrates', use_mixture_model_proportions=True)
    >>> at.inputs.initialization = 'Random'
    >>> at.cmdline
    'Atropos --image-dimensionality 3 --icm [1,1]
    --initialization Random[2] --intensity-image structural.nii
    --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
    --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
    --use-random-seed 1'

    >>> at = Atropos(
    ...     dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
    ...     number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
    ...     mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True,
    ...     maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001,
    ...     posterior_formulation='Socrates', use_mixture_model_proportions=True)
    >>> at.inputs.initialization = 'KMeans'
    >>> at.inputs.kmeans_init_centers = [100, 200]
    >>> at.cmdline
    'Atropos --image-dimensionality 3 --icm [1,1]
    --initialization KMeans[2,100,200] --intensity-image structural.nii
    --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
    --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
    --use-random-seed 1'

    >>> at = Atropos(
    ...     dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
    ...     number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
    ...     mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True,
    ...     maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001,
    ...     posterior_formulation='Socrates', use_mixture_model_proportions=True)
    >>> at.inputs.initialization = 'PriorProbabilityImages'
    >>> at.inputs.prior_image = 'BrainSegmentationPrior%02d.nii.gz'
    >>> at.inputs.prior_weighting = 0.8
    >>> at.inputs.prior_probability_threshold = 0.0000001
    >>> at.cmdline
    'Atropos --image-dimensionality 3 --icm [1,1]
    --initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07]
    --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii
    --mrf [0.2,1x1x1] --convergence [5,1e-06]
    --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz]
    --posterior-formulation Socrates[1] --use-random-seed 1'

    >>> at = Atropos(
    ...     dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
    ...     number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
    ...     mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True,
    ...     maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001,
    ...     posterior_formulation='Socrates', use_mixture_model_proportions=True)
    >>> at.inputs.initialization = 'PriorLabelImage'
    >>> at.inputs.prior_image = 'segmentation0.nii.gz'
    >>> at.inputs.number_of_tissue_classes = 2
    >>> at.inputs.prior_weighting = 0.8
    >>> at.cmdline
    'Atropos --image-dimensionality 3 --icm [1,1]
    --initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii
    --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
    --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
    --use-random-seed 1'

    c                s  |dkrT| j j}d| g}|dkrft| j jrftt| j j}t||krTtd|dd |D 7 }|d$krBt| j j st| j j	 rtd	| | j jg d
 d kr|dkrtd fddt
d|d D  tdd  D s tddj  || j jd| j j	 g7 }|dkrBt| j jrB|jd| j j  d|dj|f S |dkrd| }t| j jr|d| jdd | j jD  7 }|d S |dkrd| }t| j jr|d| j j 7 }|d S |dkrd| }t| j jr|d| j j 7 }|d S |dkr@d | }t| j jr<|d!| j j 7 }|S |d"krxd#| }t| j jrp|d| j j 7 }|d S tt| j|||S )%Nr/   z%dr   zsKMeans initialization with initial cluster centers requires the number of centers to match number_of_tissue_classesc             S   s   g | ]}d | qS )z%gr?   ).0cr?   r?   r@   
<listcomp>   s    z'Atropos._format_arg.<locals>.<listcomp>r   r   zD'%s' initialization requires setting prior_image and prior_weightingz%02dr   zJ'PriorLabelImage' initialization does not accept patterns for prior_image.c                s   g | ]} d  | qS )r   r?   )rF   i)priors_pathsr?   r@   rH      s    r   c             S   s   g | ]}t jj|qS r?   )ospathr   )rF   pr?   r?   r@   rH      s    z*One or more prior images do not exist: %s.z, z%gz--initialization %s[%s],r#   z	--mrf [%gz,%sc             S   s   g | ]}t |qS r?   )str)rF   sr?   r?   r@   rH      s    ]r$   z	--icm [%dz,%gr%   z--convergence [%dr&   z--posterior-formulation %sz[%d]r<   z--output [%s)r   r   )inputsr   r   r3   sortedsetlen
ValueErrorr5   r"   rangeallFileNotFoundErrorjoinr6   appendr7   _format_xarrayr9   r:   r;   r=   r>   superrE   _format_arg)selfoptspecvalZ	n_classesZbracketsZcentersretval)	__class__)rJ   r@   r^      sv    








zAtropos._format_argc             C   sB   |dkr>| j j}t|s:t| j jd \}}}|d | }|S d S )Nr<   r   Z_labeled)rR   r<   r   r   r-   )r_   nameoutput_extr?   r?   r@   _gen_filename   s    zAtropos._gen_filenamec             C   sz   | j  j }tjj| jd|d< t| jjrv| jjrvg |d< x6t	| jj
D ]&}|d jtjj| jj|d   qLW |S )Nr<   rC   rD   r   )_outputsgetrK   rL   abspathri   r   rR   r=   rW   r   r[   r>   )r_   outputsrI   r?   r?   r@   _list_outputs   s    zAtropos._list_outputs)r(   r)   r*   __doc__r   
input_specrA   output_spec_cmdr^   ri   rn   __classcell__r?   r?   )rd   r@   rE   H   s   MMrE   c            	   @   s   e Zd ZeddddddZeddddddZejddd	d
gddddZej	ddddZ
ej	dddgddZej	dddgddZej	dddgddZej	dddgddZdS )LaplacianThicknessInputSpecz%sTzwhite matter segmentation imager   )r   r   r   r   positionzgray matter segmentation imager	   zname of output filer   input_wmz%s_thicknessF)r   r   ru   name_sourcename_templatekeep_extensionr'   z=Sigma of the Laplacian Recursive Image Filter (defaults to 1)r   )r   r   ru   z!Prior thickness (defaults to 500)smooth_param   )r   r   r    ru   z5Time delta used during integration (defaults to 0.01)prior_thickness   zcPositive floating point number for sulcus prior. Authors said that 0.15 might be a reasonable valuedT   z:Tolerance to reach during optimization (defaults to 0.001)sulcus_prior   N)r(   r)   r*   r   rv   Zinput_gmr   r4   output_imager2   rz   r|   r~   r   Z	tolerancer?   r?   r?   r@   rt      sX   rt   c               @   s   e Zd ZedddZdS )LaplacianThicknessOutputSpecTzCortical thickness)r   r   N)r(   r)   r*   r   r   r?   r?   r?   r@   r   8  s   r   c               @   s   e Zd ZdZd ZeZeZdS )LaplacianThicknessai  Calculates the cortical thickness from an anatomical image

    Examples
    --------

    >>> from nipype.interfaces.ants import LaplacianThickness
    >>> cort_thick = LaplacianThickness()
    >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz'
    >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz'
    >>> cort_thick.cmdline
    'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz'

    >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz'
    >>> cort_thick.cmdline
    'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz'

    N)	r(   r)   r*   ro   rr   rt   rp   r   rq   r?   r?   r?   r@   r   <  s   r   c               @   s
  e Zd ZejdddddddZeddd	d
ZedddZedddZ	ej
dddgddddZejddZejdgdZejddZejej ddZejdgdZejdddddgdZed dd!Zejdddd"d#Zejddd$d%d&d'Zejd.ejejejd+d,dZd-S )/N4BiasFieldCorrectionInputSpecr   r	   r   z-d %dTzimage dimension (2, 3 or 4))r   r   r   z--input-image %szjinput for bias correction. Negative values or values close to zero should be processed prior to correction)r   r   r   z--mask-image %sz;image to specify region to perform final bias correction in)r   r   z--weight-image %szoimage for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. z--output %szoutput file nameinput_imagez%s_correctedF)r   r   rw   rx   ry   r'   z--bspline-fitting %s)r   bspline_fitting_distance)r    z--shrink-factor %dz--convergence %sr%   z3True if the estimated bias should be saved to file.
bias_image)r   r   r   xorz Filename for the estimated bias.)r   r'   zCcopy headers of the original image into the output (corrected) file)r   r   r   z-rz2.1.0at  [NOTE: Only ANTs>=2.1.0]
At each iteration, a new intensity mapping is calculated and applied but there
is nothing which constrains the new intensity range to be within certain values.
The result is that the range can "drift" from the original at each iteration.
This option rescales to the [min,max] range of the original image intensities
within the user-specified mask.)r   r   Zmin_verr   333333?{Gz?   z!--histogram-sharpening [%g,%g,%d]a#  Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins).
These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm.
The default values have been shown to work fairly well.N)r   r   r   )r(   r)   r*   r   r+   r,   r   r   r.   Zweight_imager4   r   r2   r   r1   bspline_ordershrink_factorr0   r%   r:   r8   	save_biasr   Zcopy_headerZrescale_intensitiesTupleZhistogram_sharpeningr?   r?   r?   r@   r   T  sb   
r   c               @   s$   e Zd ZedddZedddZdS )N4BiasFieldCorrectionOutputSpecTzWarped image)r   r   zEstimated biasN)r(   r)   r*   r   r   r   r?   r?   r?   r@   r     s   r   c                   s\   e Zd ZdZd ZeZeZdddZ	 fddZ
 fdd	Zd fdd	Z fddZ  ZS )N4BiasFieldCorrectionaA
  
    Bias field correction.

    N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
    retrospective bias correction algorithm. Based on the assumption that the
    corruption of the low frequency bias field can be modeled as a convolution of
    the intensity histogram by a Gaussian, the basic algorithmic protocol is to
    iterate between deconvolving the intensity histogram by a Gaussian, remapping
    the intensities, and then spatially smoothing this result by a B-spline modeling
    of the bias field itself. The modifications from and improvements obtained over
    the original N3 algorithm are described in [Tustison2010]_.

    .. [Tustison2010] N. Tustison et al.,
      N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
      29(6):1310-1320, June 2010.

    Examples
    --------

    >>> import copy
    >>> from nipype.interfaces.ants import N4BiasFieldCorrection
    >>> n4 = N4BiasFieldCorrection()
    >>> n4.inputs.dimension = 3
    >>> n4.inputs.input_image = 'structural.nii'
    >>> n4.inputs.bspline_fitting_distance = 300
    >>> n4.inputs.shrink_factor = 3
    >>> n4.inputs.n_iterations = [50,50,30,20]
    >>> n4.cmdline
    'N4BiasFieldCorrection --bspline-fitting [ 300 ]
    -d 3 --input-image structural.nii
    --convergence [ 50x50x30x20 ] --output structural_corrected.nii
    --shrink-factor 3'

    >>> n4_2 = copy.deepcopy(n4)
    >>> n4_2.inputs.convergence_threshold = 1e-6
    >>> n4_2.cmdline
    'N4BiasFieldCorrection --bspline-fitting [ 300 ]
    -d 3 --input-image structural.nii
    --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii
    --shrink-factor 3'

    >>> n4_3 = copy.deepcopy(n4_2)
    >>> n4_3.inputs.bspline_order = 5
    >>> n4_3.cmdline
    'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ]
    -d 3 --input-image structural.nii
    --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii
    --shrink-factor 3'

    >>> n4_4 = N4BiasFieldCorrection()
    >>> n4_4.inputs.input_image = 'structural.nii'
    >>> n4_4.inputs.save_bias = True
    >>> n4_4.inputs.dimension = 3
    >>> n4_4.cmdline
    'N4BiasFieldCorrection -d 3 --input-image structural.nii
    --output [ structural_corrected.nii, structural_bias.nii ]'

    >>> n4_5 = N4BiasFieldCorrection()
    >>> n4_5.inputs.input_image = 'structural.nii'
    >>> n4_5.inputs.dimension = 3
    >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200)
    >>> n4_5.cmdline
    'N4BiasFieldCorrection -d 3  --histogram-sharpening [0.12,0.02,200]
    --input-image structural.nii --output structural_corrected.nii'

    r   FT)r   r   c                s   d| _ tt| j|| dS )z0Instantiate the N4BiasFieldCorrection interface.N)_out_bias_filer]   r   __init__)r_   argskwargs)rd   r?   r@   r     s    zN4BiasFieldCorrection.__init__c                s   |dkr&| j r&d|| j f }|j| S |dkr^t| jjrLd|| jjf }nd| }|j| S |dkrt| jjrd| jdd	 |D | jjf }nd
| jdd	 |D  }|j| S tt| j	|||S )Nr   z
[ %s, %s ]r   z
[ %g, %d ]z[ %g ]r%   z
[ %s, %g ]c             S   s   g | ]}t |qS r?   )rO   )rF   eltr?   r?   r@   rH     s    z5N4BiasFieldCorrection._format_arg.<locals>.<listcomp>z[ %s ]c             S   s   g | ]}t |qS r?   )rO   )rF   r   r?   r?   r@   rH     s    )
r   r   r   rR   r   r:   r\   r]   r   r^   )r_   re   
trait_specvaluenewval)rd   r?   r@   r^     s     


z!N4BiasFieldCorrection._format_argNc                sj   |pg ddg }d | _ | jjs*t| jjrX| jj}t|sRttjj| jj	dd}|| _ t
t| j|dS )Nr   r   Z_bias)suffix)skip)r   rR   r   r   r   r   rK   rL   basenamer   r]   r   _parse_inputs)r_   r   r   )rd   r?   r@   r     s    z#N4BiasFieldCorrection._parse_inputsc                s*   t t| j }| jr&tjj| j|d< |S )Nr   )r]   r   rn   r   rK   rL   rl   )r_   rm   )rd   r?   r@   rn     s    z#N4BiasFieldCorrection._list_outputs)r   F)r   T)N)r(   r)   r*   ro   rr   r   rp   r   rq   Z_copy_header_mapr   r^   r   rn   rs   r?   r?   )rd   r@   r     s   Br   c               @   sV  e Zd ZejddddddZeddddd	Zedd
ddd	ZeddddddZ	e
edddddZejdddddZejdddddZedddddZeddddZejd d!d"Zejd#d$d"Zejd%d&d"Zejd'd(d"Zejd)d*d"Zejd+d,d-d.d"Zejd+d,d/d0d"Zejd1d2d"Zedd3d4Zejd5d6d"Zejd7d8d"Zejd9d:d"Zd;S )<CorticalThicknessInputSpecr   r	   z-d %dTzimage dimension (2 or 3))r   r   r   z-a %sa&  Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image.)r   r   r   r   z-e %szAnatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is  *not* skull-stripped.z-m %sz(brain probability mask in template spaceF)r   r   r   r   r   )r   z-p %s)r   r   ZantsCT_z-o %sz,Prefix that is prepended to all output filesznii.gzz.any of standard ITK formats, nii.gz is defaultz-s %s)r   r   r   zAnatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped.z-t %s)r   r   r   r   z-f %szSMask (defined in the template space) used during registration for brain extraction.)r   r   r   z-k %dz<Keep brain extraction/segmentation warps, etc (default = 0).)r   r   z-i %dz:ANTS registration max iterations (default = 100x100x70x20)z-w %fz?Atropos spatial prior *probability* weight for the segmentationz-n %dz@N4 -> Atropos -> N4 iterations during segmentation (default = 3)z-b %szAtropos posterior formulation and whether or not to use mixture model proportions. e.g 'Socrates[1]' (default) or 'Aristotle[1]'. Choose the latter if you want use the distance priors (see also the -l option for label propagation control).r   r   z-j %dz;Use floating point precision in registrations (default = 0)z-u %dzFUse random number generated from system clock in Atropos (default = 1)z-vzNUse B-spline SyN for registrations and B-spline exponential mapping in DiReCT.z0Cortical ROI labels to use as a prior for ATITH.)r   r   z-l %sa  Incorporate a distance prior one the posterior formulation.  Should be of the form 'label[lambda,boundaryProbability]' where label is a value of 1,2,3,... denoting label ID.  The label probability for anything outside the current label = boundaryProbability * exp( -lambda * distanceFromBoundary ) Intuitively, smaller lambda values will increase the spatial capture range of the distance prior.  To apply to all label values, simply omit specifying the label, i.e. -l [lambda,boundaryProbability].z-q 1zIf = 1, use antsRegistrationSyNQuick.sh as the basis for registration during brain extraction, brain segmentation, and (optional) normalization to a template. Otherwise use antsRegistrationSyN.sh (default = 0).z-z 1zIf > 0, runs a faster version of the script. Only for testing. Implies -u 0. Requires single thread computation for complete reproducibility.N) r(   r)   r*   r   r+   r,   r   anatomical_imagebrain_templatebrain_probability_maskr   segmentation_priorsr4   
out_prefiximage_suffixt1_registration_templateextraction_registration_maskr1   keep_temporary_filesZmax_iterationsr2   Zprior_segmentation_weightZsegmentation_iterationsr&   use_floatingpoint_precisionuse_random_seedingr8   Zb_spline_smoothingZcortical_label_imageZlabel_propagationZquick_registrationdebugr?   r?   r?   r@   r   #  s   

r   c               @   s   e Zd ZedddZedddZedddZedddZeedddd	Z	edd
dZ
edddZedddZedddZedddZedddZedddZedddZdS )CorticalThicknessOutputSpecTzbrain extraction mask)r   r   zextracted brain from N4 imagezbrain segmentaion imagezN4 corrected image)r   zPosterior probability images)r   zcortical thickness filezTemplate to subject affinezTemplate to subject warpz Template to subject inverse warpz"Template to subject inverse affinez Template to subject log jacobianzNormalized cortical thicknesszBrain volumes as textN)r(   r)   r*   r   BrainExtractionMaskExtractedBrainN4BrainSegmentationBrainSegmentationN4r   BrainSegmentationPosteriorsCorticalThicknessTemplateToSubject1GenericAffineTemplateToSubject0WarpSubjectToTemplate1WarpSubjectToTemplate0GenericAffineSubjectToTemplateLogJacobian!CorticalThicknessNormedToTemplateBrainVolumesr?   r?   r?   r@   r     s$   



r   c                   sF   e Zd ZdZeZeZdZ fddZ	dgf fdd	Z
dd	 Z  ZS )
r   a}  
    Examples
    --------
    >>> from nipype.interfaces.ants.segmentation import CorticalThickness
    >>> corticalthickness = CorticalThickness()
    >>> corticalthickness.inputs.dimension = 3
    >>> corticalthickness.inputs.anatomical_image ='T1.nii.gz'
    >>> corticalthickness.inputs.brain_template = 'study_template.nii.gz'
    >>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
    >>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz',
    ...                                                 'BrainSegmentationPrior02.nii.gz',
    ...                                                 'BrainSegmentationPrior03.nii.gz',
    ...                                                 'BrainSegmentationPrior04.nii.gz']
    >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz'
    >>> corticalthickness.cmdline
    'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz
    -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_
    -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz'

    zantsCorticalThickness.shc                s   |dkrd| }|S |dkr(d| }|S |dkr<d| }|S |dkrPd| }|S |d	krdd
| }|S |dkrt | jjd \}}}d| }|S tt| j|||S )Nr   z-a %sr   z-e %sr   z-m %sr   z-o %sr   z-t %sr   r   z+-p nipype_priors/BrainSegmentationPrior%02d)r   rR   r   r]   r   r^   )r_   r`   ra   rb   rc   rg   rh   )rd   r?   r@   r^     s(    zCorticalThickness._format_argr   c       	         s   t jjt j d}t jj|s(t j| t| jjd \}}}xht	| jjD ]X\}}t jj|d|d  | }t jj|ot jj
|t jj|ksLtt jj|| qLW tt| j|}|S )NZnipype_priorsr   zBrainSegmentationPrior%02dr   )rK   rL   rZ   getcwdr   makedirsr   rR   r   	enumeraterealpathrl   r   r]   r   _run_interface)	r_   runtimecorrect_return_codesZpriors_directoryrg   rh   rI   ftarget)rd   r?   r@   r     s    
z CorticalThickness._run_interfacec             C   s  | j  j }tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< g }xHt	t
| jjD ]4}|jtjjtj | jjd	|d
   | jj  qW ||d< tjjtj | jjd | jj |d< tjjtj | jjd |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd |d< |S )NzBrainExtractionMask.r   zExtractedBrain0N4.r   zBrainSegmentation.r   zBrainSegmentation0N4.r   z BrainSegmentationPosteriors%02d.r   r   zCorticalThickness.r   z#TemplateToSubject1GenericAffine.matr   zTemplateToSubject0Warp.r   zSubjectToTemplate1Warp.r   z#SubjectToTemplate0GenericAffine.matr   zSubjectToTemplateLogJacobian.r   r   zbrainvols.csvr   )rj   rk   rK   rL   rZ   r   rR   r   r   rW   rU   r   r[   )r_   rm   rD   rI   r?   r?   r@   rn     sT    $zCorticalThickness._list_outputs)r(   r)   r*   ro   r   rp   r   rq   rr   r^   r   rn   rs   r?   r?   )rd   r@   r     s   r   c               @   s   e Zd ZejddddddZeddddd	Zedd
ddd	ZeddddddZ	ej
dddddZeddddZej
dddddZejdddddZejdd dZejddd!d"dZejd#d$dZd%S )&BrainExtractionInputSpecr   r	   z-d %dTzimage dimension (2 or 3))r   r   r   z-a %saz  Structural image, typically T1.  If more than one anatomical image is specified, subsequently specified images are used during the segmentation process.  However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.)r   r   r   r   z-e %sz]Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.z-m %szBrain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image.F)r   r   r   r   r   Zhighres001_z-o %sz,Prefix that is prepended to all output filesz-f %szMask (defined in the template space) used during registration for brain extraction. To limit the metric computation to a specific region.)r   r   r   znii.gzz.any of standard ITK formats, nii.gz is defaultz-s %s)r   r   r   r   r   z-u %dzFUse random number generated from system clock in Atropos (default = 1))r   r   z-k %dz<Keep brain extraction/segmentation warps, etc (default = 0).z-q %dz;Use floating point precision in registrations (default = 0)z-z 1zIf > 0, runs a faster version of the script. Only for testing. Implies -u 0. Requires single thread computation for complete reproducibility.N)r(   r)   r*   r   r+   r,   r   r   r   r   r4   r   r   r   r   r1   r   r   r8   r   r?   r?   r?   r@   r   X  s`   r   c               @   s   e Zd ZedddZedddZedddZedddZedddZedddZ	edddZ
edddZedddZedddZedddZedddZedddZedddZedddZedd	dZedd
dZedddZdS )BrainExtractionOutputSpecTzbrain extraction mask)r   r   zbrain extraction imagezsegmentation mask with only CSFz'segmentation mask with only grey matter z&segmentation mask with CSF, GM, and WMz(segmenration mask with only white matterzN4 bias field corrected imageN)r(   r)   r*   r   r   BrainExtractionBrainBrainExtractionCSFBrainExtractionGMBrainExtractionInitialAffine!BrainExtractionInitialAffineFixed"BrainExtractionInitialAffineMovingBrainExtractionLaplacian"BrainExtractionPrior0GenericAffine BrainExtractionPrior1InverseWarpBrainExtractionPrior1WarpBrainExtractionPriorWarpedBrainExtractionSegmentation BrainExtractionTemplateLaplacianBrainExtractionTmpBrainExtractionWMN4Corrected0N4Truncated0r?   r?   r?   r@   r     s*   


r   c                   s6   e Zd ZdZeZeZdZd	 fdd	Z	dd Z
  ZS )
BrainExtractionav  
    Atlas-based brain extraction.

    Examples
    --------
    >>> from nipype.interfaces.ants.segmentation import BrainExtraction
    >>> brainextraction = BrainExtraction()
    >>> brainextraction.inputs.dimension = 3
    >>> brainextraction.inputs.anatomical_image ='T1.nii.gz'
    >>> brainextraction.inputs.brain_template = 'study_template.nii.gz'
    >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
    >>> brainextraction.cmdline
    'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz
    -e study_template.nii.gz -d 3 -s nii.gz -o highres001_'

    zantsBrainExtraction.shr   c       	         s  | j  }|jdd ptjdd }|d krTtd|jd}|sHtd|j tjj	|}| j
jjd|i |jjd|i tt| j|}d|jkrx>|jjdD ].}|j jdr|j jddjd	d
 }P qW d||f }|jd kr||_n| jd| 7  _d|_| j| |S )NZANTSPATHZantsRegistration)envzpThe environment variable $ANTSPATH is not defined in host "%s", and Nipype could not determine it automatically.zwe cant find
zwe cant find ther    r   zOantsBrainExtraction.sh requires "%s" to be found in $ANTSPATH ($ANTSPATH="%s").r   )Z_get_environrk   rK   getenvr   environRuntimeErrorhostnamerL   dirnamerR   updater]   r   r   stdoutsplitstrip
startswithreplacestderr
returncodeZraise_exception)	r_   r   r   Zout_environZ	ants_pathZcmd_pathlineZtoolerrmsg)rd   r?   r@   r     s2    



zBrainExtraction._run_interfacec             C   s  | j  j }tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< t	| jj
on| jj
dkrtjjtj | jjd | jj |d< tjjtj | jjd | jj |d	< tjjtj | jjd
 |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd | jj |d< tjjtj | jjd  | jj |d!< tjjtj | jjd" | jj |d#< tjjtj | jjd$ | jj |d%< |S )&NzBrainExtractionMask.r   zBrainExtractionBrain.r   r   zBrainExtractionCSF.r   zBrainExtractionGM.r   z BrainExtractionInitialAffine.matr   z"BrainExtractionInitialAffineFixed.r   z#BrainExtractionInitialAffineMoving.r   zBrainExtractionLaplacian.r   z&BrainExtractionPrior0GenericAffine.matr   z!BrainExtractionPrior1InverseWarp.r   zBrainExtractionPrior1Warp.r   zBrainExtractionPriorWarped.r   zBrainExtractionSegmentation.r   z!BrainExtractionTemplateLaplacian.r   zBrainExtractionTmp.r   zBrainExtractionWM.r   zN4Corrected0.r   zN4Truncated0.r   )rj   rk   rK   rL   rZ   r   rR   r   r   r   r   )r_   rm   r?   r?   r@   rn     sr    zBrainExtraction._list_outputsr   )r   )r(   r)   r*   ro   r   rp   r   rq   rr   r   rn   rs   r?   r?   )rd   r@   r     s   (r   c               @   s   e Zd ZejddddddZedddd	d
ZejddddddZej	dddddZ
eddgdddddZejdddddgdZedgdddddZejdd d!dZd"S )#DenoiseImageInputSpecr	   r   r   z-d %dzThis option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image.)r   r   Tz-i %sz9A scalar image is expected as input for noise correction.)r   r   r   r   ZGaussianZRicianz-n %sz(Employ a Rician or Gaussian noise model.)r   r   r   r   z-s %szRunning noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default.)default_valuer   r   r   z-o %sr   Fz%s_noise_correctedzFThe output consists of the noise corrected version of the input image.)r   rw   r'   ry   rx   r   z4True if the estimated noise should be saved to file.noise_image)r   r   r   r   z%s_noisez!Filename for the estimated noise.)rw   r'   ry   rx   r   z-vzVerbose output.N)r(   r)   r*   r   r+   r,   r   r   Znoise_modelr1   r   r   r8   
save_noiser   verboser?   r?   r?   r@   r   p  sT   
r   c               @   s   e Zd ZeddZe ZdS )DenoiseImageOutputSpecT)r   N)r(   r)   r*   r   r   r   r?   r?   r?   r@   r     s   
r   c                   s,   e Zd ZdZeZeZd Z fddZ	  Z
S )DenoiseImagea{  
    Examples
    --------
    >>> import copy
    >>> from nipype.interfaces.ants import DenoiseImage
    >>> denoise = DenoiseImage()
    >>> denoise.inputs.dimension = 3
    >>> denoise.inputs.input_image = 'im1.nii'
    >>> denoise.cmdline
    'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1'

    >>> denoise_2 = copy.deepcopy(denoise)
    >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz'
    >>> denoise_2.inputs.noise_model = 'Rician'
    >>> denoise_2.inputs.shrink_factor = 2
    >>> denoise_2.cmdline
    'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2'

    >>> denoise_3 = DenoiseImage()
    >>> denoise_3.inputs.input_image = 'im1.nii'
    >>> denoise_3.inputs.save_noise = True
    >>> denoise_3.cmdline
    'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1'

    c                sR   |dkr>| j jst| j jr>d| jd| jdf }|j| S tt| j|||S )Nr   z
[ %s, %s ]r   )	rR   r   r   r   Z_filename_from_sourcer   r]   r   r^   )r_   re   r   r   r   )rd   r?   r@   r^     s    
zDenoiseImage._format_arg)r(   r)   r*   ro   r   rp   r   rq   rr   r^   rs   r?   r?   )rd   r@   r     s
   r   c               @   s  e Zd ZejddddddZejeeddd	dd
dZ	ejeeddddddZ
eeddddddZejdddddZejdddddZejddddgddZejdddddZejdddd dZejddd!d"d#Zejd$d%d&d'dZejdddgd(dd)dd*d+Zejej d,d-gd.d/Zejeddd0d1Zed2dd3d4Zed5dd6d7Zejd8d9dZejd:d;d<gd=d>Zejd?d;d<d@gdAd>ZejddBdCdZdDS )EJointFusionInputSpecr   r	   r   z-d %dzThis option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image.)r   r   T)r   z-t %sz^The target image (or multimodal target images) assumed to be aligned to a common image domain.)r   r   r   z-g %s...z\The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain.z-l %s...zThe atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets.g?z-a %szQRegularization term added to matrix Mx for calculating the inverse. Default = 0.1)r   r   r   r   g       @z-b %szKExponent for mapping intensity difference to the joint error. Default = 2.0Fz-ratlas_segmentation_imagezhRetain label posterior probability images. Requires atlas segmentations to be specified. Default = false)r   r   r    r   z-fz+Retain atlas voting images. Default = false)r   r   r   z-cz+Constrain solution to non-negative weights.z-p %sz4Patch radius for similarity measures. Default: 2x2x2)r!   maxlenr   r   PCZMSQz-m %szMetric to be used in determining the most similar neighborhood patch. Options include Pearson's correlation (PC) and mean squares (MSQ). Default = PC (Pearson correlation).r   z-s %szSearch radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel.)r!   r   r   r   r   z-e %sexclusion_imagez)Specify a label for the exclusion region.)r   r    r   z0Specify an exclusion region for the given label.)r   z-x %szJIf a mask image is specified, fusion is only performed in the mask region.)r   r   r   z%szThe output label fusion image.)r   r'   r   r   z]Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz")z"antsJointFusionPosterior_%d.nii.gzout_label_fusion out_intensity_fusion_name_formatz<Optional label posterior probability image file name format.)r    r   z%antsJointFusionVotingWeight_%d.nii.gzout_label_post_prob_name_formatz4Optional atlas voting weight image file name format.z-vzVerbose output.N) r(   r)   r*   r   r+   r,   r0   r   r   target_imageatlas_imager   r2   alphabetar8   Zretain_label_posterior_imagesZretain_atlas_voting_imagesZconstrain_nonnegativeZListIntpatch_radiusZpatch_metricsearch_radiusr4   exclusion_image_labelr   r.   r   r   r   #out_atlas_voting_weight_name_formatr   r?   r?   r?   r@   r     s   r   c               @   s@   e Zd ZeddZeeddZeeddZeeddZdS )JointFusionOutputSpecT)r   N)	r(   r)   r*   r   r   r   out_intensity_fusionout_label_post_probout_atlas_voting_weightr?   r?   r?   r@   r  q  s   
r  c                   s4   e Zd ZdZeZeZdZ fddZ	dd Z
  ZS )JointFusiona  
    An image fusion algorithm.

    Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges
    at MICCAI 2012 and MICCAI 2013.
    The original label fusion framework was extended to accommodate intensities by Brian
    Avants.
    This implementation is based on Paul's original ITK-style implementation
    and Brian's ANTsR implementation.

    References include 1) H. Wang, J. W. Suh, S.
    Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint
    label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3),
    611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation
    with joint label fusion and corrective learning--an open source implementation,
    Front. Neuroinform., 2013.

    Examples
    --------
    >>> from nipype.interfaces.ants import JointFusion
    >>> jf = JointFusion()
    >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
    >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ]
    >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz']
    >>> jf.inputs.target_image = ['im1.nii']
    >>> jf.cmdline
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz
    -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']"

    >>> jf.inputs.target_image = [ ['im1.nii', 'im2.nii'] ]
    >>> jf.cmdline
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz
    -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']"

    >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'],
    ...                                        ['rc2s1.nii','rc2s2.nii'] ]
    >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz',
    ...                                                    'segmentation1.nii.gz']
    >>> jf.cmdline
    "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
    -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii
    -s 3x3x3 -t ['im1.nii', 'im2.nii']"

    >>> jf.inputs.dimension = 3
    >>> jf.inputs.alpha = 0.5
    >>> jf.inputs.beta = 1.0
    >>> jf.inputs.patch_radius = [3,2,1]
    >>> jf.inputs.search_radius = [3]
    >>> jf.cmdline
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
    -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii
    -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']"

    >>> jf.inputs.search_radius = ['mask.nii']
    >>> jf.inputs.verbose = True
    >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii']
    >>> jf.inputs.exclusion_image_label = ['1','2']
    >>> jf.cmdline
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
    -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii]
    -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"

    >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
    >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz'
    >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz'
    >>> jf.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz'
    >>> jf.cmdline
    "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
    -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii]
    -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz,
    ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz]
    -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"

    ZantsJointFusionc                s  |dkrRg }x:t t| jjD ]&}|jdj| jj| | jj|  qW dj|S |dkrjdj| j|S |dkrdj| j|S |dkr| jj	g}x4| jj
| jj| jjfD ]}t|r|j| qP qW t|d	krdjd
|d fS djdj|S |dkr"t| jj	sdj| jj
S dS |dkrDdjdd | jjD S |dkrfdjdd | jjD S |dkrt|t| jjkrtdjt|t| jjdjdd | jjD S tt| j|||S )Nr   z-e {0}[{1}]r   r   z-p {0}r   z-s {0}r   r   z-or   z-o [{}]z, r   z-o {0}r   r   c             S   s&   g | ]}d j djdd |D qS )z-g [{0}]z, c             s   s   | ]}d | V  qdS )z'%s'Nr?   )rF   fnr?   r?   r@   	<genexpr>  s    z5JointFusion._format_arg.<locals>.<listcomp>.<genexpr>)formatrZ   )rF   air?   r?   r@   rH     s   z+JointFusion._format_arg.<locals>.<listcomp>r   c             S   s&   g | ]}d j djdd |D qS )z-t [{0}]z, c             s   s   | ]}d | V  qdS )z'%s'Nr?   )rF   r  r?   r?   r@   r    s    z5JointFusion._format_arg.<locals>.<listcomp>.<genexpr>)r	  rZ   )rF   r
  r?   r?   r@   rH     s   r   z`Number of specified segmentations should be identical to the number of atlas image sets {0}!={1}c             S   s   g | ]}d j |qS )z-l {0})r	  )rF   r  r?   r?   r@   rH     s    )rW   rU   rR   r   r[   r	  r   rZ   r\   r   r   r   r  r   r   r   rV   r   r]   AntsJointFusionr^   )r_   r`   ra   rb   rc   iir   option)rd   r?   r@   r^     sX    






zJointFusion._format_argc             C   s   | j  j }t| jjr,tjj| jj|d< t| jjrXt	tjj| jjj
dd|d< t| jjrt	tjj| jjj
dd|d< t| jjrt	tjj| jjj
dd|d< |S )Nr   z%d*r  r  r  )rj   rk   r   rR   r   rK   rL   rl   r   r   r   r   r  )r_   rm   r?   r?   r@   rn     s     zJointFusion._list_outputs)r(   r)   r*   ro   r   rp   r  rq   rr   r^   rn   rs   r?   r?   )rd   r@   r  x  s   J=r  c               @   s0  e Zd ZejddddddZeddddd	Zejddd
dZ	ejddddZ
eddddZeddddZejdddddZejdddddZeddddZejdddddZejdddd dZejd!dd"d#dZejd$d%d&Zejddd'd(dZejd)dd*d+dZed,dd-gd.d/d0d1Zed-gdd2d3d0d4Zd5S )6KellyKapowskiInputSpecr   r	   z--image-dimensionality %dTzimage dimension (2 or 3))r   r   r   z--segmentation-image "%s"zrA segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively.)r   r   r   r   zDThe label value for the gray matter label in the segmentation_image.)r   r   zEThe label value for the white matter label in the segmentation_image.z$--gray-matter-probability-image "%s"zIn addition to the segmentation image, a gray matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm.)r   r   r   z%--white-matter-probability-image "%s"zIn addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm.z[50,0.001,10]z--convergence "%s"zConvergence is determined by fitting a line to the normalized energy profile of the last N iterations (where N is specified by the window size) and determining the slope which is then compared with the convergence threshold.
   z--thickness-prior-estimate %fzEProvides a prior constraint on the final thickness measurement in mm.)r   r   r   z--thickness-prior-image "%s"z=An image containing spatially varying prior thickness values.g?z--gradient-step %fz(Gradient step size for the optimization.g      ?z--smoothing-variance %fz;Defines the Gaussian smoothing of the hit and total images.g      ?z'--smoothing-velocity-field-parameter %fzDefines the Gaussian smoothing of the velocity field (default = 1.5). If the b-spline smoothing option is chosen, then this defines the isotropic mesh spacing for the smoothing spline (default = 15).z--use-bspline-smoothing 1z=Sets the option for B-spline smoothing of the velocity field.)r   r   z!--number-of-integration-points %dz;Number of compositions of the diffeomorphism per iteration.   z;--maximum-number-of-invert-displacement-field-iterations %dzIMaximum number of iterations for estimating the invertdisplacement field.z--output "%s"segmentation_imagez%s_cortical_thicknessz$Filename for the cortical thickness.F)r   ry   rw   rx   r   r'   z%s_warped_white_matterz*Filename for the warped white matter file.)rw   ry   rx   r   r'   N)r(   r)   r*   r   r+   r,   r   r  r1   gray_matter_labelwhite_matter_labelZgray_matter_prob_imageZwhite_matter_prob_imager4   Zconvergencer2   Zthickness_prior_estimateZthickness_prior_imageZgradient_stepZsmoothing_varianceZsmoothing_velocity_fieldr8   Zuse_bspline_smoothingZnumber_integration_pointsZ#max_invert_displacement_field_iterscortical_thicknesswarped_white_matterr?   r?   r?   r@   r  $  s   r  c               @   s    e Zd ZeddZeddZdS )KellyKapowskiOutputSpecz5A thickness map defined in the segmented gray matter.)r   zA warped white matter image.N)r(   r)   r*   r   r  r  r?   r?   r?   r@   r    s   r  c                   sV   e Zd ZdZd ZeZeZe	dddgdgZ
d fdd	Zd	d
 Z fddZ  ZS )KellyKapowskiaQ  
    Nipype Interface to ANTs' KellyKapowski, also known as DiReCT.

    DiReCT is a registration based estimate of cortical thickness. It was published
    in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based
    cortical thickness measurement, Neuroimage 2009, 45:867--879.

    Examples
    --------
    >>> from nipype.interfaces.ants.segmentation import KellyKapowski
    >>> kk = KellyKapowski()
    >>> kk.inputs.dimension = 3
    >>> kk.inputs.segmentation_image = "segmentation0.nii.gz"
    >>> kk.inputs.convergence = "[45,0.0,10]"
    >>> kk.inputs.thickness_prior_estimate = 10
    >>> kk.cmdline
    'KellyKapowski --convergence "[45,0.0,10]"
    --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]"
    --image-dimensionality 3 --gradient-step 0.025000
    --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10
    --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000
    --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000'

    a  @book{Das2009867,
  author={Sandhitsu R. Das and Brian B. Avants and Murray Grossman and James C. Gee},
  title={Registration based cortical thickness measurement.},
  journal={NeuroImage},
  volume={45},
  number={37},
  pages={867--879},
  year={2009},
  issn={1053-8119},
  url={http://www.sciencedirect.com/science/article/pii/S1053811908012780},
  doi={https://doi.org/10.1016/j.neuroimage.2008.12.016}
}z,The details on the implementation of DiReCT.implementation)entrydescriptiontagsNc                s,   |d krg }|dddg7 }t t| j|dS )Nr  r  r  )r   )r]   r  r   )r_   r   )rd   r?   r@   r     s    zKellyKapowski._parse_inputsc             C   sx   |dkr:| j j}t|s6t| j j\}}}|d | }|S |dkrt| j j}t|spt| j j\}}}|d | }|S d S )Nr  Z_cortical_thicknessr  Z_warped_white_matter)rR   r  r   r   r  r  )r_   re   rf   rg   rh   r?   r?   r@   ri     s    zKellyKapowski._gen_filenamec                sr   |dkr,dj | jj| jj| jj}|j| S |dkr^| jd}| jd}dj ||}|j| S tt| j	|||S )Nr  z[{0},{1},{2}]r  r  z[{},{}])
r	  rR   r  r  r  r   ri   r]   r  r^   )r_   r`   ra   rb   r   ctZwm)rd   r?   r@   r^     s    




zKellyKapowski._format_arg)N)r(   r)   r*   ro   rr   r  rp   r  rq   r   Z_referencesr   ri   r^   rs   r?   r?   )rd   r@   r    s   r  )0ro   rK   r   Zexternal.duer   Zutils.filemanipr   r   r   r   baser
   r   r   r   r   r   Zmixinsr   r   r   r   rA   rE   rt   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r  r  r  ZAntsJointFusionInputSpecZAntsJointFusionOutputSpecr  r  r  r?   r?   r?   r@   <module>   sR    8 98N|  V (>,  ( 