diff --git a/bullseye_workflow/__init__.py b/bullseye_workflow/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..b5505772ab5b5c48b2cf09dcb5e337c92530a3b3
--- /dev/null
+++ b/bullseye_workflow/__init__.py
@@ -0,0 +1 @@
+__author__ = 'sanromag'
diff --git a/bullseye_workflow/bullseye_pipeline.py b/bullseye_workflow/bullseye_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37ecb1ec4d4e36f2d4508846398ea925bbd3097
--- /dev/null
+++ b/bullseye_workflow/bullseye_pipeline.py
@@ -0,0 +1,157 @@
+from __future__ import division
+
+import nipype.pipeline.engine as pe
+from nipype import SelectFiles
+import nipype.interfaces.utility as util
+
+from nipype import IdentityInterface, DataSink
+
+try:
+    from .utils import *  # package-relative import
+except ImportError:
+    from utils import *  # fallback when run directly as a script
+
+import os
+
+def create_bullseye_pipeline(name='bullseye'):
+    """creates the bullseye parcellation workflow from FreeSurfer outputs"""
+
+    bullwf = pe.Workflow(name=name)
+
+    inputnode = pe.Node(interface=IdentityInterface(fields=['subject_id', 'scans_dir', 'outputdir']),
+                        name='inputnode')
+
+    # outputnode
+    outputnode = pe.Node(IdentityInterface(fields=['out_bullseye_wmparc', 'subject']),
+                         name='outputnode')
+
+    # template for input files
+    template = {"ASEG": "{subject_id}/mri/aseg*gz",
+                "ANNOT_LH": "{subject_id}/label/lh.aparc.annot",
+                "ANNOT_RH": "{subject_id}/label/rh.aparc.annot",
+                "WHITE_LH": "{subject_id}/surf/lh.white",
+                "WHITE_RH": "{subject_id}/surf/rh.white",
+                "PIAL_LH": "{subject_id}/surf/lh.pial",
+                "PIAL_RH": "{subject_id}/surf/rh.pial",
+                "subject_id": "{subject_id}"}
+
+    fileselector = pe.Node(SelectFiles(template), name='fileselect')
+
+    # lobar parcellation
+    annot2label_lh = pe.Node(interface=Annot2Label(), name='annot2label_lh')
+    annot2label_lh.inputs.hemi = 'lh'
+    annot2label_lh.inputs.lobes = 'lobes'
+
+    annot2label_rh = pe.Node(interface=Annot2Label(), name='annot2label_rh')
+    annot2label_rh.inputs.hemi = 'rh'
+    annot2label_rh.inputs.lobes = 'lobes'
+
+    # aparc2aseg to map lobes into white matter volume
+    aparc2aseg = pe.Node(interface=Aparc2Aseg(), name='aparc2aseg')
+    aparc2aseg.inputs.annot = 'lobes'
+    aparc2aseg.inputs.labelwm = True
+    aparc2aseg.inputs.dmax = 1000
+    aparc2aseg.inputs.rip = True
+    aparc2aseg.inputs.hypo = True
+    aparc2aseg.inputs.out_file = 'lobes+aseg.nii.gz'
+
+    # group some lobes and discard others
+    filter_lobes = pe.Node(interface=util.Function(input_names=['in_file', 'include_superlist', 'fixed_id', 'map_pairs_list'], output_names=['out_file'],
+                                                   function=filter_labels), name='filter_lobes')
+    # here we include the insula (3007, 4007) with the frontal lobe (3001, 4001)
+    # and exclude the structure spanning the superior part from anterior to posterior (3003, 4003)
+    filter_lobes.inputs.include_superlist = [[3001, 3007], [4001, 4007], [3004], [4004], [3005], [4005], [3006], [4006]]  # lobar labels in WM
+    filter_lobes.inputs.fixed_id = None
+    # we give some informative label-ids
+    filter_lobes.inputs.map_pairs_list = [[3001, 11], [4001, 21], [3004, 12], [4004, 22], [3005, 13], [4005, 23], [3006, 14], [4006, 24]]
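+    # e.g. left-hemisphere WM lobes (3001, 3004, 3005, 3006) map to 11-14 and
+    # right-hemisphere ones (4001, 4004, 4005, 4006) map to 21-24, so the tens
+    # digit encodes the hemisphere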
+
+    # create ventricles and cortex masks
+    ventricles_mask = pe.Node(interface=util.Function(input_names=['in_file', 'include_superlist', 'fixed_id', 'map_pairs_list'], output_names=['out_file'],
+                                                function=filter_labels), name='ventricles_mask')
+    ventricles_mask.inputs.include_superlist = [[43, 4]]
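+    # (FreeSurfer aseg ids: 4/43 = left/right lateral ventricle)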
+    ventricles_mask.inputs.fixed_id = [1]
+    ventricles_mask.inputs.map_pairs_list = None
+
+    cortex_mask = pe.Node(interface=util.Function(input_names=['in_file', 'include_superlist', 'fixed_id', 'map_pairs_list'], output_names=['out_file'],
+                                                  function=filter_labels), name='cortex_mask')
+    cortex_mask.inputs.include_superlist = [[1001, 2001, 1004, 2004, 1005, 2005, 1006, 2006]]  # lobar labels in cortex
+    cortex_mask.inputs.fixed_id = [1]
+    cortex_mask.inputs.map_pairs_list = None
+
+    # create mask with basal ganglia + thalamus
+    bgt_mask = pe.Node(interface=util.Function(input_names=['in_file', 'include_superlist', 'fixed_id', 'map_pairs_list'], output_names=['out_file'],
+                                               function=filter_labels), name='bgt_mask')
+    bgt_mask.inputs.include_superlist = [[10, 49, 11, 12, 50, 51, 26, 58, 13, 52]]  # basal ganglia + thalamus
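+    # (FreeSurfer aseg ids: 10/49 thalamus, 11/50 caudate, 12/51 putamen, 13/52 pallidum, 26/58 accumbens)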
+    bgt_mask.inputs.fixed_id = [5]
+    bgt_mask.inputs.map_pairs_list = None
+
+    # create normalized distance map
+    ndist_map = pe.Node(interface=util.Function(input_names=['orig_file', 'dest_file'], output_names=['out_file'],
+                                                function=norm_dist_map), name='ndist_map')
+
+    # generate WM parcellations by filling the discarded lobes (3003, 4003) and unsegmented white matter (5001, 5002)
+    gen_wmparc = pe.Node(interface=util.Function(input_names=['incl_file', 'ndist_file', 'label_file', 'incl_labels', 'verbose'], output_names=['out_file'],
+                                                 function=generate_wmparc), name='gen_wmparc')
+    gen_wmparc.inputs.incl_labels = [3003, 4003, 5001, 5002] # the labels that need to be 'filled'
+    gen_wmparc.inputs.verbose = False
+
+    # include bgt into wmparc to create the final lobar wmparc
+    lobe_wmparc = pe.Node(interface=util.Function(input_names=['in1_file', 'in2_file', 'out_file', 'intersect'], output_names=['out_file'],
+                                                  function=merge_labels), name='lobe_wmparc')
+    lobe_wmparc.inputs.intersect = False
+    lobe_wmparc.inputs.out_file = 'lobes_wmparc.nii.gz'
+
+    # create depth shells using normalized distance maps
+    depth_wmparc = pe.Node(interface=util.Function(input_names=['ndist_file', 'n_shells', 'out_file', 'mask_file'], output_names=['out_file'],
+                                                   function=create_shells), name='depth_wmparc')
+    depth_wmparc.inputs.n_shells = 4
+    depth_wmparc.inputs.out_file = 'shells_wmparc.nii.gz'
+
+    # final bullseye parcellation by intersecting depth and lobar parcellations
+    bullseye_wmparc = pe.Node(interface=util.Function(input_names=['in1_file', 'in2_file', 'out_file', 'intersect'], output_names=['out_file'],
+                                                      function=merge_labels), name='bullseye_wmparc')
+    bullseye_wmparc.inputs.intersect = True
+    bullseye_wmparc.inputs.out_file = 'bullseye_wmparc.nii.gz'
+
+
+    ##### CONNECTIONS #####
+
+    bullwf.connect(inputnode, 'subject_id', fileselector, 'subject_id')
+    bullwf.connect(inputnode, 'scans_dir', fileselector, 'base_directory')
+
+    bullwf.connect(fileselector, 'subject_id', annot2label_lh, 'subject')
+    bullwf.connect(fileselector, 'ANNOT_LH', annot2label_lh, 'in_annot')
+    bullwf.connect(fileselector, 'subject_id', annot2label_rh, 'subject')
+    bullwf.connect(fileselector, 'ANNOT_RH', annot2label_rh, 'in_annot')
+
+    bullwf.connect(annot2label_rh, 'out_annot_file', aparc2aseg, 'in_lobes_rh')
+    bullwf.connect(annot2label_lh, 'out_annot_file', aparc2aseg, 'in_lobes_lh')
+    bullwf.connect(fileselector, 'subject_id', aparc2aseg, 'subject')
+    bullwf.connect(aparc2aseg, 'out_file', filter_lobes, 'in_file')
+
+    bullwf.connect(aparc2aseg, 'out_file', ventricles_mask, 'in_file')
+    bullwf.connect(aparc2aseg, 'out_file', cortex_mask, 'in_file')
+    bullwf.connect(aparc2aseg, 'out_file', bgt_mask, 'in_file')
+
+    bullwf.connect(ventricles_mask, 'out_file', ndist_map, 'orig_file')
+    bullwf.connect(cortex_mask, 'out_file', ndist_map, 'dest_file')
+
+    bullwf.connect(aparc2aseg, 'out_file', gen_wmparc, 'incl_file')
+    bullwf.connect(ndist_map, 'out_file', gen_wmparc, 'ndist_file')
+    bullwf.connect(filter_lobes, 'out_file', gen_wmparc, 'label_file')
+
+    bullwf.connect(gen_wmparc, 'out_file', lobe_wmparc, 'in1_file')
+    bullwf.connect(bgt_mask, 'out_file', lobe_wmparc, 'in2_file')
+
+    bullwf.connect(ndist_map, 'out_file', depth_wmparc, 'ndist_file')
+    bullwf.connect(lobe_wmparc, 'out_file', depth_wmparc, 'mask_file')
+
+    bullwf.connect(lobe_wmparc, 'out_file', bullseye_wmparc, 'in1_file')
+    bullwf.connect(depth_wmparc, 'out_file', bullseye_wmparc, 'in2_file')
+
+    bullwf.connect(bullseye_wmparc, 'out_file', outputnode, 'out_bullseye_wmparc')
+
+    return bullwf
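+
+# A minimal usage sketch (assumption: FREESURFER_HOME is set and the subject id
+# and paths below are placeholders, not part of this repository):
+#
+#   wf = create_bullseye_pipeline()
+#   wf.base_dir = '/tmp/wd'
+#   wf.inputs.inputnode.subject_id = 'sub-01'
+#   wf.inputs.inputnode.scans_dir = '/path/to/freesurfer/subjects'
+#   wf.run()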
+
diff --git a/bullseye_workflow/configoptions.py b/bullseye_workflow/configoptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..b22917037a3af9f73ccf5e346e9efa45490bfee9
--- /dev/null
+++ b/bullseye_workflow/configoptions.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Apr 18 15:18:10 2017
+
+@author: shahidm
+"""
+
+from os.path import realpath, join, abspath, dirname
+
+
+# defaults
+SCRIPT_PATH = dirname(realpath(__file__))
+
diff --git a/bullseye_workflow/create_flairreg_pipeline.py b/bullseye_workflow/create_flairreg_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..847f1e83cf33e8a29ed553cf09f93d3cdf32db57
--- /dev/null
+++ b/bullseye_workflow/create_flairreg_pipeline.py
@@ -0,0 +1,50 @@
+from __future__ import division
+
+import nipype.pipeline.engine as pe
+from nipype import SelectFiles
+import nipype.interfaces.utility as util
+from nipype.interfaces.freesurfer import ApplyVolTransform, BBRegister
+from nipype.interfaces.fsl import ImageStats
+
+from nipype import IdentityInterface, DataSink
+
+try:
+    from .utils import *  # package-relative import
+except ImportError:
+    from utils import *  # fallback when run directly as a script
+
+import os
+
+def create_flairreg_pipeline():
+    """registers the FLAIR image to the FreeSurfer anatomy with bbregister and applies the transform to the lesion map"""
+
+    flairreg = pe.Workflow(name='flairreg_pipeline')
+
+    inputnode = pe.Node(interface=IdentityInterface(fields=['subject_id', 'FLAIR', 'LESION']), name='inputnode')
+
+    bbreg = pe.Node(BBRegister(contrast_type='t2',
+                               out_fsl_file='flair2anat.mat',
+                               out_reg_file='flair2anat.dat',
+                               registered_file='flair2anat_bbreg.nii.gz',
+                               ),
+                               name='bbregister')
+
+    # apply transform to lesionmap
+    applyreg = pe.Node(ApplyVolTransform(), name="applyreg")
+    applyreg.inputs.fs_target = True  # resample into the FreeSurfer anatomical space
+
+
+    # outputnode
+    outputnode = pe.Node(IdentityInterface(fields=[
+        'lesion2anat', "flair2anat" ]),
+        name='outputnode')
+
+
+    ##### CONNECTIONS #####
+
+    flairreg.connect(inputnode, 'FLAIR', bbreg, 'source_file')
+    flairreg.connect(inputnode, 'subject_id', bbreg, 'subject_id')
+    flairreg.connect(bbreg, 'out_reg_file', applyreg, 'reg_file')
+    flairreg.connect(inputnode, 'LESION', applyreg, 'source_file')
+    flairreg.connect(applyreg, 'transformed_file', outputnode, 'lesion2anat')
+    flairreg.connect(bbreg, 'registered_file', outputnode, 'flair2anat')
+
+    return flairreg
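+
+# A minimal usage sketch (assumption: SUBJECTS_DIR points at the FreeSurfer
+# subjects directory; the subject id and file paths are placeholders):
+#
+#   wf = create_flairreg_pipeline()
+#   wf.inputs.inputnode.subject_id = 'sub-01'
+#   wf.inputs.inputnode.FLAIR = '/path/to/sub-01_FLAIR.nii.gz'
+#   wf.inputs.inputnode.LESION = '/path/to/sub-01_ples_lpa.nii.gz'
+#   wf.run()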
diff --git a/bullseye_workflow/run_bullseye_WMH_segmentation.py b/bullseye_workflow/run_bullseye_WMH_segmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e08c5ec7095ff112f37e61717616927897a3048
--- /dev/null
+++ b/bullseye_workflow/run_bullseye_WMH_segmentation.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+running bullseye segmentation for a list of subjects
+prerequisites:
+	- freesurfer
+	- flair/t2 and wmh probability maps
+"""
+
+from nipype import Node, Workflow, SelectFiles
+import nipype.interfaces.utility as util
+import nipype.interfaces.io as nio
+from bullseye_pipeline import create_bullseye_pipeline
+from create_flairreg_pipeline import create_flairreg_pipeline
+from utils import extract_parcellation
+import pandas as pd
+import os
+
+
+
+def create_bullseye_lesion(subjectlist):
+    """
+    a workflow to extract WMH volumes within the bullseye-segmented WM
+    """
+
+    # specify the locations of the preprocessed data
+    working_dir = "/data/pt_life_whm/Data/wd/"  # where intermediate files will be saved
+    freesurfer_dir = "/data/pt_life_freesurfer/freesurfer_all"  # where the FreeSurfer output directories are located
+    flairdir = "/data/pt_life_whm/Data/LST/"  # where the FLAIR/T2 images and WMH probability maps are located (assumed to be in the same directory)
+    outdir = "/data/pt_life_whm/Data/WMparcellations_indiv/"  # where outputs will be saved
+
+    os.environ['SUBJECTS_DIR'] = freesurfer_dir
+
+
+    # create node which returns subjects as iterables
+    identitynode = Node(util.IdentityInterface(fields=['subject']),
+                        name='identitynode')
+    identitynode.iterables = ('subject', subjectlist)
+
+    # select flair/t2 and wmh images
+    template_lesion = {"LESION": "sub-{subject_id}/ples_lpa_mFLAIR_bl.nii.gz",  # exact file name of the WMH probability map
+                       "FLAIR": "sub-{subject_id}/mFLAIR_bl.nii.gz"}  # exact name of the FLAIR/T2 image used for registration
+    fileselector_lesion = Node(SelectFiles(template_lesion), name='fileselect_lesion')
+    fileselector_lesion.inputs.base_directory = flairdir
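+    # e.g. for subject '1234' the LESION template resolves, under flairdir, to
+    # 'sub-1234/ples_lpa_mFLAIR_bl.nii.gz'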
+
+
+    # main workflow
+    bullseye_lesion = Workflow(name="bullseyelesion_bbreg")
+    bullseye_lesion.base_dir = working_dir
+
+    # bullseye WM segmentation part
+    bullseye = create_bullseye_pipeline()
+    bullseye.inputs.inputnode.scans_dir = freesurfer_dir
+
+    # WMH registration to the FreeSurfer anatomical space
+    # (bbregister locates the subject via the SUBJECTS_DIR environment variable set above)
+    lesionreg = create_flairreg_pipeline()
+
+    # extract WMH volumes from the bullseye segmentation
+    extractparc = Node(interface=util.Function(input_names=['in1_file', 'in2_file', 'subject_id', 'option'], output_names=['out_file'],
+                                               function=extract_parcellation), name='extractparc')
+    extractparc.inputs.option = "sum"  # "thr" and "sum" are the supported options
+
+    # generate datasink
+    datasink = Node(name="datasink", interface=nio.DataSink())
+    datasink.inputs.base_directory = outdir
+    datasink.inputs.substitutions = [('_subject_', '')]
+
+    # connect all nodes
+    bullseye_lesion.connect([
+        (identitynode, bullseye, [('subject', 'inputnode.subject_id')]),
+        (identitynode, fileselector_lesion, [('subject', 'subject_id')]),
+        (identitynode, lesionreg, [('subject', 'inputnode.subject_id')]),
+        (fileselector_lesion, lesionreg, [('FLAIR', 'inputnode.FLAIR')]),
+        (fileselector_lesion, lesionreg, [('LESION', 'inputnode.LESION')]),
+        (bullseye, extractparc, [('outputnode.out_bullseye_wmparc', 'in2_file')]),
+        (lesionreg, extractparc, [('outputnode.lesion2anat', 'in1_file')]),
+        (identitynode, extractparc, [('subject', 'subject_id')]),
+        (bullseye, datasink, [('outputnode.out_bullseye_wmparc', '@bullseye')]),
+        (lesionreg, datasink, [('outputnode.lesion2anat', '@lesion2anat')]),
+        (lesionreg, datasink, [('outputnode.flair2anat', '@flair2anat')]),
+        (extractparc, datasink, [('out_file', '@lesionparc')]),
+    ])
+
+    return bullseye_lesion
+
+
+if __name__ == '__main__':
+    # select participants
+    df = pd.read_csv('/data/participants.csv', sep=',')
+    df = df[df["dementia"] == 0]
+    subj = df['pseudonym'].values
+
+    # run workflow in multi-thread mode
+    bullseye_lesion = create_bullseye_lesion(subj)
+    bullseye_lesion.write_graph(graph2use='colored', simple_form=True)
+    bullseye_lesion.run(plugin='MultiProc', plugin_args={'n_procs': 16})
diff --git a/bullseye_workflow/utils.py b/bullseye_workflow/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8088435a8dfcbef3d737fc34ab81912ebef44355
--- /dev/null
+++ b/bullseye_workflow/utils.py
@@ -0,0 +1,391 @@
+from nipype.interfaces.base import (
+    traits,
+    TraitedSpec,
+    CommandLineInputSpec,
+    CommandLine,
+    File
+)
+
+import os
+
+def filter_labels(in_file, include_superlist, fixed_id=None, map_pairs_list=None):
+    """filters-out labels not in the include-superset. Merges labels within superset. Transforms label-ids according to mappings (or fixed id)"""
+    import nibabel as nib
+    import numpy as np
+    import os
+
+    # read label file and create output
+    in_nib = nib.load(in_file)
+    in0 = in_nib.get_data()
+    out0 = np.zeros(in0.shape, dtype=in0.dtype)
+
+    # for each group of labels in subset assign them the same id (either 1st in subset or fixed-id, in case given)
+    for labels_list in include_superlist:
+        for label in labels_list:
+            value = labels_list[0]
+            if fixed_id is not None: value = fixed_id[0]
+            out0[in0 == label] = value
+
+    # transform label-ids in case mapping is specified
+    if map_pairs_list is not None:
+        out1 = np.copy(out0)
+        for map_pair in map_pairs_list:
+            out1[out0 == map_pair[0]] = map_pair[1]
+
+    # save output
+    out_final = out0 if not map_pairs_list else out1
+    out_nib = nib.Nifti1Image(out_final, in_nib.affine, in_nib.header)
+    nib.save(out_nib, 'filtered.nii.gz')
+
+    return os.path.abspath('filtered.nii.gz')
+
+
+def norm_dist_map(orig_file, dest_file):
+    """compute normalized distance map given an origin and destination masks, resp."""
+    import os
+    import nibabel as nib
+    import numpy as np
+    from scipy.ndimage import distance_transform_edt
+
+    orig_nib = nib.load(orig_file)
+    dest_nib = nib.load(dest_file)
+
+    orig = orig_nib.get_data()
+    dest = dest_nib.get_data()
+
+    dist_orig = distance_transform_edt(np.logical_not(orig.astype(bool)))
+    dist_dest = distance_transform_edt(np.logical_not(dest.astype(bool)))
+
+    # normalized distance (0 in origin to 1 in dest)
+    ndist = dist_orig / (dist_orig + dist_dest)
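+    # e.g. a voxel 2mm from the ventricle mask and 6mm from the cortex mask
+    # gets ndist = 2 / (2 + 6) = 0.25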
+
+    ndist_nib = nib.Nifti1Image(ndist.astype(np.float32), orig_nib.affine)
+    nib.save(ndist_nib, 'ndist.nii.gz')
+
+    return os.path.abspath('ndist.nii.gz')
+
+def create_shells(ndist_file, n_shells=4, out_file = 'shells.nii.gz', mask_file=None):
+    """creates specified number of shells given normalized distance map. When mask is given, output in mask == 0 is set to zero"""
+    import os
+    import nibabel as nib
+    import numpy as np
+
+    ndist_nib = nib.load(ndist_file)
+    ndist = ndist_nib.get_data()
+
+    # if mask is provided, use it to mask-out regions outside it
+    if mask_file is not None:
+        mask_nib = nib.load(mask_file)
+        assert mask_nib.header.get_data_shape() == ndist_nib.header.get_data_shape(), "Different shapes of images"
+        mask = mask_nib.get_data() > 0
+
+    out = np.zeros(ndist.shape, dtype=np.int8)
+
+    limits = np.linspace(0., 1., n_shells+1)
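+    # e.g. n_shells=4 gives limits [0., 0.25, 0.5, 0.75, 1.], i.e. four
+    # equal-width depth bins between ventricles (0) and cortex (1)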
+    for i in np.arange(n_shells)+1:
+        # compute shell and assign an increasing label-id
+        mask2 = np.logical_and(ndist >= limits[i-1], ndist < limits[i])
+        if mask_file is not None:  # maskout regions outside mask
+            mask2 = np.logical_and(mask2, mask)
+        out[mask2] = i
+    out[np.isclose(ndist, 0.)] = 0  # need to assign zero to ventricles because of >= above
+
+    aux_hdr = ndist_nib.header
+    aux_hdr.set_data_dtype(np.int8)
+
+    out_nib = nib.Nifti1Image(out, ndist_nib.affine, aux_hdr)
+    nib.save(out_nib, out_file)
+
+    return os.path.abspath(out_file)
+
+
+def merge_labels(in1_file, in2_file, out_file='merged.nii.gz', intersect=False):
+    """merges labels from two input labelmaps, optionally computing intersection"""
+    import os
+    import nibabel as nib
+    import numpy as np
+
+    in1_nib = nib.load(in1_file) #lobe
+    in2_nib = nib.load(in2_file) #depth
+
+    assert in1_nib.header.get_data_shape() == in2_nib.header.get_data_shape(), "Different shapes of images"
+
+    in1 = in1_nib.get_data()
+    in2 = in2_nib.get_data()
+
+    out = None
+
+    # if not intersection, simply include labels from 'in2' into 'in1'
+    if not intersect:
+
+        out = np.zeros(in1.shape, dtype=np.int8)
+
+        out[:] = in1[:]
+        mask = in2 > 0
+        out[mask] = in2[mask]  # overwrite in1 where in2 > 0
+
+
+        aux_hdr = in1_nib.header
+        aux_hdr.set_data_dtype(np.int8)
+
+    # if intersection, create new label-set as cartesian product of the two sets
+    else:
+
+        out = np.zeros(in1.shape, dtype=np.int32)
+
+        u1_set = np.unique(in1.ravel())
+        u2_set = np.unique(in2.ravel())
+
+        for u1 in u1_set:
+            if u1 == 0: continue
+            mask1 = in1 == u1
+            for u2 in u2_set:
+                if u2 == 0: continue
+                mask2 = in2 == u2
+                mask3 = np.logical_and(mask1, mask2)
+                if not np.any(mask3): continue
+                out[mask3] = int(str(u1) + str(u2))  # new label id by concatenating [u1, u2]
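+                # (e.g. lobe label 11 with shell label 3 yields bullseye label 113)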
+
+        aux_hdr = in1_nib.header
+        aux_hdr.set_data_dtype(np.int32)
+
+    out_nib = nib.Nifti1Image(out, in1_nib.affine, aux_hdr)
+    nib.save(out_nib, out_file)
+
+    return os.path.abspath(out_file)
+
+
+def generate_wmparc(incl_file, ndist_file, label_file, incl_labels=None, verbose=False):
+    """generates wmparc by propagating labels in 'label_file' down the gradient defined by distance map in 'ndist_file'.
+    Labels are only propagated in regions where 'incl_file' > 0 (or 'incl_file' == incl_labels[i], if 'incl_labels is provided).
+    """
+    import os
+    import nibabel as nib
+    import numpy as np
+    from scipy.ndimage import binary_dilation, generate_binary_structure, iterate_structure
+
+    connectivity = generate_binary_structure(3, 2)
+
+    # read images
+    incl_nib = nib.load(incl_file)
+    ndist_nib = nib.load(ndist_file)
+    label_nib = nib.load(label_file)
+
+    assert incl_nib.header.get_data_shape() == ndist_nib.header.get_data_shape() and \
+           incl_nib.header.get_data_shape() == label_nib.header.get_data_shape(), "Different shapes of mask, ndist and label images"
+
+    # create inclusion mask
+    incl_mask = None
+    incl_aux = incl_nib.get_data()
+    if incl_labels is None:
+        incl_mask = incl_aux > 0
+    else:
+        incl_mask = np.zeros(incl_nib.header.get_data_shape(), dtype=bool)
+        for lab in incl_labels:
+            incl_mask[incl_aux == lab] = True
+
+    # get rest of numpy arrays
+    ndist = ndist_nib.get_data()
+    label = label_nib.get_data()
+
+    # get DONE and processing masks
+    DONE_mask = label > 0  # this is for using freesurfer wmparc
+    proc_mask = np.logical_and(np.logical_and(ndist > 0., ndist < 1.), incl_mask)
+
+    # set up the output vol
+    out = np.zeros(label.shape, dtype=label.dtype)
+
+    # initialize labels in cortex
+    out[DONE_mask] = label[DONE_mask]  # this is for using freesurfer wmparc
+
+    # start with connectivity 1
+    its_conn = 1
+
+    # main loop
+    while not np.all(DONE_mask[proc_mask]):
+
+        if verbose:
+            print('%0.1f done' % (100. * float(DONE_mask[proc_mask].sum()) / float(proc_mask.sum())))
+
+        # loop to increase connectivity for non-reachable TO-DO points
+        while True:
+
+            # dilate the SOLVED area
+            aux = binary_dilation(DONE_mask, iterate_structure(connectivity, its_conn))
+            # next TO-DO: close to DONE, in the processing mask and not yet done
+            TODO_mask = np.logical_and(np.logical_and(aux, proc_mask), np.logical_not(DONE_mask))
+
+            if TODO_mask.sum() > 0:
+                break
+
+            if verbose:
+                print('Non-reachable points. Increasing connectivity')
+
+            its_conn += 1
+
+        # sort TO-DO points by ndist
+        Idx_TODO = np.argwhere(TODO_mask)
+        Idx_ravel = np.ravel_multi_index(Idx_TODO.T, label.shape)
+        I_sort = np.argsort(ndist.ravel()[Idx_ravel])
+
+        # iterate along TO-DO points
+        for idx in Idx_TODO[I_sort[::-1]]:
+
+            max_dist = -1.
+
+            # process each neighbor
+            for off in np.argwhere(iterate_structure(connectivity, its_conn)) - its_conn:
+
+                try:
+
+                    # if it is not DONE then skip
+                    if not DONE_mask[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]:
+                        continue
+
+                    # if it is the largest distance (ie, largest gradient)
+                    cur_dist = ndist[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]
+                    if cur_dist > max_dist:
+                        out[idx[0], idx[1], idx[2]] = out[idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]]
+                        max_dist = cur_dist
+
+                except IndexError:
+                    # neighbour index fell outside the volume
+                    print('something wrong with neighbor at: (%d, %d, %d)' % (
+                        idx[0] + off[0], idx[1] + off[1], idx[2] + off[2]))
+
+            if max_dist < 0.: print("something went wrong with point: (%d, %d, %d)" % (idx[0], idx[1], idx[2]))
+
+            # mark as solved and remove from visited
+            DONE_mask[idx[0], idx[1], idx[2]] = True
+
+    # # remove labels from cortex (old aparc version)
+    # out[dest_mask] = 0
+
+    print('Writing output labelmap')
+    out_nib = nib.Nifti1Image(out, label_nib.affine, label_nib.header)
+    nib.save(out_nib, 'wmparc.nii.gz')
+
+    return os.path.abspath('wmparc.nii.gz')
+
+
+class Annot2LabelInputSpec(CommandLineInputSpec):
+    subject = traits.String(desc='subject id', argstr='--subject %s', position=0, mandatory=True)
+    hemi = traits.Enum("rh", "lh", desc="hemisphere [rh | lh]", position=1, argstr="--hemi %s", mandatory=True)
+    lobes = traits.Enum("lobes", desc='lobes type', argstr='--lobesStrict %s', position=2)
+    in_annot = traits.File(desc='input annotation file', exists=True)
+
+class Annot2LabelOutputSpec(TraitedSpec):
+    out_annot_file = File(desc = "lobes annotation file", exists = True)
+
+class Annot2Label(CommandLine):
+    """wrapper for FreeSurfer command-line tool 'mri_annotation2label'"""
+    input_spec = Annot2LabelInputSpec
+    output_spec = Annot2LabelOutputSpec
+    _cmd = os.path.join(os.environ['FREESURFER_HOME'], 'bin', 'mri_annotation2label')
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_annot_file'] = os.path.join(os.path.dirname(self.inputs.in_annot), self.inputs.hemi + ".lobes.annot")
+        return outputs
+
+    def _format_arg(self, name, spec, value):
+        if name == 'subject':
+            # take only the last part of the subject path
+            return spec.argstr % os.path.basename(os.path.normpath(self.inputs.subject))
+        return super(Annot2Label, self)._format_arg(name, spec, value)
+
+
+class Aparc2AsegInputSpec(CommandLineInputSpec):
+    subject = traits.String(desc='subject id', argstr='--s %s', position=0, mandatory=True)
+    annot = traits.String(desc='name of annot file', argstr='--annot %s', position=1, mandatory=True)
+    labelwm = traits.Bool(desc='percolate white matter', argstr='--labelwm', position=2)
+    dmax = traits.Int(desc='depth to percolate', argstr='--wmparc-dmax %d', position=3)
+    rip = traits.Bool(desc='rip unknown label', argstr='--rip-unknown', position=4)
+    hypo = traits.Bool(desc='hypointensities as wm', argstr='--hypo-as-wm', position=5)
+    out_file = traits.File(desc='output aseg file', argstr='--o %s', position=6)
+    in_lobes_rh = traits.File(desc='input lobar file RH', exists=True)
+    in_lobes_lh = traits.File(desc='input lobar file LH', exists=True)
+
+class Aparc2AsegOutputSpec(TraitedSpec):
+    out_file = File(desc = "lobes aseg file", exists = True)
+
+class Aparc2Aseg(CommandLine):
+    """wrapper for FreeSurfer command-line tool 'mri_aparc2aseg'"""
+    input_spec = Aparc2AsegInputSpec
+    output_spec = Aparc2AsegOutputSpec
+
+    _cmd = os.path.join(os.environ['FREESURFER_HOME'], 'bin', 'mri_aparc2aseg')
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+    def _format_arg(self, name, spec, value):
+        if name == 'subject':
+            # take only the last part of the subject path
+            return spec.argstr % os.path.basename(os.path.normpath(self.inputs.subject))
+        return super(Aparc2Aseg, self)._format_arg(name, spec, value)
+
+def make_list(in1, in2):
+    return [in1, in2]
+
+
+def extract_parcellation(in1_file, in2_file, subject_id, option="sum"):
+    """extracts WMH volumes from Bullseye parcellation:
+    
+    Parameters
+    ----------
+    in1_file : str
+        The file location of the coregistered WMH probability image
+    in2_file: str
+    	The file location of the Bullseye segmentation with labels 51:244
+    option: str
+    	The way the WMH volume attributed to Bullseye regions is calculated:
+    		thr refers to adding up all voxels with >0.1 equally in the bins representing Bullseye regions
+    		sum refers to no thresholding but instead of summing up probabilities within bins representing Bullseye regions
+
+    Returns
+    -------
+    out_file
+        path to the output file
+    """    
+    import os
+    import nibabel as nib
+    import numpy as np
+
+    in1_nib = nib.load(in1_file)  # WMH probability map
+    in2_nib = nib.load(in2_file)  # bullseye parcellation
+
+    assert in1_nib.header.get_data_shape() == in2_nib.header.get_data_shape(), "Different shapes of images"
+
+    in1 = in1_nib.get_fdata()
+    in2 = in2_nib.get_fdata()
+
+    # the 36 bullseye labels: 4 lobes x 2 hemispheres x 4 depth shells, plus the
+    # basal ganglia/thalamus shells 51-54
+    bullseye_labels = np.array([51, 52, 53, 54, 111, 112, 113, 114, 121, 122, 123, 124,
+                                131, 132, 133, 134, 141, 142, 143, 144, 211, 212, 213, 214,
+                                221, 222, 223, 224, 231, 232, 233, 234, 241, 242, 243, 244])
+
+    if option == "thr":
+        out_file = 'res_thr.txt'
+        in1m = in1 > 0.1
+        vals = in2[in1m].astype(int)
+        # minlength=245 makes sure all bins exist even if a participant has no lesions there
+        counts = np.bincount(vals, minlength=245)[np.concatenate(([0], bullseye_labels))]
+    elif option == "sum":
+        out_file = 'res_sum.txt'
+        counts = []
+        for i in bullseye_labels:
+            maskedWML = in1[in2 == i]
+            counts.append(maskedWML.sum())
+    else:
+        raise Exception("No acceptable extraction method given")
+
+    np.savetxt(out_file, counts, header=str(subject_id))
+
+    return os.path.abspath(out_file)
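+
+# A minimal sketch of standalone use (the file names and subject id are placeholders):
+#
+#   out = extract_parcellation('lesion2anat.nii.gz', 'bullseye_wmparc.nii.gz',
+#                              subject_id='sub-01', option='sum')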