From ceac7c0f38878101876068a0cccc7bf945948db2 Mon Sep 17 00:00:00 2001 From: Wade Boohar Date: Wed, 20 Aug 2025 13:04:19 -0700 Subject: [PATCH 1/7] fix npex dependency --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 60d3338..7d8961a 100755 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ 'opencv-python-headless>=4.1.0.25', 'matplotlib>=2.1.0', 'foco-improc', - 'git+https://github.com/focolab/npex', + 'npex @ git+https://github.com/focolab/npex', 'imreg_dft', 'fastcluster', 'pyqt6', @@ -76,7 +76,7 @@ 'opencv-python-headless>=4.1.0.25', 'matplotlib>=2.1.0', 'foco-improc', - 'git+https://github.com/focolab/npex', + 'npex @ git+https://github.com/focolab/npex', 'imreg_dft', 'fastcluster', 'napari[all]', From 9b87142f13373b2dbaf4e0a27a6db71c7762c2df Mon Sep 17 00:00:00 2001 From: warbol Date: Tue, 2 Dec 2025 15:44:49 -0800 Subject: [PATCH 2/7] fix to allow compatibility w/ numpy 2.0, napari 0.5.0 --- eats_worm/Curator.py | 6 +++--- eats_worm/Extractor.py | 6 +++--- eats_worm/Threads.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/eats_worm/Curator.py b/eats_worm/Curator.py index 95a9f2e..8c38757 100644 --- a/eats_worm/Curator.py +++ b/eats_worm/Curator.py @@ -155,8 +155,8 @@ def restart(self): edge_width_is_relative=False point_symbol = 'disc' face_color = np.array([0,0,0,0]) - self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, edge_color='red', name='roi', size=point_size+1, scale=self.scale, edge_width=edge_width*1.25, edge_width_is_relative=edge_width_is_relative) - self.other_rois = self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, edge_color='green', name='other rois', size=point_size, scale=self.scale, edge_width=edge_width, edge_width_is_relative=edge_width_is_relative) + self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='red', name='roi', size=point_size+1, 
scale=self.scale, border_width=edge_width*1.25, border_width_is_relative=edge_width_is_relative) + self.other_rois = self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='green', name='other rois', size=point_size, scale=self.scale, border_width=edge_width, border_width_is_relative=edge_width_is_relative) if self.curator_layers: for layer in self.curator_layers.keys(): @@ -741,7 +741,7 @@ def add_roi(self, position, t): self.num_neurons += 1 print('Saving blob timeseries as numpy object...') self.e.timeseries = np.hstack((self.e.timeseries, np.empty((self.e.timeseries.shape[0], 1)))) - self.e.timeseries[:,-1] = np.NaN + self.e.timeseries[:,-1] = np.nan self.e.spool.export(f=os.path.join(self.e.output_dir, 'threads.obj')) self.e.save_timeseries() self.e.save_dataframe() diff --git a/eats_worm/Extractor.py b/eats_worm/Extractor.py index 3c7c2bd..8b291ff 100755 --- a/eats_worm/Extractor.py +++ b/eats_worm/Extractor.py @@ -67,7 +67,7 @@ def background_subtraction_quant_function(im, spool, t, frames, quant_radius=3, activity: list list of quantifications corresponding to the positions specified """ - intensities = [np.NaN] * len(spool.threads) + intensities = [np.nan] * len(spool.threads) positions = spool.get_positions_t(t, indices=threads_to_quantify) positions = np.rint(np.copy(positions)).astype(int) max_z = len(frames) # in case of max_x, max_y, max_z, we're using these as indices so don't subtract 1 because slicing is exclusive @@ -234,13 +234,13 @@ def quantify(mft=None, extractor=None, quant_function=background_subtraction_qua print(e) timeseries = np.empty((num_t,num_threads)) - timeseries[:] = np.NaN + timeseries[:] = np.nan quantified_voxels = {i: {} for i in range(num_threads)} quant_lock = Lock() processed_counter = [0] def quantify_in_parallel_thread(start, stop): thread_timeseries = np.empty((stop - start, num_threads)) - thread_timeseries[:] = np.NaN + thread_timeseries[:] = np.nan thread_quantified_voxels = 
None if save_quantification_voxels: thread_quantified_voxels = {i: {} for i in range(num_threads)} diff --git a/eats_worm/Threads.py b/eats_worm/Threads.py index 0607095..0615fdc 100755 --- a/eats_worm/Threads.py +++ b/eats_worm/Threads.py @@ -438,7 +438,7 @@ def __init__(self, position = [], t = 0, **kwargs): self.found = np.zeros((maxt)) #self.positions = [] self.t = [] - if position != []: + if list(position) != []: self.positions[t] = np.array(position) #self.t = t + 1 #self.positions.append(np.array(position)) From c4710e2140ded218e9e662d2afeb38b36fb0c0ef Mon Sep 17 00:00:00 2001 From: warbol Date: Thu, 11 Dec 2025 18:27:46 -0800 Subject: [PATCH 3/7] update dependencies for py3.13 --- pyproject.toml | 55 ++++++++++++++++++++++++++--- setup.py | 93 -------------------------------------------------- 2 files changed, 51 insertions(+), 97 deletions(-) delete mode 100755 setup.py diff --git a/pyproject.toml b/pyproject.toml index e3e06ac..23eed24 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,52 @@ [build-system] -build-backend = 'setuptools.build_meta' -requires = [ - 'setuptools', -] \ No newline at end of file +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "eats_worm" +version = "0.0.3" +requires-python = "<3.14" +description = "Method for extracting GCaMP signal from volumetric imaging recordings" +readme = "README.md" +authors = [ + {name = "UCSF FOCO Lab", email = "focolabdev@gmail.com"} +] +license = {text = "MIT License"} +classifiers = [ + "Programming Language :: Python :: 3", + "Framework :: napari", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +dependencies = [ + "numpy", + "scipy", + "tifffile", + "opencv-python-headless", + "matplotlib", + "foco-improc", + "npex @ git+https://github.com/focolab/npex", + "imreg_dft", + "fastcluster", + "pyqtgraph", + "magicgui", + "pandas", + "scikit-image", + "xmltodict", + "pynwb", + "nwbinspector", + "dandi", + 
"remfile", + # MacOS (Darwin) specific dependencies + "pyqt6; sys_platform == 'darwin'", + "napari[pyqt6_experimental]; sys_platform == 'darwin'", + # Linux/Windows specific dependencies + "napari[all]; sys_platform != 'darwin'", +] + +[project.entry-points."napari.manifest"] +eats-worm = "eats_worm:napari.yaml" + +[tool.setuptools] +packages = ["eats_worm"] +include-package-data = true diff --git a/setup.py b/setup.py deleted file mode 100755 index 7d8961a..0000000 --- a/setup.py +++ /dev/null @@ -1,93 +0,0 @@ -import setuptools -import os, platform - -if platform.system() == 'Darwin': - setuptools.setup( - name="eats_worm", - version="0.0.3", - author="UCSF FOCO Lab", - author_email="focolabdev@gmail.com", - description="Method for extracting GCaMP signal from volumetric imaging recordings", - long_description_content_type=open('README.md').read(), - url="", - packages=setuptools.find_packages(), - include_package_data=True, - entry_points = { - 'napari.manifest': [ - 'eats-worm = eats_worm:napari.yaml' - ] - }, - classifiers=[ - "Programming Language :: Python :: 3", - "Framework :: napari", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - install_requires=[ - 'numpy>=1.22.4', - 'scipy>=1.0.0', - 'tifffile>=2022.5.4', - 'opencv-python-headless>=4.1.0.25', - 'matplotlib>=2.1.0', - 'foco-improc', - 'npex @ git+https://github.com/focolab/npex', - 'imreg_dft', - 'fastcluster', - 'pyqt6', - 'napari[pyqt6_experimental]', - 'pyqtgraph==0.13', - 'magicgui', - 'pandas>=1.4.2', - 'scikit-image', - 'xmltodict', - 'pynwb', - 'nwbinspector', - 'dandi' - ], - python_requires='<3.11' - ) - -else: - setuptools.setup( - name="eats_worm", - version="0.0.3", - author="UCSF FOCO Lab", - author_email="focolabdev@gmail.com", - description="Method for extracting GCaMP signal from volumetric imaging recordings", - long_description_content_type=open('README.md').read(), - url="", - packages=setuptools.find_packages(), - 
include_package_data=True, - entry_points = { - 'napari.manifest': [ - 'eats-worm = eats_worm:napari.yaml' - ] - }, - classifiers=[ - "Programming Language :: Python :: 3", - "Framework :: napari", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - install_requires=[ - 'numpy>=1.22.4', - 'scipy>=1.0.0', - 'tifffile>=2022.5.4', - 'opencv-python-headless>=4.1.0.25', - 'matplotlib>=2.1.0', - 'foco-improc', - 'npex @ git+https://github.com/focolab/npex', - 'imreg_dft', - 'fastcluster', - 'napari[all]', - 'pyqtgraph==0.13', - 'magicgui', - 'pandas>=1.4.2', - 'scikit-image', - 'xmltodict', - 'pynwb', - 'nwbinspector', - 'dandi' - ], - python_requires='<3.11' - ) From 6439ae9ed9ac1981b64e072f60d7793b328d0ceb Mon Sep 17 00:00:00 2001 From: warbol Date: Tue, 6 Jan 2026 11:41:38 -0800 Subject: [PATCH 4/7] add ndx-multichannel-volume dependency and package data manifest --- .gitignore | 3 ++- MANIFEST.in | 1 + pyproject.toml | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 MANIFEST.in diff --git a/.gitignore b/.gitignore index f6aa268..7e73cff 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *egg-info .DS_Store *.toml -*__pycache__* \ No newline at end of file +*__pycache__* +build/ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..9250ae7 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include eats_worm/napari.yaml \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 23eed24..0532ce6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ requires = ["setuptools>=61.0"] build-backend = "setuptools.build_meta" [project] -name = "eats_worm" +name = "eats-worm" version = "0.0.3" requires-python = "<3.14" description = "Method for extracting GCaMP signal from volumetric imaging recordings" @@ -15,7 +15,7 @@ license = {text = "MIT License"} classifiers = [ "Programming Language :: Python :: 3", "Framework :: napari", - "License :: OSI Approved :: 
MIT License", + "License :: OSI zApproved :: MIT License", "Operating System :: OS Independent", ] dependencies = [ @@ -26,6 +26,7 @@ dependencies = [ "matplotlib", "foco-improc", "npex @ git+https://github.com/focolab/npex", + "ndx-multichannel-volume @ git+https://github.com/focolab/ndx-multichannel-volume", "imreg_dft", "fastcluster", "pyqtgraph", From db76405fb73be2411afc1b3ff6e8c8b8d8c4c54c Mon Sep 17 00:00:00 2001 From: warbol Date: Tue, 6 Jan 2026 15:55:22 -0800 Subject: [PATCH 5/7] improving backwards compatibility for 3.8/3.9 and updating readme --- README.md | 2 +- eats_worm/Curator.py | 9 +++++++-- pyproject.toml | 6 ++++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 3f09f87..a2a6b8c 100755 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ### Installation Instructions -1. Setup your development environment. If you need help with this, there are many resources on Google. Most people in the lab use Conda as a package manager and develop in Conda virtual environments as well. +1. Setup your development environment. If you need help with this, there are many resources on Google. Most people in the lab use Conda as a package manager and develop in Conda virtual environments as well. Python versions 3.10-3.13 are recommended. 3.8-3.9 seem to work with minor graphical bugs in Napari but we cannot guarantee consistent results for these versions. 2.
Start your virtual environment, and run the command ```bash diff --git a/eats_worm/Curator.py b/eats_worm/Curator.py index 8c38757..c5df2b2 100644 --- a/eats_worm/Curator.py +++ b/eats_worm/Curator.py @@ -155,8 +155,13 @@ def restart(self): edge_width_is_relative=False point_symbol = 'disc' face_color = np.array([0,0,0,0]) - self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='red', name='roi', size=point_size+1, scale=self.scale, border_width=edge_width*1.25, border_width_is_relative=edge_width_is_relative) - self.other_rois = self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='green', name='other rois', size=point_size, scale=self.scale, border_width=edge_width, border_width_is_relative=edge_width_is_relative) + try: + self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='red', name='roi', size=point_size+1, scale=self.scale, border_width=edge_width*1.25, border_width_is_relative=edge_width_is_relative) + self.other_rois = self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, border_color='green', name='other rois', size=point_size, scale=self.scale, border_width=edge_width, border_width_is_relative=edge_width_is_relative) + + except: + self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, edge_color='red', name='roi', size=point_size+1, scale=self.scale, edge_width=edge_width*1.25, edge_width_is_relative=edge_width_is_relative) + self.other_rois = self.viewer.add_points(np.empty((0, 3)), symbol=point_symbol, face_color=face_color, edge_color='green', name='other rois', size=point_size, scale=self.scale, edge_width=edge_width, edge_width_is_relative=edge_width_is_relative) if self.curator_layers: for layer in self.curator_layers.keys(): diff --git a/pyproject.toml b/pyproject.toml index 0532ce6..bd19b9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,8 
@@ dependencies = [ "npex @ git+https://github.com/focolab/npex", "ndx-multichannel-volume @ git+https://github.com/focolab/ndx-multichannel-volume", "imreg_dft", - "fastcluster", + "fastcluster==1.3.0; python_version >= '3.9'", + "fastcluster==1.2.6; python_version < '3.9'", "pyqtgraph", "magicgui", "pandas", @@ -42,7 +43,8 @@ dependencies = [ "pyqt6; sys_platform == 'darwin'", "napari[pyqt6_experimental]; sys_platform == 'darwin'", # Linux/Windows specific dependencies - "napari[all]; sys_platform != 'darwin'", + "napari[all]; sys_platform != 'darwin' and python_version >= '3.8'", + "napari; python_version < '3.8'" ] [project.entry-points."napari.manifest"] From 6a9cfe413db9f78a8b13becdc629fdca1720e0b6 Mon Sep 17 00:00:00 2001 From: warbol Date: Tue, 6 Jan 2026 16:03:32 -0800 Subject: [PATCH 6/7] typo --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index bd19b9f..d7c54e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ license = {text = "MIT License"} classifiers = [ "Programming Language :: Python :: 3", "Framework :: napari", - "License :: OSI zApproved :: MIT License", + "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ] dependencies = [ From 45c8d65767ee80a2fa08719c84ca357873223a74 Mon Sep 17 00:00:00 2001 From: warbol Date: Mon, 2 Feb 2026 14:38:53 -0800 Subject: [PATCH 7/7] refactor peak detection out of Extractor.py into its own class --- eats_worm/Detection.py | 331 +++++++++++++++++++++++++++++++++++++++++ eats_worm/Extractor.py | 268 ++++----------------------------- 2 files changed, 360 insertions(+), 239 deletions(-) create mode 100644 eats_worm/Detection.py diff --git a/eats_worm/Detection.py b/eats_worm/Detection.py new file mode 100644 index 0000000..cb24b58 --- /dev/null +++ b/eats_worm/Detection.py @@ -0,0 +1,331 @@ +import numpy as np +from improc.segfunctions import * +from improc.segfunctions import peak_local_max as 
ani_peak_local_max +from scipy.ndimage import rotate +from skimage.registration import phase_cross_correlation +from skimage.filters import rank +from skimage.feature import peak_local_max +import cv2 + +class PeaksResult: + def __init__(self, positions=None, delta_t = 1, offset = np.array([0,0,0])): + self.positions = positions + self.delta_t = delta_t + self.offset = offset + + +class Segmenter(object): + def __new__(cls, algorithm, *args, **kwargs): + tmip_options = ['tmip', 'tmip_2d_template', 'seeded_tmip', 'seeded_tmip_2d_template'] + fbf_options = ['skimage', 'threed', 'template', 'curated', 'seeded_skimage', 'findpeaks2d'] + + valid_options = tmip_options + fbf_options + + if cls is Segmenter: + if algorithm in tmip_options : return super(Segmenter, cls).__new__(TMIPSegmenter) + if algorithm in fbf_options : return super(Segmenter, cls).__new__(FrameByFrameSegmenter) + + raise ValueError(f"Unknown algorithm {algorithm}. Options are: {valid_options}") + else: + return super(Segmenter, cls).__new__(cls) + + def __init__(self, algorithm, im, algorithm_params): + self.algorithm = algorithm + self.im = im + self.algorithm_params = algorithm_params + + self.frames = self.im.frames + + ## peakfinding/spooling parameters + self.gaussian = algorithm_params.get('gaussian', (25,4,3,1)) + self.median = algorithm_params.get('median', 3) + self.quantile = algorithm_params.get('quantile', 0.99) + self.reg_peak_dist = algorithm_params.get('reg_peak_dist', 40) + self.peakfind_channel = algorithm_params.get('peakfind_channel',0) + self.register = algorithm_params.get('register', False) + self.curator_layers = algorithm_params.get('curator_layers', False) + try: + self.algorithm_params['templates_made'] = type(self.algorithm_params['template']) != bool + except: + self.algorithm_params['templates_made'] = False + + # self.start_t and self.end_t are time index cutoffs for partial analysis + self.start_t = algorithm_params['start_t'] + self.end_t = algorithm_params['end_t'] + + 
+ def process_step(self): + """ + Subclasses must implement this method. + """ + raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement 'process_step'") + + +class TMIPSegmenter(Segmenter): + def __init__(self, algorithm, im, algorithm_params): + super().__init__(algorithm, im, algorithm_params) + self.window_size = self.algorithm_params.get('window_size', 10) + self.ims = [] + self.offsets = [] + self.last_offset = None + self.last_im = None + self.templated = '2d_template' in self.algorithm + self.seeded = 'seeded' in self.algorithm + if self.templated and self.curator_layers: + self.curator_layers = {'filtered': {'type': 'image', 'data' : []}} + + if all(isinstance(dimension, int) for dimension in self.im.anisotropy): + expanded_shape = tuple([dim_len * ani for dim_len, ani in zip(self.im.get_t(0).shape, self.im.anisotropy)]) + self.mask = np.zeros(expanded_shape, dtype=np.uint16) + self.mask[tuple([np.s_[::ani] for ani in self.im.anisotropy])] = 1 + + + + def process_step(self, t): + im1 = self.im.get_t(t, channel = self.peakfind_channel) + im1 = medFilter2d(im1, self.median) + + if self.seeded and t == self.start_t: + peaks = np.array(self.algorithm_params['peaks']) + self.last_offset = np.array([0,0,0]) + self.last_im = np.copy(im1) + return PeaksResult(peaks) + + else: + if self.register and t != self.start_t: + _off = phase_cross_correlation(self.last_im, im1, upsample_factor=100)[0][1:] + _off = np.insert(_off, 0, 0) + # track offset relative to t=0 so that all tmips are spatially aligned + _off += self.last_offset + self.offsets.append(_off) + shift = tuple(np.rint(_off).astype(int)) + self.last_im = np.copy(im1) + im1 = shift3d(im1, shift) + else: + self.last_im = np.copy(im1) + self.offsets.append(np.array([0, 0, 0])) + + self.last_offset = self.offsets[-1] + self.ims.append(np.copy(im1)) + + if len(self.ims) == self.window_size or t == self.end_t - 1: + tmip = np.max(np.array(self.ims), axis=0) + + if self.templated: + vol_tmip = 
np.max(np.array(self.ims), axis=0).astype(np.float32) + + for z in range(vol_tmip.shape[0]): + im_filtered = vol_tmip[z, :, :] + fb_threshold_margin = self.algorithm_params.get('fb_threshold_margin', 20) + threshold = np.median(im_filtered) + fb_threshold_margin + im_filtered = (im_filtered > threshold) * im_filtered + + gaussian_template_filter = render_gaussian_2d(self.algorithm_params.get('gaussian_diameter', 13), self.algorithm_params.get('gaussian_sigma', 9)) + im_filtered = cv2.matchTemplate(im_filtered, gaussian_template_filter, cv2.TM_CCOEFF) + pad = [int((x-1)/2) for x in gaussian_template_filter.shape] + im_filtered = np.pad(im_filtered, tuple(zip(pad, pad))) + + im_filtered = (im_filtered > threshold) * im_filtered + vol_tmip[z, :, :] = im_filtered + + if self.curator_layers: + vol_tmip_mask_as_list = (vol_tmip > 0).tolist() + for timepoint in range(len(self.ims)): + self.curator_layers['filtered']['data'].append(vol_tmip_mask_as_list) + + # reset local var + tmip = vol_tmip + + # get peaks, min distance is in 3 dimensions + if all(isinstance(dimension, int) for dimension in self.im.anisotropy): + expanded_im = np.repeat(tmip, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), exclude_border = self.algorithm_params.get('exclude_border', True)).astype(float) + peaks /= self.im.anisotropy + else: + peaks = ani_peak_local_max(tmip, min_distance=self.algorithm_params.get('min_distance', 9), exclude_border=self.algorithm_params.get('exclude_border', True), anisotropy=self.im.anisotropy).astype(float) + else: + tmip = np.max(np.array(self.ims), axis=0) + expanded_im = np.repeat(tmip, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, 
self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)).astype(float) + peaks /= self.im.anisotropy + + delta_t = len(self.ims) + self.ims = [] + self.offsets = [] + return PeaksResult(peaks - self.last_offset, delta_t=delta_t) + + else: + # if not made TMIP, return empty result + return PeaksResult() + + + + + + + + +class FrameByFrameSegmenter(Segmenter): + def __init__(self, algorithm, im, algorithm_params): + super().__init__(algorithm, im, algorithm_params) + if algorithm in ['skimage', 'template']: + if all(isinstance(dimension, int) for dimension in self.im.anisotropy): + expanded_shape = tuple([dim_len * ani for dim_len, ani in zip(self.im.get_t(0).shape, self.im.anisotropy)]) + self.mask = np.zeros(expanded_shape, dtype=np.uint16) + self.mask[tuple([np.s_[::ani] for ani in self.im.anisotropy])] = 1 + + def process_step(self, t): + im1 = self.im.get_t(t, channel = self.peakfind_channel) + im1 = medFilter2d(im1, self.median) + if self.gaussian: + im1 = gaussian3d(im1, self.gaussian) + + im1 = np.array(im1 * np.array(im1 > np.quantile(im1, self.quantile))) + + + if all(isinstance(dimension, int) for dimension in self.im.anisotropy): + expanded_shape = tuple([dim_len * ani for dim_len, ani in zip(self.im.get_t(0).shape, self.im.anisotropy)]) + mask = np.zeros(expanded_shape, dtype=np.uint16) + mask[tuple([np.s_[::ani] for ani in self.im.anisotropy])] = 1 + + if self.algorithm == 'skimage': + expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)) + peaks //= self.im.anisotropy + min_filter_size 
= self.algorithm_params.get('min_filter', False) + if min_filter_size: + min_filtered = rank.minimum(im1.astype(bool), np.ones((1, min_filter_size, min_filter_size))) + peaks_mask = np.zeros(im1.shape, dtype=bool) + peaks_mask[tuple(peaks.T)] = True + peaks = np.array(np.nonzero(min_filtered * peaks_mask)).T + + elif self.algorithm == 'threed': + peaks = findpeaks3d(im1) + peaks = reg_peaks(im1, peaks, thresh=self.reg_peak_dist) + + elif self.algorithm == 'template': + if not self.algorithm_params['templates_made']: + expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + try: + peaks = np.rint(self.algorithm_params["template_peaks"]).astype(int) + except: + peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)) + peaks //= self.im.anisotropy + chunks = get_bounded_chunks(data=im1, peaks=peaks, pad=[1, 25, 25]) + chunk_shapes = [chunk.shape for chunk in chunks] + max_chunk_shape = (max([chunk_shape[0] for chunk_shape in chunk_shapes]), max([chunk_shape[1] for chunk_shape in chunk_shapes]), max([chunk_shape[2] for chunk_shape in chunk_shapes])) + self.templates = [np.mean(np.array([chunk for chunk in chunks if chunk.shape == max_chunk_shape]), axis=0)] + quantiles = self.algorithm_params.get('quantiles', [0.5]) + rotations = self.algorithm_params.get('rotations', [0]) + for quantile in quantiles: + for rotation in rotations: + try: + self.templates.append(rotate(np.quantile(chunks, quantile, axis=0), rotation, axes=(-1, -2))) + except: + pass + print("Total number of computed templates: ", len(self.templates)) + self.algorithm_params['templates_made'] = True + peaks=None + for template in self.templates: + template_peaks = peak_filter_2(data=im1, params={'template': template, 'threshold': 0.5}) + if 
peaks is None: + peaks = template_peaks + else: + peaks = np.concatenate((peaks, template_peaks)) + peak_mask = np.zeros(im1.shape, dtype=bool) + peak_mask[tuple(peaks.T)] = True + peak_masked = im1 * peak_mask + expanded_im = np.repeat(peak_masked, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + peaks = peak_local_max(expanded_im, min_distance=13) + peaks //= self.im.anisotropy + + elif self.algorithm == 'curated': + if t == 0: + peaks = np.array(self.algorithm_params['peaks']) + self.last_peaks = peaks + else: + peaks = self.last_peaks + + elif algorithm == 'seeded_skimage': + if t == 0: + peaks = np.array(self.algorithm_params['peaks']) + else: + expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) + expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) + expanded_im *= self.mask + peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)) + peaks //= self.im.anisotropy + + if self.register and t != self.start_t: + _off = phase_cross_correlation(self.last_im1, im1, upsample_factor=100)[0][1:] + _off = np.insert(_off, 0,0) + if self.algorithm == 'curated': + self.last_peaks -= _off + return PeaksResult(peaks, offset=_off) + + self.last_im1 = np.copy(im1) + return PeaksResult(peaks) + + + + + + + + + + +def shift3d(im, shift): + if np.max(np.abs(shift)) > 0: + axis = tuple(np.arange(im.ndim)) + im = np.roll(im, shift, axis) + if shift[0] >= 0: + im[:shift[0], :, :] = 0 + else: + im[shift[0]:, :, :] = 0 + if shift[1] >= 0: + im[:, :shift[1], :] = 0 + else: + im[:, shift[1]:, :] = 0 + if shift[2] >= 0: + im[:, :, :shift[2]] = 0 + else: + im[:, :, shift[2]:] = 0 + return im + + +def render_gaussian_2d(blob_diameter, sigma): + """ + :param 
im_width: + :param sigma: + :return: + """ + + gaussian = np.zeros((blob_diameter, blob_diameter), dtype=np.float32) + + # gaussian filter + for i in range(int(-(blob_diameter - 1) / 2), int((blob_diameter + 1) / 2)): + for j in range(int(-(blob_diameter - 1) / 2), int((blob_diameter + 1) / 2)): + x0 = int((blob_diameter) / 2) # center + y0 = int((blob_diameter) / 2) # center + x = i + x0 # row + y = j + y0 # col + gaussian[y, x] = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / 2 / sigma / sigma) + + return gaussian \ No newline at end of file diff --git a/eats_worm/Extractor.py b/eats_worm/Extractor.py index 8b291ff..442ebd6 100755 --- a/eats_worm/Extractor.py +++ b/eats_worm/Extractor.py @@ -5,6 +5,7 @@ import scipy.spatial import copy from .Threads import * +from .Detection import * import pickle import os from .multifiletiff import * @@ -366,25 +367,7 @@ def find_class(self, module, name): return e -def render_gaussian_2d(blob_diameter, sigma): - """ - :param im_width: - :param sigma: - :return: - """ - - gaussian = np.zeros((blob_diameter, blob_diameter), dtype=np.float32) - - # gaussian filter - for i in range(int(-(blob_diameter - 1) / 2), int((blob_diameter + 1) / 2)): - for j in range(int(-(blob_diameter - 1) / 2), int((blob_diameter + 1) / 2)): - x0 = int((blob_diameter) / 2) # center - y0 = int((blob_diameter) / 2) # center - x = i + x0 # row - y = j + y0 # col - gaussian[y, x] = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / 2 / sigma / sigma) - return gaussian class Extractor: @@ -649,18 +632,25 @@ def __init__(self, mft=None, params=None): self.frames = self.im.frames ## peakfinding/spooling parameters - self.gaussian = params.get('gaussian', (25,4,3,1)) - self.median = params.get('median', 3) - self.quantile = params.get('quantile', 0.99) - self.reg_peak_dist = params.get('reg_peak_dist', 40) - self.peakfind_channel = params.get('peakfind_channel',0) self.blob_merge_dist_thresh = params.get('blob_merge_dist_thresh', 6) self.remove_blobs_dist = 
params.get('remove_blobs_dist', 20) self.suppress_output = params.get('suppress_output', False) - self.register = params.get('register_frames', False) + self.predict = params.get('predict', True) self.algorithm = params.get('algorithm', 'template') self.algorithm_params = params.get('algorithm_params', {}) + + self.algorithm_params['gaussian'] = params.get('gaussian', (25,4,3,1)) + self.algorithm_params['median'] = params.get('median', 3) + self.algorithm_params['quantile'] = params.get('quantile', 0.99) + self.algorithm_params['reg_peak_dist'] = params.get('reg_peak_dist', 40) + self.algorithm_params['peakfind_channel'] = params.get('peakfind_channel', 0) + self.algorithm_params['register'] = params.get('register_frames', False) + + self.algorithm_params['start_t'] = params['start_t'] + self.algorithm_params['end_t'] = params['end_t'] + + self.curator_layers = params.get('curator_layers', False) try: self.algorithm_params['templates_made'] = type(self.algorithm_params['template']) != bool @@ -682,223 +672,23 @@ def calc_blob_threads(self): """ self.spool = Spool(self.blob_merge_dist_thresh, max_t=self.end_t - self.start_t, predict=self.predict) - # handle an ugly workaround for peak_local_max not supporting anisotropy. 
this stuff is only needed when - # using skimage or template matching, but putting it here allows us to avoid redoing the matmuls every iteration - if all(isinstance(dimension, int) for dimension in self.im.anisotropy): - expanded_shape = tuple([dim_len * ani for dim_len, ani in zip(self.im.get_t(0).shape, self.im.anisotropy)]) - mask = np.zeros(expanded_shape, dtype=np.uint16) - mask[tuple([np.s_[::ani] for ani in self.im.anisotropy])] = 1 - - if 'tmip' in self.algorithm: - window_size = self.algorithm_params.get('window_size', 10) - ims = [] - offsets = [] - last_offset = None - last_im = None - if self.algorithm.endswith('2d_template') and self.curator_layers: - self.curator_layers = {'filtered': {'type': 'image', 'data': []}} - for i in range(self.start_t, self.end_t): - im1 = self.im.get_t(i, channel = self.peakfind_channel) - - im1 = medFilter2d(im1, self.median) - if self.gaussian: - im1 = gaussian3d(im1, self.gaussian) - - if self.algorithm.startswith('seeded') and i==self.start_t: - peaks = np.array(self.algorithm_params['peaks']) - self.spool.reel(peaks,self.im.anisotropy) - if 'labels' in self.algorithm_params: - for thread, label in zip(self.spool.threads, self.algorithm_params['labels']): - thread.label = label - last_offset = np.array([0, 0, 0]) - last_im = im1 - - else: - if self.register and i!=self.start_t: - _off = phase_cross_correlation(last_im, im1, upsample_factor=100)[0][1:] - _off = np.insert(_off, 0,0) - # track offset relative to t=0 so that all tmips are spatially aligned - _off += last_offset - offsets.append(_off) - shift = tuple(np.rint(_off).astype(int)) - axis = tuple(np.arange(im1.ndim)) - last_im = np.copy(im1) - if np.max(shift) > 0: - im1 = np.roll(im1, shift, axis) - if shift[0] >= 0: - im1[:shift[0], :, :] = 0 - else: - im1[shift[0]:, :, :] = 0 - if shift[1] >= 0: - im1[:, :shift[1], :] = 0 - else: - im1[:, shift[1]:, :] = 0 - if shift[2] >- 0: - im1[:, :, :shift[2]] = 0 - else: - im1[:, :, shift[2]:] = 0 - else: - last_im = 
np.copy(im1) - offsets.append(np.array([0, 0, 0])) - last_offset = offsets[-1] - ims.append(np.copy(im1)) - - if len(ims) == window_size or i == self.end_t - 1: - tmip = np.max(np.array(ims), axis=0) - - if self.algorithm.endswith('2d_template'): - vol_tmip = np.max(np.array(ims), axis=0).astype(np.float32) - - for z in range(vol_tmip.shape[0]): - im_filtered = vol_tmip[z,:,:] - fb_threshold_margin = self.algorithm_params.get('fb_threshold_margin', 20) - threshold = np.median(im_filtered) + fb_threshold_margin - im_filtered = (im_filtered > threshold) * im_filtered - - gaussian_template_filter = render_gaussian_2d(self.algorithm_params.get('gaussian_diameter', 13), self.algorithm_params.get('gaussian_sigma', 9)) - im_filtered = cv2.matchTemplate(im_filtered, gaussian_template_filter, cv2.TM_CCOEFF) - pad = [int((x-1)/2) for x in gaussian_template_filter.shape] - im_filtered = np.pad(im_filtered, tuple(zip(pad, pad))) - - im_filtered = (im_filtered > threshold) * im_filtered - vol_tmip[z,:,:] = im_filtered - - if self.curator_layers: - vol_tmip_mask_as_list = (vol_tmip > 0).tolist() - for timepoint in range(len(ims)): - self.curator_layers['filtered']['data'].append(vol_tmip_mask_as_list) - - # reset local var - tmip = vol_tmip - - # get peaks, min distance is in 3 dimensions - if all(isinstance(dimension, int) for dimension in self.im.anisotropy): - expanded_im = np.repeat(tmip, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), exclude_border=self.algorithm_params.get('exclude_border', True)).astype(float) - peaks /= self.im.anisotropy - else: - peaks = ani_peak_local_max(tmip, min_distance=self.algorithm_params.get('min_distance', 9), exclude_border=self.algorithm_params.get('exclude_border', True), 
anisotropy=self.im.anisotropy).astype(float) - else: - tmip = np.max(np.array(ims), axis=0) - expanded_im = np.repeat(tmip, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)).astype(float) - peaks /= self.im.anisotropy - - self.spool.reel(peaks - last_offset, self.im.anisotropy, delta_t=len(ims)) - ims = [] - offsets = [] - - if not self.suppress_output: - print('\r' + 'Frames Processed: ' + str(i+1-self.start_t)+'/'+str(self.end_t - self.start_t), sep='', end='', flush=True) + segmenter = Segmenter(algorithm=self.algorithm, im=self.im, algorithm_params=self.algorithm_params) + for t in range(self.start_t, self.end_t): + result = segmenter.process_step(t) + peaks, delta_t, offset = result.positions, result.delta_t, result.offset + if peaks is not None: + self.spool.reel(peaks, self.im.anisotropy, delta_t=delta_t, offset=offset) + + if self.algorithm.startswith('seeded'): + if 'labels' in self.algorithm_params: + for thread, label in zip(self.spool.threads, self.algorithm_params['labels']): + thread.label = label - else: - for i in range(self.start_t, self.end_t): - im1 = self.im.get_t(i, channel = self.peakfind_channel) - im1 = medFilter2d(im1, self.median) - if self.gaussian: - im1 = gaussian3d(im1, self.gaussian) - im1 = np.array(im1 * np.array(im1 > np.quantile(im1, self.quantile))) - if self.algorithm == 'skimage': - expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), 
num_peaks=self.algorithm_params.get('num_peaks', 50)) - peaks //= self.im.anisotropy - min_filter_size = self.algorithm_params.get('min_filter', False) - if min_filter_size: - min_filtered = rank.minimum(im1.astype(bool), np.ones((1, min_filter_size, min_filter_size))) - peaks_mask = np.zeros(im1.shape, dtype=bool) - peaks_mask[tuple(peaks.T)] = True - peaks = np.array(np.nonzero(min_filtered * peaks_mask)).T - elif self.algorithm == 'threed': - peaks = findpeaks3d(im1) - peaks = reg_peaks(im1, peaks,thresh=self.reg_peak_dist) - elif self.algorithm == 'template': - if not self.algorithm_params['templates_made']: - expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - try: - peaks = np.rint(self.algorithm_params["template_peaks"]).astype(int) - except: - peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)) - peaks //= self.im.anisotropy - chunks = get_bounded_chunks(data=im1, peaks=peaks, pad=[1, 25, 25]) - chunk_shapes = [chunk.shape for chunk in chunks] - max_chunk_shape = (max([chunk_shape[0] for chunk_shape in chunk_shapes]), max([chunk_shape[1] for chunk_shape in chunk_shapes]), max([chunk_shape[2] for chunk_shape in chunk_shapes])) - self.templates = [np.mean(np.array([chunk for chunk in chunks if chunk.shape == max_chunk_shape]), axis=0)] - quantiles = self.algorithm_params.get('quantiles', [0.5]) - rotations = self.algorithm_params.get('rotations', [0]) - for quantile in quantiles: - for rotation in rotations: - try: - self.templates.append(rotate(np.quantile(chunks, quantile, axis=0), rotation, axes=(-1, -2))) - except: - pass - print("Total number of computed templates: ", len(self.templates)) - self.algorithm_params['templates_made'] = True - peaks = None - for template in self.templates: - 
template_peaks = peak_filter_2(data=im1, params={'template': template, 'threshold': 0.5}) - if peaks is None: - peaks = template_peaks - else: - peaks = np.concatenate((peaks, template_peaks)) - peak_mask = np.zeros(im1.shape, dtype=bool) - peak_mask[tuple(peaks.T)] = True - peak_masked = im1 * peak_mask - expanded_im = np.repeat(peak_masked, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - peaks = peak_local_max(expanded_im, min_distance=13) - peaks //= self.im.anisotropy - elif self.algorithm == 'curated': - if 0 == i: - peaks = np.array(self.algorithm_params['peaks']) - self.last_peaks = peaks - else: - peaks = self.last_peaks - elif self.algorithm == 'seeded_skimage': - if 0 == i: - peaks = np.array(self.algorithm_params['peaks']) - else: - expanded_im = np.repeat(im1, self.im.anisotropy[0], axis=0) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[1], axis=1) - expanded_im = np.repeat(expanded_im, self.im.anisotropy[2], axis=2) - expanded_im *= mask - peaks = peak_local_max(expanded_im, min_distance=self.algorithm_params.get('min_distance', 9), num_peaks=self.algorithm_params.get('num_peaks', 50)) - peaks //= self.im.anisotropy - else: - peaks = findpeaks2d(im1) - peaks = reg_peaks(im1, peaks,thresh=self.reg_peak_dist) - - if self.register and i!=self.start_t: - _off = phase_cross_correlation(self.last_im1, im1, upsample_factor=100)[0][1:] - _off = np.insert(_off, 0,0) - if self.algorithm == 'curated': - self.last_peaks -= _off - self.spool.reel(peaks,self.im.anisotropy, offset=_off) - else: - self.spool.reel(peaks,self.im.anisotropy) - if self.algorithm.startswith('seeded'): - if 'labels' in self.algorithm_params: - for thread, label in zip(self.spool.threads, self.algorithm_params['labels']): - thread.label = label - - self.last_im1 = np.copy(im1) - - if not self.suppress_output: - print('\r' + 'Frames 
Processed: ' + str(i+1-self.start_t)+'/'+str(self.end_t - self.start_t), sep='', end='', flush=True) + if not self.suppress_output: + print('\r' + 'Frames Processed: ' + str(t+1-self.start_t)+'/'+str(self.end_t - self.start_t), sep='', end='', flush=True) + + print('\nInfilling...') self.spool.infill()