From fce986265b4647ab310dc0cb7860b385e5eba3a1 Mon Sep 17 00:00:00 2001
From: shashikg
Date: Sun, 26 Jan 2020 17:44:54 +0530
Subject: [PATCH 1/9] add visual search model for object array

---
 .gitignore                                 |   4 +
 model_tools/brain_transformation/search.py | 177 +++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100755 .gitignore
 create mode 100755 model_tools/brain_transformation/search.py

diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..85cc9fd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+__pycache__/*
+.ipynb_checkpoints
+build/*
+dist/*
diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
new file mode 100755
index 0000000..29af5a1
--- /dev/null
+++ b/model_tools/brain_transformation/search.py
@@ -0,0 +1,177 @@
+import os
+from collections import OrderedDict
+
+from brainio_base.assemblies import BehavioralAssembly
+from brainscore.model_interface import BrainModel
+from candidate_models.base_models import BaseModelPool
+from candidate_models.model_commitments.vs_layer import visual_search_layer
+
+import cv2
+import numpy as np
+from tqdm import tqdm
+
+class VisualSearchObjArray(BrainModel):
+    def __init__(self, identifier, target_layer, stimulus_layer):
+        self.current_task = None
+        self.eye_res = 224
+        self.arr_size = 6
+        self.data_len = 300
+        self.identifier = identifier
+
+        self.fix = [[640, 512],
+                    [365, 988],
+                    [90, 512],
+                    [365, 36],
+                    [915, 36],
+                    [1190, 512],
+                    [915, 988]]
+
+        target_model_pool = BaseModelPool(input_size=28)
+        stimulus_model_pool = BaseModelPool(input_size=224)
+        self.target_model = target_model_pool[identifier]
+        self.stimuli_model = stimulus_model_pool[identifier]
+
+        if target_layer==None:
+            self.target_layer = visual_search_layer[identifier][0]
+            self.stimuli_layer = visual_search_layer[identifier][0]
+        else:
+            self.target_layer = target_layer
+            self.stimuli_layer = stimulus_layer
+
+
+    def start_task(self, task: BrainModel.Task):
+        self.current_task = task
+        print(task, "started")
+
+    def look_at(self, stimuli_set):
+        self.gt_array = []
+        gt = stimuli_set[stimuli_set['image_label'] == 'mask']
+        gt_paths = list(gt.image_paths.values())[int(gt.index.values[0]):int(gt.index.values[-1]+1)]
+
+        for i in range(6):
+            imagename_gt = gt_paths[i]
+
+            gt = cv2.imread(imagename_gt, 0)
+            gt = cv2.resize(gt, (self.eye_res, self.eye_res), interpolation = cv2.INTER_AREA)
+            retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
+            temp_stim = np.uint8(np.zeros((3*self.eye_res, 3*self.eye_res)))
+            temp_stim[self.eye_res:2*self.eye_res, self.eye_res:2*self.eye_res] = np.copy(gt)
+            gt = np.copy(temp_stim)
+            gt = gt/255
+
+            self.gt_array.append(gt)
+
+        self.gt_total = np.copy(self.gt_array[0])
+        for i in range(1,6):
+            self.gt_total += self.gt_array[i]
+
+        self.score = np.zeros((self.data_len, self.arr_size+1))
+        self.data = np.zeros((self.data_len, self.arr_size+2, 2), dtype=int)
+        S_data = np.zeros((300, 7, 2), dtype=int)
+        I_data = np.zeros((300, 1), dtype=int)
+
+        data_cnt = 0
+
+        target = stimuli_set[stimuli_set['image_label'] == 'target']
+        target_features = self.target_model(target, layers=[self.target_layer], stimuli_identifier=False)
+        if target_features.shape[0] == target_features['neuroid_num'].shape[0]:
+            target_features = target_features.T
+
+        stimuli = stimuli_set[stimuli_set['image_label'] == 'stimuli']
+        stimuli_features = self.stimuli_model(stimuli, layers=[self.stimuli_layer], stimuli_identifier=False)
+        if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
+            stimuli_features = stimuli_features.T
+
+        print(stimuli_features.shape, self.stimuli_layer, target_features.shape, self.target_layer)
+
+        import torch
+
+        for i in tqdm(range(self.data_len)):
+            op_target = self.unflat(target_features[i:i+1])
+            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=op_target.shape[2], stride=1, bias=False)
+            MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
+
+            gt_idx = target_features.tar_obj_pos.values[i]
+            gt = self.gt_array[gt_idx]
+
+            op_stimuli = self.unflat(stimuli_features[i:i+1])
+            out = MMconv(torch.Tensor(op_stimuli)).detach().numpy()
+            out = out.reshape(out.shape[2:])
+
+            out = out - np.min(out)
+            out = out/np.max(out)
+            out *= 255
+            out = np.uint8(out)
+            out = cv2.resize(out, (self.eye_res, self.eye_res), interpolation = cv2.INTER_AREA)
+            out = cv2.GaussianBlur(out,(7,7),3)
+
+            temp_stim = np.uint8(np.zeros((3*self.eye_res, 3*self.eye_res)))
+            temp_stim[self.eye_res:2*self.eye_res, self.eye_res:2*self.eye_res] = np.copy(out)
+            attn = np.copy(temp_stim*self.gt_total)
+
+            saccade = []
+            (x, y) = int(attn.shape[0]/2), int(attn.shape[1]/2)
+            saccade.append((x, y))
+
+            for k in range(self.arr_size):
+                (x, y) = np.unravel_index(np.argmax(attn), attn.shape)
+
+                fxn_x, fxn_y = x, y
+
+                fxn_x, fxn_y = max(fxn_x, self.eye_res), max(fxn_y, self.eye_res)
+                fxn_x, fxn_y = min(fxn_x, (attn.shape[0]-self.eye_res)), min(fxn_y, (attn.shape[1]-self.eye_res))
+
+                saccade.append((fxn_x, fxn_y))
+
+                attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1])
+
+                if(t==gt_idx):
+                    self.score[data_cnt, k+1] = 1
+                    data_cnt += 1
+                    break
+
+            saccade = np.asarray(saccade)
+            j = saccade.shape[0]
+
+            for k in range(j):
+                tar_id = self.get_pos(saccade[k, 0], saccade[k, 1], 0)
+                saccade[k, 0] = self.fix[tar_id][0]
+                saccade[k, 1] = self.fix[tar_id][1]
+
+            I_data[i, 0] = min(7, j)
+            S_data[i, :j, 0] = saccade[:, 0].reshape((-1,))[:7]
+            S_data[i, :j, 1] = saccade[:, 1].reshape((-1,))[:7]
+
+        self.data[:,:7,:] = S_data
+        self.data[:,7,:] = I_data
+
+        return (self.score, self.data)
+
+    def remove_attn(self, img, x, y):
+        t = -1
+        for i in range(5, -1, -1):
+            fxt_place = self.gt_array[i][x, y]
+            if (fxt_place>0):
+                t = i
+                break
+
+        if(t>-1):
+            img[self.gt_array[t] == 1] = 0
+
+        return img, t
+
+    def get_pos(self, x, y, t):
+        for i in range(5, -1, -1):
+            fxt_place = self.gt_array[i][int(x), int(y)]
+            if (fxt_place>0):
+                t = i + 1
+                break
+        return t
+
+    def unflat(self, X):
+        channel_names = ['channel', 'channel_x', 'channel_y']
+        assert all(hasattr(X, coord) for coord in channel_names)
+        shapes = [len(set(X[channel].values)) for channel in channel_names]
+        X = np.reshape(X.values, [X.shape[0]] + shapes)
+        X = np.transpose(X, axes=[0, 3, 1, 2])
+        return X
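
The core of look_at above is a template-matching step: MMconv treats the target image's feature map as a convolution kernel and slides it across the search-array feature map, so the response peaks wherever the array best matches the target. A minimal sketch of that step in isolation, with hypothetical feature-map shapes (illustration only, not part of the patch):

import numpy as np
import torch

target_features = np.random.rand(1, 2, 7, 7).astype(np.float32)      # (batch, channel, h, w), as produced by unflat
stimulus_features = np.random.rand(1, 2, 55, 55).astype(np.float32)  # larger map for the full search array

# the target features become the (fixed) weights of a single-output convolution
MMconv = torch.nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, bias=False)
MMconv.weight = torch.nn.Parameter(torch.Tensor(target_features))
attention_map = MMconv(torch.Tensor(stimulus_features)).detach().numpy()
print(attention_map.shape)  # (1, 1, 49, 49): one match score per spatial offset
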
From a9b3cafd0b47435f05de209149123f409da3011e Mon Sep 17 00:00:00 2001
From: shashikg
Date: Sun, 26 Jan 2020 17:46:02 +0530
Subject: [PATCH 2/9] Changes to ModelCommitment to add visual search brain model

---
 model_tools/brain_transformation/__init__.py | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index 6b330b9..9ed14f0 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -6,7 +6,7 @@
 from .behavior import BehaviorArbiter, LogitsBehavior, ProbabilitiesMapping
 from .neural import LayerMappedModel, LayerSelection, LayerScores
 from .stimuli import PixelsToDegrees
-
+from .search import VisualSearchObjArray
 
 class ModelCommitment(BrainModel):
     """
@@ -21,7 +21,7 @@ class ModelCommitment(BrainModel):
         'IT': LazyLoad(MajajITPublicBenchmark),
     }
 
-    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None):
+    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None, search_target_layer=None, search_stimulus_layer=None):
         self.layers = layers
         self.region_benchmarks = {**self.standard_region_benchmarks, **(region_benchmarks or {})}
         layer_model = LayerMappedModel(identifier=identifier, activations_model=activations_model)
@@ -34,16 +34,28 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_lay
                                                    BrainModel.Task.probabilities: probabilities_behavior})
         self.do_behavior = False
 
+        self.search_model = VisualSearchObjArray(identifier=identifier, target_layer=search_target_layer, stimulus_layer=search_stimulus_layer)
+        self.do_search = False
+
     def start_task(self, task: BrainModel.Task, *args, **kwargs):
         if task != BrainModel.Task.passive:
-            self.behavior_model.start_task(task, *args, **kwargs)
-            self.do_behavior = True
+            if task == BrainModel.Task.visual_search:
+                self.search_model.start_task(task)
+                self.do_behavior = False
+                self.do_search = True
+            else:
+                self.behavior_model.start_task(task, *args, **kwargs)
+                self.do_behavior = True
+                self.do_search = False
         else:
             self.do_behavior = False
+            self.do_search = False
 
     def look_at(self, stimuli):
         if self.do_behavior:
             return self.behavior_model.look_at(stimuli)
+        elif self.do_search:
+            return self.search_model.look_at(stimuli)
         else:
             return self.layer_model.look_at(stimuli)
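
With this wiring in place, a committed model can be pointed at the search task like any other behavioral task. A hedged usage sketch (the identifier, layers and stimuli below are placeholders, and the keyword arguments change again in PATCH 4):

model = ModelCommitment(identifier='my-model', activations_model=my_activations_model,
                        layers=['layer1', 'layer2'],
                        search_target_layer='layer2', search_stimulus_layer='layer2')
model.start_task(BrainModel.Task.visual_search)
score, saccade_data = model.look_at(search_stimuli)  # dispatched to VisualSearchObjArray.look_at
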
From b15f42ff2c79403316e998c866265bfd40ada91f Mon Sep 17 00:00:00 2001
From: shashikg
Date: Sun, 26 Jan 2020 23:21:08 +0530
Subject: [PATCH 3/9] show visual search status

---
 .gitignore                                 | 2 +-
 model_tools/brain_transformation/search.py | 5 +----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index 85cc9fd..c95a93f 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-__pycache__/*
+__pycache__/
 .ipynb_checkpoints
 build/*
 dist/*
diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
index 29af5a1..294cb34 100755
--- a/model_tools/brain_transformation/search.py
+++ b/model_tools/brain_transformation/search.py
@@ -41,7 +41,6 @@ def __init__(self, identifier, target_layer, stimulus_layer):
 
     def start_task(self, task: BrainModel.Task):
         self.current_task = task
-        print(task, "started")
 
     def look_at(self, stimuli_set):
         self.gt_array = []
@@ -82,11 +81,9 @@ def look_at(self, stimuli_set):
         if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
             stimuli_features = stimuli_features.T
 
-        print(stimuli_features.shape, self.stimuli_layer, target_features.shape, self.target_layer)
-
         import torch
 
-        for i in tqdm(range(self.data_len)):
+        for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
             op_target = self.unflat(target_features[i:i+1])
             MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=op_target.shape[2], stride=1, bias=False)
             MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
From b73ac401c8e6759096e7538055e39e0f8b322e70 Mon Sep 17 00:00:00 2001
From: shashikg
Date: Tue, 18 Feb 2020 18:30:10 +0530
Subject: [PATCH 4/9] removed candidate_model dependencies and some minor changes

---
 .gitignore                                   |  1 +
 .travis.yml                                  |  9 +++
 model_tools/brain_transformation/__init__.py |  8 +--
 model_tools/brain_transformation/search.py   | 67 ++++++++-------
 setup.py                                     |  1 +
 tests/brain_transformation/test_search.py    | 63 ++++++++++++++++++
 6 files changed, 104 insertions(+), 45 deletions(-)
 create mode 100644 tests/brain_transformation/test_search.py

diff --git a/.gitignore b/.gitignore
index c95a93f..c251744 100755
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@ __pycache__/
 .ipynb_checkpoints
 build/*
 dist/*
+model_tools.egg-info/*
diff --git a/.travis.yml b/.travis.yml
index 101ff3a..67f3496 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -38,3 +38,12 @@ before_script:
 script:
 - if [ "$TRAVIS_PULL_REQUEST" = "false"]; then CUDA_VISIBLE_DEVICES= MT_MULTITHREAD=0 MT_IMAGENET_PATH=./imagenet2012.hdf5 pytest --ignore=tf-models -m "not memory_intense"; fi
 - if [ "$TRAVIS_PULL_REQUEST" = "true" ]; then CUDA_VISIBLE_DEVICES= MT_MULTITHREAD=0 MT_IMAGENET_PATH=./imagenet2012.hdf5 pytest --ignore=tf-models -m "not memory_intense and not private_access"; fi
+
+notifications:
+  slack:
+    if: |
+      branch = master AND \
+      type IN (push, api, cron)
+    on_success: change
+    on_failure: always
+    secure: "eCS+HaOHPMTXTplsowGKt4Ibf19Lwd0ZWkQpbFdW06hgEIJDxjAfrTRSHV73YF8X4mrvQRyZ9Kax0n2OZUW3ulPaavdPVKgnJRoSYAToImIYZoMk5evFy/YHvtpxtuWGnV2NzTtktKLPB3QgwGehZqbMVuBE7HVsYpaqJwI8n82X6Gwcr+k9RI4A96Zi8HESPAJO/TTjrCxeYNM8jve/lo6oJR9DM2f2fj07UFXqGTpXjS80ULbCrK3HHbDFmph9aoFQDlEGyIuTfwZ32cQpuJbezxeDMdoYq4Aq37qxcQXxh2fVZIJ6LmknRQYkrcBbKXDsX5sUtePnKsx6ImjO2WUOkBHUKhXMwmKjuxY5OOSY1rXnI88YMQMQZYJCAPIY4pUekeU0PmWwziGXb1ZX4NUW8s54f3ZpY34QIumJ5FPVW3LKm73zEgaDvFySBYzqGUdiYhe7JUeA57CgG7E7sb1QWs/fnng9w++ERNwyqG+KUp+GMPEF9yRK6+AZ4ZgmFPhFX5ydRSlxzJKXoUP7myCLOEEG279pGyMAE981InnkClYcCrWm4lotbmHC0ViTEJ+U+gli9ESeviJDxFJBMjNGEC90jhFH9k52qN+/WnSMVqkt7AoOHqUidTws7Rey99ASWROzk/OP5zgsgVh/BfOamXbpoBSRP/5Wj46x6mg="
diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index 9ed14f0..ca55c2f 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -21,7 +21,7 @@ class ModelCommitment(BrainModel):
         'IT': LazyLoad(MajajITPublicBenchmark),
     }
 
-    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None, search_target_layer=None, search_stimulus_layer=None):
+    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None, search_target_model_param=None, search_stimuli_model_param=None):
         self.layers = layers
         self.region_benchmarks = {**self.standard_region_benchmarks, **(region_benchmarks or {})}
         layer_model = LayerMappedModel(identifier=identifier, activations_model=activations_model)
@@ -34,13 +34,13 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_lay
                                                    BrainModel.Task.probabilities: probabilities_behavior})
         self.do_behavior = False
 
-        self.search_model = VisualSearchObjArray(identifier=identifier, target_layer=search_target_layer, stimulus_layer=search_stimulus_layer)
+        self.search_model = VisualSearchObjArray(identifier=identifier, target_model_param=search_target_model_param, stimuli_model_param=search_stimuli_model_param)
         self.do_search = False
 
     def start_task(self, task: BrainModel.Task, *args, **kwargs):
         if task != BrainModel.Task.passive:
-            if task == BrainModel.Task.visual_search:
-                self.search_model.start_task(task)
+            if task == BrainModel.Task.visual_search_obj_arr:
+                self.search_model.start_task(task, **kwargs)
                 self.do_behavior = False
                 self.do_search = True
             else:
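
The refactored constructor no longer builds models from candidate_models pools; the caller now supplies them through two dicts. A sketch of their expected shape, with the keys read by the search.py changes below (the models and layer names here are placeholders):

search_target_model_param = {'target_model': target_activations_model,  # features for the target crop
                             'target_layer': 'relu1'}
search_stimuli_model_param = {'stimuli_model': stimuli_activations_model,  # features for the search image
                              'stimuli_layer': 'relu1',
                              'search_image_size': 224}
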
diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
index 294cb34..fc1db1c 100755
--- a/model_tools/brain_transformation/search.py
+++ b/model_tools/brain_transformation/search.py
@@ -3,43 +3,28 @@
 
 from brainio_base.assemblies import BehavioralAssembly
 from brainscore.model_interface import BrainModel
-from candidate_models.base_models import BaseModelPool
-from candidate_models.model_commitments.vs_layer import visual_search_layer
+from brainscore.utils import fullname
 
 import cv2
 import numpy as np
 from tqdm import tqdm
+import logging
 
 class VisualSearchObjArray(BrainModel):
-    def __init__(self, identifier, target_layer, stimulus_layer):
+    def __init__(self, identifier, target_model_param, stimuli_model_param):
         self.current_task = None
-        self.eye_res = 224
-        self.arr_size = 6
-        self.data_len = 300
         self.identifier = identifier
-
-        self.fix = [[640, 512],
-                    [365, 988],
-                    [90, 512],
-                    [365, 36],
-                    [915, 36],
-                    [1190, 512],
-                    [915, 988]]
-
-        target_model_pool = BaseModelPool(input_size=28)
-        stimulus_model_pool = BaseModelPool(input_size=224)
-        self.target_model = target_model_pool[identifier]
-        self.stimuli_model = stimulus_model_pool[identifier]
-
-        if target_layer==None:
-            self.target_layer = visual_search_layer[identifier][0]
-            self.stimuli_layer = visual_search_layer[identifier][0]
-        else:
-            self.target_layer = target_layer
-            self.stimuli_layer = stimulus_layer
-
-
-    def start_task(self, task: BrainModel.Task):
+        self.target_model = target_model_param['target_model']
+        self.stimuli_model = stimuli_model_param['stimuli_model']
+        self.target_layer = target_model_param['target_layer']
+        self.stimuli_layer = stimuli_model_param['stimuli_layer']
+        self.search_image_size = stimuli_model_param['search_image_size']
+        self._logger = logging.getLogger(fullname(self))
+
+    def start_task(self, task: BrainModel.Task, **kwargs):
+        self.fix = kwargs['fix']  # fixation map
+        self.max_fix = kwargs['max_fix']  # maximum allowed fixation excluding the very first fixation
+        self.data_len = kwargs['data_len']  # Number of stimuli
         self.current_task = task
 
     def look_at(self, stimuli_set):
@@ -51,10 +36,10 @@ def look_at(self, stimuli_set):
             imagename_gt = gt_paths[i]
 
             gt = cv2.imread(imagename_gt, 0)
-            gt = cv2.resize(gt, (self.eye_res, self.eye_res), interpolation = cv2.INTER_AREA)
+            gt = cv2.resize(gt, (self.search_image_size, self.search_image_size), interpolation = cv2.INTER_AREA)
             retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
-            temp_stim = np.uint8(np.zeros((3*self.eye_res, 3*self.eye_res)))
-            temp_stim[self.eye_res:2*self.eye_res, self.eye_res:2*self.eye_res] = np.copy(gt)
+            temp_stim = np.uint8(np.zeros((3*self.search_image_size, 3*self.search_image_size)))
+            temp_stim[self.search_image_size:2*self.search_image_size, self.search_image_size:2*self.search_image_size] = np.copy(gt)
             gt = np.copy(temp_stim)
             gt = gt/255
 
@@ -64,8 +49,8 @@ def look_at(self, stimuli_set):
         for i in range(1,6):
             self.gt_total += self.gt_array[i]
 
-        self.score = np.zeros((self.data_len, self.arr_size+1))
-        self.data = np.zeros((self.data_len, self.arr_size+2, 2), dtype=int)
+        self.score = np.zeros((self.data_len, self.max_fix+1))
+        self.data = np.zeros((self.data_len, self.max_fix+2, 2), dtype=int)
         S_data = np.zeros((300, 7, 2), dtype=int)
         I_data = np.zeros((300, 1), dtype=int)
 
@@ -85,7 +70,7 @@ def look_at(self, stimuli_set):
 
         for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
             op_target = self.unflat(target_features[i:i+1])
-            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=op_target.shape[2], stride=1, bias=False)
+            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=(op_target.shape[2], op_target.shape[3]), stride=1, bias=False)
             MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
 
             gt_idx = target_features.tar_obj_pos.values[i]
@@ -99,24 +84,24 @@ def look_at(self, stimuli_set):
             out = out/np.max(out)
             out *= 255
             out = np.uint8(out)
-            out = cv2.resize(out, (self.eye_res, self.eye_res), interpolation = cv2.INTER_AREA)
+            out = cv2.resize(out, (self.search_image_size, self.search_image_size), interpolation = cv2.INTER_AREA)
             out = cv2.GaussianBlur(out,(7,7),3)
 
-            temp_stim = np.uint8(np.zeros((3*self.eye_res, 3*self.eye_res)))
-            temp_stim[self.eye_res:2*self.eye_res, self.eye_res:2*self.eye_res] = np.copy(out)
+            temp_stim = np.uint8(np.zeros((3*self.search_image_size, 3*self.search_image_size)))
+            temp_stim[self.search_image_size:2*self.search_image_size, self.search_image_size:2*self.search_image_size] = np.copy(out)
             attn = np.copy(temp_stim*self.gt_total)
 
             saccade = []
             (x, y) = int(attn.shape[0]/2), int(attn.shape[1]/2)
             saccade.append((x, y))
 
-            for k in range(self.arr_size):
+            for k in range(self.max_fix):
                 (x, y) = np.unravel_index(np.argmax(attn), attn.shape)
 
                 fxn_x, fxn_y = x, y
 
-                fxn_x, fxn_y = max(fxn_x, self.eye_res), max(fxn_y, self.eye_res)
-                fxn_x, fxn_y = min(fxn_x, (attn.shape[0]-self.eye_res)), min(fxn_y, (attn.shape[1]-self.eye_res))
+                fxn_x, fxn_y = max(fxn_x, self.search_image_size), max(fxn_y, self.search_image_size)
+                fxn_x, fxn_y = min(fxn_x, (attn.shape[0]-self.search_image_size)), min(fxn_y, (attn.shape[1]-self.search_image_size))
 
                 saccade.append((fxn_x, fxn_y))
 
diff --git a/setup.py b/setup.py
index fe2baf2..c4c3fb0 100644
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@
     "tqdm",
     "scikit-learn",
     "result_caching @ git+https://github.com/mschrimpf/result_caching",
+    "opencv-contrib-python",
 ]
 
 setup(
diff --git a/tests/brain_transformation/test_search.py b/tests/brain_transformation/test_search.py
new file mode 100644
index 0000000..1c4c169
--- /dev/null
+++ b/tests/brain_transformation/test_search.py
@@ -0,0 +1,63 @@
+import functools
+import os
+
+import numpy as np
+import pandas as pd
+import pytest
+from pytest import approx
+
+from model_tools.brain_transformation import ModelCommitment
+from model_tools.activations import PytorchWrapper
+import brainscore
+import brainio_collection
+from brainscore.model_interface import BrainModel
+
+def pytorch_custom(image_size):
+    import torch
+    from torch import nn
+    from model_tools.activations.pytorch import load_preprocess_images
+
+    class MyModel(nn.Module):
+        def __init__(self):
+            super(MyModel, self).__init__()
+            self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=2, kernel_size=3, bias=False)
+            self.relu1 = torch.nn.ReLU()
+
+        def forward(self, x):
+            x = self.conv1(x)
+            x = self.relu1(x)
+            return x
+
+    preprocessing = functools.partial(load_preprocess_images, image_size=image_size)
+    return PytorchWrapper(model=MyModel(), preprocessing=preprocessing)
+
+class TestObjectSearch:
+    def test_model(self):
+        target_model_pool = pytorch_custom(28)
+        stimuli_model_pool = pytorch_custom(224)
+        search_target_model_param = {}
+        search_stimuli_model_param = {}
+        search_target_model_param['target_model'] = target_model_pool
+        search_stimuli_model_param['stimuli_model'] = stimuli_model_pool
+        search_target_model_param['target_layer'] = 'relu1'
+        search_stimuli_model_param['stimuli_layer'] = 'relu1'
+        search_target_model_param['target_img_size'] = 28
+        search_stimuli_model_param['search_image_size'] = 224
+
+        model = ModelCommitment(identifier=stimuli_model_pool.identifier, activations_model=None, layers=['relu1'], search_target_model_param=search_target_model_param, search_stimuli_model_param=search_stimuli_model_param)
+        assemblies = brainscore.get_assembly('klab.Zhang2018search_obj_array')
+        stimuli = assemblies.stimulus_set
+        fix = [[640, 512],
+               [365, 988],
+               [90, 512],
+               [365, 36],
+               [915, 36],
+               [1190, 512],
+               [915, 988]]
+        max_fix = 6
+        data_len = 300
+        model.start_task(BrainModel.Task.visual_search_obj_arr, fix=fix, max_fix=max_fix, data_len=data_len)
+        cumm_perf, saccades = model.look_at(stimuli)
+
+        assert saccades.shape == (300, 8, 2)
+        assert cumm_perf.shape == (7, 2)
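
The test pins down the output contract: look_at returns a per-trial score matrix of shape (data_len, max_fix + 1) and a saccade array of shape (data_len, max_fix + 2, 2). One plausible way a benchmark could reduce the score matrix to cumulative search performance (illustrative only, with fake outcomes; not benchmark code):

import numpy as np

score = np.zeros((300, 7))                               # data_len=300, max_fix=6
score[np.arange(300), np.random.randint(1, 7, 300)] = 1  # fake: every trial finds the target at some fixation
cumulative_performance = np.cumsum(score.sum(axis=0)) / 300
print(cumulative_performance)                            # fraction of targets found within k fixations
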
From 4e583fab09715bebe2b41410ba56f58b42443cf6 Mon Sep 17 00:00:00 2001
From: Martin Schrimpf
Date: Fri, 20 Mar 2020 12:49:49 -0400
Subject: [PATCH 5/9] simplify behavior arbitration

---
 model_tools/brain_transformation/__init__.py | 25 +++++++-------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index ca55c2f..4e08be6 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -8,6 +8,7 @@
 from .stimuli import PixelsToDegrees
 from .search import VisualSearchObjArray
 
+
 class ModelCommitment(BrainModel):
     """
     Standard process to convert a BaseModel (e.g. standard Machine Learning/Computer Vision model)
@@ -21,7 +22,8 @@ class ModelCommitment(BrainModel):
         'IT': LazyLoad(MajajITPublicBenchmark),
     }
 
-    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None, search_target_model_param=None, search_stimuli_model_param=None):
+    def __init__(self, identifier, activations_model, layers, behavioral_readout_layer=None, region_benchmarks=None,
+                 search_target_model_param=None, search_stimuli_model_param=None):
         self.layers = layers
         self.region_benchmarks = {**self.standard_region_benchmarks, **(region_benchmarks or {})}
         layer_model = LayerMappedModel(identifier=identifier, activations_model=activations_model)
@@ -30,32 +32,23 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_lay
         behavioral_readout_layer = behavioral_readout_layer or layers[-1]
         probabilities_behavior = ProbabilitiesMapping(identifier=identifier, activations_model=activations_model,
                                                       layer=behavioral_readout_layer)
+        search_model = VisualSearchObjArray(identifier=identifier, target_model_param=search_target_model_param,
+                                            stimuli_model_param=search_stimuli_model_param)
         self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior,
-                                               BrainModel.Task.probabilities: probabilities_behavior})
+                                               BrainModel.Task.probabilities: probabilities_behavior,
+                                               BrainModel.Task.visual_search_obj_arr: search_model})
         self.do_behavior = False
 
-        self.search_model = VisualSearchObjArray(identifier=identifier, target_model_param=search_target_model_param, stimuli_model_param=search_stimuli_model_param)
-        self.do_search = False
-
     def start_task(self, task: BrainModel.Task, *args, **kwargs):
         if task != BrainModel.Task.passive:
-            if task == BrainModel.Task.visual_search_obj_arr:
-                self.search_model.start_task(task, **kwargs)
-                self.do_behavior = False
-                self.do_search = True
-            else:
-                self.behavior_model.start_task(task, *args, **kwargs)
-                self.do_behavior = True
-                self.do_search = False
+            self.behavior_model.start_task(task, *args, **kwargs)
+            self.do_behavior = True
         else:
             self.do_behavior = False
-            self.do_search = False
 
     def look_at(self, stimuli):
         if self.do_behavior:
             return self.behavior_model.look_at(stimuli)
-        elif self.do_search:
-            return self.search_model.look_at(stimuli)
         else:
             return self.layer_model.look_at(stimuli)
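
After this simplification, task routing is handled entirely by BehaviorArbiter instead of per-task flags on ModelCommitment. The pattern, reduced to its essentials (a sketch; the real BehaviorArbiter lives in .behavior and may differ in detail):

class Arbiter:
    def __init__(self, mapping):
        self.mapping = mapping  # BrainModel.Task -> handler model
        self.current = None

    def start_task(self, task, *args, **kwargs):
        self.current = self.mapping[task]  # unknown tasks raise KeyError here
        self.current.start_task(task, *args, **kwargs)

    def look_at(self, stimuli):
        return self.current.look_at(stimuli)
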
From 1f9b09580aba604e4701e85c2c574861e71d301a Mon Sep 17 00:00:00 2001
From: Martin Schrimpf
Date: Fri, 20 Mar 2020 12:50:15 -0400
Subject: [PATCH 6/9] auto-format

---
 model_tools/brain_transformation/search.py | 319 +++++++++++----------
 1 file changed, 160 insertions(+), 159 deletions(-)

diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
index fc1db1c..6fc5148 100755
--- a/model_tools/brain_transformation/search.py
+++ b/model_tools/brain_transformation/search.py
@@ -1,159 +1,160 @@
-import os
-from collections import OrderedDict
-
-from brainio_base.assemblies import BehavioralAssembly
-from brainscore.model_interface import BrainModel
-from brainscore.utils import fullname
-
-import cv2
-import numpy as np
-from tqdm import tqdm
-import logging
-
-class VisualSearchObjArray(BrainModel):
-    def __init__(self, identifier, target_model_param, stimuli_model_param):
-        self.current_task = None
-        self.identifier = identifier
-        self.target_model = target_model_param['target_model']
-        self.stimuli_model = stimuli_model_param['stimuli_model']
-        self.target_layer = target_model_param['target_layer']
-        self.stimuli_layer = stimuli_model_param['stimuli_layer']
-        self.search_image_size = stimuli_model_param['search_image_size']
-        self._logger = logging.getLogger(fullname(self))
-
-    def start_task(self, task: BrainModel.Task, **kwargs):
-        self.fix = kwargs['fix']  # fixation map
-        self.max_fix = kwargs['max_fix']  # maximum allowed fixation excluding the very first fixation
-        self.data_len = kwargs['data_len']  # Number of stimuli
-        self.current_task = task
-
-    def look_at(self, stimuli_set):
-        self.gt_array = []
-        gt = stimuli_set[stimuli_set['image_label'] == 'mask']
-        gt_paths = list(gt.image_paths.values())[int(gt.index.values[0]):int(gt.index.values[-1]+1)]
-
-        for i in range(6):
-            imagename_gt = gt_paths[i]
-
-            gt = cv2.imread(imagename_gt, 0)
-            gt = cv2.resize(gt, (self.search_image_size, self.search_image_size), interpolation = cv2.INTER_AREA)
-            retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
-            temp_stim = np.uint8(np.zeros((3*self.search_image_size, 3*self.search_image_size)))
-            temp_stim[self.search_image_size:2*self.search_image_size, self.search_image_size:2*self.search_image_size] = np.copy(gt)
-            gt = np.copy(temp_stim)
-            gt = gt/255
-
-            self.gt_array.append(gt)
-
-        self.gt_total = np.copy(self.gt_array[0])
-        for i in range(1,6):
-            self.gt_total += self.gt_array[i]
-
-        self.score = np.zeros((self.data_len, self.max_fix+1))
-        self.data = np.zeros((self.data_len, self.max_fix+2, 2), dtype=int)
-        S_data = np.zeros((300, 7, 2), dtype=int)
-        I_data = np.zeros((300, 1), dtype=int)
-
-        data_cnt = 0
-
-        target = stimuli_set[stimuli_set['image_label'] == 'target']
-        target_features = self.target_model(target, layers=[self.target_layer], stimuli_identifier=False)
-        if target_features.shape[0] == target_features['neuroid_num'].shape[0]:
-            target_features = target_features.T
-
-        stimuli = stimuli_set[stimuli_set['image_label'] == 'stimuli']
-        stimuli_features = self.stimuli_model(stimuli, layers=[self.stimuli_layer], stimuli_identifier=False)
-        if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
-            stimuli_features = stimuli_features.T
-
-        import torch
-
-        for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
-            op_target = self.unflat(target_features[i:i+1])
-            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=(op_target.shape[2], op_target.shape[3]), stride=1, bias=False)
-            MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
-
-            gt_idx = target_features.tar_obj_pos.values[i]
-            gt = self.gt_array[gt_idx]
-
-            op_stimuli = self.unflat(stimuli_features[i:i+1])
-            out = MMconv(torch.Tensor(op_stimuli)).detach().numpy()
-            out = out.reshape(out.shape[2:])
-
-            out = out - np.min(out)
-            out = out/np.max(out)
-            out *= 255
-            out = np.uint8(out)
-            out = cv2.resize(out, (self.search_image_size, self.search_image_size), interpolation = cv2.INTER_AREA)
-            out = cv2.GaussianBlur(out,(7,7),3)
-
-            temp_stim = np.uint8(np.zeros((3*self.search_image_size, 3*self.search_image_size)))
-            temp_stim[self.search_image_size:2*self.search_image_size, self.search_image_size:2*self.search_image_size] = np.copy(out)
-            attn = np.copy(temp_stim*self.gt_total)
-
-            saccade = []
-            (x, y) = int(attn.shape[0]/2), int(attn.shape[1]/2)
-            saccade.append((x, y))
-
-            for k in range(self.max_fix):
-                (x, y) = np.unravel_index(np.argmax(attn), attn.shape)
-
-                fxn_x, fxn_y = x, y
-
-                fxn_x, fxn_y = max(fxn_x, self.search_image_size), max(fxn_y, self.search_image_size)
-                fxn_x, fxn_y = min(fxn_x, (attn.shape[0]-self.search_image_size)), min(fxn_y, (attn.shape[1]-self.search_image_size))
-
-                saccade.append((fxn_x, fxn_y))
-
-                attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1])
-
-                if(t==gt_idx):
-                    self.score[data_cnt, k+1] = 1
-                    data_cnt += 1
-                    break
-
-            saccade = np.asarray(saccade)
-            j = saccade.shape[0]
-
-            for k in range(j):
-                tar_id = self.get_pos(saccade[k, 0], saccade[k, 1], 0)
-                saccade[k, 0] = self.fix[tar_id][0]
-                saccade[k, 1] = self.fix[tar_id][1]
-
-            I_data[i, 0] = min(7, j)
-            S_data[i, :j, 0] = saccade[:, 0].reshape((-1,))[:7]
-            S_data[i, :j, 1] = saccade[:, 1].reshape((-1,))[:7]
-
-        self.data[:,:7,:] = S_data
-        self.data[:,7,:] = I_data
-
-        return (self.score, self.data)
-
-    def remove_attn(self, img, x, y):
-        t = -1
-        for i in range(5, -1, -1):
-            fxt_place = self.gt_array[i][x, y]
-            if (fxt_place>0):
-                t = i
-                break
-
-        if(t>-1):
-            img[self.gt_array[t] == 1] = 0
-
-        return img, t
-
-    def get_pos(self, x, y, t):
-        for i in range(5, -1, -1):
-            fxt_place = self.gt_array[i][int(x), int(y)]
-            if (fxt_place>0):
-                t = i + 1
-                break
-        return t
-
-    def unflat(self, X):
-        channel_names = ['channel', 'channel_x', 'channel_y']
-        assert all(hasattr(X, coord) for coord in channel_names)
-        shapes = [len(set(X[channel].values)) for channel in channel_names]
-        X = np.reshape(X.values, [X.shape[0]] + shapes)
-        X = np.transpose(X, axes=[0, 3, 1, 2])
-        return X
+import cv2
+import logging
+import numpy as np
+from tqdm import tqdm
+
+from brainscore.model_interface import BrainModel
+from brainscore.utils import fullname
+
+
+class VisualSearchObjArray(BrainModel):
+    def __init__(self, identifier, target_model_param, stimuli_model_param):
+        self.current_task = None
+        self.identifier = identifier
+        self.target_model = target_model_param['target_model']
+        self.stimuli_model = stimuli_model_param['stimuli_model']
+        self.target_layer = target_model_param['target_layer']
+        self.stimuli_layer = stimuli_model_param['stimuli_layer']
+        self.search_image_size = stimuli_model_param['search_image_size']
+        self._logger = logging.getLogger(fullname(self))
+
+    def start_task(self, task: BrainModel.Task, **kwargs):
+        self.fix = kwargs['fix']  # fixation map
+        self.max_fix = kwargs['max_fix']  # maximum allowed fixation excluding the very first fixation
+        self.data_len = kwargs['data_len']  # Number of stimuli
+        self.current_task = task
+
+    def look_at(self, stimuli_set):
+        self.gt_array = []
+        gt = stimuli_set[stimuli_set['image_label'] == 'mask']
+        gt_paths = list(gt.image_paths.values())[int(gt.index.values[0]):int(gt.index.values[-1] + 1)]
+
+        for i in range(6):
+            imagename_gt = gt_paths[i]
+
+            gt = cv2.imread(imagename_gt, 0)
+            gt = cv2.resize(gt, (self.search_image_size, self.search_image_size), interpolation=cv2.INTER_AREA)
+            retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
+            temp_stim = np.uint8(np.zeros((3 * self.search_image_size, 3 * self.search_image_size)))
+            temp_stim[self.search_image_size:2 * self.search_image_size,
+            self.search_image_size:2 * self.search_image_size] = np.copy(gt)
+            gt = np.copy(temp_stim)
+            gt = gt / 255
+
+            self.gt_array.append(gt)
+
+        self.gt_total = np.copy(self.gt_array[0])
+        for i in range(1, 6):
+            self.gt_total += self.gt_array[i]
+
+        self.score = np.zeros((self.data_len, self.max_fix + 1))
+        self.data = np.zeros((self.data_len, self.max_fix + 2, 2), dtype=int)
+        S_data = np.zeros((300, 7, 2), dtype=int)
+        I_data = np.zeros((300, 1), dtype=int)
+
+        data_cnt = 0
+
+        target = stimuli_set[stimuli_set['image_label'] == 'target']
+        target_features = self.target_model(target, layers=[self.target_layer], stimuli_identifier=False)
+        if target_features.shape[0] == target_features['neuroid_num'].shape[0]:
+            target_features = target_features.T
+
+        stimuli = stimuli_set[stimuli_set['image_label'] == 'stimuli']
+        stimuli_features = self.stimuli_model(stimuli, layers=[self.stimuli_layer], stimuli_identifier=False)
+        if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
+            stimuli_features = stimuli_features.T
+
+        import torch
+
+        for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
+            op_target = self.unflat(target_features[i:i + 1])
+            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=(op_target.shape[2], op_target.shape[3]),
+                                     stride=1, bias=False)
+            MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
+
+            gt_idx = target_features.tar_obj_pos.values[i]
+            gt = self.gt_array[gt_idx]
+
+            op_stimuli = self.unflat(stimuli_features[i:i + 1])
+            out = MMconv(torch.Tensor(op_stimuli)).detach().numpy()
+            out = out.reshape(out.shape[2:])
+
+            out = out - np.min(out)
+            out = out / np.max(out)
+            out *= 255
+            out = np.uint8(out)
+            out = cv2.resize(out, (self.search_image_size, self.search_image_size), interpolation=cv2.INTER_AREA)
+            out = cv2.GaussianBlur(out, (7, 7), 3)
+
+            temp_stim = np.uint8(np.zeros((3 * self.search_image_size, 3 * self.search_image_size)))
+            temp_stim[self.search_image_size:2 * self.search_image_size,
+            self.search_image_size:2 * self.search_image_size] = np.copy(out)
+            attn = np.copy(temp_stim * self.gt_total)
+
+            saccade = []
+            (x, y) = int(attn.shape[0] / 2), int(attn.shape[1] / 2)
+            saccade.append((x, y))
+
+            for k in range(self.max_fix):
+                (x, y) = np.unravel_index(np.argmax(attn), attn.shape)
+
+                fxn_x, fxn_y = x, y
+
+                fxn_x, fxn_y = max(fxn_x, self.search_image_size), max(fxn_y, self.search_image_size)
+                fxn_x, fxn_y = min(fxn_x, (attn.shape[0] - self.search_image_size)), min(fxn_y, (
+                        attn.shape[1] - self.search_image_size))
+
+                saccade.append((fxn_x, fxn_y))
+
+                attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1])
+
+                if (t == gt_idx):
+                    self.score[data_cnt, k + 1] = 1
+                    data_cnt += 1
+                    break
+
+            saccade = np.asarray(saccade)
+            j = saccade.shape[0]
+
+            for k in range(j):
+                tar_id = self.get_pos(saccade[k, 0], saccade[k, 1], 0)
+                saccade[k, 0] = self.fix[tar_id][0]
+                saccade[k, 1] = self.fix[tar_id][1]
+
+            I_data[i, 0] = min(7, j)
+            S_data[i, :j, 0] = saccade[:, 0].reshape((-1,))[:7]
+            S_data[i, :j, 1] = saccade[:, 1].reshape((-1,))[:7]
+
+        self.data[:, :7, :] = S_data
+        self.data[:, 7, :] = I_data
+
+        return (self.score, self.data)
+
+    def remove_attn(self, img, x, y):
+        t = -1
+        for i in range(5, -1, -1):
+            fxt_place = self.gt_array[i][x, y]
+            if (fxt_place > 0):
+                t = i
+                break
+
+        if (t > -1):
+            img[self.gt_array[t] == 1] = 0
+
+        return img, t
+
+    def get_pos(self, x, y, t):
+        for i in range(5, -1, -1):
+            fxt_place = self.gt_array[i][int(x), int(y)]
+            if (fxt_place > 0):
+                t = i + 1
+                break
+        return t
+
+    def unflat(self, X):
+        channel_names = ['channel', 'channel_x', 'channel_y']
+        assert all(hasattr(X, coord) for coord in channel_names)
+        shapes = [len(set(X[channel].values)) for channel in channel_names]
+        X = np.reshape(X.values, [X.shape[0]] + shapes)
+        X = np.transpose(X, axes=[0, 3, 1, 2])
+        return X
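
A note on unflat, unchanged by the reformat: activations come back as a flat neuroid vector carrying channel/channel_x/channel_y coordinates, and unflat restores the (batch, channel, height, width) layout that Conv2d expects. A toy version with made-up sizes (assuming the flat memory order is x, then y, then channel, which is what the transpose above implies):

import numpy as np

flat = np.arange(2 * 5 * 5).reshape(1, 50)       # 1 image, 50 "neuroids"
restored = flat.reshape(1, 5, 5, 2)              # (batch, x, y, channel)
restored = np.transpose(restored, (0, 3, 1, 2))  # -> (batch, channel, x, y)
print(restored.shape)                            # (1, 2, 5, 5)
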
From 0a40eb36b3fbf607ddc38bf221ca5cbbd2d520ea Mon Sep 17 00:00:00 2001
From: shashikg
Date: Sat, 11 Jul 2020 08:31:40 +0530
Subject: [PATCH 7/9] add vs

---
 model_tools/brain_transformation/__init__.py |   9 +-
 model_tools/brain_transformation/search.py   | 129 ++++++++++++++++++-
 2 files changed, 134 insertions(+), 4 deletions(-)

diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index 4e08be6..372a1af 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -6,7 +6,7 @@
 from .behavior import BehaviorArbiter, LogitsBehavior, ProbabilitiesMapping
 from .neural import LayerMappedModel, LayerSelection, LayerScores
 from .stimuli import PixelsToDegrees
-from .search import VisualSearchObjArray
+from .search import VisualSearchObjArray, VisualSearch
 
 
 class ModelCommitment(BrainModel):
@@ -32,11 +32,14 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_lay
         behavioral_readout_layer = behavioral_readout_layer or layers[-1]
         probabilities_behavior = ProbabilitiesMapping(identifier=identifier, activations_model=activations_model,
                                                       layer=behavioral_readout_layer)
-        search_model = VisualSearchObjArray(identifier=identifier, target_model_param=search_target_model_param,
+        search_obj_model = VisualSearchObjArray(identifier=identifier, target_model_param=search_target_model_param,
+                                                stimuli_model_param=search_stimuli_model_param)
+        search_model = VisualSearch(identifier=identifier, target_model_param=search_target_model_param,
                                     stimuli_model_param=search_stimuli_model_param)
         self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior,
                                                BrainModel.Task.probabilities: probabilities_behavior,
-                                               BrainModel.Task.visual_search_obj_arr: search_model})
+                                               BrainModel.Task.visual_search_obj_arr: search_obj_model,
+                                               BrainModel.Task.visual_search: search_model})
         self.do_behavior = False
 
diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
index 6fc5148..c9f92f3 100755
--- a/model_tools/brain_transformation/search.py
+++ b/model_tools/brain_transformation/search.py
@@ -6,7 +6,6 @@
 from brainscore.model_interface import BrainModel
 from brainscore.utils import fullname
 
-
 class VisualSearchObjArray(BrainModel):
     def __init__(self, identifier, target_model_param, stimuli_model_param):
         self.current_task = None
@@ -158,3 +157,131 @@ def unflat(self, X):
         X = np.reshape(X.values, [X.shape[0]] + shapes)
         X = np.transpose(X, axes=[0, 3, 1, 2])
         return X
+
+
+class VisualSearch(BrainModel):
+    def __init__(self, identifier, target_model_param, stimuli_model_param):
+        self.current_task = None
+        self.identifier = identifier
+        self.target_model = target_model_param['target_model']
+        self.stimuli_model = stimuli_model_param['stimuli_model']
+        self.target_layer = target_model_param['target_layer']
+        self.stimuli_layer = stimuli_model_param['stimuli_layer']
+        self.search_image_size = stimuli_model_param['search_image_size']
+        self._logger = logging.getLogger(fullname(self))
+
+    def start_task(self, task: BrainModel.Task, **kwargs):
+        self.fix = kwargs['fix']  # fixation map
+        self.max_fix = kwargs['max_fix']  # maximum allowed fixation excluding the very first fixation
+        self.data_len = kwargs['data_len']  # Number of stimuli
+        self.current_task = task
+        self.ior_size = kwargs['ior_size']
+
+    def look_at(self, stimuli_set):
+        self.score = np.zeros((self.data_len, self.max_fix + 1))
+        self.data = np.zeros((self.data_len, self.max_fix + 2, 2), dtype=int)
+        S_data = np.zeros((self.data_len, self.max_fix + 1, 2), dtype=int)
+        I_data = np.zeros((self.data_len, 1), dtype=int)
+
+        data_cnt = 0
+
+        target = stimuli_set[stimuli_set['image_label'] == 'target']
+        target_features = self.target_model(target, layers=[self.target_layer], stimuli_identifier=False)
+        if target_features.shape[0] == target_features['neuroid_num'].shape[0]:
+            target_features = target_features.T
+
+        stimuli = stimuli_set[stimuli_set['image_label'] == 'stimuli']
+        stimuli_features = self.stimuli_model(stimuli, layers=[self.stimuli_layer], stimuli_identifier=False)
+        if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
+            stimuli_features = stimuli_features.T
+
+        gt = stimuli_set[stimuli_set['image_label'] == 'gt']
+        gt_paths = list(gt.image_paths.values())[int(gt.index.values[0]):int(gt.index.values[-1] + 1)]
+
+        import torch
+
+        for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
+            op_target = self.unflat(target_features[i:i + 1])
+            MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=(op_target.shape[2], op_target.shape[3]),
+                                     stride=1, bias=False)
+            MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
+
+            imagename_gt = gt_paths[i]
+            gt = cv2.imread(imagename_gt, 0)
+            gt = cv2.resize(gt, (self.search_image_size, self.search_image_size), interpolation=cv2.INTER_AREA)
+            retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
+            temp_stim = np.uint8(np.zeros((3 * self.search_image_size, 3 * self.search_image_size)))
+            temp_stim[self.search_image_size:2 * self.search_image_size,
+            self.search_image_size:2 * self.search_image_size] = np.copy(gt)
+            gt = np.copy(temp_stim)
+            gt = gt / 255
+
+            op_stimuli = self.unflat(stimuli_features[i:i + 1])
+            out = MMconv(torch.Tensor(op_stimuli)).detach().numpy()
+            out = out.reshape(out.shape[2:])
+
+            out = out - np.min(out)
+            out = out / np.max(out)
+            out *= 255
+            out = np.uint8(out)
+            out = cv2.resize(out, (self.search_image_size, self.search_image_size), interpolation=cv2.INTER_AREA)
+            out = cv2.GaussianBlur(out, (7, 7), 3)
+
+            temp_stim = np.uint8(np.zeros((3 * self.search_image_size, 3 * self.search_image_size)))
+            temp_stim[self.search_image_size:2 * self.search_image_size,
+            self.search_image_size:2 * self.search_image_size] = np.copy(out)
+            attn = np.copy(temp_stim)
+
+            saccade = []
+            (x, y) = int(attn.shape[0] / 2), int(attn.shape[1] / 2)
+            saccade.append((x, y))
+
+            for k in range(self.max_fix):
+                (x, y) = np.unravel_index(np.argmax(attn), attn.shape)
+
+                fxn_x, fxn_y = x, y
+
+                fxn_x, fxn_y = max(fxn_x, self.search_image_size), max(fxn_y, self.search_image_size)
+                fxn_x, fxn_y = min(fxn_x, (attn.shape[0] - self.search_image_size)), min(fxn_y, (
+                        attn.shape[1] - self.search_image_size))
+
+                saccade.append((fxn_x, fxn_y))
+
+                attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1])
+
+                if t:
+                    self.score[data_cnt, k + 1] = 1
+                    data_cnt += 1
+                    break
+
+            saccade = np.asarray(saccade)
+            j = saccade.shape[0]
+
+            I_data[i, 0] = min(self.max_fix+1, j)
+            S_data[i, :j, 0] = saccade[:, 0].reshape((-1,))[:self.max_fix+1]
+            S_data[i, :j, 1] = saccade[:, 1].reshape((-1,))[:self.max_fix+1]
+
+        self.data[:, :self.max_fix+1, :] = S_data
+        self.data[:, self.max_fix+1, :] = I_data
+
+        return (self.score, self.data)
+
+    def remove_attn(self, img, x, y):
+        img[(x - int(self.ior_size/2)):(x + int(self.ior_size/2)), (y - int(self.ior_size/2)):(y + int(self.ior_size/2))] = 0
+
+        fxt_xtop = x-int(self.ior_size/2)
+        fxt_ytop = y-int(self.ior_size/2)
+        fxt_place = gt[fxt_xtop:(fxt_xtop+self.ior_size), fxt_ytop:(fxt_ytop+self.ior_size)]
+
+        if (np.sum(fxt_place)>0):
+            return img, True
+        else:
+            return img, False
+
+    def unflat(self, X):
+        channel_names = ['channel', 'channel_x', 'channel_y']
+        assert all(hasattr(X, coord) for coord in channel_names)
+        shapes = [len(set(X[channel].values)) for channel in channel_names]
+        X = np.reshape(X.values, [X.shape[0]] + shapes)
+        X = np.transpose(X, axes=[0, 3, 1, 2])
+        return X
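
Unlike VisualSearchObjArray, which masks out the fixated object, the new VisualSearch class implements inhibition of return with a fixed square window: remove_attn zeroes an ior_size x ior_size patch around the current fixation so the next argmax lands somewhere new. The mechanism in isolation (toy sizes; clamped at the border, which the patch leaves implicit):

import numpy as np

attn = np.random.rand(672, 672)
ior_size = 100
x, y = np.unravel_index(np.argmax(attn), attn.shape)
x0, y0 = max(x - ior_size // 2, 0), max(y - ior_size // 2, 0)
attn[x0:x0 + ior_size, y0:y0 + ior_size] = 0  # suppressed; the next argmax is a new location
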
From d5f29ac3c0609e978be73f89da34bb6febd22df6 Mon Sep 17 00:00:00 2001
From: shashikg
Date: Sat, 11 Jul 2020 13:08:49 +0530
Subject: [PATCH 8/9] remove redundant import

---
 model_tools/brain_transformation/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index 571eddd..b3f7f1a 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -5,7 +5,6 @@
 from model_tools.brain_transformation.temporal import TemporalIgnore
 from .behavior import BehaviorArbiter, LogitsBehavior, ProbabilitiesMapping
 from .neural import LayerMappedModel, LayerSelection, LayerScores
-from .stimuli import PixelsToDegrees
 from .search import VisualSearchObjArray, VisualSearch
 
 
From f4cf9b39830270ca671ead368e07b33d59f77a11 Mon Sep 17 00:00:00 2001
From: shashikg
Date: Wed, 22 Jul 2020 03:44:43 -0400
Subject: [PATCH 9/9] visual search - waldo and natural design

---
 model_tools/brain_transformation/__init__.py | 2 +-
 model_tools/brain_transformation/search.py   | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/model_tools/brain_transformation/__init__.py b/model_tools/brain_transformation/__init__.py
index b3f7f1a..233ff00 100644
--- a/model_tools/brain_transformation/__init__.py
+++ b/model_tools/brain_transformation/__init__.py
@@ -38,7 +38,7 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_lay
                                     stimuli_model_param=search_stimuli_model_param)
         self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior,
                                                BrainModel.Task.probabilities: probabilities_behavior,
-                                               BrainModel.Task.visual_search_obj_arr: search_obj_model,
+                                               BrainModel.Task.object_search: search_obj_model,
                                                BrainModel.Task.visual_search: search_model})
         self.do_behavior = False
 
diff --git a/model_tools/brain_transformation/search.py b/model_tools/brain_transformation/search.py
index c9f92f3..1fe0848 100755
--- a/model_tools/brain_transformation/search.py
+++ b/model_tools/brain_transformation/search.py
@@ -171,7 +171,6 @@ def __init__(self, identifier, target_model_param, stimuli_model_param):
         self._logger = logging.getLogger(fullname(self))
 
     def start_task(self, task: BrainModel.Task, **kwargs):
-        self.fix = kwargs['fix']  # fixation map
         self.max_fix = kwargs['max_fix']  # maximum allowed fixation excluding the very first fixation
         self.data_len = kwargs['data_len']  # Number of stimuli
         self.current_task = task
         self.ior_size = kwargs['ior_size']
@@ -247,7 +246,7 @@ def look_at(self, stimuli_set):
 
             saccade.append((fxn_x, fxn_y))
 
-            attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1])
+            attn, t = self.remove_attn(attn, saccade[-1][0], saccade[-1][1], gt)
 
             if t:
                 self.score[data_cnt, k + 1] = 1
@@ -266,7 +265,7 @@ def look_at(self, stimuli_set):
 
         return (self.score, self.data)
 
-    def remove_attn(self, img, x, y):
+    def remove_attn(self, img, x, y, gt):
         img[(x - int(self.ior_size/2)):(x + int(self.ior_size/2)), (y - int(self.ior_size/2)):(y + int(self.ior_size/2))] = 0
 
        fxt_xtop = x-int(self.ior_size/2)
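
Taken together, the series leaves ModelCommitment routing object-array search to VisualSearchObjArray (Task.object_search) and Waldo/natural-design search to VisualSearch (Task.visual_search). A closing usage sketch for the new path (the stimulus set and the ior_size value are placeholders, not taken from the patches; VisualSearch expects 'target', 'stimuli' and 'gt' image labels, and after PATCH 9 it no longer takes a fix keyword):

model.start_task(BrainModel.Task.visual_search, max_fix=6, data_len=300, ior_size=100)
score, saccade_data = model.look_at(natural_search_stimuli)
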