-
Notifications
You must be signed in to change notification settings - Fork 29
ASK/TELL DEVELOP #1307
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
ASK/TELL DEVELOP #1307
Changes from 202 commits
507bc0a
18a52c9
231e6f0
eaebbff
043feeb
c66f10b
3d7981b
c380595
fdcfd66
1e0abd3
c7ea54b
c111afd
0ee448c
bb37f4b
38b3967
4b49233
1d213ef
f8c5eaf
dff6bad
c1ec7f6
a5133b9
682daa8
09ebdbc
9f200f0
cf5ac63
f2ef248
2c6a9c4
7224de3
99a7a2c
e8b7052
23e5164
06c14d7
bc1587e
5c2308d
0d146fc
ef906d5
9d07e6c
902b7f0
64b6401
ab09b9f
d66dafb
f4a9691
6fb608e
f926bfa
25bca85
2973b41
5a7160f
b5d66e0
bf4577d
c24730b
8695692
fcb434e
581c9a5
877ecef
54d7b3f
1cd8b45
05fca90
7a96995
c86c571
8379e0f
0ad9dcf
e2263a3
57e8c4b
a952fc1
6ec7528
3ac630a
08ad9df
aca1d50
6ac1faa
56c644e
fec7aa4
0db9f0b
49fda1e
1d6f83b
81267a5
b633f94
a3e8346
4da4298
c0e452c
f9385c2
02b349c
f162e75
c041a6b
23ed512
34f582e
390597f
f80aa9f
80c09d7
7ca34b1
f6341a4
fef5833
673c3eb
65ca79b
1ed05e5
9b1195b
24df60f
715773b
08ead4a
22b69b4
f664e37
99bc450
423f0d6
56e59aa
145e09d
9a6f299
9c0e258
4a52e0b
73bbf69
bf0d79e
b3ce513
2428152
0bcd7c9
bf9ed05
acc4811
2a67724
8c9e313
6b54991
a4ead36
3622219
c8d6a82
1dce059
77845a0
9b3429b
a2c58fc
2635236
012227a
03420b3
682425a
b209901
fd630eb
57a8de9
050c22d
5d31b63
d1d4b76
b05762a
0b8cdec
4e73c93
4357173
1e52d99
3c21202
1cb542f
9798b3e
585c521
cf36e85
77efa2a
ed6604d
9cbca1e
ec773d4
5ea9b2b
b14b85d
f8d1833
ad54abd
a383dc0
1353363
bb26e05
948c88b
047673d
b681398
b828cb4
c040721
cd776a1
7b22ab1
4e37554
41a4cbc
3e41b4b
2303512
802de9c
fa254a0
1606ef3
473487d
b42e1ba
a354339
5dc5155
ed03ac1
b7a79c3
1d335c8
b23d13c
c07611d
f39b937
fb9b01d
e88088f
57b466e
da20d5a
e5ff70d
d060f2e
d5e3c53
42f772a
bc3a976
12c0fe3
95bc6a0
f399750
cf9b1c1
fbf1288
4ca2153
d28681d
635439d
c91a824
854a336
4dbfc42
f4f6b96
2c28939
00bbebe
379868d
1c1e996
f6b2ce2
4ed9efd
a02e8d4
104424e
52d32a6
561ba54
7f4a4ee
3de1de5
3b21fe0
a11e04d
5800c93
253efea
4fda921
abdf4a6
6ac2880
6d649a3
56db443
4a47d2f
aabcf15
d345dce
fcb1af2
31f0778
548f580
aaac936
e9fa694
0b41dab
9903266
1634579
7a76385
55c346c
58f0d82
f4af3a6
cccd3bb
63b2fdd
3917e43
c8e6d62
589d8fb
7288d08
d5fbe14
68f1d4e
90fe2dc
ba1e2f3
3daf0c1
d847490
1fd96dc
ea9c35c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -12,3 +12,4 @@ | |
| from libensemble import logger | ||
|
|
||
| from .ensemble import Ensemble | ||
| from .generators import Generator | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,150 @@ | ||
| """Generator class exposing gpCAM functionality""" | ||
|
|
||
| import time | ||
| from typing import List | ||
|
|
||
| import numpy as np | ||
| from gpcam import GPOptimizer as GP | ||
| from numpy import typing as npt | ||
|
|
||
| # While there are class / func duplicates - re-use functions. | ||
| from libensemble.gen_funcs.persistent_gpCAM import ( | ||
| _calculate_grid_distances, | ||
| _eval_var, | ||
| _find_eligible_points, | ||
| _generate_mesh, | ||
| _read_testpoints, | ||
| ) | ||
| from libensemble.generators import LibensembleGenerator | ||
|
|
||
| __all__ = [ | ||
| "GP_CAM", | ||
| "GP_CAM_Covar", | ||
| ] | ||
|
|
||
|
|
||
| # Note - batch size is set in wrapper currently - and passed to ask as n_trials. | ||
| # To support empty ask(), add batch_size back in here. | ||
|
|
||
|
|
||
# Class-based counterpart of the user function persistent_gpCAM_ask_tell.
class GP_CAM(LibensembleGenerator):
    """Ask/tell generator that builds a global gpCAM surrogate of `f` values.

    The first batch is drawn uniformly at random from (lb, ub).  Every
    subsequent batch is produced by the surrogate's acquisition optimizer,
    which may be too slow relative to cheap simulation evaluations for
    some use cases.
    """

    def _initialize_gpcAM(self, user_specs):
        """Extract user params"""
        # self.b = user_specs["batch_size"]
        self.lb = np.array(user_specs["lb"])
        self.ub = np.array(user_specs["ub"])
        self.n = len(self.lb)  # dimension
        assert isinstance(self.n, int), "Dimension must be an integer"
        assert isinstance(self.lb, np.ndarray), "lb must be a numpy array"
        assert isinstance(self.ub, np.ndarray), "ub must be a numpy array"
        # Accumulated history of evaluated points and their function values.
        self.all_x = np.empty((0, self.n))
        self.all_y = np.empty((0, 1))
        np.random.seed(0)

    def __init__(self, H, persis_info, gen_specs, libE_info=None):
        # Signature mirrors the standard libEnsemble gen_f calling convention.
        self.H = H
        self.persis_info = persis_info
        self.gen_specs = gen_specs
        self.libE_info = libE_info
        self.U = self.gen_specs["user"]
        self._initialize_gpcAM(self.U)
        self.my_gp = None  # GP surrogate, built lazily on first tell_np
        self.noise = 1e-8  # 1e-12

    def ask_np(self, n_trials: int) -> npt.NDArray:
        """Return ``n_trials`` candidate points as a structured array (field "x")."""
        if self.all_x.shape[0] == 0:
            # No data yet: draw the initial batch uniformly at random.
            rng = self.persis_info["rand_stream"]
            self.x_new = rng.uniform(self.lb, self.ub, (n_trials, self.n))
        else:
            t0 = time.time()
            proposal = self.my_gp.ask(
                bounds=np.column_stack((self.lb, self.ub)),
                n=n_trials,
                pop_size=n_trials,
                max_iter=1,
            )
            self.x_new = proposal["x"]
            print(f"Ask time:{time.time() - t0}")
        H_o = np.zeros(n_trials, dtype=self.gen_specs["out"])
        H_o["x"] = self.x_new
        return H_o

    def tell_np(self, calc_in: npt.NDArray) -> None:
        """Fold new evaluations into the history and (re)train the GP."""
        if calc_in is None:
            return
        self.y_new = np.atleast_2d(calc_in["f"]).T
        # Discard any points whose evaluation returned NaN.
        nan_indices = [i for i, fval in enumerate(self.y_new) if np.isnan(fval)]
        self.x_new = np.delete(self.x_new, nan_indices, axis=0)
        self.y_new = np.delete(self.y_new, nan_indices, axis=0)

        self.all_x = np.vstack((self.all_x, self.x_new))
        self.all_y = np.vstack((self.all_y, self.y_new))

        noise_variances = self.noise * np.ones(len(self.all_y))
        if self.my_gp is None:
            self.my_gp = GP(self.all_x, self.all_y, noise_variances=noise_variances)
        else:
            self.my_gp.tell(self.all_x, self.all_y, noise_variances=noise_variances)
        self.my_gp.train()
|
|
||
|
|
||
class GP_CAM_Covar(GP_CAM):
    """
    This generation function constructs a global surrogate of `f` values.

    It is a batched method that produces a first batch uniformly random from
    (lb, ub) and on following iterations samples the GP posterior covariance
    function to find sample points.
    """

    def __init__(self, H, persis_info, gen_specs, libE_info=None):
        super().__init__(H, persis_info, gen_specs, libE_info)
        self.test_points = _read_testpoints(self.U)
        self.x_for_var = None  # candidate points at which variance is evaluated
        self.var_vals = None  # posterior variance at self.x_for_var
        if self.U.get("use_grid"):
            self.num_points = 10  # mesh resolution per dimension
            self.x_for_var = _generate_mesh(self.lb, self.ub, self.num_points)
            self.r_low_init, self.r_high_init = _calculate_grid_distances(self.lb, self.ub, self.num_points)

    # Fixed annotation: a structured numpy array is returned, not List[dict].
    def ask_np(self, n_trials: int) -> npt.NDArray:
        """Return ``n_trials`` candidate points as a structured array (field "x")."""
        if self.all_x.shape[0] == 0:
            # No model yet: draw the initial batch uniformly at random.
            x_new = self.persis_info["rand_stream"].uniform(self.lb, self.ub, (n_trials, self.n))
        else:
            if not self.U.get("use_grid"):
                # Pick the highest-variance candidates from the random cloud.
                x_new = self.x_for_var[np.argsort(self.var_vals)[-n_trials:]]
            else:
                r_high = self.r_high_init
                r_low = self.r_low_init
                x_new = []
                r_cand = r_high  # Let's start with a large radius and stop when we have batchsize points

                sorted_indices = np.argsort(-self.var_vals)
                while len(x_new) < n_trials:
                    # Bisect the separation radius until enough mesh points qualify.
                    x_new = _find_eligible_points(self.x_for_var, sorted_indices, r_cand, n_trials)
                    if len(x_new) < n_trials:
                        r_high = r_cand
                    r_cand = (r_high + r_low) / 2.0

        self.x_new = x_new
        H_o = np.zeros(n_trials, dtype=self.gen_specs["out"])
        H_o["x"] = self.x_new
        return H_o

    def tell_np(self, calc_in: npt.NDArray) -> None:
        """Update the GP with results, then refresh the variance estimates."""
        if calc_in is not None:
            # Bug fix: call the parent's numpy-based tell_np directly.  The
            # previous super().tell(calc_in) routed a numpy array through the
            # list-of-dicts wrapper interface.
            super().tell_np(calc_in)
            if not self.U.get("use_grid"):
                n_trials = len(self.y_new)
                self.x_for_var = self.persis_info["rand_stream"].uniform(self.lb, self.ub, (10 * n_trials, self.n))

            self.var_vals = _eval_var(
                self.my_gp, self.all_x, self.all_y, self.x_for_var, self.test_points, self.persis_info
            )
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,48 @@ | ||
| """Generator classes providing points using sampling""" | ||
|
|
||
| import numpy as np | ||
|
|
||
| from libensemble.generators import LibensembleGenerator | ||
|
|
||
| __all__ = [ | ||
| "UniformSample", | ||
| ] | ||
|
|
||
|
|
||
class UniformSample(LibensembleGenerator):
    """
    This generator returns ``gen_specs["initial_batch_size"]`` uniformly
    sampled points the first time it is called. Afterwards, it returns the
    number of points given. This can be used in either a batch or asynchronous
    mode by adjusting the allocation function.
    """

    # Fixed annotation: __init__ returns None, not list.
    def __init__(self, _, persis_info, gen_specs, libE_info=None) -> None:
        # First positional argument is H (unused here, hence "_"); kept so the
        # signature mirrors the standard gen_f calling convention.
        self.persis_info = persis_info
        self.gen_specs = gen_specs
        self.libE_info = libE_info
        self._get_user_params(self.gen_specs["user"])

    def ask_np(self, n_trials: int) -> np.ndarray:
        """Return ``n_trials`` uniformly sampled points as a structured array."""
        H_o = np.zeros(n_trials, dtype=self.gen_specs["out"])
        H_o["x"] = self.persis_info["rand_stream"].uniform(self.lb, self.ub, (n_trials, self.n))

        if "obj_component" in H_o.dtype.fields:  # needs H_o - needs to be created in here.
            H_o["obj_component"] = self.persis_info["rand_stream"].integers(
                low=0, high=self.gen_specs["user"]["num_components"], size=n_trials
            )
        return H_o

    def tell_np(self, calc_in) -> None:
        """No-op: uniform sampling uses no feedback from evaluations."""
        pass  # random sample so nothing to tell

    def _get_user_params(self, user_specs) -> None:
        """Extract and validate the bounds and problem dimension from user specs."""
        # b = user_specs["initial_batch_size"]
        self.ub = user_specs["ub"]
        self.lb = user_specs["lb"]
        self.n = len(self.lb)  # dimension
        assert isinstance(self.n, int), "Dimension must be an integer"
        assert isinstance(self.lb, np.ndarray), "lb must be a numpy array"
        assert isinstance(self.ub, np.ndarray), "ub must be a numpy array"
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,38 @@ | ||
| import inspect | ||
|
|
||
| import numpy as np | ||
|
|
||
| from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG | ||
| from libensemble.tools.persistent_support import PersistentSupport | ||
| from libensemble.utils.misc import np_to_list_dicts | ||
|
|
||
|
|
||
def persistent_gen_f(H, persis_info, gen_specs, libE_info):
    """Persistent generator that wraps an ask/tell generator object.

    The generator (a class or an already-constructed instance) is taken from
    ``gen_specs["user"]["generator"]``.  Each iteration asks it for a batch,
    sends the batch to the manager, and tells it the returned evaluations,
    until a stop tag is received.
    """
    ps = PersistentSupport(libE_info, EVAL_GEN_TAG)
    U = gen_specs["user"]
    b = U.get("initial_batch_size") or U.get("batch_size")

    generator = U["generator"]
    if inspect.isclass(generator):
        # A class was supplied: instantiate with the standard gen_f signature.
        gen = generator(H, persis_info, gen_specs, libE_info)
    else:
        gen = generator  # already-instantiated generator object

    tag = None
    calc_in = None
    while tag not in [STOP_TAG, PERSIS_STOP]:
        H_o = gen.ask(b)
        if isinstance(H_o, list):
            # Convert a list of per-point dicts into a structured array.
            # Iterate each dict's own items rather than H_o[0].keys(), which
            # raised IndexError on an empty list and assumed all dicts share
            # the first dict's keys.
            H_o_arr = np.zeros(len(H_o), dtype=gen_specs["out"])
            for i, point in enumerate(H_o):
                for key, value in point.items():
                    H_o_arr[i][key] = value
            H_o = H_o_arr
        tag, _, calc_in = ps.send_recv(H_o)
        gen.tell(np_to_list_dicts(calc_in))

        if hasattr(calc_in, "__len__"):
            b = len(calc_in)  # match next batch size to the results received

    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We need to decide the `__init__` interface. We questioned before whether we keep the same interface - which mirrors the current gen_f - or rearrange it, as H is often not given (it is basically an H0). So it could be `gen_specs` first. I'm leaning towards keeping the original ordering as it mirrors our user functions, but this should be discussed.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fair enough. My opinion/intuition is a user is more likely to prefer either "classical" gens (e.g. Jeff) or ask/tell gens (e.g. other CAMPA folks). With these gens' interfaces and users being so different, I don't think an arguably simpler rearrangement of the input parameters is too confusing.
Similarly to how some people prefer numpy or pandas; they do similar things, but their interfaces being different isn't a point of contention.
I'd also lean towards if someone were to initialize some object, like a gen, themselves, they'd prefer their specifications be provided as early and clearly as possible:
vs.