@@ -44,38 +44,53 @@ def acq_max(ac, gp, y_max, bounds, random_state, constraint=None, n_warmup=10000
4444 :return: x_max, The arg max of the acquisition function.
4545 """
4646
47+ # We need to adjust the acquisition function to deal with constraints when there is some
48+ if constraint is not None :
def adjusted_ac(x):
    """Acquisition function adjusted to fulfill the constraint when necessary.

    Returns the NEGATED, constraint-weighted acquisition values: the solver
    used downstream is a minimizer, so maximizing the acquisition means
    minimizing this function.

    :param x: Flat array of one or more candidate points; reshaped to
        ``(-1, bounds.shape[0])`` before evaluation.
    :return: 1-D array of adjusted (negated) acquisition values, one per row.
    """
    # Negate to turn maximization into minimization for the solver.
    values = -ac(x.reshape(-1, bounds.shape[0]), gp=gp, y_max=y_max)
    # Predicted probability that each candidate satisfies the constraint.
    # NOTE(review): assumed to lie in [0, 1] — verify against the
    # constraint model; the rescaling denominator below relies on it.
    p_constraints = constraint.predict(x.reshape(-1, bounds.shape[0]))

    # TODO: This is not exactly how Gardner et al do it.
    # Their method requires the acquisition value to be strictly positive,
    # which is not guaranteed here. Sign convention: `values` is the
    # NEGATED acquisition, so `values <= 0` means a positive raw
    # acquisition — there we apply Gardner's weighting (multiply by the
    # feasibility probability). Where `values > 0` (negative raw
    # acquisition) we instead slightly rescale by the probability
    # estimate, dividing by (0.5 + 0.5 * p).
    adjusted = values * p_constraints
    positive = values > 0
    # Vectorized piecewise correction; a no-op when the mask is empty,
    # so no explicit branch (or per-element Python loop) is needed.
    adjusted[positive] = values[positive] / (0.5 + 0.5 * p_constraints[positive])
    return adjusted
74+
75+ else :
76+ # Transforms the problem in a minimization problem, this is necessary
77+ # because the solver we are using later on is a minimizer
78+ adjusted_ac = lambda x : - ac (x .reshape (- 1 , bounds .shape [0 ]), gp = gp , y_max = y_max )
79+
4780 # Warm up with random points
4881 x_tries = random_state .uniform (bounds [:, 0 ], bounds [:, 1 ],
4982 size = (n_warmup , bounds .shape [0 ]))
50- ys = ac (x_tries , gp = gp , y_max = y_max )
83+ ys = - adjusted_ac (x_tries )
5184 x_max = x_tries [ys .argmax ()]
5285 max_acq = ys .max ()
5386
5487 # Explore the parameter space more thoroughly
5588 x_seeds = random_state .uniform (bounds [:, 0 ], bounds [:, 1 ],
5689 size = (n_iter , bounds .shape [0 ]))
5790
58- if constraint is not None :
59- def to_minimize (x ):
60- target = - ac (x .reshape (1 , - 1 ), gp = gp , y_max = y_max )
61- p_constraint = constraint .predict (x .reshape (1 , - 1 ))
62-
63- # TODO: This is not exactly how Gardner et al do it.
64- # Their way would require the result of the acquisition function
65- # to be strictly positive (or negative), which is not the case
66- # here. For a negative target value, we use Gardner's version. If
67- # the target is positive, we instead slightly rescale the target
68- # depending on the probability estimate to fulfill the constraint.
69- if target < 0 :
70- return target * p_constraint
71- else :
72- return target / (0.5 + 0.5 * p_constraint )
73- else :
74- to_minimize = lambda x : - ac (x .reshape (1 , - 1 ), gp = gp , y_max = y_max )
75-
7691 for x_try in x_seeds :
7792 # Find the minimum of minus the acquisition function
78- res = minimize (lambda x : to_minimize ( x ) ,
93+ res = minimize (adjusted_ac ,
7994 x_try ,
8095 bounds = bounds ,
8196 method = "L-BFGS-B" )
0 commit comments