Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions kernel_tuner/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@
pyatf_strategies,
random_sample,
simulated_annealing,
skopt
)
from kernel_tuner.strategies.wrapper import OptAlgWrapper

Expand All @@ -82,6 +83,7 @@
"mls": mls,
"pso": pso,
"simulated_annealing": simulated_annealing,
"skopt": skopt,
"firefly_algorithm": firefly_algorithm,
"bayes_opt": bayes_opt,
"pyatf_strategies": pyatf_strategies,
Expand Down Expand Up @@ -394,6 +396,7 @@ def __deepcopy__(self, _):
* "pso" particle swarm optimization
* "random_sample" takes a random sample of the search space
* "simulated_annealing" simulated annealing strategy
* "skopt" uses the minimization methods from `skopt`

Strategy-specific parameters and options are explained under strategy_options.

Expand Down
4 changes: 3 additions & 1 deletion kernel_tuner/strategies/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ def __init__(
snap=True,
return_invalid=False,
return_raw=None,
invalid_value=sys.float_info.max,
):
"""An abstract method to handle evaluation of configurations.

Expand Down Expand Up @@ -100,6 +101,7 @@ def __init__(
self.return_raw = f"{tuning_options['objective']}s"
self.results = []
self.budget_spent_fraction = 0.0
self.invalid_return_value = invalid_value


def __call__(self, x, check_restrictions=True):
Expand Down Expand Up @@ -168,7 +170,7 @@ def __call__(self, x, check_restrictions=True):
else:
# this is not a valid configuration, replace with float max if needed
if not self.return_invalid:
return_value = sys.float_info.max
return_value = self.invalid_return_value

# include raw data in return if requested
if self.return_raw is not None:
Expand Down
78 changes: 78 additions & 0 deletions kernel_tuner/strategies/skopt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
"""The strategy that uses a minimizer method for searching through the parameter space."""

from kernel_tuner.util import StopCriterionReached
from kernel_tuner.searchspace import Searchspace
from kernel_tuner.strategies.common import (
CostFunc,
get_options,
snap_to_nearest_config,
get_strategy_docstring,
)

# Minimizer backends from scikit-optimize that this strategy can dispatch to
# (see the method selection in tune() below).
supported_methods = ["forest", "gbrt", "gp", "dummy"]

# Strategy options exposed to the user as (description, default) pairs.
# NOTE(review): tune() tuple-unpacks the result of get_options() in this
# declaration order (method, options, popsize, maxiter) — do not reorder.
_options = dict(
    method=(f"Local optimization algorithm to use, choose any from {supported_methods}", "gp"),
    options=("Options passed to the skopt method as kwargs.", dict()),
    popsize=("Number of initial samples. If `None`, let skopt choose the initial population", None),
    maxiter=("Maximum number of times to repeat the method until the budget is exhausted.", 1),
)


def tune(searchspace: Searchspace, runner, tuning_options):
    # Imported lazily so kernel_tuner does not require scikit-optimize unless
    # this strategy is actually selected.
    import skopt

    method, skopt_options, popsize, maxiter = get_options(tuning_options.strategy_options, _options)

    # The evaluation budget defaults to the whole search space, capped by the
    # user-supplied max_fevals when present.
    max_fevals = searchspace.size
    if "max_fevals" in tuning_options:
        max_fevals = min(tuning_options["max_fevals"], max_fevals)

    # Allow far more skopt calls than evaluations: distinct calls may snap to
    # the same configuration and thus not all of them consume budget.
    skopt_options.setdefault("n_calls", 100 * max_fevals)

    # When an initial population size is requested, draw the samples from the
    # searchspace ourselves — unlike skopt, the searchspace sampler is aware
    # of the restrictions.
    if popsize:
        initial_samples = searchspace.get_random_sample(min(popsize, max_fevals))
        skopt_options["x0"] = [searchspace.get_param_indices(sample) for sample in initial_samples]

    # skopt optimizes over parameter *indices*: an integer range per parameter,
    # or a single-category dimension when a parameter has only one value.
    dimensions = []
    for values in searchspace.tune_params.values():
        dimensions.append((0, len(values) - 1) if len(values) > 1 else [0])

    cost_func = CostFunc(searchspace, tuning_options, runner)

    def objective(indices):
        # Translate an index vector back into a configuration and evaluate it.
        return cost_func(searchspace.get_param_config_from_param_indices(indices))

    def space_constraint(indices):
        # Only index vectors that map to valid configurations may be proposed.
        return searchspace.is_param_config_valid(searchspace.get_param_config_from_param_indices(indices))

    skopt_options["space_constraint"] = space_constraint
    skopt_options["verbose"] = tuning_options.verbose

    # Dispatch table replaces the if/elif chain over method names.
    minimizers = {
        "dummy": skopt.dummy_minimize,
        "forest": skopt.forest_minimize,
        "gp": skopt.gp_minimize,
        "gbrt": skopt.gbrt_minimize,
    }
    if method not in minimizers:
        raise ValueError(f"invalid skopt method: {method}")
    minimize = minimizers[method]

    opt_result = None
    try:
        # Repeat the minimizer until maxiter rounds or the stop criterion hits.
        for _ in range(maxiter):
            opt_result = minimize(objective, dimensions, **skopt_options)
    except StopCriterionReached as e:
        if tuning_options.verbose:
            print(e)

    if opt_result and tuning_options.verbose:
        print(opt_result)

    return cost_func.results


tune.__doc__ = get_strategy_docstring("skopt minimize", _options)
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,7 @@ nox-poetry = "^1.0.3"
ruff = "^0.4.8"
pep440 = "^0.1.2"
tomli = "^2.0.1" # held back by Python <= 3.10, can be replaced by built-in [tomllib](https://docs.python.org/3.11/library/tomllib.html) from Python 3.11 onwards
scikit-optimize = "0.10"

# development dependencies are unused for now, as this is already covered by test and docs
# # ATTENTION: if anything is changed here, run `poetry update`
Expand Down
Loading