Adding hyperparameter optimization for the CP-SAT solver of the coloring problem
- added a new default ParametersCP for CP-SAT (including multiprocessing)
- added hyperparameters to ColoringCPSatSolver and SolverColoringWithStartingSolution
- added a full example of an Optuna study, which currently fails on an OR-Tools callback call
g-poveda committed Mar 5, 2024
1 parent 21e08f8 commit cd64a6b
Showing 6 changed files with 215 additions and 26 deletions.
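For orientation, here is a minimal sketch of how the new hyperparameter API is meant to be consumed, using only names visible in the diffs below (get_hyperparameters_names and the modeling/warmstart kwargs); an illustration, not part of the commit:

    from discrete_optimization.coloring.coloring_parser import (
        get_data_available,
        parse_file,
    )
    from discrete_optimization.coloring.solvers.coloring_cpsat_solver import (
        ColoringCPSatSolver,
        ModelingCPSat,
    )

    problem = parse_file(get_data_available()[0])  # any bundled coloring instance
    solver = ColoringCPSatSolver(problem)
    # Declared hyperparameters: modeling and warmstart, plus greedy_start and
    # greedy_method inherited from SolverColoringWithStartingSolution.
    print(solver.get_hyperparameters_names())
    solver.init_model(modeling=ModelingCPSat.INTEGER, warmstart=True)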
76 changes: 55 additions & 21 deletions discrete_optimization/coloring/solvers/coloring_cpsat_solver.py
@@ -15,6 +15,10 @@
Problem,
Solution,
)
from discrete_optimization.generic_tools.hyperparameters.hyperparameter import (
CategoricalHyperparameter,
EnumHyperparameter,
)
from discrete_optimization.generic_tools.ortools_cpsat_tools import OrtoolsCPSatSolver


@@ -24,6 +28,11 @@ class ModelingCPSat(Enum):


class ColoringCPSatSolver(OrtoolsCPSatSolver, SolverColoringWithStartingSolution):
hyperparameters = [
EnumHyperparameter(name="modeling", enum=ModelingCPSat),
CategoricalHyperparameter(name="warmstart", choices=[True, False]),
] + SolverColoringWithStartingSolution.hyperparameters

def __init__(
self,
problem: Problem,
@@ -102,6 +111,7 @@ def init_model_binary(self, nb_colors: int, **kwargs):
self.variables["used"] = used

def init_model_integer(self, nb_colors: int, **kwargs):
used_variable = kwargs.get("used_variable", False)
cp_model = CpModel()
variables = [
cp_model.NewIntVar(0, nb_colors - 1, name=f"c_{i}")
@@ -119,39 +129,63 @@ def init_model_integer(self, nb_colors: int, **kwargs):
== self.problem.constraints_coloring.color_constraint[node]
)
        used = [cp_model.NewBoolVar(name=f"used_{c}") for c in range(nb_colors)]
        if used_variable:

            def add_indicator(vars, value, presence_value, model):
                bool_vars = []
                for var in vars:
                    bool_var = model.NewBoolVar("")
                    model.Add(var == value).OnlyEnforceIf(bool_var)
                    model.Add(var != value).OnlyEnforceIf(bool_var.Not())
                    bool_vars.append(bool_var)
                model.AddMaxEquality(presence_value, bool_vars)

            for j in range(nb_colors):
                if self.problem.use_subset:
                    indexes = self.problem.index_subset_nodes
                    vars = [variables[i] for i in indexes]
                else:
                    vars = variables
                add_indicator(vars, j, used[j], cp_model)
            cp_model.Minimize(sum(used))
        else:
            nbc = cp_model.NewIntVar(0, nb_colors, name="nbcolors")
            cp_model.AddMaxEquality(
                nbc, [variables[i] for i in self.problem.index_subset_nodes]
            )
            cp_model.Minimize(nbc)
        self.cp_model = cp_model
        self.variables["colors"] = variables
        self.variables["used"] = used

    def set_warmstart(self, solution: ColoringSolution):
        if self.modeling == ModelingCPSat.INTEGER:
            self.set_warm_start_integer(solution)
        if self.modeling == ModelingCPSat.BINARY:
            self.set_warm_start_binary(solution)

    def set_warm_start_integer(self, solution: ColoringSolution):
        for i in range(len(solution.colors)):
            self.cp_model.AddHint(self.variables["colors"][i], solution.colors[i])

    def set_warm_start_binary(self, solution: ColoringSolution):
        for i in range(len(solution.colors)):
            c = solution.colors[i]
            for color in self.variables["colors"][i]:
                self.cp_model.AddHint(self.variables["colors"][i][color], color == c)

    def init_model(self, **args: Any) -> None:
        modeling = args.get("modeling", ModelingCPSat.INTEGER)
        do_warmstart = args.get("warmstart", True)
        assert isinstance(modeling, ModelingCPSat)
        if "nb_colors" not in args or do_warmstart:
            solution = self.get_starting_solution(**args)
            nb_colors = self.problem.count_colors_all_index(solution.colors)
            args["nb_colors"] = min(args.get("nb_colors", nb_colors), nb_colors)
        if modeling == ModelingCPSat.BINARY:
            self.init_model_binary(**args)
        if modeling == ModelingCPSat.INTEGER:
            self.init_model_integer(**args)
        if do_warmstart:
            self.set_warmstart(solution=solution)
        self.modeling = modeling
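Aside: add_indicator above is the standard CP-SAT channeling pattern, reifying var == value with OnlyEnforceIf and aggregating with AddMaxEquality so that used[j] means "some node takes color j". A self-contained toy version on a 3-node path graph, using only documented OR-Tools calls (not part of the commit):

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    nb_nodes, nb_colors = 3, 3
    colors = [model.NewIntVar(0, nb_colors - 1, f"c_{i}") for i in range(nb_nodes)]
    for i, j in [(0, 1), (1, 2)]:  # path-graph edges: endpoints must differ
        model.Add(colors[i] != colors[j])
    used = [model.NewBoolVar(f"used_{c}") for c in range(nb_colors)]
    for c in range(nb_colors):
        indicators = []
        for var in colors:
            b = model.NewBoolVar("")
            model.Add(var == c).OnlyEnforceIf(b)
            model.Add(var != c).OnlyEnforceIf(b.Not())
            indicators.append(b)
        model.AddMaxEquality(used[c], indicators)  # used[c] == any(node colored c)
    model.Minimize(sum(used))
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    print(status == cp_model.OPTIMAL, solver.ObjectiveValue())  # True 2.0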
@@ -14,11 +14,20 @@
GreedyColoring,
NXGreedyColoringMethod,
)
from discrete_optimization.generic_tools.hyperparameters.hyperparameter import (
CategoricalHyperparameter,
EnumHyperparameter,
)

logger = logging.getLogger(__name__)


class SolverColoringWithStartingSolution(SolverColoring):
hyperparameters = [
CategoricalHyperparameter("greedy_start", choices=[True], default=True),
EnumHyperparameter("greedy_method", enum=NXGreedyColoringMethod),
]

def get_starting_solution(self, **kwargs: Any) -> ColoringSolution:
"""Used by the init_model method to provide a greedy first solution
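A hedged sketch of exercising these starting-solution hyperparameters directly, reusing solver and problem from the first sketch above; the greedy module path and the dsatur member are assumptions, not shown in this diff:

    # Hypothetical usage; module path and enum member assumed, not confirmed here.
    from discrete_optimization.coloring.solvers.greedy_coloring import (
        NXGreedyColoringMethod,
    )

    solution = solver.get_starting_solution(
        greedy_start=True,  # the only choice exposed to Optuna above
        greedy_method=NXGreedyColoringMethod.dsatur,
    )
    print(problem.count_colors_all_index(solution.colors))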
13 changes: 13 additions & 0 deletions discrete_optimization/generic_tools/cp_tools.py
@@ -129,6 +129,19 @@ def default() -> "ParametersCP":
optimisation_level=1,
)

@staticmethod
def default_cpsat() -> "ParametersCP":
return ParametersCP(
time_limit=100,
intermediate_solution=True,
all_solutions=False,
nr_solutions=1000,
free_search=False,
multiprocess=True,
nb_process=6,
optimisation_level=1,
)

@staticmethod
def default_fast_lns() -> "ParametersCP":
return ParametersCP(
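Usage note: the new preset only pre-fills the fields shown above, so callers can still override per run. A small sketch, reusing solver from the sketches above:

    p = ParametersCP.default_cpsat()  # time_limit=100, multiprocess=True, nb_process=6
    p.time_limit = 30  # tighten the budget for a quick run
    p.nb_process = 4
    result_store = solver.solve(parameters_cp=p)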
2 changes: 1 addition & 1 deletion discrete_optimization/generic_tools/ortools_cpsat_tools.py
@@ -82,7 +82,7 @@ def solve(
        if self.cp_model is None:
            self.init_model(**kwargs)
        if parameters_cp is None:
            parameters_cp = ParametersCP.default_cpsat()
        solver = CpSolver()
        solver.parameters.max_time_in_seconds = parameters_cp.time_limit
        solver.parameters.num_workers = parameters_cp.nb_process
9 changes: 5 additions & 4 deletions examples/coloring/coloring_cpspat_solver_example.py
@@ -31,12 +31,13 @@

def run_cpsat_coloring():
    logging.basicConfig(level=logging.INFO)
    file = [f for f in get_data_available() if "gc_100_7" in f][0]
    color_problem = parse_file(file)
    solver = ColoringCPSatSolver(color_problem, params_objective_function=None)
    solver.init_model(modeling=ModelingCPSat.BINARY, warmstart=True)
    p = ParametersCP.default_cpsat()
    p.time_limit = 100
    logging.info("Starting solve")
    result_store = solver.solve(
        callbacks=[NbIterationTracker(step_verbosity_level=logging.INFO)],
        parameters_cp=p,
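The hunk ends mid-call; consuming the result store would look roughly like this (get_best_solution_fit is the accessor used in the Optuna example below; a sketch, not the file's actual truncated tail):

    best_solution, fit = result_store.get_best_solution_fit()
    print(fit, color_problem.satisfy(best_solution))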
132 changes: 132 additions & 0 deletions examples/coloring/optuna_full_example_coloring.py
@@ -0,0 +1,132 @@
# Copyright (c) 2024 AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Example using OPTUNA to tune hyperparameters of Cpsat solver for coloring.
Results can be viewed on optuna-dashboard with:
optuna-dashboard optuna-journal.log
"""
import logging
from typing import Any, Dict, List, Type

import optuna
from optuna.storages import JournalFileStorage, JournalStorage
from optuna.trial import Trial, TrialState

from discrete_optimization.coloring.coloring_parser import (
get_data_available,
parse_file,
)
from discrete_optimization.coloring.solvers.coloring_cpsat_solver import (
ColoringCPSatSolver,
)
from discrete_optimization.generic_tools.callbacks.optuna import (
OptunaPruningSingleFitCallback,
)
from discrete_optimization.generic_tools.cp_tools import ParametersCP
from discrete_optimization.generic_tools.do_problem import ModeOptim
from discrete_optimization.generic_tools.do_solver import SolverDO

logger = logging.getLogger(__name__)


seed = 42
optuna_nb_trials = 150

study_name = f"coloring_cpsat-auto-250---"
storage_path = "./optuna-journal.log" # NFS path for distributed optimization

# Solvers to test
solvers_to_test: List[Type[SolverDO]] = [ColoringCPSatSolver]
kwargs_fixed_by_solver: Dict[Type[SolverDO], Dict[str, Any]] = {ColoringCPSatSolver: {}}
# we need to map the classes to a unique string, to be seen as a categorical hyperparameter by optuna
# by default, we use the class name, but if there are identical names, f"{cls.__module__}.{cls.__name__}" could be used.
solvers_by_name: Dict[str, Type[SolverDO]] = {
cls.__name__: cls for cls in solvers_to_test
}

# problem definition
file = [f for f in get_data_available() if "gc_250_5" in f][0]
problem = parse_file(file)

# sense of optimization
objective_register = problem.get_objective_register()
if objective_register.objective_sense == ModeOptim.MINIMIZATION:
direction = "minimize"
else:
direction = "maximize"

# objective names
objs, weights = objective_register.get_list_objective_and_default_weight()


# objective definition
def objective(trial: Trial):
# hyperparameters to test

# first parameter: solver choice
solver_name = trial.suggest_categorical("solver", choices=solvers_by_name)
solver_class = solvers_by_name[solver_name]

# hyperparameters for the chosen solver
hyperparameters_names = solver_class.get_hyperparameters_names()
hyperparameters_values = solver_class.suggest_hyperparameters_values_with_optuna(
names=hyperparameters_names,
trial=trial,
)
# use existing value if corresponding to a previous complete trial
states_to_consider = (TrialState.COMPLETE,)
trials_to_consider = trial.study.get_trials(
deepcopy=False, states=states_to_consider
)
for t in reversed(trials_to_consider):
if trial.params == t.params:
logger.warning(
"Trial with same hyperparameters as a previous complete trial: returning previous fit."
)
return t.value

# prune if corresponding to a previous failed trial
states_to_consider = (TrialState.FAIL,)
trials_to_consider = trial.study.get_trials(
deepcopy=False, states=states_to_consider
)
for t in reversed(trials_to_consider):
if trial.params == t.params:
raise optuna.TrialPruned(
"Pruning trial identical to a previous failed trial."
)

# construct kwargs for __init__, init_model, and solve
    kwargs = dict(kwargs_fixed_by_solver[solver_class])  # copy so the shared dict is not mutated across trials
kwargs.update(dict(zip(hyperparameters_names, hyperparameters_values)))
# solver init
solver = solver_class(problem=problem, **kwargs)
solver.init_model(**kwargs)
p = ParametersCP.default_cpsat()
p.nb_process = 4
p.time_limit = 10
# solve
sol, fit = solver.solve(
parameters_cp=p,
callbacks=[
OptunaPruningSingleFitCallback(trial=trial, **kwargs),
],
**kwargs,
).get_best_solution_fit()
return fit


# create study + database to store it
storage = JournalStorage(JournalFileStorage(storage_path))
study = optuna.create_study(
study_name=study_name,
direction=direction,
sampler=optuna.samplers.TPESampler(seed=seed),
storage=storage,
load_if_exists=True,
)
study.set_metric_names(["nb_colors"])
study.optimize(objective, n_trials=optuna_nb_trials)
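Once trials have run, the stored study can be inspected with standard Optuna calls (same storage path and study name as above):

    import optuna
    from optuna.storages import JournalFileStorage, JournalStorage

    storage = JournalStorage(JournalFileStorage("./optuna-journal.log"))
    study = optuna.load_study(study_name="coloring_cpsat-auto-250---", storage=storage)
    print(study.best_trial.params)  # best solver/hyperparameter combination found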
