future-notebook-79388
11/10/2022, 4:21 AM
import typing
import ray
from ray import tune
from flytekit import Resources, task, workflow
from flytekitplugins.ray import HeadNodeConfig, RayJobConfig, WorkerNodeConfig
@ray.remote
def objective(config):
    return config["x"] * config["x"]

ray_config = RayJobConfig(
    head_node_config=HeadNodeConfig(ray_start_params={"log-color": "True"}),
    worker_node_config=[WorkerNodeConfig(group_name="ray-group", replicas=2)],
    runtime_env={"pip": ["numpy", "pandas"]},
)

@task(task_config=ray_config, limits=Resources(mem="2000Mi", cpu="1"))
def ray_task(n: int) -> int:
    model_params = {
        "x": tune.randint(-10, 10),
    }
    tuner = tune.Tuner(
        objective,
        tune_config=tune.TuneConfig(
            num_samples=10,
            max_concurrent_trials=n,
        ),
        param_space=model_params,
    )
    results = tuner.fit()
    return results

@workflow
def ray_workflow(n: int) -> int:
    return ray_task(n=n)
Are there any other ways to run hyperparameter tuning in a distributed manner, like Ray Tune?

freezing-airport-6809
future-notebook-79388
11/10/2022, 6:26 AM

future-notebook-79388
11/10/2022, 6:32 AM

glamorous-carpet-83516
11/10/2022, 5:46 PM

glamorous-carpet-83516
11/10/2022, 5:47 PM
result isn’t an int. It’s a ResultGrid.
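One way to satisfy the int annotation is to pull a single metric out of the ResultGrid before returning. A minimal sketch (the "score" metric name here is an assumption; use whatever key the trainable actually reports):

from ray.tune import ResultGrid

def best_score(result_grid: ResultGrid) -> int:
    # Select the best trial by a reported metric and return a plain int,
    # which Flyte can serialize as a task output.
    best = result_grid.get_best_result(metric="score", mode="max")
    return int(best.metrics["score"])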

future-notebook-79388
11/11/2022, 5:14 AM
ResultGrid.
@workflow
def ray_workflow(n: int) -> ResultGrid:
    return ray_task(n=n)
Is this the way?

glamorous-carpet-83516
11/11/2022, 7:43 AM

future-notebook-79388
11/11/2022, 7:58 AM

future-notebook-79388
11/11/2022, 8:38 AM
AttributeError: 'NoneType' object has no attribute 'encode'
ray.tune.error.TuneError: The Ray Tune run failed. Please inspect the previous error messages for a cause. After fixing the issue, you can restart the run from scratch or continue this run.
import ray
from ray import tune, air
from ray.air import Result
from ray.tune import ResultGrid
from flytekit import Resources, task, workflow
from flytekitplugins.ray import HeadNodeConfig, RayJobConfig, WorkerNodeConfig
@ray.remote
def objective(config):
    return config["x"] + 2

ray_config = RayJobConfig(
    head_node_config=HeadNodeConfig(ray_start_params={"log-color": "True"}),
    worker_node_config=[WorkerNodeConfig(group_name="ray-group", replicas=2)],
    runtime_env={"pip": ["numpy", "pandas"]},
)

@task(task_config=ray_config, limits=Resources(mem="2000Mi", cpu="1"))
def ray_task() -> int:
    model_params = {
        "x": tune.randint(-10, 10),
    }
    tuner = tune.Tuner(
        objective,
        tune_config=tune.TuneConfig(
            num_samples=10,
            max_concurrent_trials=2,
        ),
        param_space=model_params,
    )
    result_grid = tuner.fit()
    return len(result_grid)

@workflow
def ray_workflow() -> int:
    return ray_task()
freezing-airport-6809
glamorous-carpet-83516
11/11/2022, 6:53 PM
The objective (in tune.Tuner) should be a regular function instead of a Ray remote function.
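For reference, a minimal corrected sketch along those lines, reusing the ray_config from the snippet above (and assuming Ray 2.x, where a function trainable can return a metrics dict directly):

def objective(config):
    # A plain function trainable; Tune schedules and distributes
    # trials itself, so no @ray.remote decorator is needed.
    return {"score": config["x"] + 2}

@task(task_config=ray_config, limits=Resources(mem="2000Mi", cpu="1"))
def ray_task() -> int:
    tuner = tune.Tuner(
        objective,
        tune_config=tune.TuneConfig(num_samples=10, max_concurrent_trials=2),
        param_space={"x": tune.randint(-10, 10)},
    )
    result_grid = tuner.fit()
    # Return a plain int so Flyte can serialize the task output.
    return len(result_grid)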