Padma Priya M
11/10/2022, 4:21 AMimport typing
import ray
from ray import tune
from flytekit import Resources, task, workflow
from flytekitplugins.ray import HeadNodeConfig, RayJobConfig, WorkerNodeConfig
@ray.remote
def objective(config):
return (config["x"] * config["x"])
ray_config = RayJobConfig(
head_node_config=HeadNodeConfig(ray_start_params={"log-color": "True"}),
worker_node_config=[WorkerNodeConfig(group_name="ray-group", replicas=2)],
runtime_env={"pip": ["numpy", "pandas"]},
)
@task(task_config=ray_config, limits=Resources(mem="2000Mi", cpu="1"))
def ray_task(n: int) -> int:
model_params = {
"x": tune.randint(-10, 10)
}
tuner = tune.Tuner(
objective,
tune_config=tune.TuneConfig(
num_samples=10,
max_concurrent_trials=n,
),
param_space=model_params,
)
results = tuner.fit()
return results
@workflow
def ray_workflow(n: int) -> int:
return ray_task(n=n)
is there any other ways to run hyperparameter tuning in a distributed manner like ray tune?Ketan (kumare3)
Padma Priya M
11/10/2022, 6:26 AMKevin Su
11/10/2022, 5:46 PMresult
isn’t int. it’s ResultGrid
.Padma Priya M
11/11/2022, 5:14 AMResultGrid
.
@workflow
def ray_workflow(n: int) -> ResultGrid:
return ray_task(n=n)
Is this the way?Kevin Su
11/11/2022, 7:43 AMPadma Priya M
11/11/2022, 7:58 AMAttributeError: 'NoneType' object has no attribute 'encode'
ray.tune.error.TuneError: The Ray Tune run failed. Please inspect the previous error messages for a cause. After fixing the issue, you can restart the run from scratch or continue this run.
import ray
from ray import tune, air
from ray.air import Result
from ray.tune import ResultGrid
from flytekit import Resources, task, workflow
from flytekitplugins.ray import HeadNodeConfig, RayJobConfig, WorkerNodeConfig
@ray.remote
def objective(config):
return (config["x"]+2)
ray_config = RayJobConfig(
head_node_config=HeadNodeConfig(ray_start_params={"log-color": "True"}),
worker_node_config=[WorkerNodeConfig(group_name="ray-group", replicas=2)],
runtime_env={"pip": ["numpy", "pandas"]},
)
@task(task_config=ray_config, limits=Resources(mem="2000Mi", cpu="1"))
def ray_task() -> int:
model_params = {
"x": tune.randint(-10, 10)
}
tuner = tune.Tuner(
objective,
tune_config=tune.TuneConfig(
num_samples=10,
max_concurrent_trials=2,
),
param_space=model_params,
)
result_grid = tuner.fit()
return len(result_grid)
@workflow
def ray_workflow() -> int:
return ray_task()
Ketan (kumare3)
Kevin Su
11/11/2022, 6:53 PMobjective
) in tune.Tuner
should be a regular function instead of a ray remote function.