How to use the sherpa.algorithms.RandomSearch class in sherpa

To help you get started, we've selected a few sherpa examples based on popular ways sherpa.algorithms.RandomSearch is used in public projects.

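Before the project examples, here is a minimal, self-contained sketch of a RandomSearch study. The parameter names, ranges, and the synthetic objective are invented for illustration and are not taken from the projects below.

import sherpa

# Illustrative search space; names and ranges are made up for this sketch.
parameters = [sherpa.Continuous(name='lr', range=[0.0001, 0.1], scale='log'),
              sherpa.Discrete(name='num_units', range=[32, 128])]

# RandomSearch samples each parameter independently for every trial.
algorithm = sherpa.algorithms.RandomSearch(max_num_trials=20)

study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     lower_is_better=True,
                     disable_dashboard=True)

for trial in study:
    # Stand-in objective; replace with a real training/evaluation run.
    objective = trial.parameters['lr'] * trial.parameters['num_units']
    study.add_observation(trial, iteration=1, objective=objective)
    study.finalize(trial)

print(study.get_best_result())

Because lower_is_better=True, get_best_result returns the trial with the smallest observed objective.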

From sherpa-ai/sherpa, tests/test_sequential_testing.py (excerpt):
def test_wait():
    parameters = [sherpa.Continuous('myparam', [0, 1])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(10*3 - 1):
        trial = study.get_suggestion()
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam'] + numpy.random.normal(

From sherpa-ai/sherpa, tests/test_algorithms.py (excerpt):
def test_repeat_rs():
    parameters = [sherpa.Continuous('a', [0, 1]),
                  sherpa.Choice('b', ['x', 'y', 'z'])]
    rs = sherpa.algorithms.RandomSearch(max_num_trials=10)
    rs = sherpa.algorithms.Repeat(algorithm=rs, num_times=10)
    config_repeat = {}

    for i in range(10):
        config = rs.get_suggestion(parameters=parameters)
        assert config != config_repeat
        for j in range(9):
            config_repeat = rs.get_suggestion(parameters=parameters)
            assert config == config_repeat

    assert rs.get_suggestion(parameters=parameters) == sherpa.AlgorithmState.DONE
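
The final assertion shows the budget mechanics: a RandomSearch capped at max_num_trials=10 and wrapped in Repeat with num_times=10 yields each configuration ten consecutive times, and once all 100 suggestions are exhausted, get_suggestion returns sherpa.AlgorithmState.DONE.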

From sherpa-ai/sherpa, tests/test_sequential_testing.py (excerpt):
def test_parallel():
    tempdir = tempfile.mkdtemp(dir=".")

    parameters = [sherpa.Continuous('myparam', [0, 1])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=3,
                           n=(3, 6, 9),
                           P=0.5,
                           verbose=True)

    scheduler = sherpa.schedulers.LocalScheduler()

    filename = os.path.join(tempdir, "test.py")
    with open(filename, 'w') as f:
        f.write(testscript)

    try:
        results = sherpa.optimize(parameters=parameters,
                                  algorithm=gs,
                                  lower_is_better=True,

From sherpa-ai/sherpa, tests/long_tests.py (excerpt):
def test_wrong_db_host_or_port(test_dir):
    print("MONGODB: ", shutil.which('mongod'))
    tempdir = test_dir

    parameters = [sherpa.Choice(name="param_a",
                                range=[1, 2, 3]),
                  sherpa.Continuous(name="param_b",
                                    range=[0, 1])]

    algorithm = sherpa.algorithms.RandomSearch(max_num_trials=3)

    db_port = 27000
    scheduler = sherpa.schedulers.LocalScheduler()

    filename = os.path.join(tempdir, "test.py")
    with open(filename, 'w') as f:
        f.write(testscript)

    with pytest.warns(RuntimeWarning):
        results = sherpa.optimize(filename=filename,
                                  lower_is_better=True,
                                  algorithm=algorithm,
                                  parameters=parameters,
                                  output_dir=tempdir,
                                  scheduler=scheduler,
                                  max_concurrent=1,

From sherpa-ai/sherpa, tests/test_sequential_testing.py (excerpt):
def test_is_stage_done():
    results_df = pandas.DataFrame(collections.OrderedDict(
        [('Trial-ID', [1, 2, 3]),
         ('Status', [sherpa.TrialStatus.COMPLETED] * 3),
         ('stage', [2, 2, 2]),
         ('a', [1, 1, 1]),
         ('b', [2, 2, 2]),
         ('Objective', [0.1, 0.2, 0.3])]
    ))
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=4,
                           n=(3, 6, 9),
                           P=0.5)
    assert gs._is_stage_done(results_df, stage=2,
                             num_trials_for_stage=3)
    assert not gs._is_stage_done(results_df, stage=2,
                                 num_trials_for_stage=4)

From sherpa-ai/sherpa, tests/test_sequential_testing.py (excerpt):
def test_get_suggestion():
    for _ in range(10):
        parameters = [sherpa.Continuous('myparam', [0, 1]),
                      sherpa.Discrete('myparam2', [0, 10])]
        rs = sherpa.algorithms.RandomSearch()
        gs = SequentialTesting(algorithm=rs,
                               K=10,
                               n=(3, 6, 9),
                               P=0.5)
        study = sherpa.Study(algorithm=gs,
                             parameters=parameters,
                             lower_is_better=True,
                             disable_dashboard=True)
        seen_configs = []
        last_config = {}
        config_count = 3
        for trial in study:
            print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
                  "{}/{}".format(gs.t, gs.T))
            if trial.parameters == last_config:
                config_count += 1

From sherpa-ai/sherpa, examples/simple.py (excerpt):
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with SHERPA. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import sherpa
import time

parameters = [sherpa.Choice(name="param_a",
                            range=[1, 2, 3]),
              sherpa.Continuous(name="param_b",
                                range=[0, 1])]

algorithm = sherpa.algorithms.RandomSearch(max_num_trials=40)
# algorithm = sherpa.algorithms.LocalSearch(num_random_seeds=20)
# algorithm = sherpa.algorithms.BayesianOptimization(num_grid_points=2, max_num_trials=50)
# stopping_rule = sherpa.algorithms.MedianStoppingRule(min_iterations=2,
#                                           min_trials=5)
stopping_rule = None
study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     stopping_rule=stopping_rule,
                     lower_is_better=True,
                     dashboard_port=8999)

num_iterations = 10

# get trials from study by iterating or study.get_suggestion()
for trial in study:
    print("Trial {}:\t{}".format(trial.id, trial.parameters))

From sherpa-ai/sherpa, sherpa/algorithms/bayesian_optimization.py (excerpt):
"num_initial_data_points needs to be 'infer' or int."
        self.num_initial_data_points = num_initial_data_points
        self._num_initial_data_points = -1
        self.initial_data_points = initial_data_points
        self.acquisition_type = acquisition_type

        assert model_type != 'GP_MCMC' and acquisition_type != 'EI_MCMC'\
            if max_concurrent > 1 else True,\
            "GPyOpt has a bug for _MCMC with batch size > 1."
        self.max_concurrent = max_concurrent
        self.verbosity = verbosity

        self.next_trials = collections.deque()
        self.num_points_seen_by_model = 0

        self.random_search = sherpa.algorithms.RandomSearch()

        self.domain = []

        self.max_num_trials = max_num_trials
        self.count = 0
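
Note how this constructor keeps its own sherpa.algorithms.RandomSearch instance: the Bayesian optimization algorithm uses it to propose configurations until num_initial_data_points observations are available to fit a model.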

From sherpa-ai/sherpa, examples/parallel-examples/simple.py (excerpt):
parser.add_argument('--env', help='Your environment path.',
                    default='/home/lhertel/profiles/python3env.profile', type=str)
FLAGS = parser.parse_args()
# figuring out host and queue
host = socket.gethostname()
sge_q = ('arcus.q' if host.startswith(('arcus-1', 'arcus-2', 'arcus-3', 'arcus-4'))
         else 'arcus-ubuntu.q')

tempdir = tempfile.mkdtemp(dir=".")

parameters = [sherpa.Choice(name="param_a",
                            range=[1, 2, 3]),
              sherpa.Continuous(name="param_b",
                                range=[0, 1])]


algorithm = sherpa.algorithms.RandomSearch(max_num_trials=10)
# stopping_rule = sherpa.algorithms.MedianStoppingRule(min_iterations=2,
#                                           min_trials=3)
# algorithm = bayesian_optimization.GPyOpt(max_concurrent=4,
#                                          model_type='GP',
#                                          acquisition_type='EI',
#                                          max_num_trials=100)

# scheduler = sherpa.schedulers.SGEScheduler(submit_options="-N example -P arcus.p -q {} -l hostname='{}'".format(sge_q, host), environment=FLAGS.env, output_dir=tempdir)

scheduler = sherpa.schedulers.LocalScheduler()

### The *training script*
testscript = """import sherpa
import time

client = sherpa.Client()
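
The embedded testscript is cut off just after the Client is constructed. As a sketch of what such a trial script usually contains (the objective formula here is invented), the client fetches its trial and streams metrics back to the study:

import sherpa
import time

client = sherpa.Client()
trial = client.get_trial()

for i in range(5):
    # Invented objective based on the suggested parameters.
    pseudo_objective = trial.parameters['param_a'] / float(i + 1) * trial.parameters['param_b']
    client.send_metrics(trial=trial, iteration=i + 1, objective=pseudo_objective)
    time.sleep(1)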

From sherpa-ai/sherpa, examples/parallel-examples/mnistmlp/runner.py (excerpt):
sherpa.Continuous('dropout', [0., 0.5])]
    
    if FLAGS.algorithm == 'BayesianOptimization':  
        print('Running Bayesian Optimization')
        alg = sherpa.algorithms.BayesianOptimization(num_grid_points=2,
                                                     max_num_trials=150)
    elif FLAGS.algorithm == 'LocalSearch':
        print('Running Local Search')
        alg = sherpa.algorithms.LocalSearch(seed_configuration={'lrinit': 0.038,
                                                                'momentum': 0.92,
                                                                'lrdecay': 0.0001,
                                                                'dropout': 0.},
                                            perturbation_factors=(0.9, 1.1))
    else:
        print('Running Random Search')
        alg = sherpa.algorithms.RandomSearch(max_num_trials=150)

    if FLAGS.sge:
        assert FLAGS.env, "For SGE use, you need to set an environment path."
        # Submit to SGE queue.
        env = FLAGS.env  # Script specifying environment variables.
        opt = '-N MNISTExample -P {} -q {} -l {}'.format(FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=env, submit_options=opt)
    else:
        # Run on local machine.
        sched = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           lower_is_better=True,
                           filename='trial.py',
                           output_dir='output_{}'.format(FLAGS.studyname),
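
The sherpa.optimize call is cut off here; as in the simpler parallel example above, it dispatches trial.py to the chosen scheduler for each suggested configuration and returns once the search completes.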