import json
from os.path import abspath, join
from shutil import rmtree

from nose.tools import eq_

from rsmtool.configuration_parser import Configuration
from rsmtool.test_utils import check_run_comparison, rsmtool_test_dir


def test_set_context(self):
    context = 'rsmtool'
    new_context = 'rsmcompare'
    config = Configuration({"experiment_id": '001',
                            "train_file": "/foo/train.csv",
                            "test_file": "/foo/test.csv",
                            "trim_min": 1,
                            "trim_max": 6,
                            "flag_column": "[advisories]",
                            "model": 'LinearRegression'},
                           context=context)
    config.context = new_context
    eq_(config.context, new_context)
def test_get_context(self):
    context = 'rsmtool'
    config = Configuration({"experiment_id": '001',
                            "train_file": "/foo/train.csv",
                            "test_file": "/foo/test.csv",
                            "trim_min": 1,
                            "trim_max": 6,
                            "flag_column": "[advisories]",
                            "model": 'LinearRegression'},
                           context=context)
    eq_(config.context, context)
def test_check_exclude_listwise_true(self):
    dictionary = {"experiment_id": '001',
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv",
                  "min_items_per_candidate": 4,
                  "candidate_column": "candidate",
                  "model": 'LinearRegression'}
    config = Configuration(dictionary)
    exclude_list_wise = config.check_exclude_listwise()
    eq_(exclude_list_wise, True)
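# A minimal complementary sketch (an assumption, not one of the original
# tests): when "min_items_per_candidate" is absent, listwise exclusion is
# expected to be disabled and check_exclude_listwise() should return False.
def test_check_exclude_listwise_false(self):
    dictionary = {"experiment_id": '002',
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv",
                  "model": 'LinearRegression'}
    config = Configuration(dictionary)
    eq_(config.check_exclude_listwise(), False)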
def test_get_trim_tolerance_no_min_max(self):
    dictionary = {"experiment_id": '001',
                  "trim_tolerance": 0.49,
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv",
                  "model": 'LinearRegression'}
    config = Configuration(dictionary)
    trim_min_max_tolerance = config.get_trim_min_max_tolerance()
    eq_(trim_min_max_tolerance, (None, None, 0.49))
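# A hedged companion sketch (not from the original file): when "trim_min"
# and "trim_max" are present, get_trim_min_max_tolerance() is expected to
# return them coerced to floats alongside the tolerance.
def test_get_trim_min_max_tolerance(self):
    dictionary = {"experiment_id": '001',
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv",
                  "trim_min": 1,
                  "trim_max": 6,
                  "trim_tolerance": 0.49,
                  "model": 'LinearRegression'}
    config = Configuration(dictionary)
    eq_(config.get_trim_min_max_tolerance(), (1.0, 6.0, 0.49))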
def test_save_rsmcompare(self):
    dictionary = {"comparison_id": '001',
                  "experiment_id_old": 'foo',
                  "experiment_dir_old": 'foo',
                  "experiment_id_new": 'bar',
                  "experiment_dir_new": 'bar',
                  "description_old": "foo",
                  "description_new": "bar"}
    config = Configuration(dictionary,
                           context='rsmcompare')
    # save() writes "<comparison_id>_<context>.json" into an "output"
    # directory under the current working directory
    config.save()
    out_path = 'output/001_rsmcompare.json'
    with open(out_path) as buff:
        config_new = json.loads(buff.read())
    rmtree('output')
    for key in dictionary:
        eq_(config_new[key], dictionary[key])
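# A hedged variant (an assumption, not one of the original tests): because
# save() writes into ./output relative to the current working directory, as
# the test above demonstrates, running it from a temporary directory avoids
# clobbering any real ./output. Only the standard library is used; the
# helper name is hypothetical.
def save_and_reload_in_tempdir(config, filename):
    import os
    import tempfile
    prev_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as tmpdir:
        os.chdir(tmpdir)
        try:
            # writes output/<filename> under the temporary directory
            config.save()
            with open(os.path.join('output', filename)) as buff:
                return json.loads(buff.read())
        finally:
            os.chdir(prev_cwd)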
def test_pop_value_default(self):
    dictionary = {'experiment_id': '001',
                  'train_file': 'path/to/train.tsv',
                  'test_file': 'path/to/test.tsv',
                  "model": 'LinearRegression'}
    config = Configuration(dictionary)
    value = config.pop("foo", "bar")
    eq_(value, 'bar')
def test_set_configdir(self):
    configdir = '/path/to/dir/'
    new_configdir = 'path/that/is/new/'
    config = Configuration({"experiment_id": '001',
                            "train_file": "/foo/train.csv",
                            "test_file": "/foo/test.csv",
                            "trim_min": 1,
                            "trim_max": 6,
                            "flag_column": "[advisories]",
                            "model": 'LinearRegression'},
                           configdir=configdir)
    config.configdir = new_configdir
    eq_(config.configdir, abspath(new_configdir))
# excerpted from a comparison test; `source` and `experiment_id` are
# defined earlier in the original test
configdir = join(rsmtool_test_dir,
                 'data',
                 'experiments',
                 source)
config_dict = {"comparison_id": "lr_self_compare_object",
               "experiment_dir_old": "lr-subgroups",
               "experiment_id_old": "lr_subgroups",
               "description_old": "Using all features with a LinearRegression model.",
               "use_scaled_predictions_old": True,
               "experiment_dir_new": "lr-subgroups",
               "experiment_id_new": "lr_subgroups",
               "description_new": "Using all features with a LinearRegression model.",
               "use_scaled_predictions_new": True,
               "subgroups": ["QUESTION"]}
config_obj = Configuration(config_dict,
                           context='rsmcompare',
                           configdir=configdir)
check_run_comparison(source,
                     experiment_id,
                     config_obj_or_dict=config_obj)
def test_get_rater_error_variance_none(self):
    dictionary = {"experiment_id": 'abs',
                  "model": 'LinearRegression',
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv"}
    config = Configuration(dictionary)
    rater_error_variance = config.get_rater_error_variance()
    eq_(rater_error_variance, None)
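# A hedged companion sketch (an assumption based on the accessor above, not
# one of the original tests): when "rater_error_variance" is supplied in
# the configuration, get_rater_error_variance() is expected to return it.
def test_get_rater_error_variance(self):
    dictionary = {"experiment_id": 'abs',
                  "model": 'LinearRegression',
                  "train_file": "/foo/train.csv",
                  "test_file": "/foo/test.csv",
                  "rater_error_variance": 1.5}
    config = Configuration(dictionary)
    eq_(config.get_rater_error_variance(), 1.5)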
def parse(self, context='rsmtool'):
    """
    Parse the configuration file for which this parser was instantiated.

    Parameters
    ----------
    context : str, optional
        Context of the tool in which we are validating.
        Defaults to 'rsmtool'.

    Returns
    -------
    configuration : Configuration
        A Configuration object containing the parameters in the
        file that we instantiated the parser for.
    """
    filepath = self._configdir / self._filename
    configdict = self._parse_json_file(filepath)

    # create a new Configuration object which will automatically
    # process and validate the configuration dictionary being
    # passed in
    return Configuration(configdict,
                         configdir=self._configdir,
                         context=context)
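# A minimal usage sketch (hedged; based on the method above, not part of
# the original file): construct a parser for a JSON experiment
# configuration and parse it into a processed, validated Configuration.
# The path "example_rsmtool_config.json" is hypothetical, and the
# path-taking constructor is an assumption about the public API.
from rsmtool.configuration_parser import ConfigurationParser

parser = ConfigurationParser('example_rsmtool_config.json')
configuration = parser.parse(context='rsmtool')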