def get_cluster_fields(cluster, csv_properties, args):
    """Retrieves fields info from cluster resource
    """
    if not csv_properties:
        csv_properties = {}
    csv_properties.update(verbose=True)
    if args.user_locale is None:
        args.user_locale = cluster['object'].get('locale', None)
    csv_properties.update(data_locale=args.user_locale)
    csv_properties.update(missing_tokens=DEFAULT_MISSING_TOKENS)
    return Fields(cluster['object']['clusters']['fields'], **csv_properties)
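
# Hedged usage sketch (not part of the original module): it assumes the bigml
# Python bindings are installed, that Fields and DEFAULT_MISSING_TOKENS are
# imported from bigml.fields as in the rest of this file, that credentials come
# from the BIGML_USERNAME/BIGML_API_KEY environment variables, and that the
# cluster id below is a placeholder. The namespace only needs the attribute
# get_cluster_fields actually reads (user_locale).
from argparse import Namespace

from bigml.api import BigML

api = BigML()
cluster = api.get_cluster("cluster/000000000000000000000000")  # placeholder id
cluster_fields = get_cluster_fields(cluster, {}, Namespace(user_locale=None))
print(len(cluster_fields.fields))  # number of fields used by the cluster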
def get_linear_fields(linear_regression, csv_properties, args):
    """Retrieves fields info from linear regression resource
    """
    if not csv_properties:
        csv_properties = {}
    csv_properties.update(verbose=True)
    if args.user_locale is None:
        args.user_locale = linear_regression['object'].get('locale', None)
    csv_properties.update(data_locale=args.user_locale)
    csv_properties.update(missing_tokens=DEFAULT_MISSING_TOKENS)
    return Fields(linear_regression['object']['linear_regression']['fields'],
                  **csv_properties)
            number_of_fusions, debug=args.debug)
        if not resume:
            message = u.dated("Found %s fusions out of %s."
                              " Resuming.\n"
                              % (len(fusion_ids),
                                 number_of_fusions))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            fusion = fusion_ids[0]
            first_model_id = api.get_fusion(fusion)[
                "object"]["fusion"]["models"][0]["id"]
            first_model_kind = api.get_fusion(fusion)[
                "object"]["fusion"]["models"][0]["kind"]
            first_model = api.getters[first_model_kind](first_model_id)
            fields = Fields(first_model)
        number_of_fusions -= len(fusion_ids)
        fusion_args = r.set_fusion_args(args, fields)
        fusion = r.create_fusion(
            args.fusion_models_, fusion, fusion_args,
            args, api, path, session_file, log)
    # If a fusion is provided, we use it.
    elif args.fusion:
        fusion_ids = [args.fusion]
        fusion = fusion_ids[0]
    elif args.fusion or args.fusion_tag:
        fusion = fusion_ids[0]
def get_fields_structure(resource, csv_properties):
    """Builds a Fields object from the fields information in the resource
    """
    if not csv_properties and 'locale' in resource['object']:
        csv_properties = {
            'data_locale': resource['object']['locale']}
    fields = Fields(resource['object']['fields'], **csv_properties)
    return fields
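
# Hedged usage sketch for get_fields_structure (placeholder dataset id; same
# environment assumptions as the sketch above). A dataset resource carries both
# 'locale' and 'fields' under its 'object' key, so passing an empty
# csv_properties dict lets the helper pick the locale up from the resource.
from bigml.api import BigML

api = BigML()
dataset = api.get_dataset("dataset/000000000000000000000000")  # placeholder id
dataset_fields = get_fields_structure(dataset, {})
print(sorted(dataset_fields.fields.keys())[:3])  # e.g. ['000000', '000001', ...]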
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]
    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if args.new_fields or (args.sample_rate != 1 and args.no_model) or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
    resource_type = get_resource_type(resource)
    # for sources, extract all the updatable attributes
    if resource_type == 'source':
        updatable_attrs = SOURCE_UPDATABLE
        for field_id in resource_fields.keys():
            field_opts = {}
            field = resource_fields[field_id]
            for attribute in updatable_attrs:
                if field.get(attribute):
                    field_opts.update({attribute: field[attribute]})
            if field_opts != {}:
                fields_attributes.update({field_id: field_opts})
        return fields_attributes
    # for the rest of resources, check which attributes changed
    if referrer:
        referrer_fields = Fields(
            {'resource': referrer['resource'], 'object': referrer}).fields
        for field_id in resource_fields.keys():
            field_opts = {}
            if field_id not in referrer_fields.keys():
                continue
            field = resource_fields[field_id]
            for attribute in updatable_attrs:
                ref_values = ["", referrer_fields[field_id].get(attribute, "")]
                if field.get(attribute, "") not in ref_values:
                    field_opts.update({attribute: field[attribute]})
            if field_opts != {}:
                fields_attributes.update({field_id: field_opts})
    return fields_attributes
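
# Stand-alone sketch of the attribute-diffing pattern above, using plain dicts
# so it can run without an API connection. The attribute list and field maps
# below are illustrative, not the module's real constants.
def changed_field_attributes(resource_fields, referrer_fields,
                             updatable_attrs=("name", "label", "description")):
    """Per field, returns the updatable attributes whose values differ from
    the referrer resource (empty values are ignored).
    """
    fields_attributes = {}
    for field_id, field in resource_fields.items():
        if field_id not in referrer_fields:
            continue
        field_opts = {}
        for attribute in updatable_attrs:
            ref_values = ["", referrer_fields[field_id].get(attribute, "")]
            if field.get(attribute, "") not in ref_values:
                field_opts[attribute] = field[attribute]
        if field_opts:
            fields_attributes[field_id] = field_opts
    return fields_attributes

# Only the renamed field shows up in the result:
print(changed_field_attributes(
    {"000000": {"name": "sepal length (cm)"}, "000001": {"name": "species"}},
    {"000000": {"name": "sepal length"}, "000001": {"name": "species"}}))
# -> {'000000': {'name': 'sepal length (cm)'}}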
def get_fusion_fields(fusion, csv_properties, args):
    """Retrieves fields info from Fusion resource
    """
    args.retrieve_api_.ok(fusion)
    if not csv_properties:
        csv_properties = {}
    csv_properties.update(verbose=True)
    csv_properties.update(missing_tokens=DEFAULT_MISSING_TOKENS)
    return Fields(fusion['object']['fusion']['fields'],
                  **csv_properties)
def get_anomaly_fields(anomaly, csv_properties, args):
    """Retrieves fields info from anomaly resource
    """
    if not csv_properties:
        csv_properties = {}
    csv_properties.update(verbose=True)
    if args.user_locale is None:
        args.user_locale = anomaly['object'].get('locale', None)
    csv_properties.update(data_locale=args.user_locale)
    csv_properties.update(missing_tokens=DEFAULT_MISSING_TOKENS)
    return Fields(anomaly['object']['model']['fields'], **csv_properties)
def get_pca_fields(pca, csv_properties, args):
    """Retrieves fields info from PCA resource
    """
    args.retrieve_api_.ok(pca)
    if not csv_properties:
        csv_properties = {}
    csv_properties.update(verbose=True)
    if args.user_locale is None:
        args.user_locale = pca['object'].get('locale', None)
    csv_properties.update(data_locale=args.user_locale)
    csv_properties.update(missing_tokens=DEFAULT_MISSING_TOKENS)
    if args.exclude_objective:
        csv_properties.update({"objective_field_present": False})
        csv_properties.update({"objective_field": None})
    return Fields(pca['object']['pca']['fields'],
                  **csv_properties)
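
# Hedged usage sketch for get_pca_fields (placeholder PCA id; same environment
# assumptions as the sketches above). The namespace mirrors the bigmler
# command-line attributes this helper reads: retrieve_api_, user_locale and
# exclude_objective.
from argparse import Namespace

from bigml.api import BigML

api = BigML()
pca = api.get_pca("pca/000000000000000000000000")  # placeholder id
pca_args = Namespace(retrieve_api_=api, user_locale=None, exclude_objective=True)
pca_fields = get_pca_fields(pca, {}, pca_args)
print(len(pca_fields.fields))  # objective field is left out of csv_properties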