# NOTE: vendor banner, not code — "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
def run(param_dict):
    """Train/evaluate a model from a hyperparameter dict and return its MSE.

    NOTE(review): this fragment references several names not defined in the
    visible source (`defaults`, `keras_cmdline`, `os`, `pd`, `model_path`,
    `model_mda_path`, `timer`, `Model`, `a`, `b`, `util`, `mse`) —
    presumably imported/defined elsewhere in the original file; verify.

    :param param_dict: dict of hyperparameters; missing keys are filled
        from ``defaults()``.
    :returns: ``mse`` — TODO confirm where this is computed (not visible here).
    """
    # Fill in any hyperparameter the caller did not supply.
    default_params = defaults()
    for key in default_params:
        if key not in param_dict:
            param_dict[key] = default_params[key]
    optimizer = keras_cmdline.return_optimizer(param_dict)
    print(param_dict)
    BATCH_SIZE = param_dict['batch_size']
    HIDDEN_SIZE = param_dict['hidden_size']
    NUNITS = param_dict['nunits']
    DROPOUT = param_dict['dropout']
    # Data files are resolved relative to this script's own directory.
    fpath = os.path.dirname(os.path.abspath(__file__))
    tag = 'ml-climate-hm-01'
    inp_df = pd.read_csv(fpath+'/data/1980-2005_2d_002_new.txt', sep=" ", header=None, engine='python')
    # NOTE(review): sep=r"\s*" matches a zero-width separator; the usual
    # intent for whitespace-delimited files is r"\s+" — confirm against data.
    out_df1 = pd.read_csv(fpath+'/data/1980-2005_3d_vy_002.txt', sep=r"\s*", header=None, engine='python')
    out_df2 = pd.read_csv(fpath+'/data/1980-2005_3d_ux_002.txt', sep=r"\s*", header=None, engine='python')
    out_df3 = pd.read_csv(fpath+'/data/1980-2005_3d_wz_002.txt', sep=r"\s*", header=None, engine='python')
    out_df4 = pd.read_csv(fpath+'/data/1980-2005_3d_tk_002.txt', sep=r"\s*", header=None, engine='python')
    out_df5 = pd.read_csv(fpath+'/data/1980-2005_3d_qv_002.txt', sep=r"\s*", header=None, engine='python')
    # Optionally persist the trained model plus its metadata.
    # NOTE(review): `model_path`, `a`, `b` are not defined in this fragment;
    # original indentation was lost — this nesting is a reconstruction.
    if model_path:
        timer.start('model save')
        model = Model(a, b)
        model.save(model_path)
        util.save_meta_data(param_dict, model_mda_path)
        timer.end()
        print(f"saved model to {model_path} and MDA to {model_mda_path}")
    return mse
def augment_parser(parser):
    """Extend *parser* with model-specific options and return it.

    Adds:
        ``--penalty`` (float, default ``0.0``): penalty weight.
    """
    parser.add_argument(
        '--penalty',
        type=float,
        default=0.0,
    )
    return parser
if __name__ == "__main__":
    # Build the base CLI, extend it with model-specific flags, then run.
    cli = augment_parser(keras_cmdline.create_parser())
    args = cli.parse_args()
    run(vars(args))
# NOTE(review): orphaned fragment — these add_argument calls and the
# `return parser` below clearly belong inside an `augment_parser(parser)`
# definition whose `def` line is missing from this paste; as written,
# the top-level `return` is a SyntaxError. Restore the enclosing def.
parser.add_argument('--rnn_type', action='store',
dest='rnn_type',
nargs='?', const=1, type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SimpleRNN'],
help='type of RNN')
# argparse applies `type` to string defaults, so default='128' parses to
# the int 128 — legal, though a plain int default would be clearer.
parser.add_argument('--nhidden', action='store', dest='nhidden',
nargs='?', const=2, type=int, default='128',)
parser.add_argument('--nlayers', action='store', dest='nlayers',
nargs='?', const=2, type=int, default='1',)
return parser
if __name__ == "__main__":
    # Assemble the full CLI (base options plus model-specific ones),
    # echo the parsed hyperparameters, and launch the run.
    base_parser = keras_cmdline.create_parser()
    full_parser = augment_parser(base_parser)
    params = vars(full_parser.parse_args())
    print(params)
    run(params)
# NOTE(review): orphaned fragment from a CNN-flavored `augment_parser` —
# the first line is the tail of a truncated add_argument call and the
# enclosing `def` is missing, so this span is a SyntaxError as written.
help='Filter 2 units')
parser.add_argument('--p_size', action='store', dest='p_size',
nargs='?', const=2, type=int, default='2',
help='pool size')
parser.add_argument('--nunits', action='store', dest='nunits',
nargs='?', const=2, type=int, default='128',
help='number of units in FC layer')
parser.add_argument('--dropout2', type=float, default=0.5,
help='dropout after FC layer')
return parser
if __name__ == "__main__":
    # Parse the augmented command line and hand the options dict to run().
    run(vars(augment_parser(keras_cmdline.create_parser()).parse_args()))
def run(param_dict):
    """Train an RNN text model from a hyperparameter dict.

    NOTE(review): second definition of ``run`` in this file (it would
    shadow the one above at import time), and the body is truncated at
    the end of this chunk — the model build/fit is not visible here.
    External names (`timer`, `keras_cmdline`, `pprint`, `generate_data`,
    `layers`) are presumably imported elsewhere; verify.
    """
    timer.start('preprocessing')
    # Backfill missing keys using the parser's declared defaults.
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    pprint(param_dict)
    optimizer = keras_cmdline.return_optimizer(param_dict)
    x_train, y_train, x_val, y_val, chars = generate_data()
    # Map the CLI string to a Keras recurrent layer class; any value other
    # than 'GRU'/'SimpleRNN' (including the default 'LSTM') selects LSTM.
    if param_dict['rnn_type'] == 'GRU':
        RNN = layers.GRU
    elif param_dict['rnn_type'] == 'SimpleRNN':
        RNN = layers.SimpleRNN
    else:
        RNN = layers.LSTM
    HIDDEN_SIZE = param_dict['nhidden']
    BATCH_SIZE = param_dict['batch_size']
    NLAYERS = param_dict['nlayers']
    DROPOUT = param_dict['dropout']