# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_evaluate_performance_default_protocol_without_filter():
    """Evaluate a briefly-trained TransE on a WN18 test slice, corrupting only
    the object side and without filter triples (raw ranks)."""
    wn18 = load_wn18()
    model = TransE(batches_count=10, seed=0, epochs=1,
                   k=50, eta=10, verbose=True,
                   embedding_model_params={'normalize_ent_emb': False, 'norm': 1},
                   loss='self_adversarial', loss_params={'margin': 1, 'alpha': 0.5},
                   optimizer='adam',
                   optimizer_params={'lr': 0.0005})
    model.fit(wn18['train'])
    from ampligraph.evaluation import evaluate_performance
    ranks_sep = []
    # Every 100th test triple keeps the test fast; object-side corruption only.
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True, corrupt_side='o',
                                 use_default_protocol=False)
    ranks_sep.extend(ranks)
from ampligraph.evaluation import evaluate_performance
def test_fit_predict_wn18_TransE():
    """Fit TransE on the WN18 training split and score the first test triple."""
    dataset = load_wn18()
    transe = TransE(batches_count=1, seed=555, epochs=5, k=100, loss='pairwise',
                    loss_params={'margin': 5},
                    verbose=True, optimizer='adagrad',
                    optimizer_params={'lr': 0.1})
    transe.fit(dataset['train'])
    scores = transe.predict(dataset['test'][:1])
    print(scores)
def test_evaluate_performance_TransE():
    """Train TransE on WN18 (train + valid) and report filtered MRR and
    Hits@10 on the first 200 test triples."""
    X = load_wn18()
    model = TransE(batches_count=10, seed=0, epochs=100, k=100, eta=5, optimizer_params={'lr': 0.1},
                   loss='pairwise', loss_params={'margin': 5}, optimizer='adagrad')
    model.fit(np.concatenate((X['train'], X['valid'])))
    # Filter against all known triples so true facts are not counted as corruptions.
    filter_triples = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][:200], model=model, filter_triples=filter_triples, verbose=True)
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)
def test_fit_predict_TransE_early_stopping_with_filter():
    """Fit TransE with filtered MRR-based early stopping, then predict one triple."""
    data = load_wn18()
    known_triples = np.concatenate((data['train'], data['valid'], data['test']))
    model = TransE(batches_count=1, seed=555, epochs=7, k=50, loss='pairwise', loss_params={'margin': 5},
                   verbose=True, optimizer='adagrad', optimizer_params={'lr': 0.1})
    stopping_params = {
        'x_valid': data['valid'][::100],
        'criteria': 'mrr',
        'x_filter': known_triples,
        'stop_interval': 2,
        'burn_in': 1,
        'check_interval': 2,
    }
    model.fit(data['train'], True, stopping_params)
    scores = model.predict(data['test'][:1])
    print(scores)
def test_evaluate_performance_default_protocol_with_filter():
    """Evaluate a briefly-trained TransE on a WN18 test slice with filter
    triples, corrupting only the object side."""
    wn18 = load_wn18()
    # Filter against all known triples so true facts are not counted as corruptions.
    X_filter = np.concatenate((wn18['train'], wn18['valid'], wn18['test']))
    model = TransE(batches_count=10, seed=0, epochs=1,
                   k=50, eta=10, verbose=True,
                   embedding_model_params={'normalize_ent_emb': False, 'norm': 1},
                   loss='self_adversarial', loss_params={'margin': 1, 'alpha': 0.5},
                   optimizer='adam',
                   optimizer_params={'lr': 0.0005})
    model.fit(wn18['train'])
    from ampligraph.evaluation import evaluate_performance
    ranks_sep = []
    # Every 100th test triple keeps the test fast; object-side corruption only.
    ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True, corrupt_side='o',
                                 use_default_protocol=False)
    ranks_sep.extend(ranks)
from ampligraph.evaluation import evaluate_performance
def test_fit_predict_transE():
    """Fit TransE on a toy graph; a seen triple should outscore an unseen one."""
    triples = np.array([
        ['a', 'y', 'b'],
        ['b', 'y', 'a'],
        ['a', 'y', 'c'],
        ['c', 'y', 'a'],
        ['a', 'y', 'd'],
        ['c', 'y', 'd'],
        ['b', 'y', 'c'],
        ['f', 'y', 'e'],
    ])
    model = TransE(batches_count=1, seed=555, epochs=20, k=10, loss='pairwise', loss_params={'margin': 5},
                   optimizer='adagrad', optimizer_params={'lr': 0.1})
    model.fit(triples)
    predictions = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]))
    print(predictions)
    # ('f', 'y', 'e') appears in training data; ('b', 'y', 'd') does not.
    assert predictions[0] > predictions[1]
def test_fit_predict_TransE_early_stopping_without_filter():
    """Fit TransE with unfiltered MRR-based early stopping, then predict one triple."""
    data = load_wn18()
    model = TransE(batches_count=1, seed=555, epochs=7, k=50, loss='pairwise', loss_params={'margin': 5},
                   verbose=True, optimizer='adagrad', optimizer_params={'lr': 0.1})
    stopping_params = {
        'x_valid': data['valid'][::100],
        'criteria': 'mrr',
        'stop_interval': 2,
        'burn_in': 1,
        'check_interval': 2,
    }
    model.fit(data['train'], True, stopping_params)
    scores = model.predict(data['test'][:1])
    print(scores)
def test_select_best_model_ranking_grid():
X = load_wn18rr()
model_class = TransE
param_grid = {
"batches_count": [50],
"seed": 0,
"epochs": [1],
"k": [2, 50],
"eta": [1],
"loss": ["nll"],
"loss_params": {
},
"embedding_model_params": {
},
"regularizer": [None],
"regularizer_params": {
},
"optimizer": ["adagrad"],