# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_scale(self):
    """Check that scale() maps values from a source range onto a target range.

    Argument order appears to be scale(value, t_min, t_max, s_min, s_max)
    (target bounds first, then source bounds) -- confirm against the
    implementation's signature.
    """
    # Scalar mapping from source [0, 1] onto target [1, 5].
    self.assertEqual(1, scale(0, 1, 5, 0, 1))
    self.assertEqual(3, scale(0.5, 1, 5, 0, 1))
    self.assertEqual(5, scale(1, 1, 5, 0, 1))
    # Degenerate source range (min == max) must still produce a value
    # inside the target range instead of dividing by zero.
    self.assertEqual(1, scale(5, 0, 1, 5, 5))
    # Vectorised input: with source bounds omitted, the array is
    # presumably rescaled from its own min/max -- verify against caller.
    npt.assert_array_equal(scale(np.asarray([0, 0.25, 0.5, 0.75, 1]), 1, 5),
                           np.asarray([1, 2, 3, 4, 5]))
def test_scale(self):
    """Check that scale() maps values from a source range onto a target range.

    Argument order appears to be scale(value, t_min, t_max, s_min, s_max)
    (target bounds first, then source bounds) -- confirm against the
    implementation's signature.
    """
    # Scalar mapping from source [0, 1] onto target [1, 5].
    self.assertEqual(1, scale(0, 1, 5, 0, 1))
    self.assertEqual(3, scale(0.5, 1, 5, 0, 1))
    self.assertEqual(5, scale(1, 1, 5, 0, 1))
    # Degenerate source range (min == max) must still produce a value
    # inside the target range instead of dividing by zero.
    self.assertEqual(1, scale(5, 0, 1, 5, 5))
    # Vectorised input: with source bounds omitted, the array is
    # presumably rescaled from its own min/max -- verify against caller.
    npt.assert_array_equal(scale(np.asarray([0, 0.25, 0.5, 0.75, 1]), 1, 5),
                           np.asarray([1, 2, 3, 4, 5]))
def test_scale(self):
    """Check that scale() maps values from a source range onto a target range.

    Argument order appears to be scale(value, t_min, t_max, s_min, s_max)
    (target bounds first, then source bounds) -- confirm against the
    implementation's signature.
    """
    # Scalar mapping from source [0, 1] onto target [1, 5].
    self.assertEqual(1, scale(0, 1, 5, 0, 1))
    self.assertEqual(3, scale(0.5, 1, 5, 0, 1))
    self.assertEqual(5, scale(1, 1, 5, 0, 1))
    # Degenerate source range (min == max) must still produce a value
    # inside the target range instead of dividing by zero.
    self.assertEqual(1, scale(5, 0, 1, 5, 5))
    # Vectorised input: with source bounds omitted, the array is
    # presumably rescaled from its own min/max -- verify against caller.
    npt.assert_array_equal(scale(np.asarray([0, 0.25, 0.5, 0.75, 1]), 1, 5),
                           np.asarray([1, 2, 3, 4, 5]))
"""
from cornac.models.mcf import mcf
Recommender.fit(self, train_set, val_set)
if self.trainable:
# user-item interactions
(rat_uid, rat_iid, rat_val) = train_set.uir_tuple
# item-item affinity network
map_iid = train_set.item_indices
(net_iid, net_jid, net_val) = train_set.item_graph.get_train_triplet(map_iid, map_iid)
if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]:
if self.train_set.min_rating == self.train_set.max_rating:
rat_val = scale(rat_val, 0., 1., 0., self.train_set.max_rating)
else:
rat_val = scale(rat_val, 0., 1., self.train_set.min_rating, self.train_set.max_rating)
if [min(net_val), max(net_val)] != [0, 1]:
if min(net_val) == max(net_val):
net_val = scale(net_val, 0., 1., 0., max(net_val))
else:
net_val = scale(net_val, 0., 1., min(net_val), max(net_val))
rat_val = np.array(rat_val, dtype='float32')
rat_uid = np.array(rat_uid, dtype='int32')
rat_iid = np.array(rat_iid, dtype='int32')
net_val = np.array(net_val, dtype='float32')
net_iid = np.array(net_iid, dtype='int32')
net_jid = np.array(net_jid, dtype='int32')
if self.verbose:
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
known_item_scores = self.V.dot(self.U[user_idx, :])
return known_item_scores
else:
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])
user_pred = sigmoid(user_pred)
if self.train_set.min_rating == self.train_set.max_rating:
user_pred = scale(user_pred, 0., self.train_set.max_rating, 0., 1.)
else:
user_pred = scale(user_pred, self.train_set.min_rating, self.train_set.max_rating, 0., 1.)
return user_pred
degree = train_set.user_graph.get_node_degree(map_uid, map_uid)
weighted_net_val = []
for u, j, val in zip(net_uid, net_jid, net_val):
u_out = degree[int(u)][1]
j_in = degree[int(j)][0]
val_weighted = math.sqrt(j_in / (j_in + u_out)) * val
weighted_net_val.append(val_weighted)
net_val = weighted_net_val
if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]:
if self.train_set.min_rating == self.train_set.max_rating:
rat_val = scale(rat_val, 0., 1., 0., self.train_set.max_rating)
else:
rat_val = scale(rat_val, 0., 1., self.train_set.min_rating, self.train_set.max_rating)
rat_val = np.array(rat_val, dtype='float32')
rat_uid = np.array(rat_uid, dtype='int32')
rat_iid = np.array(rat_iid, dtype='int32')
net_val = np.array(net_val, dtype='float32')
net_uid = np.array(net_uid, dtype='int32')
net_jid = np.array(net_jid, dtype='int32')
if self.verbose:
print('Learning...')
res = sorec.sorec(rat_uid, rat_iid, rat_val, net_uid, net_jid, net_val, k=self.k,
n_users=train_set.num_users,
n_items=train_set.num_items, n_ratings=len(rat_val), n_edges=len(net_val),
n_epochs=self.max_iter, lamda_c=self.lamda_c,
if self.weight_link:
degree = train_set.user_graph.get_node_degree(map_uid, map_uid)
weighted_net_val = []
for u, j, val in zip(net_uid, net_jid, net_val):
u_out = degree[int(u)][1]
j_in = degree[int(j)][0]
val_weighted = math.sqrt(j_in / (j_in + u_out)) * val
weighted_net_val.append(val_weighted)
net_val = weighted_net_val
if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]:
if self.train_set.min_rating == self.train_set.max_rating:
rat_val = scale(rat_val, 0., 1., 0., self.train_set.max_rating)
else:
rat_val = scale(rat_val, 0., 1., self.train_set.min_rating, self.train_set.max_rating)
rat_val = np.array(rat_val, dtype='float32')
rat_uid = np.array(rat_uid, dtype='int32')
rat_iid = np.array(rat_iid, dtype='int32')
net_val = np.array(net_val, dtype='float32')
net_uid = np.array(net_uid, dtype='int32')
net_jid = np.array(net_jid, dtype='int32')
if self.verbose:
print('Learning...')
res = sorec.sorec(rat_uid, rat_iid, rat_val, net_uid, net_jid, net_val, k=self.k,
n_users=train_set.num_users,
if self.trainable:
# user-item interactions
(rat_uid, rat_iid, rat_val) = train_set.uir_tuple
# item-item affinity network
map_iid = train_set.item_indices
(net_iid, net_jid, net_val) = train_set.item_graph.get_train_triplet(map_iid, map_iid)
if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]:
if self.train_set.min_rating == self.train_set.max_rating:
rat_val = scale(rat_val, 0., 1., 0., self.train_set.max_rating)
else:
rat_val = scale(rat_val, 0., 1., self.train_set.min_rating, self.train_set.max_rating)
if [min(net_val), max(net_val)] != [0, 1]:
if min(net_val) == max(net_val):
net_val = scale(net_val, 0., 1., 0., max(net_val))
else:
net_val = scale(net_val, 0., 1., min(net_val), max(net_val))
rat_val = np.array(rat_val, dtype='float32')
rat_uid = np.array(rat_uid, dtype='int32')
rat_iid = np.array(rat_iid, dtype='int32')
net_val = np.array(net_val, dtype='float32')
net_iid = np.array(net_iid, dtype='int32')
net_jid = np.array(net_jid, dtype='int32')
if self.verbose:
print('Learning...')
res = mcf.mcf(rat_uid, rat_iid, rat_val, net_iid, net_jid, net_val, k=self.k, n_users=train_set.num_users,
n_items=train_set.num_items, n_ratings=len(rat_val), n_edges=len(net_val),
if item_idx is None:
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
known_item_scores = self.V.dot(self.U[user_idx, :])
return known_item_scores
else:
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])
user_pred = sigmoid(user_pred)
if self.train_set.min_rating == self.train_set.max_rating:
user_pred = scale(user_pred, 0., self.train_set.max_rating, 0., 1.)
else:
user_pred = scale(user_pred, self.train_set.min_rating, self.train_set.max_rating, 0., 1.)
return user_pred
"""
# NOTE(review): fragment of a recommender's score(user_idx, item_idx=None)
# method body; the enclosing `def` and all indentation were lost in
# extraction, so this block does not parse as-is.  Code is preserved
# byte-for-byte below; only comments were added.  Restore the method header
# and indentation upstream.
if item_idx is None:
# Ranking branch: score every known item for this user.
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
# Scores are dot products of the user's latent factors (a row of U)
# with all item latent factors (rows of V).
known_item_scores = self.V.dot(self.U[user_idx, :])
return known_item_scores
else:
# Single-pair branch: predict the score for one (user, item).
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])
# Squash the raw dot product into (0, 1), then rescale onto the
# training ratings' range -- presumably scale(value, t_min, t_max,
# s_min, s_max); confirm against the scale() implementation.
user_pred = sigmoid(user_pred)
if self.train_set.min_rating == self.train_set.max_rating:
# Degenerate rating range (min == max): rescale from [0, 1] using 0 as the lower bound.
user_pred = scale(user_pred, 0., self.train_set.max_rating, 0., 1.)
else:
user_pred = scale(user_pred, self.train_set.min_rating, self.train_set.max_rating, 0., 1.)
return user_pred