rule_index = option - starting_n_rules
primitive_index = 0
comment += 'new rule at position %d' % rule_index
consider_function = self.consider_addition_to_new_rule
result = consider_function(
    current_rl,
    rule_index
)
(new_rl,
 relative_prior,
 relative_likelihood,
 candidate_distribution,
 probability_remove_proposal) = result
logger.debug('Adding to rule %d' % rule_index)
index = lambda r: self.model.rule_population.get_primitive_index(r)
if debugcase == 2:
    logger.debug('Rule is [] (new)')
else:
    logger.debug('Rule is %s' % str(list(map(index, current_rl[rule_index]))))
logger.debug('Candidate distribution (log scale):')
for i, value in enumerate(candidate_distribution):
    logger.debug('%d\t%.3f' % (i, value))
probability_this_option = 1. / (2. * len(current_rl) + 1.)
probability_reverse_option = probability_remove_proposal
logger.debug('Add block: %s' % comment)
logger.debug('Prior, log(marg. new/old): %.3g' %
             relative_prior)
logger.debug('Move choice prob (rev/for): %.3g/%.3g' %
             (probability_reverse_option, probability_this_option))
logger.info('Likelihood, log(marg. new/old): %.3g' %
            relative_likelihood)
acceptance_ratio = (
    np.exp(relative_prior + relative_likelihood) *
    (probability_reverse_option / probability_this_option)
)
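# Note on the correction above: probability_this_option appears to be the
# chance of having selected this particular add move uniformly from the
# 2*len(current_rl) + 1 moves enumerated by the caller, while
# probability_remove_proposal is presumably the chance that the reverse
# move (removing the primitive just added) would be proposed from the new
# state; their ratio is the Hastings correction multiplying the marginal
# prior and likelihood ratios.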
Lots of code overlap with update_phylogeny_parameters below:
to be fixed.
"""
# First, the typical window fraction.
delta_f = self.window_fraction_proposal_std * np.random.randn()
current_f = self.current_state.window_typical_fraction
proposed_state = self.current_state.copy()
# We want the increment of the fraction to 'bounce' off
# either end of the interval (0,1)-- more than once if necessary--
# leading to this rather clumsy function:
proposed_state.window_typical_fraction = np.abs(
    ((delta_f + current_f + 1.0) % 2.0) - 1.0
)
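# For example (illustrative numbers only): with current_f = 0.9 and
# delta_f = 0.25, the raw sum 1.15 would leave (0, 1); the expression above
# gives abs(((0.25 + 0.9 + 1.0) % 2.0) - 1.0) = abs(0.15 - 1.0) = 0.85,
# i.e. the proposal bounces back off 1.0 by its 0.15 excess, as intended.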
logger.debug(
    'Incrementing window f by %.3f from %.3f.' %
    (delta_f, current_f)
)
self.parameter_mh_update(proposed_state)
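# parameter_mh_update is not shown in these fragments; it is assumed here
# to perform the generic accept/reject step for a symmetric proposal
# (plain or reflected random walks, as used above, are symmetric): score
# the proposed state against the current one and keep it with the usual
# Metropolis probability. A rough sketch under that assumption
# (log_posterior is a hypothetical helper, not part of the shown code):
#
#     def parameter_mh_update(self, proposed_state):
#         log_ratio = (self.model.log_posterior(proposed_state) -
#                      self.model.log_posterior(self.current_state))
#         if np.random.rand() < np.exp(log_ratio):
#             self.current_state = proposed_state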
# Then the concentration parameter.
delta_cw = self.window_concentration_proposal_std * np.random.randn()
current_cw = self.current_state.window_concentration
proposed_state = self.current_state.copy()
proposed_state.window_concentration = np.abs(
    delta_cw + current_cw
)
logger.debug(
    'Incrementing window concentration by %.3f from %.3f.' %
    (delta_cw, current_cw)
)
self.parameter_mh_update(proposed_state)  # as for the typical fraction above
def update_phylogeny_parameters(self):
    """ MH updates for phylogenetic mean and std parameters.

    For convenience, returns the prior probability after the MH
    step is complete.
    """
    # First, the mean.
    delta_mean = self.phylogeny_mean_proposal_std * np.random.randn()
    current_mean = self.current_state.phylogeny_mean
    proposed_state = self.current_state.copy()
    proposed_state.phylogeny_mean = (
        delta_mean + current_mean
    )
    logger.debug(
        'Incrementing phylogeny mean by %.3f from %.3f' %
        (delta_mean, current_mean)
    )
    self.parameter_mh_update(proposed_state)
    # Then (with unfortunate code duplication) the standard
    # deviation.
    delta_std = self.phylogeny_std_proposal_std * np.random.randn()
    current_std = self.current_state.phylogeny_std
    # It's possible to do this more efficiently, but the situation
    # where this requires multiple iterations should arise rarely
    # with sensible parameter choices.
    proposed_std = delta_std + current_std
    while (proposed_std < 0. or
           proposed_std > self.model.phylogeny_std_upper_bound):
        proposed_std = np.abs(proposed_std)
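    # (The rest of this update is not shown in this fragment. By analogy
    # with the mean update above, the reflected proposed_std would be
    # stored on a copy of the current state and passed to
    # self.parameter_mh_update. Note that np.abs alone only reflects off
    # zero; proposals above phylogeny_std_upper_bound would also need a
    # reflection such as
    #     proposed_std = 2. * self.model.phylogeny_std_upper_bound - proposed_std
    # for the loop to terminate -- presumably handled in the omitted lines.)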
# phylogenetic prior hyperparameters
candidate_state = self.current_state.copy()
candidate_state.rl = rl
priors[k] = self.model.rule_list_prior(candidate_state)
# This function calculates only the prior contributions
# from the rule list, which is fine, as nothing else is
# being varied between candidates here.
new_X = self.model.data.covariate_matrix(rl)
likelihoods[k] = self.model.likelihood_z_given_X(
    self.current_state.omega,
    new_X
)
# Summarize the results in debugging output.
logger.debug('Move options:')
for c, p, l, rl in zip(comments, priors, likelihoods, rls):
    logger.debug(c)
    logger.debug('%.3g' % p)
    logger.debug('%.3g' % l)
    logger.debug('%s' % rl)
# Choose from among the options according to the posterior
# probabilities, and execute and record the choice.
k = choose_from_relative_probabilities(np.exp(priors + likelihoods))
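# choose_from_relative_probabilities is not defined in this fragment; it is
# assumed to normalize the given nonnegative weights and draw one index from
# the resulting categorical distribution, roughly:
#
#     def choose_from_relative_probabilities(weights):
#         weights = np.asarray(weights, dtype=float)
#         return np.random.choice(len(weights), p=weights / weights.sum())
#
# Since priors and likelihoods are evidently log values (hence the np.exp),
# subtracting their maximum before exponentiating would avoid underflow when
# all options are very improbable; the relative weights, and therefore the
# draw, would be unchanged.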
self.current_state.rl = rls[k]
self.ensure_current_rl_sorted()
self.current_X = self.model.data.covariate_matrix(
    self.current_state.rl
)
# CAUTION: beta is wrong.
self.comments.append(comment_prefix + comments[k])
logger.info(comment_prefix + comments[k])
logger.debug('Removal block: %s' % comment)
logger.debug('Prior, log(marg. new/old): %.3g' %
             relative_prior)
logger.debug('Move choice prob (rev/for): %.3g/%.3g' %
             (probability_reverse_option, probability_this_option))
logger.debug('Likelihood, log(marg. new/old): %.3g' %
             relative_likelihood)
acceptance_ratio = (
    np.exp(relative_prior + relative_likelihood) *
    (probability_reverse_option / probability_this_option)
)
logger.debug('Acceptance ratio: %.3g' % acceptance_ratio)
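# np.exp(relative_prior + relative_likelihood) is the posterior ratio of the
# proposed rule list to the current one, and probability_reverse_option /
# probability_this_option is the proposal (Hastings) correction; comparing a
# uniform draw against their product below needs no explicit min(1, .),
# because any ratio >= 1 is always accepted.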
if dry_run:
    logger.debug('dry run flag set, aborting before accept check')
    return
self.attempts['remove'] = self.attempts.get('remove', 0) + 1
if np.random.rand() < acceptance_ratio:
    comment += ': accepted (ratio %.3g)' % acceptance_ratio
    self.successes['remove'] = self.successes.get('remove', 0) + 1
    self.current_state.rl = shorter_rule
    shorter_X = self.model.data.covariate_matrix(shorter_rule)
    self.current_X = shorter_X
    # No need to ensure the current RL is sorted here;
    # possibly removing one entry from a sorted list preserves
    # its order.
    # CAUTION: beta is wrong
else:
    comment += ': rejected (ratio %.3g)' % acceptance_ratio
logger.info(comment)