Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): Python 2 syntax (print statements) calling R through rpy2.
# `result` is used here before it is assigned below -- presumably a leftover
# from an earlier (not shown) test call; verify against the full file.
print result.r['p.value'][0][0]
# t test
a = [1,2,3,4]
b = [2,3,4,5]
# Paired, two-sided t-test via R's t.test; rpy2 wraps the result object.
result = R.r['t.test'](R.IntVector(a),R.IntVector(b),alternative='two.sided',paired=True)
print result.r['p.value'][0][0]
print result.r['estimate'][0][0]
# for paired test, if estimate > 0, then a is greater than b, else, b is greater than a (mean)
# FDR correction
# the pvalue_lst has to be FloatVector
# first we might have a list of test result objects, make pvalue list first
pvalue_lst = [v.r['p.value'][0][0] for v in testResult_lst]
# Multiple-testing correction with the Benjamini-Yekutieli ('BY') method.
p_adjust = R.r['p.adjust'](R.FloatVector(pvalue_lst),method='BY')
for v in p_adjust:
print v
# NOTE(review): fragment starts mid tuple-unpacking -- the opening
# "([readCounts, ..." of this assignment is not visible in this chunk.
condition,
covarsData,
interactionsData,
NZmean,
logitZPerc]) = self.melt_data(
norm_data,
conditions, covariates, interactions, NZMeanByRep, LogZPercByRep)
# Genes with zero reads in every condition cannot be tested; record a status
# and a p-value of 1 so downstream FDR correction still lines up per gene.
if (numpy.sum(readCounts) == 0):
status.append("pan-essential (no counts in all conditions) - not analyzed")
pvals.append(1)
else:
# Assemble columns for an R data.frame: counts plus per-observation metadata.
df_args = {
'cnt': IntVector(readCounts),
'cond': toRFloatOrStrVec(condition),
'NZmean': FloatVector(NZmean),
'logitZperc': FloatVector(logitZPerc)
}
## Add columns for covariates and interactions if they exist.
df_args.update(list(map(lambda t_ic: (t_ic[1], toRFloatOrStrVec(covarsData[t_ic[0]])), enumerate(self.covars))))
df_args.update(list(map(lambda t_ic: (t_ic[1], toRFloatOrStrVec(interactionsData[t_ic[0]])), enumerate(self.interactions))))
melted = DataFrame(df_args)
# r_args = [IntVector(readCounts), StrVector(condition), melted, map(lambda x: StrVector(x), covars), FloatVector(NZmean), FloatVector(logitZPerc)] + [True]
debugFlag = True if DEBUG or GENE else False
# Zero-inflated negative binomial significance test, executed on the R side.
pval, msg = r_zinb_signif(melted, zinbMod1, zinbMod0, nbMod1, nbMod0, debugFlag)
status.append(msg)
pvals.append(float(pval))
if (DEBUG or GENE):
self.transit_message("Pval for Gene {0}: {1}, status: {2}".format(Rv, pvals[-1], status[-1]))
# GENE mode runs the analysis for a single gene, then exits the process.
if (GENE):
self.transit_message("Ran for single gene. Exiting...")
sys.exit(0)
###########################################
###########################################
# plot length versus P-Value
# Fetch (interval length, p-value) pairs for rows flagged significant.
# NOTE(review): the SQL is built by %-interpolating locals(); `tablename` is
# presumably internal/trusted -- confirm it never comes from user input.
data = Database.executewait(dbhandle,
'''SELECT end - start, pvalue
FROM %(tablename)s
WHERE significant''' % locals()).fetchall()
# require at least 10 datapoints - otherwise smooth scatter fails
if len(data) > 10:
# Transpose row tuples into two parallel sequences: lengths and p-values.
data = list(zip(*data))
pngfile = "%(outdir)s/%(tileset)s_%(design)s_%(method)s_pvalue_vs_length.png" % locals()
R.png(pngfile)
R.smoothScatter(R.log10(ro.FloatVector(data[0])),
R.log10(ro.FloatVector(data[1])),
xlab='log10(length)',
ylab='log10(pvalue)',
log="x", pch=20, cex=.1)
# Close the R graphics device so the PNG is flushed to disk.
R['dev.off']()
outf.close()
def spline_diffusivity(pup, params):
    """Monotone-spline interpolation of diffusivity: D = 1/(df/dT * heta).

    Registers the knot data in R's global environment (via the module-level
    rpy2 handles ``robjects`` and ``r``), builds two monotone Hermite splines
    there, and returns a Python callable evaluating
    -1 / (heta'(x) * eff'(x)) at a scalar x.
    """
    from numpy import linspace

    # Push the spline knots into the R global environment, in the same
    # name order as before: hetay, hetax, pupx, pupy.
    knots = (
        ("hetay", linspace(0, 1, num=len(params)).tolist()),
        ("hetax", params),
        ("pupx", params),
        ("pupy", pup),
    )
    for name, values in knots:
        robjects.globalenv[name] = robjects.FloatVector(values)

    # Fritsch-Carlson monotone splines defined on the R side.
    r('heta <- splinefun(hetax,hetay,method="monoH.FC")')
    r('eff <- splinefun(pupx,pupy,method="monoH.FC")')
    diffusivity = r('diff <- function(x) {-1/(heta(x,deriv=1)*eff(x,deriv=1))}')

    def evaluate(x):
        # R returns a length-1 vector; unwrap it to a plain scalar.
        return diffusivity(x)[0]

    return evaluate
# NOTE(review): fragment begins inside an r.segments(...) call -- the opening
# "r.segments(x0=..., y0=..." arguments are not visible in this chunk.
x1=junction.end, y1=bottom, col=color,
lwd=lwd)
else:
# Horizontal segment spanning the junction at mid height.
r.segments(x0=junction.start, y0=midpoint,
x1=junction.end, y1=midpoint, col=color,
lwd=lwd)
# Vertical ticks at both junction endpoints, drawn in one vectorized call.
x0 = [junction.start, junction.end]
x1 = [junction.start, junction.end]
y0 = [bottom, bottom]
y1 = [top, top]
r.segments(x0=robjects.FloatVector(x0),
y0=robjects.FloatVector(y0),
x1=robjects.FloatVector(x1),
y1=robjects.FloatVector(y1), col=color,
lwd=lwd)
# add text giving read count
r.text(x=(junction.end + text_offset), y=midpoint,
labels=str(junction.read_count), col="black",
cex=0.5)
# Plot window around `position`, divided by `scale` (presumably 1e6 given the
# "(mb)" axis label -- confirm); x-axis reversed for minus-strand regions.
xlim = numpy.array([position-extend, position+extend])/scale
if isreversed:
xlim = xlim[::-1]
ylim = [0,0]
if len(bcs_to_rows) > 0:
ylim = [0,max(bcs_to_rows.values())]
# Empty canvas (type="n"); fragments are drawn onto it afterwards.
ro.r.plot(ro.FloatVector([0]),
type="n", bty="n",
xlim=ro.FloatVector(xlim),
ylim=ro.FloatVector(ylim),
xlab="{} (mb)".format(chrom), ylab="Barcode")
# Dashed gray guide line at each breakpoint position.
ro.r.abline(v=ro.FloatVector(breakpoint_lines/scale), lty=2, col="gray")
if len(bcs_to_rows) == 0:
continue
# One y-axis row per barcode.
ypos = numpy.array([bcs_to_rows[bc] for bc in frags["bc"]])
# colors = numpy.where(cur_frags["supporting"], "black", "gray")
support = []
# Classify each fragment for coloring: background / nohap / hap0 / hap1.
for row in frags.itertuples():
if not row.supporting:
support.append("background")
elif numpy.isnan(row.hap):
support.append("nohap")
elif row.hap == 1:
support.append("hap0")
elif row.hap == 2 or row.hap == 0:
support.append("hap1")
xlim = [start, end]
if isreversed:
xlim = xlim[::-1]
# Empty canvas (type="n") for the copy-number track; lines drawn below.
ro.r.plot(
ro.FloatVector(numpy.array(xs[i])/scale),
ro.FloatVector(event_coverages[i].values),
xlim=ro.FloatVector(numpy.array(xlim)/scale),
ylim=ro.FloatVector(numpy.array(ylim)),
type="n",
bty="n", xlab="{} (mb)".format(chrom), ylab="Copy number", main="")
# Vertical breakpoint guides plus horizontal integer copy-number gridlines.
ro.r.abline(v=ro.FloatVector(breakpoint_lines/scale), lty=2, col="gray")
ro.r.abline(h=ro.FloatVector(numpy.arange(0,10)), lty=2, col="gray")
ro.r.lines(ro.FloatVector(numpy.array(xs[i])/scale), ro.FloatVector(event_coverages[i].values), lwd=2)
# Nothing to draw when the track has no features.
if len(self.features) == 0:
# no features
return
feat_left = []
feat_right = []
for feat in self.features:
feat_left.append(feat.start)
feat_right.append(feat.end)
# Vertical extent of the feature rectangles within this track.
feat_top = self.top - margin_height/2
feat_bottom = feat_top - feat_height
# draw rectangle for each feature
r.rect(robjects.FloatVector(feat_left),
feat_bottom,
robjects.FloatVector(feat_right),
feat_top,
col=self.color, border=self.border_color)
# Map data values to plot coordinates; guard against a zero value range
# (which would otherwise divide by zero).
if self.max_val == self.min_val:
yscale = self.height
else:
yscale = self.height / (self.max_val - self.min_val)
y_transform = (y - self.min_val) * yscale + self.bottom
# draw tick marks
r.segments(x0=robjects.FloatVector(x0),
x1=robjects.FloatVector(x1),
y0=robjects.FloatVector(y_transform),
y1=robjects.FloatVector(y_transform))
# draw y-axis line
r.segments(x0=robjects.FloatVector([x0[0]]),
x1=robjects.FloatVector([x0[0]]),
y0=robjects.FloatVector([y_transform[0]]),
y1=robjects.FloatVector([y_transform[-1]]))
# draw labels beside tick marks
# Label precision adapts to the span of the tick values.
if span >= 10.0:
labels = [str(int(val)) for val in y]
elif span >= 0.5:
labels = ["%.1f" % val for val in y]
elif span > 0.01:
labels = ["%.2f" % val for val in y]
else:
labels = [str("%.2e" % val) for val in y]
# x_label = self.region.start - tick_width * 0.5
x_label = self.region.start
# polygons have too many points
# Draw values in fixed-size chunks so each R polygon call stays small.
block_sz = 10000
for block_start in range(0, vals.size, block_sz):
block_end = min(block_start + block_sz, vals.size)
block_vals = vals[block_start:block_end]
# identify contiguous segments with same values
(x1, x2, y) = self.get_segments(block_vals)
# new way of drawing: convert contiguous segments
# to polygon coordinates
(x, y) = self.get_polygon_coords(x1, x2, y)
if len(x) > 0:
# x is relative to the block start; shift back to absolute coordinates.
r.polygon(robjects.FloatVector(x + block_start),
robjects.FloatVector(y),
col=self.color,
border=self.border_color)
# old way,
# n_seg = len(x1)
# if n_seg > 0:
# r.rect(robjects.FloatVector(x1),
# robjects.FloatVector([self.bottom]),
# robjects.FloatVector(x2),
# robjects.FloatVector(y), col=self.color,
# border=robjects.NA_Logical)
self.draw_y_axis(r, self.n_ticks)