tstop = time.time()
print("\nself_cal time: %f s" % (tstop - tstart))

if caltable:  # assemble the caltable to return
    allsites = obs.tarr['site']
    caldict = scans_cal[0]
    for i in range(1, len(scans_cal)):
        row = scans_cal[i]
        for site in allsites:
            try:
                dat = row[site]
            except KeyError:
                continue
            try:
                caldict[site] = np.append(caldict[site], row[site])
            except KeyError:
                caldict[site] = dat

    caltable = ehtim.caltable.Caltable(obs.ra, obs.dec, obs.rf, obs.bw, caldict, obs.tarr,
                                       source=obs.source, mjd=obs.mjd, timetype=obs.timetype)
    out = caltable

else:  # return a calibrated observation
    obs_cal = ehtim.obsdata.Obsdata(obs.ra, obs.dec, obs.rf, obs.bw,
                                    np.concatenate(scans_cal), obs.tarr,
                                    polrep=obs.polrep, source=obs.source, mjd=obs.mjd,
                                    ampcal=obs.ampcal, phasecal=obs.phasecal,
                                    dcal=obs.dcal, frcal=obs.frcal,
                                    timetype=obs.timetype, scantable=obs.scans)
    out = obs_cal

# close multiprocessing jobs
if processes != -1:
    pool.close()

return out
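For orientation, here is a minimal, hedged sketch of how a caller typically consumes this return value, assuming the standard ehtim import path, that self_cal is invoked with caltable=True, and that the file names below are placeholders:

# Minimal usage sketch (assumptions: ehtim installed; 'obs.uvfits' and
# 'model.fits' are placeholder file names; self_cal called with caltable=True).
import ehtim as eh
from ehtim.calibrating import self_cal as sc

obs = eh.obsdata.load_uvfits('obs.uvfits')   # observation to calibrate
im = eh.image.load_fits('model.fits')        # model image used for self-calibration

ct = sc.self_cal(obs, im, caltable=True)     # returns a Caltable of gain solutions
obs_cal = ct.applycal(obs)                   # apply the gains back to the data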
            lscale = float(row[2])
        elif len(row) == 5:
            rscale = float(row[1]) + 1j*float(row[2])
            lscale = float(row[3]) + 1j*float(row[4])
        else:
            raise Exception("cannot load caltable -- format unknown!")

        if sqrt_gains:
            rscale = rscale**.5
            lscale = lscale**.5

        datatable.append(np.array((time, rscale, lscale), dtype=DTCAL))
        #if onerowonly:
        #    datatable.append(np.array((1.1*time, rscale, lscale), dtype=DTCAL))

    datatables[site] = np.array(datatable)

if len(datatables) > 0:
    caltable = Caltable(obs.ra, obs.dec, obs.rf, obs.bw, datatables, tarr,
                        source=obs.source, mjd=obs.mjd, timetype=obs.timetype)
else:
    print("COULD NOT FIND CALTABLE IN DIRECTORY %s" % datadir)
    caltable = False

return caltable
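Each datatable row built above is a structured record pairing a time stamp with complex R and L gain scales. A small sketch of one such record, assuming DTCAL is the dtype defined in ehtim.const_def (the numerical values are made up):

# One caltable row as a structured numpy record (values are made up).
# DTCAL is assumed to be [('time','f8'), ('rscale','c16'), ('lscale','c16')].
import numpy as np
from ehtim.const_def import DTCAL

time = 10.5                 # UT hour of the gain solution
rscale = 0.9 + 0.1j         # complex gain scale, right-circular polarization
lscale = 0.9 - 0.1j         # complex gain scale, left-circular polarization

row = np.array((time, rscale, lscale), dtype=DTCAL)
print(row['time'], row['rscale'], row['lscale'])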
def copy(self):
    """Copy the Caltable object.

       Args:

       Returns:
           (Caltable): a copy of the Caltable object.
    """

    new_caltable = Caltable(self.ra, self.dec, self.rf, self.bw, self.data, self.tarr,
                            source=self.source, mjd=self.mjd, timetype=self.timetype)
    return new_caltable
    Returns:
        (Caltable): a caltable object
    """

    ntele = len(sites)
    ntimes = len(times)

    datatables = {}
    for s in range(0, ntele):
        datatable = []
        for t in range(0, ntimes):
            # pick the gain for this (site, time) pair out of the flat gains array
            gain = gains[s*ntele + t]
            # the same gain is applied to both right and left polarizations
            datatable.append(np.array((times[t], gain, gain), dtype=DTCAL))
        datatables[sites[s]] = np.array(datatable)

    if len(datatables) > 0:
        caltable = Caltable(obs.ra, obs.dec, obs.rf,
                            obs.bw, datatables, obs.tarr, source=obs.source,
                            mjd=obs.mjd, timetype=obs.timetype)
    else:
        caltable = False

    return caltable
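The loop above assumes the input gains form a flat sequence with one entry per (site, time) pair; note that the flat index s*ntele + t only spans every pair when the number of sites equals the number of times, so a site-major layout of shape (ntele, ntimes) would normally be indexed as s*ntimes + t. Below is a self-contained sketch of the same datatable construction with an explicit two-dimensional gains array (site names, times, and gain values are made up):

# Sketch: build per-site caltable datatables from a site-major gains array.
# DTCAL here mirrors the record layout used above; all values are made up.
import numpy as np

DTCAL = [('time', 'f8'), ('rscale', 'c16'), ('lscale', 'c16')]

sites = ['ALMA', 'SMA', 'LMT']
times = np.array([10.0, 10.5, 11.0])                       # UT hours
gains = np.ones((len(sites), len(times)), dtype=complex)   # one gain per (site, time)

datatables = {}
for s, site in enumerate(sites):
    rows = [np.array((times[t], gains[s, t], gains[s, t]), dtype=DTCAL)
            for t in range(len(times))]
    datatables[site] = np.array(rows)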
if caltable:  # create and return a caltable
    allsites = obs.tarr['site']
    caldict = {k: v.reshape(1) for k, v in scans_cal[0].items()}
    for i in range(1, len(scans_cal)):
        row = scans_cal[i]
        if len(row) == 0:
            continue
        for site in allsites:
            try:
                dat = row[site]
            except KeyError:
                continue
            try:
                caldict[site] = np.append(caldict[site], row[site])
            except KeyError:
                caldict[site] = [dat]

    caltable = ehtim.caltable.Caltable(obs.ra, obs.dec, obs.rf, obs.bw, caldict, obs.tarr,
                                       source=obs.source, mjd=obs.mjd, timetype=obs.timetype)
    out = caltable

else:  # return the calibrated observation
    arglist, argdict = obs.obsdata_args()
    arglist[4] = np.concatenate(scans_cal)
    out = ehtim.obsdata.Obsdata(*arglist, **argdict)

# close multiprocessing jobs
if processes != -1:
    pool.close()

return out
        # if incoherent averaging, average the magnitudes of the gains (phases are dropped)
        if incoherent:
            gains_l = np.abs(gains_l)
            gains_r = np.abs(gains_r)

        # average the gains over the scan
        gains_l_avg = np.mean(gains_l[np.array(times_stable == scan[0])])
        gains_r_avg = np.mean(gains_r[np.array(times_stable == scan[0])])

        # add them to a new datatable
        datatable.append(np.array((scan[0], gains_r_avg, gains_l_avg), dtype=DTCAL))

    datatables[site] = np.array(datatable)

if len(datatables) > 0:
    caltable = Caltable(obs.ra, obs.dec, obs.rf,
                        obs.bw, datatables, obs.tarr, source=obs.source,
                        mjd=obs.mjd, timetype=obs.timetype)
else:
    caltable = False

return caltable
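A toy numeric illustration of what the incoherent flag changes: averaging gain magnitudes ignores the phases, while a direct complex mean lets the phases partially cancel (the gain values below are made up):

# Coherent vs. incoherent averaging of two equal-amplitude gains 90 degrees apart.
import numpy as np

gains = np.array([np.exp(1j * 0.0), np.exp(1j * np.pi / 2)])

coherent_avg = np.mean(gains)             # complex mean: |avg| is about 0.707
incoherent_avg = np.mean(np.abs(gains))   # mean of magnitudes: 1.0

print(abs(coherent_avg), incoherent_avg)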
        # TODO: can we do this faster?
        datatable = []
        for i in range(len(times_merge)):
            datatable.append(np.array((times_merge[i], rscale_merge[i], lscale_merge[i]), dtype=DTCAL))
        data1[site] = np.array(datatable)

    # sites not in both caltables
    else:
        if site not in tkey1.keys():
            tarr1 = np.append(tarr1, tarr2[tkey2[site]])
            data1[site] = data2[site]

    # update tkeys every time
    tkey1 = {tarr1[i]['site']: i for i in range(len(tarr1))}

new_caltable = Caltable(self.ra, self.dec, self.rf, self.bw, data1, tarr1,
                        source=self.source, mjd=self.mjd, timetype=self.timetype)
return new_caltable
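A hedged sketch of how this merge logic is typically driven from user code, assuming Caltable.merge accepts a list of other caltables and that ehtim.caltable.load_caltable(obs, datadir) is the loader shown earlier; the uvfits file and directory names are placeholders:

# Sketch: combine gain tables from two calibration passes and apply the result.
# 'obs.uvfits', 'caltab_pass1/', and 'caltab_pass2/' are placeholder paths.
import ehtim as eh

obs = eh.obsdata.load_uvfits('obs.uvfits')

ct_a = eh.caltable.load_caltable(obs, 'caltab_pass1/')
ct_b = eh.caltable.load_caltable(obs, 'caltab_pass2/')

ct_merged = ct_a.merge([ct_b])       # gains over the union of sites and times
obs_cal = ct_merged.applycal(obs)    # apply the merged gains to the observation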
tstop = time.time()
print("\nself_cal time: %f s" % (tstop - tstart))

if caltable:  # assemble the caltable to return
    allsites = obs.tarr['site']
    caldict = scans_cal[0]
    for i in range(1, len(scans_cal)):
        row = scans_cal[i]
        for site in allsites:
            try:
                dat = row[site]
            except KeyError:
                continue
            try:
                caldict[site] = np.append(caldict[site], row[site])
            except KeyError:
                caldict[site] = dat

    caltable = ehtim.caltable.Caltable(obs.ra, obs.dec, obs.rf, obs.bw, caldict, obs.tarr,
                                       source=obs.source, mjd=obs.mjd, timetype=obs.timetype)
    out = caltable

else:  # return a calibrated observation
    arglist, argdict = obs.obsdata_args()
    arglist[4] = np.concatenate(scans_cal)
    out = ehtim.obsdata.Obsdata(*arglist, **argdict)

    if copy_closure_tables:
        out.camp = obs.camp
        out.logcamp = obs.logcamp
        out.cphase = obs.cphase

# close multiprocessing jobs
if processes != -1:
    pool.close()

return out