Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
title="VisualCanvas")
# Scenegraph version: a 10x10 grid of views, each with its own panzoom
# camera and a Line visual drawing the shared `data` array (defined
# earlier in the file, not visible in this excerpt).
scanvas = scene.SceneCanvas(show=True, keys='interactive',
                            title="SceneCanvas")
scanvas.size = 800, 600
grid = scanvas.central_widget.add_grid(margin=0)

lines = []
for i in range(10):
    # One sub-list per grid row.
    # NOTE(review): the created Line visuals are never appended to it in
    # the visible code -- confirm against the original example.
    lines.append([])
    for j in range(10):
        vb = grid.add_view(camera='panzoom', row=i, col=j)
        # Show x in [0, 100] and y in [-5, 5] with no extra margin.
        vb.camera.set_range([0, 100], [-5, 5], margin=0)
        line = scene.visuals.Line(pos=data, color='w', method='gl')
        vb.add(line)
scanvas.show()

import sys
# Only start the vispy event loop when not running in interactive mode.
if sys.flags.interactive != 1:
    app.run()
emulate_texture=emulate_texture)
# Shift the first volume 64 units along x/y so the two volumes don't overlap.
volume1.transform = scene.STTransform(translate=(64, 64, 0))
# Second volume shares the view but starts hidden; toggled elsewhere.
volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2,
emulate_texture=emulate_texture)
volume2.visible = False
# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2  # Select turntable at first
# Create an XYZAxis visual.
# NOTE(review): parented to the ViewBox widget (`view`), not `view.scene`,
# so the axis is drawn in widget coordinates -- confirm this is intended.
axis = scene.visuals.XYZAxis(parent=view)
# Scale and position the axis gizmo via an explicit affine matrix.
s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_matrix()
axis.transform = affine
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
    """Translucent 'fire' colormap for volume rendering.

    Alpha is max(0, 1.05*t - 0.05), so voxels with intensity below ~0.048
    are fully transparent and faint noise does not occlude the interior.
    """
    # NOTE(review): `max(0, ...)` relies on GLSL's implicit int->float
    # conversion (GLSL >= 1.20); `max(0.0, ...)` would be more portable --
    # confirm the targeted GLSL version before changing the shader string.
    glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
# Collapse the per-neuron color array to a single RGB color (drop alpha).
neuron_color = neuron_color[0][:3]

# Extract and plot soma
soma = utils.make_iterable(neuron.soma)
if any(soma):
    # If soma detection is messed up we might end up producing
    # dozens of soma which will freeze the kernel
    if len(soma) >= 10:
        logger.warning(f'{neuron.id}: {len(soma)} somas found.')
    for s in soma:
        # Look up the soma node's row to get its position (and radius).
        n = neuron.nodes.set_index('node_id').loc[s]
        # `soma_radius` may be a column/attribute name (str) or a number.
        r = getattr(n, neuron.soma_radius) if isinstance(neuron.soma_radius, str) else neuron.soma_radius
        # Build a sphere mesh and shift it onto the node's coordinates.
        sp = create_sphere(7, 7, radius=r)
        verts = sp.get_vertices() + n[['x', 'y', 'z']].values
        # NOTE(review): `s` is rebound from node id to the Mesh visual
        # here; harmless since the id was already consumed, but confusing.
        s = scene.visuals.Mesh(vertices=verts,
                               shading='smooth',
                               faces=sp.get_faces(),
                               color=neuron_color)
        s.ambient_light_color = vispy.color.Color('white')
        # Make visual discoverable
        s.interactive = True
        # Add custom attributes (unfreeze/freeze required by vispy nodes).
        s.unfreeze()
        s._object_type = 'neuron'
        s._neuron_part = 'soma'
        s._id = neuron.id
        s._name = str(getattr(neuron, 'name', neuron.id))
        s._object_id = object_id
        s.freeze()
# NOTE(review): the indentation of this function's body has been lost in
# this copy, and the final `if` statement is truncated (its body is not
# part of this excerpt) -- restore both from the original source.
def tetplot(points, simplices, vertex_color=None,
edge_color=None, alpha=1.0, axis=True):
""" main function for tetplot """
# Wrap the low-level TetPlotVisual into a scenegraph node class.
TetPlot = scene.visuals.create_visual_node(TetPlotVisual)
# convert data types for OpenGL
pts_float32 = points.astype(np.float32)
sim_uint32 = simplices.astype(np.uint32)
# The real-things : plot using scene
# build canvas
canvas = scene.SceneCanvas(keys='interactive', show=True)
# Add a ViewBox to let the user zoom/rotate
view = canvas.central_widget.add_view()
view.camera = 'turntable'
view.camera.fov = 50
view.camera.distance = 3
# Truncated: the body of this condition lies beyond this excerpt.
if vertex_color is not None and vertex_color.ndim == 1:
def __init__(self, world, title='Noname', show=True):
    """Create the canvas, scene graph and animation timer for *world*.

    Parameters
    ----------
    world : object
        The world/simulation object to display; must not be None.
    title : str
        Window title.
    show : bool
        Whether to show the canvas immediately.

    Raises
    ------
    ValueError
        If *world* is None.
    """
    if world is None:
        raise ValueError("World is None.")
    super().__init__(title=title, keys='interactive', size=(800, 550), show=show)

    # SceneCanvas freezes attribute creation; unfreeze while adding ours.
    self.unfreeze()
    self.viewBox = self.central_widget.add_view()
    self.viewBox.bgcolor = '#efefef'
    self.viewBox.camera = 'arcball'
    self.viewBox.camera.fov = 50
    # self.viewBox.camera.distance = 1
    self.viewBox.padding = 0
    self.axis = scene.visuals.XYZAxis(parent=self.viewBox.scene)
    # Placeholders; populated by setWorld() below.
    self.world = None
    self.worldNode = None
    self.freeze()

    self.setWorld(world)

    # Unfreeze again to wire up events and the animation timer.
    self.unfreeze()
    self.events.key_press.connect(self.on_key_press)
    self.timer = app.Timer('auto', self.on_timer)
    self.timer.start()
    self.freeze()
def set_data(self, data):
    """Set the line's vertices, coloring every point non-selected.

    Passing ``None`` clears the visual's cached geometry instead of
    forwarding to the base Line visual.
    """
    if data is not None:
        # One color entry per vertex, all in the "not selected" color.
        color = np.array([self.non_selected_color for x in range(len(data))])
        scene.visuals.Line.set_data(self, pos=data, color=color)
    else:
        # NOTE(review): nesting reconstructed -- indentation was lost in
        # this copy; confirm against the original that these lines (and
        # the final update()) belong to the `else` branch.
        color = None
        self._bounds = None
        self._changed['pos'] = True
        self._pos = None
        self.update()
# Inverse of the hypnogram stage-conversion mapping (value -> key).
self._hconvinv = {v: k for k, v in self._hconv.items()}
# Convert each entry of the color dict to a vispy-compatible color.
# NOTE(review): zip(color.keys(), color.values()) is equivalent to
# color.items() -- could be simplified.
self.color = {k: color2vb(color=i) for k, i in zip(color.keys(),
color.values())}
# Create a default (vertical placeholder) line :
pos = np.array([[0, 0], [0, 100]])
self.mesh = scene.visuals.Line(pos, name='hypnogram', method='gl',
parent=parent, width=width)
self.mesh._width = width
self.mesh.set_gl_state('translucent')
# Create a default marker (for edition):
self.edit = Markers(parent=parent)
# self.mesh.set_gl_state('translucent', depth_test=True)
self.edit.set_gl_state('translucent')
# Add grid, scaled so gridlines land every 30 time units :
self.grid = scene.visuals.GridLines(color=(.7, .7, .7, 1.),
scale=(30. * time[-1] / len(time),
1.),
parent=parent)
self.grid.set_gl_state('translucent')
# Nothing to show without a volume.
if vol is None:
return
self.vol = vol
# NOTE(review): the condition tests `slice` (which shadows the builtin)
# but the indexing uses `sliceobj` -- this looks like a name mismatch;
# confirm which variable the caller actually provides.
if slice is not None:
slicevol = self.vol[sliceobj]
else:
slicevol = self.vol
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visual on first call, otherwise just swap its data.
if self.volume is None:
self.volume = scene.visuals.Volume(slicevol, parent=self.view.scene, emulate_texture=emulate_texture)
self.volume.method = 'translucent'
else:
self.volume.set_data(slicevol)
self.volume._create_vertex_data()  # TODO: Try using this instead of slicing array?
# Translate the volume into the center of the view (axes are in a strange
# order for unknown reasons).
# NOTE(review): `map` returns a lazy iterator in Python 3; if STTransform
# expects a sequence, wrap this in tuple(...) -- verify.
scale = 3*(.0075,)  # This works for now but might be different for different resolutions
translate = map(lambda x: -scale[0]*x/2, reversed(vol.shape))
self.volume.transform = scene.STTransform(translate=translate, scale=scale)
])
# Draw the nose as disconnected line segments.
self.nose.set_data(pos=nose, connect='segments')
# ------------------ EAR ------------------
# Ear half-width/half-height, scaled from a 512-px reference layout.
we, he = csize * 10. / 512., csize * 30. / 512.
ye = l + he * np.sin(theta)
# Ear left visual :
self.earL = visuals.Line(pos=pos, name='EarLeft', **kw_line)
# Ear left data :
# NOTE(review): z column keeps the fill value 3. -- presumably a depth
# offset relative to the head outline; confirm the depth convention.
ear_l = np.full((len(theta), 3), 3., dtype=np.float32)
ear_l[:, 0] = 2 * l + we * np.cos(theta)
ear_l[:, 1] = ye
self.earL.set_data(pos=ear_l)
# Ear right visual :
self.earR = visuals.Line(pos=pos, name='EarRight', **kw_line)
# Ear right data :
ear_r = np.full((len(theta), 3), 3., dtype=np.float32)
ear_r[:, 0] = 0. + we * np.cos(theta)
ear_r[:, 1] = ye
self.earR.set_data(pos=ear_r)
# ================== CHANNELS ==================
# Channel's markers :
self.chan_markers = visuals.Markers(pos=pos, name='ChanMarkers',
parent=self.node_chan)
# Channel's text :
self.chan_text = visuals.Text(pos=pos, name='ChanText',
parent=self.node_chan, anchor_x='center',
color=chan_txt_color,
font_size=chan_size)
# NOTE(review): the body's indentation has been lost in this copy; every
# line below belongs inside __init__.
def __init__(self, color=(0, 0, 0), duration=1.0, **kwargs):
# Build a canvas-sized rectangle stimulus, hidden until triggered.
super().__init__(**kwargs)
self.circle = None
self.duration = duration
self.color = color
# NOTE(review): center=(self.w, self.h) puts the rectangle's center at
# the (w, h) corner of the canvas rather than its middle; confirm
# whether (self.w / 2, self.h / 2) was intended.
self.stimulus = vispy.scene.visuals.Rectangle(
center=(self.w, self.h),
color=self.color,
parent=self.scene,
height=self.h,
width=self.w
)
self.stimulus.visible = False