# Benchmark: four ways to build a pyrsistent map with COUNT entries.
import time

from pyrsistent import pmap

COUNT = 100000

def test_range():
    prime = 317
    return range(0, prime * COUNT, prime)

elements = {x: x for x in test_range()}

# Initialize directly from an ordinary dict
start = time.time()
m1 = pmap(elements)
print("Done initializing, time=%s s, count=%s" % (time.time() - start, COUNT))

# Build incrementally with set(); each call returns a new map
start = time.time()
m2 = pmap()
for x in test_range():
    m2 = m2.set(x, x)
print("Done setting, time=%s s, count=%s" % (time.time() - start, COUNT))
assert m1 == m2

# Batch the insertions through an evolver, then freeze the result
start = time.time()
m3 = pmap()
e3 = m3.evolver()
for x in test_range():
    e3[x] = x
m3 = e3.persistent()
print("Done evolving, time=%s s, count=%s" % (time.time() - start, COUNT))
assert m3 == m2

# Merge a whole dict at once with update(); the second update exercises
# updating an already-populated map with the same elements
start = time.time()
m4 = pmap()
m4 = m4.update(elements)
m4 = m4.update(elements)
print("Done updating, time=%s s, count=%s" % (time.time() - start, COUNT))
assert m4 == m3
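
# A minimal sketch (not part of the benchmark above) of why each step
# rebinds the name: pyrsistent maps are immutable, so set() returns a new
# map and leaves the original untouched.
from pyrsistent import pmap

m = pmap({"a": 1})
m2 = m.set("b", 2)
assert "b" not in m        # the original map is unchanged
assert m2["a"] == 1 and m2["b"] == 2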
# Fragment of a Flocker deployment test: build the desired cluster state
# for one application with a linked port and an attached volume.
application_name = random_name(self)
docker_client = DockerClient()
self.addCleanup(docker_client.remove, application_name)
deployer = P2PNodeDeployer(
    u"localhost", volume_service, docker_client,
    node_uuid=uuid4())
link = Link(alias=u"alias",
            local_port=80,
            remote_port=8080)
dataset = Dataset(
    dataset_id=unicode(uuid4()),
    metadata=pmap({"name": application_name}))
manifestation = Manifestation(dataset=dataset, primary=True)
desired_state = Deployment(nodes=frozenset([
    Node(uuid=deployer.node_uuid,
         applications=frozenset([Application(
             name=application_name,
             image=DockerImage.from_string(
                 image_name),
             links=frozenset([link]),
             volume=AttachedVolume(
                 manifestation=manifestation,
                 mountpoint=FilePath('/data'),
             ),
         )]),
         manifestations={
             manifestation.dataset_id: manifestation})]))
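
# A minimal sketch (separate from the test above) of pyrsistent's freeze()
# and thaw(), the recursive counterpart of wrapping metadata in pmap({...}).
from pyrsistent import freeze, thaw, PMap

config = freeze({"name": "myapp", "ports": [80, 8080]})
assert isinstance(config, PMap)          # dicts become PMaps, lists PVectors
plain = thaw(config)                     # recursively back to dict/list
assert plain == {"name": "myapp", "ports": [80, 8080]}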
def get_rrsets(self, zone_id):
    """
    Retrieve all the rrsets that belong to the given zone.

    @param zone_id: The zone to inspect.
    @type zone_id: L{unicode}

    @return: L{None} if the zone is not found.  Otherwise, a L{PMap}
        mapping L{RRSetKey} to L{RRSet}.
    """
    if any(zone.identifier == zone_id for zone in self.zones):
        return self.rrsets.get(zone_id, pmap())
    # You cannot interact with rrsets unless a zone exists.
    return None
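
# A hedged usage sketch of the contract above; _FakeZone and _FakeRoute53
# are hypothetical stand-ins, not part of the original code. A known zone
# with no rrsets yields an empty pmap(), an unknown zone yields None.
import attr
from pyrsistent import pmap

@attr.s
class _FakeZone(object):
    identifier = attr.ib()

@attr.s
class _FakeRoute53(object):
    zones = attr.ib(default=attr.Factory(list))
    rrsets = attr.ib(default=pmap())

    def get_rrsets(self, zone_id):
        if any(zone.identifier == zone_id for zone in self.zones):
            return self.rrsets.get(zone_id, pmap())
        return None

store = _FakeRoute53(zones=[_FakeZone(u"Z123")])
assert store.get_rrsets(u"Z123") == pmap()    # known zone, no rrsets yet
assert store.get_rrsets(u"ZMISSING") is None  # unknown zone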
def create_attached_volume(dataset_id, mountpoint, maximum_size=None,
                           metadata=pmap()):
    """
    Create an ``AttachedVolume`` instance with the supplied parameters and
    return it.

    :param unicode dataset_id: The unique identifier of the dataset of the
        attached volume.
    :param bytes mountpoint: The path at which the volume is attached.
    :param int maximum_size: An optional maximum size for the volume.
    :param metadata: Optional metadata for the dataset, as a ``PMap``.

    :return: A new ``AttachedVolume`` instance referencing a primary
        manifestation of a dataset with the given unique identifier.
    """
    return AttachedVolume(
        manifestation=Manifestation(
            dataset=Dataset(
                dataset_id=dataset_id,
                maximum_size=maximum_size,
                metadata=metadata),
            primary=True),
        mountpoint=FilePath(mountpoint))
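
# A side note as a minimal sketch: pmap() is safe as a default argument
# value, unlike a mutable {} default, because it cannot be mutated between
# calls. The tag() helper below is hypothetical.
from pyrsistent import pmap

def tag(name, metadata=pmap()):
    # set() returns a new map; the shared default is never modified
    return metadata.set("name", name)

assert tag("a") == pmap({"name": "a"})
assert tag("b") == pmap({"name": "b"})   # no state leaks from the first call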
"error. 'volumes' values must be string; got "
"type '{type}'.").format(
application=application,
type=type(volume).__name__)
)
if len(volumes) > 1:
raise ConfigurationError(
("Application '{application}' has a config "
"error. Only one volume per application is "
"supported at this time.").format(
application=application)
)
volume = AttachedVolume(
manifestation=Manifestation(
dataset=Dataset(dataset_id=dataset_id_from_name(application),
metadata=pmap({"name": application})),
primary=True),
mountpoint=FilePath(volumes[0])
)
return volume
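
# A minimal aside: unlike dict, a pmap is hashable, which is why values
# like pmap({"name": application}) can live inside frozensets and other
# immutable structures, as the Deployment/Node snippets here require.
from pyrsistent import pmap

meta = pmap({"name": "myapp"})
cache = {meta: "cached-value"}           # usable as a dict key
assert cache[pmap({"name": "myapp"})] == "cached-value"
assert meta in frozenset([meta])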
return pod.metadata.annotations[u"leastauthority.com/introducer-tub-id"]
def _storage_tub(pod):
return pod.metadata.annotations[u"leastauthority.com/storage-tub-id"]
def _introducer_port_number(pod):
return int(pod.metadata.annotations[u"leastauthority.com/introducer-port-number"])
def _storage_port_number(pod):
return int(pod.metadata.annotations[u"leastauthority.com/storage-port-number"])
def _introducer_address(pod):
return (pod.status.podIP, _introducer_port_number(pod))
def _storage_address(pod):
return (pod.status.podIP, _storage_port_number(pod))
with start_action(action_type=u"router-update:set-pods", count=len(pods)):
    new = pmap([
        (_introducer_tub(pod), (pod, _introducer_address(pod)))
        for pod in pods
    ] + [
        (_storage_tub(pod), (pod, _storage_address(pod)))
        for pod in pods
    ])
    adding = pset(new.keys()) - pset(old.keys())
    removing = pset(old.keys()) - pset(new.keys())
    for tub_id in adding:
        Message.log(event_type=u"router-update:add", pod=new[tub_id][0].metadata.name)
    for tub_id in removing:
        Message.log(event_type=u"router-update:remove", pod=old[tub_id][0].metadata.name)
    return new
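
# A minimal sketch of the delta computation above: pyrsistent sets support
# the usual set algebra, so subtracting key sets yields what to add/remove.
from pyrsistent import pmap, pset

old = pmap({"tub-a": 1, "tub-b": 2})
new = pmap({"tub-b": 2, "tub-c": 3})
adding = pset(new.keys()) - pset(old.keys())
removing = pset(old.keys()) - pset(new.keys())
assert adding == pset(["tub-c"])
assert removing == pset(["tub-a"])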
# Use persistence_service to get a Deployment for the cluster
# configuration.
deployment = self.persistence_service.get()
for node in deployment.nodes:
    for manifestation in node.manifestations.values():
        if manifestation.dataset.dataset_id == dataset_id:
            raise DATASET_ID_COLLISION
# XXX Check cluster state to determine if the given primary node
# actually exists. If not, raise PRIMARY_NODE_NOT_FOUND.
# See FLOC-1278
dataset = Dataset(
    dataset_id=dataset_id,
    maximum_size=maximum_size,
    metadata=pmap(metadata)
)
manifestation = Manifestation(dataset=dataset, primary=True)
primary_node = deployment.get_node(primary)
new_node_config = primary_node.transform(
    ("manifestations", manifestation.dataset_id), manifestation)
new_deployment = deployment.update_node(new_node_config)
saving = self.persistence_service.save(new_deployment)

def saved(ignored):
    result = api_dataset_from_dataset_and_node(dataset, primary)
    return EndpointResponse(CREATED, result)
saving.addCallback(saved)
return saving
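
# A minimal sketch of pyrsistent's transform(), used above to set a value
# at a path inside a nested immutable structure without manual rebuilding.
from pyrsistent import pmap

node = pmap({"manifestations": pmap({})})
updated = node.transform(("manifestations", "dataset-1"), "manifest")
assert updated["manifestations"]["dataset-1"] == "manifest"
assert node["manifestations"] == pmap({})   # the original is untouched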
@ivar content_sha256: L{unicode}
"""
region = attr.ib(validator=validators.instance_of(bytes))
service = attr.ib(validator=validators.instance_of(bytes))
method = attr.ib(validator=validators.instance_of(bytes))
url_context = attr.ib()
headers = attr.ib(
    default=attr.Factory(Headers),
    validator=validators.instance_of(Headers),
)
body_producer = attr.ib(
    default=None,
    validator=validators.optional(validators.provides(IBodyProducer)),
)
metadata = attr.ib(
    default=pmap(),
    convert=freeze,
    validator=validators.instance_of(PMap),
)
amz_headers = attr.ib(
    default=pmap(),
    convert=freeze,
    validator=validators.instance_of(PMap),
)
content_sha256 = attr.ib(
    default=None,
    validator=validators.optional(validators.instance_of(unicode)),
)
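
# A minimal sketch of the attrs-plus-pyrsistent pattern above: a converter
# that freezes incoming values lets callers pass plain dicts while the
# attribute is always stored as an immutable PMap. Modern attrs spells the
# keyword `converter=`; the snippet above uses the older `convert=`. The
# Request class name here is hypothetical.
import attr
from pyrsistent import freeze, pmap, PMap

@attr.s
class Request(object):
    metadata = attr.ib(
        default=pmap(),
        converter=freeze,
        validator=attr.validators.instance_of(PMap),
    )

req = Request(metadata={"k": "v"})       # plain dict accepted...
assert isinstance(req.metadata, PMap)    # ...stored frozen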
def _populate_volume_state_table():
    """
    Initialize volume state table with transitions for ``create_volume``,
    ``attach_volume``, ``detach_volume``, ``delete_volume`` operations.
    """
    O = VolumeOperations
    S = VolumeStates
    table = pmap()

    def add_flow(operation, start, transient, end, sets_attach,
                 unsets_attach):
        """
        Helper to add expected volume states for given operation.
        """
        return table.set(operation,
                         VolumeStateFlow(start_state=start,
                                         transient_state=transient,
                                         end_state=end,
                                         sets_attach=sets_attach,
                                         unsets_attach=unsets_attach))

    table = add_flow(O.CREATE, S.EMPTY, S.CREATING, S.AVAILABLE,
                     False, False)
    table = add_flow(O.ATTACH, S.AVAILABLE, S.ATTACHING, S.IN_USE,
                     True, False)