Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_overshoot(self):
    """
    Overshooting the desired capacity (the group was below desired and
    is now above it) counts as no progress: ``measure_progress`` raises
    :class:`OvershootError`.
    """
    desired = DesiredGroupState(
        server_config=pmap(),
        capacity=5,
    )
    before = GroupState(
        servers=self._create_servers(4),
        lb_connections=pset([]))
    after = GroupState(
        servers=self._create_servers(6),
        lb_connections=pset([]))
    self.assertRaises(
        OvershootError,
        measure_progress, before, after, desired)
def test_reaping_errored_servers(self):
    """
    Errored servers get removed; their disappearance does not count as
    progress toward the desired capacity.
    """
    before = GroupState(
        servers=(self._create_servers(1, state=ServerState.ACTIVE)
                 | self._create_servers(2, state=ServerState.ERROR)),
        lb_connections=pset([]))
    after = GroupState(
        servers=self._create_servers(1, state=ServerState.ACTIVE),
        lb_connections=pset([]))
    desired = DesiredGroupState(
        server_config=pmap(),
        capacity=5,
    )
    self.assertEqual(
        measure_progress(before, after, desired), 0)
continue
match = _SERVER_DOES_NOT_EXIST.match(error)
if match is not None:
del_server_id = match.group("server_id")
# consider all pairs with this server to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if node_id == del_server_id]
non_members |= pset(removed)
else:
raise UnknownBulkResponse(body)
if errors:
raise BulkErrors(errors)
elif non_members:
to_retry = pset(attempted_pairs) - non_members
return bulk_delete(to_retry) if to_retry else None
else:
raise UnknownBulkResponse(body)
def optimize_steps(rcv3_steps):
    """
    Collapse a sequence of RCv3 bulk steps into a single step.

    The resulting step's ``lb_node_pairs`` is the union of the pairs of
    every input step.
    """
    merged_pairs = pset(
        concat(step.lb_node_pairs for step in rcv3_steps))
    return [step_class(lb_node_pairs=merged_pairs)]
def decode(obj):
if isinstance(obj, ExtType):
if obj.code == TYPE_PSET:
unpacked_data = unpackb(obj.data,
use_list=False,
encoding='utf-8')
return pset(decode(item) for item in unpacked_data)
if obj.code == TYPE_PLIST:
unpacked_data = unpackb(obj.data,
use_list=False,
encoding='utf-8')
return plist(decode(item) for item in unpacked_data)
if obj.code == TYPE_PBAG:
unpacked_data = unpackb(obj.data,
use_list=False,
encoding='utf-8')
return pbag(decode(item) for item in unpacked_data)
if obj.code == TYPE_FUNC:
return decode_func(obj.data)
module_name, class_name, *data = unpackb(obj.data,
use_list=False,
encoding='utf-8')
cls = getattr(sys.modules[module_name],
def remove_node_from_lb(node):
    """
    Build the convergence step that removes a node from its load
    balancing entity.

    :param node: The node to be removed.
    :type node: :class:`ILBNode` provider

    :return: a :class:`RemoveNodesFromCLB` step for CLB nodes, a
        :class:`BulkRemoveFromRCv3` step for RCv3 nodes, and implicitly
        ``None`` for any other node type.
    """
    if isinstance(node, CLBNode):
        return RemoveNodesFromCLB(
            lb_id=node.description.lb_id,
            node_ids=pset([node.node_id]))
    if isinstance(node, RCv3Node):
        pair = (node.description.lb_id, node.cloud_server_id)
        return BulkRemoveFromRCv3(lb_node_pairs=pset([pair]))
:ivar str image_id: The ID of the image the server was launched with
:ivar str flavor_id: The ID of the flavor the server was launched with
:ivar PSet desired_lbs: An immutable mapping of load balancer IDs to lists
of :class:`CLBDescription` instances.
:var dict json: JSON dict received from Nova from which this server
is created
"""
id = attr.ib()
state = attr.ib(validator=_validate_state)
created = attr.ib()
image_id = attr.ib()
flavor_id = attr.ib()
# type(pvector()) is pvectorc.PVector, which != pyrsistent.PVector
links = attr.ib(default=attr.Factory(pvector),
validator=instance_of(type(pvector())))
desired_lbs = attr.ib(default=attr.Factory(pset),
validator=instance_of(PSet))
servicenet_address = attr.ib(default='',
validator=instance_of(string_types))
json = attr.ib(default=attr.Factory(pmap), validator=instance_of(PMap))
@classmethod
def from_server_details_json(cls, server_json):
"""
Create a :obj:`NovaServer` instance from a server details JSON
dictionary, although without any 'server' or 'servers' initial resource
key.
See
http://docs.rackspace.com/servers/api/v2/cs-devguide/content/
Get_Server_Details-d1e2623.html
pair = match.groupdict()
non_members = non_members.add((pair["lb_id"], pair["server_id"]))
continue
match = _LB_INACTIVE_PATTERN.match(error)
if match is not None:
errors.append(LBInactive(match.group("lb_id")))
continue
match = _LB_DOESNT_EXIST_PATTERN.match(error)
if match is not None:
del_lb_id = match.group("lb_id")
# consider all pairs with this LB to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if lb_id == del_lb_id]
non_members |= pset(removed)
continue
match = _SERVER_DOES_NOT_EXIST.match(error)
if match is not None:
del_server_id = match.group("server_id")
# consider all pairs with this server to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if node_id == del_server_id]
non_members |= pset(removed)
else:
raise UnknownBulkResponse(body)
if errors:
raise BulkErrors(errors)
elif non_members:
to_retry = pset(attempted_pairs) - non_members
def optimize_steps(clb_steps):
    """
    Merge CLB steps that target the same load balancer: one output step
    per LB id, with the per-step attribute values unioned into a single
    :class:`pset`.
    """
    steps_by_lb = groupby(lambda s: s.lb_id, clb_steps)
    return [
        step_class(**{
            'lb_id': lb_id,
            attr_name: pset(concat(getattr(s, attr_name) for s in steps))})
        # .iteritems() is Python-2-only and raises AttributeError on
        # Python 3; .items() iterates identically on both.
        for lb_id, steps in steps_by_lb.items()
    ]
match = _LB_DOESNT_EXIST_PATTERN.match(error)
if match is not None:
errors.append(NoSuchLBError(match.group("lb_id")))
continue
match = _SERVER_UNPROCESSABLE.match(error)
if match is not None:
errors.append(ServerUnprocessableError(match.group("server_id")))
else:
raise UnknownBulkResponse(body)
if errors:
raise BulkErrors(errors)
elif exists:
to_retry = pset(attempted_pairs) - exists
return bulk_add(to_retry) if to_retry else None
else:
raise UnknownBulkResponse(body)