How to use storage.groups in the storage module

To help you get started, we’ve selected a few storage.groups examples, based on how it is used in public projects such as yandex/mastermind.

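Across the examples below, storage.groups behaves like a mapping keyed by group id: code tests membership with the in operator, looks a Group up by id with square brackets, and iterates over it (or over its .keys()) to get Group objects. Here is a minimal sketch of those patterns, assuming mastermind's storage module is importable and using 42 as a stand-in group id:

import storage  # mastermind's storage module (assumed importable)

group_id = 42  # hypothetical group id

# membership is tested with the plain id (see node_backend_defrag.py, create_group.py below)
if group_id in storage.groups:
    # indexing by id returns a Group object
    group = storage.groups[group_id]
    print(group.group_id, group.status, group.couple)

# iterating storage.groups (or storage.groups.keys()) yields Group objects
for group in storage.groups:
    for nb in group.node_backends:
        pass  # inspect node backends, as the planner examples below do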

yandex/mastermind: src/cocaine-app/balancer.py
def attach_groupset_to_couple(self, request):
        if 'couple' not in request:
            raise ValueError('Request should contain "couple" field')
        couple = storage.groupsets.get_couple(request['couple'])

        if 'groupset' not in request:
            raise ValueError('Request should contain "groupset" field')
        group_ids = [int(g) for g in request['groupset'].split(':')]
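        # resolve each group id to its Group object via the storage.groups mapping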
        groups = [storage.groups[gid] for gid in group_ids]

        if 'type' not in request:
            raise ValueError('Request should contain groupset "type" field')

        if 'settings' not in request:
            raise ValueError('Request should contain "settings" field')

        if request['type'] == 'lrc':
            if 'part_size' not in request['settings']:
                raise ValueError('Lrc groupset requires "part_size" setting')
            if 'scheme' not in request['settings']:
                raise ValueError('Lrc groupset requires "scheme" setting')

        Groupset = storage.groupsets.make_groupset(
            type=request['type'],
            settings=request['settings'],

yandex/mastermind: src/cocaine-app/jobs/tasks/node_backend_defrag.py
def execute(self, processor):
        # checking if task still applicable
        logger.info('Job {0}, task {1}: checking group {2} and node backend {3} '
                    'consistency'.format(
                        self.parent_job.id, self.id, self.group, self.node_backend))

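        # self.group and self.node_backend are plain ids; verify both exist before indexing into storage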
        if self.group not in storage.groups:
            raise JobBrokenError('Group {0} is not found'.format(self.group))
        if self.node_backend not in storage.node_backends:
            raise JobBrokenError('Node backend {0} is not found'.format(self.node_backend))

        group = storage.groups[self.group]
        node_backend = storage.node_backends[self.node_backend]

        if group.couple is None:
            raise JobBrokenError('Task {0}: group {1} does not belong '
                                 'to any couple'.format(self, self.group))

        if group.couple.status not in storage.GOOD_STATUSES:
            raise RetryError(10, JobBrokenError('Task {}: group {} couple status is {}'.format(
                self, self.group, group.couple.status)))

        if node_backend not in group.node_backends:
            raise JobBrokenError('Task {0}: node backend {1} does not belong to '
                                 'group {2}'.format(self, self.node_backend, self.group))

        super(NodeBackendDefragTask, self).execute(processor)

yandex/mastermind: src/cocaine-app/planner/move_planner.py
def current(busy_group_ids):
        state = StorageState()

        good_uncoupled_groups = set(infrastructure.get_good_uncoupled_groups(
            max_node_backends=1,
            skip_groups=busy_group_ids,
        ))

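        # note: iterating storage.groups.keys() yields Group objects, not ids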
        for group in storage.groups.keys():

            for nb in group.node_backends:
                if nb.stat is None:
                    continue
                try:
                    dc = nb.node.host.dc
                    hostname = nb.node.host.hostname
                except CacheUpstreamError:
                    logger.warn('Skipping host {} because of cache failure'.format(nb.node.host))
                    continue

                host_state = state.state[dc].hosts.setdefault(
                    nb.node.host,
                    HostState(state.state[dc], nb.node.host, hostname)
                )

yandex/mastermind: src/cocaine-app/jobs/tasks/create_group.py
def execute(self, processor):
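        # creating a group whose id already exists in storage.groups is an error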
        if self.group in storage.groups:
            raise JobBrokenError(
                'Group {group_id} already exists'.format(
                    group_id=self.group,
                )
            )
        try:
            minion_response = processor.minions_monitor.create_group(
                self.host,
                self.params,
                files=inventory.get_new_group_files(
                    group_id=self.group,
                    total_space=self.params['total_space'],
                )
            )
        except RuntimeError as e:
            raise RetryError(self.attempts, e)

yandex/mastermind: src/cocaine-app/cache.py
def update_cache_groups(self):
        new_groups = {}
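        # keep only cache-type groups that are currently coupled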
        for group in storage.groups:
            if (group.type != storage.Group.TYPE_CACHE or
                    group.status != storage.Status.COUPLED):
                continue
            new_groups[group] = group

        new_cache_groups = {}
        for group in new_groups:
            cache_group = CacheGroup(group)
            new_cache_groups[cache_group] = cache_group

        new_groups_units = infrastructure.groups_units(new_groups.keys(),
                                                       self.node_types)

        new_executing_tasks = cache_task_manager.list()
        for task in new_executing_tasks:
            group_id = task['group']

yandex/mastermind: src/cocaine-app/planner.py
def __dcs():
        dcs = set()
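        # collect every DC that hosts a backend of any known group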
        for group in storage.groups:
            for nb in group.node_backends:
                try:
                    dcs.add(nb.node.host.dc)
                except CacheUpstreamError:
                    continue
        return dcs

yandex/mastermind: src/cocaine-app/jobs/convert_to_lrc_groupset.py
if self.determine_data_size:
            tasks.append(
                self._determine_data_size_task(
                    src_storage=self.src_storage,
                    src_storage_options=self.src_storage_options,
                    part_size=self.part_size,
                    scheme=self.scheme,
                    trace_id=trace_id,
                )
            )
            # other tasks will be created on data size task completion
            self.tasks = tasks
            return

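        # self.groups holds one list of group ids per destination groupset; resolve them to Group objects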
        dst_groups = [
            [storage.groups[group_id] for group_id in groupset_group_ids]
            for groupset_group_ids in self.groups
        ]
        tasks.extend(
            self._lrc_convert_tasks(
                dst_groups=dst_groups,
                src_storage=self.src_storage,
                src_storage_options=self.src_storage_options,
                part_size=self.part_size,
                scheme=self.scheme,
                trace_id=trace_id,
            )
        )

        couple_ids = self._generate_couple_ids(count=len(dst_groups))

        tasks.extend(

yandex/mastermind: src/cocaine-app/statistics.py
'total_couples': 0,
                'uncoupled_groups': 0,

                'total_keys': 0,
                'removed_keys': 0,
            }

        by_dc = defaultdict(default)
        by_ns = defaultdict(default)
        by_ns_dc = defaultdict(lambda: defaultdict(default))

        dc_couple_map = defaultdict(set)
        ns_couple_map = defaultdict(set)
        ns_dc_couple_map = defaultdict(lambda: defaultdict(set))

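        # sort so that coupled groups are processed before uncoupled ones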
        for group in sorted(storage.groups, key=lambda g: not bool(g.couple)):

            try:

                couple = (group.couple
                          if group.couple else
                          str(group.group_id))

                ns = group.couple and group.couple.namespace.id or None

                for node_backend in group.node_backends:

                    try:
                        dc = node_backend.node.host.dc
                    except CacheUpstreamError:
                        continue

yandex/mastermind: src/cocaine-app/lrc_builder.py
def _build_lrc_tree(self):
        node_types = (self.DC_NODE_TYPE,) + ('host',)
        tree, nodes = infrastructure.infrastructure.filtered_cluster_tree(node_types)

        # NOTE:
        # lrc groups that are currently being processed by running jobs
        # are not accounted here because there is no easy and
        # straightforward way to do this. This is not crucial
        # at the moment.
        lrc_types = (storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1, storage.Group.TYPE_LRC_8_2_2_V1)
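        # lazily select groups of the relevant LRC types from storage.groups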
        lrc_groups = (
            group
            for group in storage.groups.keys()
            if group.type in lrc_types
        )

        # TODO: rename, nothing about "ns" here
        infrastructure.infrastructure.account_ns_groups(nodes, lrc_groups)
        infrastructure.infrastructure.update_groups_list(tree)

        return tree, nodes

yandex/mastermind: src/cocaine-app/jobs/backend_manager.py
def _involved_groups(self):
        group_ids = set([self.group])
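        # if the group is known to storage, its couple members are involved as well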
        if self.group in storage.groups:
            group = storage.groups[self.group]
            if group.couple:
                group_ids.update(g.group_id for g in group.coupled_groups)
        return group_ids