How to use the psyplot.compat.pycompat.OrderedDict class in psyplot

To help you get started, we’ve selected a few psyplot examples based on popular ways OrderedDict is used in public projects.

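Before the examples, a note on what this object is: psyplot.compat.pycompat is a small Python 2/3 compatibility shim, and its OrderedDict is (on modern Pythons) simply collections.OrderedDict. A minimal sketch of the import and the ordering guarantee the snippets below rely on:

from psyplot.compat.pycompat import OrderedDict

# insertion order is preserved, which the examples below depend on
d = OrderedDict([('first', 1), ('second', 2)])
d['third'] = 3
print(list(d))  # ['first', 'second', 'third']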

github Chilipp / psyplot / tests / test_plotter.py
import os.path as osp

import psyplot.config as psyc
import psyplot.plotter as psyp
from psyplot.compat.pycompat import OrderedDict
try:
    from textwrap import indent
except ImportError:
    def indent(text, prefix, predicate=None):  # python2
        return '\n'.join(prefix + s if predicate is None or predicate(s) else s
                         for s in text.splitlines())


docstrings = psyp.docstrings


psyc.setup_logging(osp.join(osp.dirname(__file__), 'logging.yml'))


results = OrderedDict()


class TestFormatoption(psyp.Formatoption):

    removed = False

    @property
    def default(self):
        try:
            return super(TestFormatoption, self).default
        except KeyError:
            return ''

    _validate = str

    def update(self, value):
        # minimal body so the snippet runs; the original snippet is
        # truncated at this point
        results[self.key] = value
github Chilipp / psyplot / psyplot / project.py
    @property
    def axes(self):
        """A mapping from axes to data objects with the plotter in this axes
        """
        # self[1:0] is an empty slice of this project, i.e. an empty
        # sub-project of the same type, used as the default value
        ret = utils.DefaultOrderedDict(lambda: self[1:0])
        for arr in self:
            if arr.psy.plotter is not None:
                ret[arr.psy.plotter.ax].append(arr)
        return OrderedDict(ret)
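
A short, hedged usage sketch (the file and variable names here are made up, and the mapplot call assumes the psy-maps plugin): because axes returns an OrderedDict keyed by matplotlib axes, you can iterate it to see which data arrays share an axes.

import psyplot.project as psy

# hypothetical NetCDF file and variable; requires the psy-maps plugin
p = psy.plot.mapplot('my_data.nc', name='t2m')
for ax, arrays in p.axes.items():
    print(ax, [arr.psy.arr_name for arr in arrays])
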
github ARVE-Research / gwgen / gwgen / parameterization.py
    @classmethod
    def calculate_probabilities(cls, df):
        """Calculate the transition probabilities for one month across multiple
        years"""
        # group by year and month so that day counts never cross a month
        # boundary; the per-group counts are summed afterwards, i.e. the
        # months are not evaluated separately
        g = df.groupby(level=['year', 'month'])
        if g.ngroups > 10:
            dfs = g.apply(cls.calc_ndays).sum()
            return pd.DataFrame.from_dict(OrderedDict([
                ('p11', [dfs.np11 / dfs.nwet if dfs.nwet > 0 else 0]),
                ('p01', [dfs.np01 / dfs.ndry if dfs.ndry > 0 else 0]),
                ('p001', [dfs.np001 / dfs.np001_denom
                          if dfs.np001_denom > 0 else 0]),
                ('p101', [dfs.np101 / dfs.np101_denom
                          if dfs.np101_denom > 0 else 0]),
                ('wetf', [dfs.nwet / dfs.n if dfs.n > 0 else 0])]))
        else:
            return pd.DataFrame.from_dict(
                {'p11': [], 'p01': [], 'p001': [], 'p101': [], 'wetf': []})
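
What the OrderedDict buys here is a fixed column order in the resulting DataFrame; on the Python 2 versions this code supports, a plain dict would not guarantee that p11 comes before p01. A standalone sketch:

import pandas as pd
from collections import OrderedDict

# columns come out in insertion order
df = pd.DataFrame.from_dict(OrderedDict([('p11', [0.4]), ('p01', [0.2])]))
print(list(df.columns))  # ['p11', 'p01']
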
github ARVE-Research / gwgen / gwgen / parameterization.py
    def make_run_config(self, sp, info, full_nml):
        """
        Configure with the wet/dry cloud - mean cloud correlation

        Parameters
        ----------
        %(Parameterizer.make_run_config.parameters)s
        """
        nml = full_nml.setdefault('weathergen_ctl', OrderedDict())
        states = ['wet', 'dry']
        for state in states:
            # linear fits of means
            t = ''
            vname = '%swind_%s' % (t, state)
            nml_name = 'wind%s_%s' % ("_sd" if t == 'sd_' else '', state[:1])
            info[vname] = vinfo = {}
            plotter = sp(name=vname).plotters[0]
            for key in ['rsquared', 'slope', 'intercept']:
                vinfo[key] = float(plotter.plot_data[1].attrs[key])
            nml[nml_name + '1'] = float(
                plotter.plot_data[1].attrs.get('intercept', 0))
            nml[nml_name + '2'] = float(
                plotter.plot_data[1].attrs.get('slope'))
            # polynomial fits of std
            t = 'sd_'
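
The full_nml.setdefault('weathergen_ctl', OrderedDict()) line above is the usual idiom for building nested namelist mappings: it returns the existing sub-mapping if one is there, and otherwise inserts (and returns) a fresh OrderedDict. A minimal sketch with hypothetical keys:

from collections import OrderedDict

full_nml = {}
nml = full_nml.setdefault('weathergen_ctl', OrderedDict())
nml['wind_w1'] = 0.1  # hypothetical namelist entry
assert full_nml['weathergen_ctl']['wind_w1'] == 0.1
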
github ARVE-Research / gwgen / gwgen / evaluation.py
    def make_run_config(self, sp, info):
        for orig in self.names:
            info[orig] = d = OrderedDict()
            for plotter in sp(standard_name=orig).plotters:
                d[plotter.data.pctl if plotter.data.name.startswith('all') else
                  int(plotter.data.pctl.values)] = pctl_d = OrderedDict()
                for key in ['rsquared', 'slope', 'intercept']:
                    val = plotter.plot_data[1].attrs.get(key)
                    if val is not None:
                        pctl_d[key] = float(val)
        return info
github Chilipp / psyplot / psyplot / project.py
    @property
    def figs(self):
        """A mapping from figures to data objects with the plotter in this
        figure"""
        # same pattern as `axes` above, but keyed by the matplotlib figure
        # of each plotter's axes
        ret = utils.DefaultOrderedDict(lambda: self[1:0])
        for arr in self:
            if arr.psy.plotter is not None:
                ret[arr.psy.plotter.ax.get_figure()].append(arr)
        return OrderedDict(ret)
github Chilipp / psyplot / psyplot / utils.py
"""Miscallaneous utility functions for the psyplot package"""
import re
import six
from difflib import get_close_matches
from itertools import chain
from psyplot.compat.pycompat import OrderedDict, filterfalse
from psyplot.docstring import dedent, docstrings


class DefaultOrderedDict(OrderedDict):
    """An ordered :class:`collections.defaultdict`

    Taken from http://stackoverflow.com/a/6190500/562769"""
    def __init__(self, default_factory=None, *a, **kw):
        if (default_factory is not None and
                not callable(default_factory)):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __missing__(self, key):
        # standard recipe from the Stack Overflow answer cited above
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value
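
A short usage sketch: like collections.defaultdict, a missing key is created on first access via the factory, but iteration follows insertion order.

from psyplot.utils import DefaultOrderedDict

d = DefaultOrderedDict(list)
d['b'].append(1)  # 'b' is created on first access
d['a'].append(2)
print(list(d))    # ['b', 'a'], insertion order, not sorted
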
github ARVE-Research / gwgen / gwgen / evaluation.py
See also the :attr:`QuantileEvaluation.default_config` attribute

    Parameters
    ----------
    %(QuantileConfig.parameters)s"""
    return QuantileConfig(quantiles, *default_ks_config(*args, **kwargs))


class QuantileEvaluation(Evaluator):
    """Evaluator to evaluate specific quantiles"""

    name = 'quants'

    summary = 'Compare the quantiles of simulation and observation'

    names = OrderedDict([
        ('prcp', {'long_name': 'Precipitation',
                  'units': 'mm'}),
        ('tmin', {'long_name': 'Min. Temperature',
                  'units': 'degC'}),
        ('tmax', {'long_name': 'Max. Temperature',
                  'units': 'degC'}),
        ('mean_cloud', {'long_name': 'Cloud fraction',
                        'units': '-'}),
        ('wind', {'long_name': 'Wind Speed',
                  'units': 'm/s'})
        ])

    @property
    def all_variables(self):
        return [[v + '_ref', v + '_sim'] for v in self.names]
github ARVE-Research / gwgen / gwgen / preproc.py
    def run(self, info):

        self.__setup = False

        if self.setup_from == 'scratch':
            df = self.data
            # we may use a parallel setup which requires a weighted average
            g = df.groupby(level='station_id')
            total_counts = g.counts.transform("sum")
            df['lat'] = df.counts / total_counts * df.lat
            df['lon'] = df.counts / total_counts * df.lon
            df['lat_std'] = (df.counts / total_counts) * df.lat_std ** 2
            df['lon_std'] = (df.counts / total_counts) * df.lon_std ** 2
            eecra = g.agg(OrderedDict([
                    ('lat', 'sum'), ('lon', 'sum'), ('lat_std', 'sum'),
                    ('lon_std', 'sum'),
                    ('year', ('min', 'max')), ('counts', 'sum')]))
            eecra.columns = ['lat', 'lon', 'lat_std', 'lon_std',
                             'firstyear', 'lastyear', 'counts']
            eecra[['lat_std', 'lon_std']] **= 0.5

            use_xstall = self.task_config.xstall

            if use_xstall:
                to_replace = self.xstall_df
                # keep only matching stations
                to_replace = to_replace.join(eecra[[]], how='inner')
                eecra.loc[to_replace.index, ['lat', 'lon']] = to_replace
            self.data = eecra
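
The g.agg(OrderedDict([...])) call above relies on the ordered spec so that the aggregated columns line up with the renamed column list that follows it. A toy, standalone version of the pattern:

import pandas as pd
from collections import OrderedDict

df = pd.DataFrame({'station_id': [1, 1, 2],
                   'lat': [10.0, 11.0, 20.0],
                   'counts': [3, 1, 5]})
g = df.groupby('station_id')
# the ordered mapping keeps the output columns in a known order
out = g.agg(OrderedDict([('lat', 'sum'), ('counts', 'sum')]))
print(out.columns.tolist())  # ['lat', 'counts']
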
github ARVE-Research / gwgen / gwgen / parameterization.py
for n in df.index.names),
                                exc_info=True)
                            tmp = tempfile.NamedTemporaryFile(
                                suffix='.csv').name
                            df.to_csv(tmp)
                            self.logger.critical('Data stored in %s', tmp)
                        else:
                            # find the crossover point where the gamma and
                            # pareto distributions should match this follows
                            # Neykov et al. (Nat. Hazards Earth Syst. Sci.,
                            # 14, 2321-2335, 2014) bottom of page 2330 (left
                            # column)
                            pscale[i] = (1 - stats.gamma.cdf(
                                thresh, gshape, scale=gscale))/stats.gamma.pdf(
                                    thresh, gshape, scale=gscale)
        return pd.DataFrame.from_dict(OrderedDict([
            ('n', np.repeat(n, N)), ('ngamma', np.repeat(ngamma, N)),
            ('mean_wet', np.repeat(vals.mean(), N)),
            ('ngp', ngp), ('thresh', threshs),
            ('gshape', np.repeat(gshape, N)),
            ('gscale', np.repeat(gscale, N)), ('pshape', pshape),
            ('pscale', pscale),
            ('pscale_orig', pscale_orig)])).set_index('thresh')