How to use the xarray.DataArray function in xarray

To help you get started, we’ve selected a few xarray.DataArray examples based on popular ways it is used in public projects.

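At its core, xr.DataArray wraps a plain NumPy array with named dimensions, coordinate labels, and free-form metadata (attrs). A minimal sketch, with purely illustrative names and values:

import numpy as np
import xarray as xr

# Wrap a plain NumPy array with named dimensions, labelled
# coordinates, and free-form metadata.
temps = xr.DataArray(
    np.random.rand(3, 4),
    dims=('time', 'city'),
    coords={'time': range(3), 'city': ['NYC', 'LA', 'SF', 'CHI']},
    attrs={'Desc': 'hypothetical temperatures', 'units': 'degC'},
)
print(temps.sel(city='LA'))  # label-based selection via the coords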

github joonro / BLP-Python / tests / test_BLP.py
            dims=['markets', 'nsiminds', 'vars'],
            attrs={'Desc': 'random draws given for the estimation.'}
            )

        s_jt = ps2['s_jt'].reshape(-1, )  # s_jt for nmkts * nbrands
        self.s_jt = xr.DataArray(
            s_jt.reshape((nmkts, nbrands)),
            coords=[range(nmkts), range(nbrands),],
            dims=['markets', 'brands'],
            attrs={'Desc': 'Market share of each brand.'}
            )

        self.ans = ps2['ans'].reshape(-1, )

        Z = np.c_[Z_org[:, 1:], X1[:, 1:]]
        self.Z = xr.DataArray(
            Z.reshape((self.nmkts, self.nbrands, -1)),
            coords=[range(nmkts), range(nbrands), range(Z.shape[-1])],
            dims=['markets', 'brands', 'vars'],
            attrs={'Desc': 'Instruments'}
            )
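The pattern above passes coords as a list with one entry per dimension, matched positionally against dims. The same calling convention in miniature (shapes and names are illustrative):

import numpy as np
import xarray as xr

nmkts, nbrands = 2, 3
shares = np.random.rand(nmkts * nbrands)

# coords given as a list must line up positionally with dims
s_jt = xr.DataArray(
    shares.reshape((nmkts, nbrands)),
    coords=[range(nmkts), range(nbrands)],
    dims=['markets', 'brands'],
    attrs={'Desc': 'Market share of each brand.'},
)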
github ceholden / yatsm / tests / structural_break / test__cusum.py
def test_cusum_OLS(test_data, strucchange_cusum_OLS):
    """ Tested against strucchange 1.5.1
    """
    y = test_data.pop('y')
    X = test_data
    # Test sending pandas
    result = cu.cusum_OLS(X, y)
    assert np.allclose(result.score, strucchange_cusum_OLS[0])
    assert np.allclose(result.pvalue, strucchange_cusum_OLS[1])

    # And ndarray and xarray
    result = cu.cusum_OLS(X.values, xr.DataArray(y, dims=['time']))
    assert np.allclose(result.score, strucchange_cusum_OLS[0])
    assert np.allclose(result.pvalue, strucchange_cusum_OLS[1])
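The last call is worth noting: xr.DataArray(y, dims=['time']) names a dimension without supplying any coordinate labels, which is enough for code that only cares about dimension names. A small sketch of that form (names illustrative):

import numpy as np
import xarray as xr

y = np.arange(10.0)
da = xr.DataArray(y, dims=['time'])   # named dimension, no coordinate labels needed
assert np.allclose(np.asarray(da), y)  # the underlying values are unchanged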
github opendatacube / datacube-stats / tests / test_statistics.py
def test_new_med_std():
    stdndwi = NormalisedDifferenceStats('green', 'nir', 'ndwi', stats=['std'])
    arr = np.random.uniform(low=-1, high=1, size=(5, 100, 100))
    data_array_1 = xr.DataArray(arr, dims=('time', 'y', 'x'),
                                coords={'time': list(range(5))}, attrs={'crs': 'Fake CRS'})
    arr = np.random.uniform(low=-1, high=1, size=(5, 100, 100))
    data_array_2 = xr.DataArray(arr, dims=('time', 'y', 'x'),
                                coords={'time': list(range(5))}, attrs={'crs': 'Fake CRS'})
    dataset = xr.Dataset(data_vars={'green': data_array_1, 'nir': data_array_2}, attrs={'crs': 'Fake CRS'})
    result = stdndwi.compute(dataset)

    assert isinstance(result, xr.Dataset)
    assert 'ndwi_std' in result.data_vars
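The test builds two DataArrays that share dims and coords and combines them into a Dataset, which any function expecting named variables can then consume. The same flow reduced to its essentials (sizes and names are illustrative):

import numpy as np
import xarray as xr

dims = ('time', 'y', 'x')
coords = {'time': list(range(5))}
green = xr.DataArray(np.random.uniform(-1, 1, (5, 10, 10)), dims=dims, coords=coords)
nir = xr.DataArray(np.random.uniform(-1, 1, (5, 10, 10)), dims=dims, coords=coords)

# Variables sharing dims/coords combine cleanly into one Dataset
ds = xr.Dataset(data_vars={'green': green, 'nir': nir})
ndwi = (ds['green'] - ds['nir']) / (ds['green'] + ds['nir'])  # normalised difference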
github ECCO-GROUP / ECCOv4-py / ecco_v4_py / calc_meridional_trsp.py
        coords.update( {'time': cds['time'].values} )
        dims += ('time',)
        zeros = np.zeros((len(cds['time'].values),
                          len(cds['k'].values),
                          len(lat_vals)))
    else:
        zeros = np.zeros((len(cds['k'].values),
                          len(lat_vals)))


    coords.update( {'k': cds['k'].values} )
    coords.update( {'lat': lat_vals} )

    dims += ('k','lat')

    xda = xr.DataArray(data=zeros, coords=coords, dims=dims)

    # Convert to dataset to add Z coordinate
    xds = xda.to_dataset(name='trsp_z')
    xds['Z'] = cds['Z']
    xds = xds.set_coords('Z')

    return xds
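The helper above preallocates a zero array, wraps it as a DataArray, converts it to a Dataset, and finally promotes an extra variable to a coordinate with set_coords. A condensed sketch of that flow (dimension sizes and the depth values are illustrative):

import numpy as np
import xarray as xr

k_vals, lat_vals = np.arange(4), np.linspace(-60, 60, 7)
xda = xr.DataArray(
    np.zeros((len(k_vals), len(lat_vals))),
    coords={'k': k_vals, 'lat': lat_vals},
    dims=('k', 'lat'),
)

# Promote the array to a Dataset, attach Z, and mark it as a coordinate
xds = xda.to_dataset(name='trsp_z')
xds['Z'] = ('k', -np.arange(4) * 10.0)  # hypothetical depth values
xds = xds.set_coords('Z')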
github calliope-project / calliope / calliope / core.py
        md = self.get_metadata()
        md.columns.name = 'cols_metadata'
        md.index.name = 'y'
        self.solution = (self.solution.merge(xr.DataArray(md)
                                               .to_dataset(name='metadata')))
        # Add summary
        summary = self.get_summary()
        summary.columns.name = 'cols_summary'
        summary.index.name = 'techs'
        self.solution = (self.solution.merge(xr.DataArray(summary)
                                               .to_dataset(name='summary')))
        # Add groups
        groups = self.get_groups()
        groups.columns.name = 'cols_groups'
        groups.index.name = 'techs'
        self.solution = (self.solution.merge(xr.DataArray(groups)
                                               .to_dataset(name='groups')))
        # Add shares
        shares = self.get_shares(groups)
        shares.columns.name = 'cols_shares'
        shares.index.name = 'techs'
        self.solution = (self.solution.merge(xr.DataArray(shares)
                                               .to_dataset(name='shares')))
        # Add time resolution
        self.solution = (self.solution
                             .merge(self.data['_time_res']
                                        .copy(deep=True)
                                        .to_dataset(name='time_res')))
        # reorganise variable coordinates
        self.solution = self.solution.transpose('y', 'techs', 'x', 'c', 'k',
            't','cols_groups', 'cols_metadata', 'cols_shares', 'cols_summary')
        # Add model and run configuration
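Passing a pandas DataFrame straight to xr.DataArray, as calliope does here, turns the frame's index and columns into the two dimensions; naming them first controls the resulting dimension names. A minimal sketch (frame contents are illustrative):

import pandas as pd
import xarray as xr

md = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['tech1', 'tech2'])
md.columns.name = 'cols_metadata'  # becomes the column dimension name
md.index.name = 'y'                # becomes the row dimension name

solution = xr.Dataset()
solution = solution.merge(xr.DataArray(md).to_dataset(name='metadata'))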
github jenfly / atmos-tools / atmos / constants.py
const['R_gas'] = xray.DataArray(
    8.3143,
    attrs={'name' : 'Universal gas constant',
           'units' : 'J K^-1 mol^-1',
           'ref' : hart
          })


const['R_air'] = xray.DataArray(
    287.,
    attrs={'name' : 'Gas constant for dry air',
           'units' : 'J K^-1 kg^-1',
           'ref' : hart
          })

const['mm_air'] = xray.DataArray(
    28.97,
    attrs={'name' : 'Mean molar mass of dry air',
           'units' : 'g mol^-1',
           'ref' : hart
          })

const['density_air'] = xray.DataArray(
    1.293,
    attrs={'name' : 'Density of dry air at 0C and 101325 Pa',
           'units' : 'kg m^-3',
           'ref' : hart
          })


const['Cp'] = xray.DataArray(
    1004.,
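This module imports the library under its old name, xray (the project was later renamed xarray); the code is otherwise identical. A scalar DataArray like these is a handy carrier for a constant together with its metadata. A sketch in the same style, with a constant chosen for illustration:

import xarray as xr

const = {}
const['g'] = xr.DataArray(
    9.80665,
    attrs={'name': 'Standard gravity',
           'units': 'm s^-2'},
    )
print(float(const['g']), const['g'].attrs['units'])  # 0-d DataArrays cast to float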
github OGGM / oggm / oggm / core / flowline.py
        # to datasets
        run_ds = []
        for (s, w) in zip(sects, widths):
            ds = xr.Dataset()
            ds.attrs['description'] = 'OGGM model output'
            ds.attrs['oggm_version'] = __version__
            ds.attrs['calendar'] = '365-day no leap'
            ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
                                                 gmtime())
            ds.coords['time'] = yearly_time
            ds['time'].attrs['description'] = 'Floating hydrological year'
            varcoords = OrderedDict(time=('time', yearly_time),
                                    year=('time', yearly_time))
            ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'),
                                            coords=varcoords)
            ds['ts_width_m'] = xr.DataArray(w, dims=('time', 'x'),
                                            coords=varcoords)
            run_ds.append(ds)

        # write output?
        if run_path is not None:
            encode = {'ts_section': {'zlib': True, 'complevel': 5},
                      'ts_width_m': {'zlib': True, 'complevel': 5},
                      }
            for i, ds in enumerate(run_ds):
                ds.to_netcdf(run_path, 'a', group='fl_{}'.format(i),
                             encoding=encode)
        if diag_path is not None:
            diag_ds.to_netcdf(diag_path)

        return run_ds, diag_ds
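Two details in the OGGM code are easy to miss: several coordinate variables ('time' and 'year') can hang off the same 'time' dimension via name -> (dims, values) pairs, and per-variable compression is requested through the encoding argument of to_netcdf. A condensed sketch (values and the file name are illustrative):

import numpy as np
import xarray as xr

yearly_time = np.arange(2000, 2005, dtype=float)
s = np.random.rand(len(yearly_time), 3)  # hypothetical section areas

# Several coordinate variables can share the 'time' dimension
varcoords = {'time': ('time', yearly_time),
             'year': ('time', yearly_time)}
ds = xr.Dataset()
ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'), coords=varcoords)

# Per-variable compression (needs the netCDF4 or h5netcdf backend)
ds.to_netcdf('run.nc', encoding={'ts_section': {'zlib': True, 'complevel': 5}})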
github wy2136 / xlearn / xlearn / decomposition.py
def inverse_transform(self, da):
        '''xarray version of sklearn.decomposition.PCA.inverse_transform'''
        # Compatible with sklearn.decomposition.PCA.inverse_transform when the input data are not a DataArray
        da = da.copy()
        if not isinstance(da, xr.DataArray):
            X = da
            return super().inverse_transform(X)
        
        # parameters
        pcs = da.data
        n_samples = pcs.shape[0]
        eofs_da = self.components_da
        grid_shape = eofs_da.shape[1:]
        n_grids = np.prod(grid_shape)
        valid_grids = ~np.isnan( eofs_da.sel(mode=0).data.reshape((n_grids,)) )
        
        
        # call the sklearn version model
        X = np.empty((n_samples, n_grids)) * np.nan
        X[:, valid_grids] = super().inverse_transform(pcs)
        X = X.reshape((n_samples,) + grid_shape )
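The wrapper flattens the grid, drops NaN cells, runs the sklearn method on the valid columns only, and scatters the result back into a full-size array before reshaping. That masking idiom on its own (shapes and names are illustrative):

import numpy as np
import xarray as xr

n_samples, (n_lat, n_lon) = 4, (2, 3)
n_grids = n_lat * n_lon
valid_grids = np.array([True, True, False, True, False, True])
recon = np.random.rand(n_samples, valid_grids.sum())  # stand-in for the sklearn output

# Start from an all-NaN matrix, then fill only the valid grid cells
X = np.full((n_samples, n_grids), np.nan)
X[:, valid_grids] = recon
da = xr.DataArray(X.reshape((n_samples, n_lat, n_lon)), dims=('time', 'lat', 'lon'))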
github mathause / regionmask / regionmask / core / mask.py
def _create_xarray_2D(mask, lon_or_obj, lat, lon_name, lat_name):
    """create an xarray DataArray for 2D fields"""

    lon2D, lat2D = _extract_lon_lat(lon_or_obj, lat, lon_name, lat_name)

    if isinstance(lon2D, xr.DataArray):
        dim1D_names = lon2D.dims
        dim1D_0 = lon2D[dim1D_names[0]]
        dim1D_1 = lon2D[dim1D_names[1]]
    else:
        dim1D_names = (lon_name + "_idx", lat_name + "_idx")
        dim1D_0 = np.arange(np.array(lon2D).shape[0])
        dim1D_1 = np.arange(np.array(lon2D).shape[1])

    # dict with the coordinates
    coords = {
        dim1D_names[0]: dim1D_0,
        dim1D_names[1]: dim1D_1,
        lat_name: (dim1D_names, lat2D),
        lon_name: (dim1D_names, lon2D),
    }
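For curvilinear grids, the 2D latitude and longitude arrays are attached as non-dimension coordinates: each coords entry maps a name to a (dims, values) pair spanning both index dimensions. A minimal sketch (grid values and names are illustrative):

import numpy as np
import xarray as xr

mask = np.zeros((2, 3))
lat2D = np.array([[10., 10., 10.], [20., 20., 20.]])
lon2D = np.array([[100., 110., 120.], [100., 110., 120.]])

# 2D lat/lon ride along as non-dimension coordinates
da = xr.DataArray(
    mask,
    dims=('lat_idx', 'lon_idx'),
    coords={'lat': (('lat_idx', 'lon_idx'), lat2D),
            'lon': (('lat_idx', 'lon_idx'), lon2D)},
)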
github wy2136 / xlearn / xlearn / decomposition.py
        eofs = eofs.reshape((self.n_components_,) + grid_shape)
        # reshape the mean_
        mean_ = np.empty(n_grids) * np.nan
        mean_[valid_grids] = self.mean_
        mean_ = mean_.reshape(grid_shape)
        
        # wrap regression coefficient into DataArray
        # dims
        grid_dims = da.dims[1:]
        eofs_dims = ('mode',) + grid_dims
        # coords
        grid_coords = {dim: da[dim] for dim in grid_dims}
        eofs_coords = grid_coords.copy()
        eofs_coords[eofs_dims[0]] = np.arange(self.n_components_)
        # DataArray
        self.components_da = xr.DataArray(eofs,
            dims=eofs_dims, coords=eofs_coords)
        # self.mean_
        self.mean_da = xr.DataArray(mean_,
            dims=grid_dims, coords=grid_coords)
        
        
        return self
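The fit method rebuilds coordinates for the fitted components by reusing the grid coordinates of the input DataArray and adding a fresh 'mode' axis. The same move in miniature (names and shapes are illustrative):

import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(8, 3, 4), dims=('time', 'lat', 'lon'),
                  coords={'lat': np.arange(3), 'lon': np.arange(4)})
n_components = 2
eofs = np.random.rand(n_components, 3, 4)  # stand-in for the fitted components

# Reuse the input's grid coords; add a fresh 'mode' coordinate
grid_dims = da.dims[1:]
coords = {dim: da[dim] for dim in grid_dims}
coords['mode'] = np.arange(n_components)
components_da = xr.DataArray(eofs, dims=('mode',) + grid_dims, coords=coords)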