import pandas as pd

from ecl2df import common


def test_stack_on_colname():
    """Test that we can stack columns with an implicit double level
    in the column names, indicated by a separator string"""
    dframe = pd.DataFrame(
        columns=["EQLNUM", "STATIC", "OWC@2000-01-01", "OWC@2020-01-01"],
        data=[[1, 1.2, 2000, 1900], [2, 1.3, 2100, 2050]],
    )
    stacked = common.stack_on_colnames(dframe)
    assert "DATE" in stacked
    assert "OWC" in stacked
    assert len(stacked.columns) == 4
    assert len(stacked["DATE"].unique()) == 2
    assert len(stacked) == 4
    assert not stacked.isnull().sum().sum()

    dframe = pd.DataFrame(
        columns=[
            "EQLNUM",
            "STATIC",
            "OWC@2000-01-01",
            "OWC@2020-01-01",
            "GOC@2000-01-01",
            "GOC@2020-01-01",
        ],
        # Values are illustrative; the column structure is what the stacking exercises
        data=[[1, 1.2, 2000, 1900, 1800, 1700], [2, 1.3, 2100, 2050, 2000, 1950]],
    )
    stacked = common.stack_on_colnames(dframe)
    assert "DATE" in stacked
    assert "OWC" in stacked
    assert "GOC" in stacked
    assert len(stacked.columns) == 5
    assert len(stacked["DATE"].unique()) == 2
    assert len(stacked) == 4
    assert not stacked.isnull().sum().sum()
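
# A minimal usage sketch of the stacking helper exercised by the test above
# (this example is not part of the original test; the expected columns are
# inferred from the assertions above).
import pandas as pd

from ecl2df import common

wide = pd.DataFrame(
    columns=["EQLNUM", "OWC@2000-01-01", "OWC@2020-01-01"],
    data=[[1, 2000, 1900]],
)
stacked = common.stack_on_colnames(wide, sep="@", stackcolname="DATE")
print(stacked.columns.tolist())  # expected to contain "EQLNUM", "DATE" and "OWC"
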
def _df2ecl_equilfuncs(keyword, dframe, comment=None):
    """Internal function to be used by the df2ecl_<keyword>() functions"""
    if dframe.empty:
        return "-- No data!"
    string = "{}\n".format(keyword)
    string += common.comment_formatter(comment)
    col_headers = RENAMERS[keyword]["DATA"]
    string += "-- {:^21} {:^21} \n".format("DEPTH", col_headers[1])
    if "KEYWORD" not in dframe:
        # Use everything..
        subset = dframe
    else:
        subset = dframe[dframe["KEYWORD"] == keyword]
    if "EQLNUM" not in subset:
        subset["EQLNUM"] = 1
    def _df2ecl_equilfuncs_eqlnum(dframe):
        """Print one equilibration function table for a specific
        EQLNUM

        Args:
            dframe (pd.DataFrame): Rows belonging to one specific EQLNUM
        """
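
# Standalone illustration (not part of the module above) of the "{:^21}" format
# spec used when building the "-- DEPTH ..." header line: each column title is
# centered within a 21-character field.
header = "-- {:^21} {:^21}".format("DEPTH", "OWC")
print(repr(header))
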
def _df2ecl_satfuncs(keyword, dframe, comment=None):
    """Internal helper: format one saturation function keyword from a dataframe"""
    if dframe.empty:
        return "-- No data!\n"
    string = "{}\n".format(keyword)
    string += common.comment_formatter(comment)
    if "KEYWORD" not in dframe:
        # Use everything..
        subset = dframe
    else:
        subset = dframe[dframe["KEYWORD"] == keyword]
    if "SATNUM" not in subset:
        subset["SATNUM"] = 1
    subset = subset.set_index("SATNUM").sort_index()

    # Make a function that is to be called for each SATNUM
    def _df2ecl_satfuncs_satnum(keyword, dframe):
        """Print one saturation function for one specific SATNUM"""
        col_headers = RENAMERS[keyword]["DATA"]
        string = (
            "-- "
            # The table layout below is illustrative, centering each column header
            + " ".join("{:^10}".format(col) for col in col_headers)
            + "\n"
        )
        string += dframe[col_headers].to_string(header=False, index=False)
        return string + "\n"
def pvtw_fromdeck(deck, ntpvt=None):
    """Extract PVTW from a deck

    Args:
        deck (str or opm.common Deck)
        ntpvt (int): Number of PVT regions in deck. Will
            be inferred if not present in deck.
    """
    if "TABDIMS" not in deck:
        deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt)
    return common.ecl_keyworddata_to_df(
        deck, "PVTW", renamer=RENAMERS["PVTW"], recordcountername="PVTNUM"
    )
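
# Hedged usage sketch for pvtw_fromdeck: the deck string is a made-up single
# PVTW record (reference pressure, water formation volume factor, compressibility,
# viscosity, viscosibility); the numbers are illustrative only.
DECK_STR = """
PVTW
  327.3 1.03 4.51E-005 0.25 0.0 /
"""
pvtw_df = pvtw_fromdeck(DECK_STR, ntpvt=1)
print(pvtw_df)  # expected: one row per PVT region, counted by a PVTNUM column
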
if "SWAT@" + datestr in grid_df and (
"SOIL@" + datestr in grid_df or "SGAS@" + datestr in grid_df
):
contacts = compute_pillar_contacts(
grid_df,
region=region,
soilcutoff=soilcutoff,
sgascutoff=sgascutoff,
swatcutoff=swatcutoff,
datestr=datestr,
)
if not contacts.empty:
grouped = pd.merge(grouped, contacts, how="left")
if stackdates:
return ecl2df.common.stack_on_colnames(
grouped, sep="@", stackcolname="DATE", inplace=True
)
return grouped
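
# Toy illustration of why the merge above uses how="left": every row in the
# aggregated frame is kept, and pillars without a computed contact get NaN in
# the contact columns. Column names here are illustrative.
import pandas as pd

grouped_toy = pd.DataFrame({"PILLAR": [1, 2, 3], "PORV": [10.0, 12.0, 9.0]})
contacts_toy = pd.DataFrame({"PILLAR": [1, 3], "OWC@2000-01-01": [2000.0, 2010.0]})
print(pd.merge(grouped_toy, contacts_toy, how="left"))  # PILLAR 2 kept with NaN
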
def fill_reverse_parser(parser):
    """Set up sys.argv parsers for writing Eclipse include files from
    dataframes (as CSV files)

    Arguments:
        parser (ArgumentParser or subparser): parser to fill with arguments
    """
    return common.fill_reverse_parser(parser, "PVT", "pvt.inc")
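
# Sketch of how the parser filler above is typically wired up. Only standard
# argparse calls are shown; the specific options registered by
# common.fill_reverse_parser are not spelled out here.
import argparse

parser = argparse.ArgumentParser(description="Write PVT include files from CSV")
fill_reverse_parser(parser)
print(parser.format_help())  # inspect which arguments were registered
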
def df2ecl(pvt_df, keywords=None, comments=None, filename=None):
    """Generate Eclipse include strings from PVT dataframes

    Args:
        pvt_df (pd.DataFrame): Dataframe with PVT data on ecl2df format.
        keywords (list of str): List of keywords to include. Must be
            supported and present in the incoming dataframe.
        comments (dict): Dictionary indexed by keyword with comments to be
            included per keyword. If a key named "master" is present,
            it will be used as a master comment for the output file.
        filename (str): If supplied, the generated text will also be dumped
            to file.
    """
    return common.df2ecl(
        pvt_df,
        keywords,
        comments,
        supported=SUPPORTED_KEYWORDS,
        consecutive="PVTNUM",
        filename=filename,
    )
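
# Hedged usage sketch for df2ecl: pvt_df is assumed to be a dataframe already on
# ecl2df's PVT format; the keyword list, comments and output filename are
# illustrative values, not defaults.
include_string = df2ecl(
    pvt_df,
    keywords=["PVTW"],
    comments={"master": "Generated from dataframe", "PVTW": "Water PVT"},
    filename="pvt.inc",
)
print(include_string)
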