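# Excerpts from the ecl2df test suite and library modules. The snippets below
# assume roughly these imports; module-local names such as DATAFILE, logger,
# deck2df, edge_dataframe2dict, dict2treelib and SUPPORTED_KEYWORDS are
# defined in their respective source files.
import logging
import os
import sys

import networkx
import pandas as pd
import pytest

from ecl2df import EclFiles, common, faults, inferdims, nnc, rft, trans
from ecl2df.common import parse_opmio_deckrecord
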
def test_rft2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
rftdf = rft.rft2df(eclfiles)
assert "ZONE" in rftdf
assert "LEAF" not in rftdf # Topology metadata should not be exported
assert set(rftdf["WELLMODEL"]) == {"STANDARD"}
assert set(rftdf["WELL"]) == {
"OP_1",
"OP_2",
"OP_3",
"OP_4",
"OP_5",
"WI_1",
"WI_2",
"WI_3",
}
assert not rftdf.empty
assert len(rftdf) == 115
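
# A minimal usage sketch (an assumption, not part of the test suite): since
# rft.rft2df() returns a plain pandas DataFrame, ordinary DataFrame operations
# apply. "MYMODEL.DATA" is a hypothetical path.
def _example_rft_to_csv(datafile="MYMODEL.DATA"):
    rftdf = rft.rft2df(EclFiles(datafile))
    # Keep only one well and export to CSV:
    rftdf[rftdf["WELL"] == "OP_1"].to_csv("rft_op1.csv", index=False)
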
def test_df2ecl_editnnc(tmpdir):
"""Test generation of EDITNNC keyword"""
eclfiles = EclFiles(DATAFILE)
nncdf = nnc.df(eclfiles)
tmpdir.chdir()
nncdf["TRANM"] = 2
editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc")
editnnc_fromfile = "".join(open("editnnc.inc").readlines())
assert editnnc == editnnc_fromfile
assert "EDITNNC" in editnnc
assert editnnc.count("/") == len(nncdf) + 1
assert "avg multiplier" in editnnc
# Fails when columns are missing
with pytest.raises((KeyError, ValueError)):
nnc.df2ecl_editnnc(nncdf[["I1", "I2"]])
    # Check that the nocomments option strips the comment lines:
    editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True)
    assert "avg multiplier" not in editnnc
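
# Sketch (assumption): a typical use of df2ecl_editnnc() is to scale all NNC
# transmissibilities and write an EDITNNC include file for the simulator.
# "MYMODEL.DATA" and the 0.5 multiplier are illustrative only.
def _example_halve_nnc_trans(datafile="MYMODEL.DATA"):
    nncdf = nnc.df(EclFiles(datafile))
    nncdf["TRANM"] = 0.5  # multiplier applied to every NNC pair
    nnc.df2ecl_editnnc(nncdf, filename="editnnc_halved.inc")
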
def test_nnc2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
nncdf = nnc.df(eclfiles)
assert not nncdf.empty
assert "I1" in nncdf
assert "J1" in nncdf
assert "K1" in nncdf
assert "I2" in nncdf
assert "J2" in nncdf
assert "K2" in nncdf
assert "TRAN" in nncdf
prelen = len(nncdf)
nncdf = nnc.filter_vertical(nncdf)
assert (nncdf["I1"] == nncdf["I2"]).all()
assert (nncdf["J1"] == nncdf["J2"]).all()
assert len(nncdf) < prelen
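
# Sketch (assumption): filter_vertical() keeps only connections where the I
# and J indices coincide, so the vertical fraction of NNCs is a one-liner:
def _example_vertical_share(nncdf):
    return len(nnc.filter_vertical(nncdf)) / len(nncdf)
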
def test_faults2df():
"""Test that dataframes are produced"""
eclfiles = EclFiles(DATAFILE)
faultsdf = faults.df(eclfiles.get_ecldeck())
assert "NAME" in faultsdf
assert "I" in faultsdf
assert "J" in faultsdf
assert "K" in faultsdf
assert "FACE" in faultsdf
assert not faultsdf.empty
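
# Sketch (assumption): with one row per (cell, face), the number of cell
# faces belonging to each fault is a plain groupby. "MYMODEL.DATA" is a
# hypothetical path.
def _example_fault_sizes(datafile="MYMODEL.DATA"):
    faultsdf = faults.df(EclFiles(datafile).get_ecldeck())
    return faultsdf.groupby("NAME").size()
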
def test_nx(tmpdir):
"""Test graph generation"""
eclfiles = EclFiles(DATAFILE)
network = trans.nx(eclfiles, region="FIPNUM")
assert network.number_of_nodes() == 6
networkx.write_gexf(
network, str(tmpdir.join("reek-fipnum-trans.gxf")), prettyprint=True
)
assert os.path.exists(str(tmpdir.join("reek-fipnum-trans.gxf")))
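
# Sketch (assumption): trans.nx() returns a networkx graph with one node per
# region value, so standard networkx inspection works on it:
def _example_print_region_edges(datafile="MYMODEL.DATA"):
    network = trans.nx(EclFiles(datafile), region="FIPNUM")
    for region_a, region_b, edge_data in network.edges(data=True):
        print(region_a, region_b, edge_data)
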
def gruptree_main(args):
"""Entry-point for module, for command line utility"""
if args.verbose:
logger.setLevel(logging.INFO)
if not args.output and not args.prettyprint:
print("Nothing to do. Set --output or --prettyprint")
sys.exit(0)
eclfiles = EclFiles(args.DATAFILE)
dframe = deck2df(eclfiles.get_ecldeck(), startdate=args.startdate)
if args.prettyprint:
if "DATE" in dframe:
for date in dframe["DATE"].dropna().unique():
print("Date: " + str(date.astype("M8[D]")))
trees = edge_dataframe2dict(dframe[dframe["DATE"] == date])
# Returns list of dicts, one for each root found
# (typically only one)
for tree in trees:
rootname = list(tree.keys())[0]
print(dict2treelib(rootname, tree[rootname]))
print("")
else:
logger.warning("No tree data to prettyprint")
if args.output == "-":
# Ignore pipe errors when writing to stdout.
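
# Sketch (assumption): gruptree_main() takes an argparse-like namespace, so a
# minimal programmatic invocation could look like this (the argument names
# are inferred from the function body above):
def _example_run_gruptree(datafile="MYMODEL.DATA"):
    import argparse
    gruptree_main(
        argparse.Namespace(
            verbose=True,
            prettyprint=True,
            output="",
            DATAFILE=datafile,
            startdate=None,
        )
    )
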
def df(deck):
"""Produce a dataframe of fault data from a deck
All data for the keyword FAULTS will be returned.
Args:
deck (opm.io Deck or EclFiles): Eclipse deck
"""
if isinstance(deck, EclFiles):
deck = deck.get_ecldeck()
    # Example of a single FAULTS record as parsed by opm.io:
    #   list(deck['FAULTS'][0])
    #   => [[u'F1'], [36], [36], [41], [42], [1], [14], [u'I']]
data = []
    # Eclipse allows the FAULTS keyword to be repeated as many times
    # as needed, so we must loop over every occurrence:
for keyword in deck:
if keyword.name == "FAULTS":
for rec in keyword:
# Each record now has a range potentially in three
# dimensions for the fault, unroll this:
frec_dict = parse_opmio_deckrecord(rec, "FAULTS")
faultname = frec_dict["NAME"]
faultface = frec_dict["FACE"]
                # The J and K ranges are unrolled the same way as the I range:
                for i_idx in range(frec_dict["IX1"], frec_dict["IX2"] + 1):
                    for j_idx in range(frec_dict["IY1"], frec_dict["IY2"] + 1):
                        for k_idx in range(frec_dict["IZ1"], frec_dict["IZ2"] + 1):
                            data.append([faultname, i_idx, j_idx, k_idx, faultface])
    return pd.DataFrame(columns=["NAME", "I", "J", "K", "FACE"], data=data)

def faults_main(args):
"""Read from disk and write CSV back to disk"""
if args.verbose:
logger.setLevel(logging.INFO)
eclfiles = EclFiles(args.DATAFILE)
if eclfiles:
deck = eclfiles.get_ecldeck()
faults_df = df(deck)
if faults_df.empty:
logger.warning("Empty FAULT data, not written to disk!")
else:
faults_df.to_csv(args.output, index=False)
print("Wrote to " + args.output)

def df(deck, keywords=None, ntsfun=None):
    """Extract the data in the saturation function keywords as a
    Pandas DataFrame.

    Arguments:
        deck (opm.io deck or str): Incoming data deck. Always
            supply as a string if you don't know TABDIMS-NTSFUN.
        keywords (list of str): Requested keywords for which to
            extract data.
        ntsfun (int): Number of SATNUMs defined in the deck, only
            needed if TABDIMS with NTSFUN is not found in the deck.
            If not supplied (or None) and NTSFUN is not defined,
            an attempt will be made to infer it.

    Returns:
        pd.DataFrame, with columns 'KEYWORD', 'SW', 'KRW', 'KROW', 'PC', ...
    """
if isinstance(deck, EclFiles):
        # NB: If this is done on include files and not on DATA files
        # we can lose data for SATNUM > 1
deck = deck.get_ecldeck()
deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", deck, ntsfun)
assert "TABDIMS" in deck
ntsfun = deck["TABDIMS"][0][inferdims.DIMS_POS["NTSFUN"]].get_int(0)
keywords = common.handle_wanted_keywords(keywords, deck, SUPPORTED_KEYWORDS)
frames = []
for keyword in keywords:
# Construct the associated function names
function_name = keyword.lower() + "_fromdeck"
function = globals()[function_name]
dframe = function(deck, ntsfun=ntsfun)
        frames.append(dframe.assign(KEYWORD=keyword))
    if frames:
        # Merge the per-keyword frames into one dataframe:
        return pd.concat(frames, axis=0, sort=False, ignore_index=True)
    return pd.DataFrame()
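
# Sketch (assumption): per the docstring above, a bare include-file string
# can be passed directly to the df() defined above, with ntsfun supplied
# since TABDIMS is absent from the string:
def _example_swof_from_string():
    swof_string = "SWOF\n 0 0 1 1\n 1 1 0 0\n/\n"
    return df(swof_string, keywords=["SWOF"], ntsfun=1)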