def test_sklearn2pmml(self):
    # Export to PMML
    pipeline = PMMLPipeline([
        ("classifier", self.ref)
    ])
    pipeline.fit(self.test[0], self.test[1])
    sklearn2pmml(pipeline, "forest_sklearn2pmml.pmml", with_repr = True)

    try:
        # Import PMML
        model = PMMLForestClassifier(pmml='forest_sklearn2pmml.pmml')

        # Verify that the classifications match the reference model
        Xte, _ = self.test
        assert np.array_equal(
            self.ref.predict_proba(Xte),
            model.predict_proba(Xte)
        )
    finally:
        # Clean up the exported file (assumes `import os` at module level)
        os.remove("forest_sklearn2pmml.pmml")
# LightGBM regression on the mpg dataset: numeric, categorical and free-text
# features are assembled with DataFrameMapper before fitting.
import pandas as pd
from lightgbm import LGBMRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn_pandas import DataFrameMapper
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.decoration import CategoricalDomain, ContinuousDomain
from sklearn2pmml.feature_extraction.text import Splitter
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml.preprocessing import PMMLLabelEncoder

data = pd.read_csv("test/support/mpg.csv")

numeric_features = ["displ", "year", "cyl"]
categorical_features = ["drv", "class"]
text_features = ["model"]

mapper = DataFrameMapper(
    [(numeric_features, [ContinuousDomain()])] +
    [([f], [CategoricalDomain(), PMMLLabelEncoder()]) for f in categorical_features] +
    [(f, [CategoricalDomain(), CountVectorizer(tokenizer=Splitter(), max_features=5)]) for f in text_features]
)

pipeline = PMMLPipeline([
    ("mapper", mapper),
    ("model", LGBMRegressor(n_estimators=1000))
])
# per-row weights can be passed via model__sample_weight (see the sketch below)
pipeline.fit(data, data["hwy"], model__categorical_feature=[3, 4])

sklearn2pmml(pipeline, "test/support/python/lightgbm_regression.pmml")

print(pipeline.predict(data[:10]))
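# Weighting sketch (an assumption, not part of the original snippet): as the comment
# above notes, per-row weights can be routed to the LightGBM step through the
# "model__" prefix. Uniform weights are used here purely for illustration.
import numpy as np

weights = np.ones(len(data))
pipeline.fit(data, data["hwy"], model__sample_weight=weights, model__categorical_feature=[3, 4])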
# Classification variant of the same dataset. The `binary` flag is assumed to be
# supplied by the caller in the original script; False exports the multiclass model.
from lightgbm import LGBMClassifier

binary = False

data = pd.read_csv("test/support/mpg.csv")
if binary:
    # collapse rear-wheel drive into four-wheel drive to obtain a two-class target
    data["drv"] = data["drv"].replace("r", "4")

numeric_features = ["displ", "year", "cyl"]
categorical_features = ["class"]
text_features = []

mapper = DataFrameMapper(
    [(numeric_features, [ContinuousDomain()])] +
    [([f], [CategoricalDomain(), PMMLLabelEncoder()]) for f in categorical_features] +
    [(f, [CategoricalDomain(), CountVectorizer(tokenizer=Splitter())]) for f in text_features]
)

pipeline = PMMLPipeline([
    ("mapper", mapper),
    ("model", LGBMClassifier(n_estimators=1000))
])
pipeline.fit(data, data["drv"], model__categorical_feature=[3])

suffix = "binary" if binary else "multiclass"
sklearn2pmml(pipeline, "test/support/python/lightgbm_" + suffix + ".pmml")

print(list(pipeline.predict(data[:10])))
print(list(pipeline.predict_proba(data[0:1])[0]))
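# Readability sketch (an assumption, not part of the original snippet): predict_proba
# columns follow pipeline.classes_, so probabilities can be paired with class labels.
print(dict(zip(pipeline.classes_, pipeline.predict_proba(data[0:1])[0])))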
# Linear regression with one-hot encoded categorical features and text features.
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder

data = pd.read_csv("test/support/mpg.csv")

numeric_features = ["displ", "year", "cyl"]
categorical_features = ["drv", "class"]
text_features = ["model"]

mapper = DataFrameMapper(
    [(numeric_features, [ContinuousDomain()])] +
    [([f], [CategoricalDomain(), OneHotEncoder()]) for f in categorical_features] +
    [(f, [CategoricalDomain(), CountVectorizer(tokenizer=Splitter(), max_features=5)]) for f in text_features]
)

pipeline = PMMLPipeline([
    ("mapper", mapper),
    ("model", LinearRegression())
])
pipeline.fit(data, data["hwy"])

sklearn2pmml(pipeline, "test/support/python/linear_regression_text.pmml")

print(list(pipeline.predict(data[:10])))
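# Verification sketch (an assumption, not part of the original snippet): as in the
# later examples, a small verification sample can be embedded with pipeline.verify(...)
# before the sklearn2pmml(...) call, letting PMML engines check their predictions
# against Scikit-Learn's.
pipeline.verify(data.sample(10))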
def test_sklearn2pmml(self):
    # Export to PMML
    pipeline = PMMLPipeline([
        ("classifier", self.ref)
    ])
    pipeline.fit(self.train[0], self.train[1])
    sklearn2pmml(pipeline, "tree_sklearn2pmml.pmml", with_repr = True)

    try:
        # Import PMML
        model = PMMLTreeClassifier(pmml='tree_sklearn2pmml.pmml')

        # Verify that the classifications match the reference model
        Xte, _ = self.test
        assert np.array_equal(
            self.ref.predict_proba(Xte),
            model.predict_proba(Xte)
        )
    finally:
        # Clean up the exported file (assumes `import os` at module level)
        os.remove("tree_sklearn2pmml.pmml")
import pandas
import sys

from sklearn2pmml import sklearn2pmml
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml.ruleset import RuleSetClassifier

iris_df = pandas.read_csv("csv/Iris.csv")
#print(iris_df.head(5))

iris_X = iris_df[iris_df.columns.difference(["Species"])]
iris_y = iris_df["Species"]

# Hand-written decision rules; default_score is used when no rule fires
classifier = RuleSetClassifier([
    ("X['Petal_Length'] < 2.45", "setosa"),
    ("X['Petal_Width'] < 1.75", "versicolor"),
], default_score = "virginica")

pipeline = PMMLPipeline([
    ("classifier", classifier)
])
pipeline.fit(iris_X, iris_y)

sklearn2pmml(pipeline, "pmml/RuleSetIris.pmml")

if "--deploy" in sys.argv:
    from openscoring import Openscoring
    os = Openscoring("http://localhost:8080/openscoring")
    os.deployFile("RuleSetIris", "pmml/RuleSetIris.pmml")
("Gender", CategoricalDomain()),
("Marital", CategoricalDomain()),
("Occupation", CategoricalDomain()),
("Age", [ContinuousDomain(), CutTransformer(bins = [17, 28, 37, 47, 83], labels = ["q1", "q2", "q3", "q4"])]),
("Hours", ContinuousDomain()),
("Income", ContinuousDomain()),
(["Hours", "Income"], Alias(ExpressionTransformer("X[1] / (X[0] * 52)"), "Hourly_Income"))
])
classifier = H2ORandomForestEstimator(ntrees = 17)
predict_proba_transformer = Pipeline([
("expression", ExpressionTransformer("X[1]")),
("cut", Alias(CutTransformer(bins = [0.0, 0.75, 0.90, 1.0], labels = ["no", "maybe", "yes"]), "Decision", prefit = True))
])
pipeline = PMMLPipeline([
("local_mapper", mapper),
("uploader", H2OFrameCreator()),
("remote_classifier", classifier)
], predict_proba_transformer = predict_proba_transformer)
pipeline.fit(audit_X, H2OFrame(audit_y.to_frame(), column_types = ["categorical"]))
pipeline.verify(audit_X.sample(100))
sklearn2pmml(pipeline, "pmml/RandomForestAudit.pmml")
if "--deploy" in sys.argv:
from openscoring import Openscoring
os = Openscoring("http://localhost:8080/openscoring")
os.deployFile("RandomForestAudit", "pmml/RandomForestAudit.pmml")
# Gradient boosting on the same Audit dataset, with chi-squared feature selection
# and pairwise interactions between Gender and Marital.
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import LabelBinarizer, PolynomialFeatures
from xgboost import XGBClassifier

scalar_mapper = DataFrameMapper([
    ("Education", [CategoricalDomain(), LabelBinarizer(), SelectKBest(chi2, k = 3)]),
    ("Employment", [CategoricalDomain(), LabelBinarizer(), SelectKBest(chi2, k = 3)]),
    ("Occupation", [CategoricalDomain(), LabelBinarizer(), SelectKBest(chi2, k = 3)]),
    ("Age", [ContinuousDomain(), CutTransformer(bins = [17, 28, 37, 47, 83], labels = ["q1", "q2", "q3", "q4"]), LabelBinarizer()]),
    ("Hours", ContinuousDomain()),
    ("Income", ContinuousDomain()),
    (["Hours", "Income"], Alias(ExpressionTransformer("X[1] / (X[0] * 52)"), "Hourly_Income"))
])
interaction_mapper = DataFrameMapper([
    ("Gender", [CategoricalDomain(), LabelBinarizer()]),
    ("Marital", [CategoricalDomain(), LabelBinarizer()])
])

classifier = XGBClassifier()

pipeline = PMMLPipeline([
    ("mapper", FeatureUnion([
        ("scalar_mapper", scalar_mapper),
        ("interaction", Pipeline([
            ("interaction_mapper", interaction_mapper),
            ("polynomial", PolynomialFeatures())
        ]))
    ])),
    ("classifier", classifier)
])
pipeline.fit(audit_X, audit_y)
pipeline.configure(compact = True)
pipeline.verify(audit_X.sample(100), zeroThreshold = 1e-6, precision = 1e-6)

sklearn2pmml(pipeline, "pmml/XGBoostAudit.pmml")