# coding: utf-8
### BEGIN: SETUP ###
import atexit
import platform
import py4j
import sys
import pyspark
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, SQLContext
from pyspark.storagelevel import StorageLevel
from pypio.utils import new_string_array
from pypio.data import PEventStore
SparkContext._ensure_initialized()
try:
    # Enable Hive support only if Hive classes are present on the classpath;
    # otherwise fall back to a plain SparkSession.
    SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
    spark = SparkSession.builder.enableHiveSupport().getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
    spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
sql = spark.sql
def pio_cleanup():
    # Stop Spark and run PredictionIO's registered cleanup hooks on exit.
    sc.stop()
    sc._jvm.org.apache.predictionio.workflow.CleanupFunctions.run()
atexit.register(pio_cleanup)
sqlContext = spark._wrapped
sqlCtx = sqlContext
p_event_store = PEventStore(spark._jsparkSession, sqlContext)
def run_pio_workflow(model):
    # Hand the trained model to the template engine, then launch the
    # standard PredictionIO workflow with the original command-line args.
    template_engine = sc._jvm.org.jpioug.template.python.Engine
    template_engine.modelRef().set(model._to_java())
    main_args = new_string_array(sys.argv, sc._gateway)
    create_workflow = sc._jvm.org.apache.predictionio.workflow.CreateWorkflow
    sc.stop()
    create_workflow.main(main_args)
### END: SETUP ###
# In[ ]:
from pyspark.sql.functions import col
from pyspark.sql.functions import explode
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import IndexToString
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# In[ ]:
event_df = p_event_store.find('IrisApp')
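# Optional sanity check (an addition, not part of the original template):
# events come back as a DataFrame whose "fields" column is a map of
# property names to values; inspect it before flattening below.
event_df.printSchema()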
# In[ ]:
def get_field_type(name):
    # Attribute columns (names starting with 'attr') hold numeric features;
    # everything else (e.g. the target label) stays a string.
    if name.startswith('attr'):
        return 'double'
    else:
        return 'string'
# Collect the distinct keys of the "fields" map, then flatten the map into
# one typed column per key.
field_names = (event_df
               .select(explode("fields"))
               .select("key")
               .distinct()
               .rdd.flatMap(lambda x: x)
               .collect())
field_names.sort()
exprs = [col("fields").getItem(k).cast(get_field_type(k)).alias(k) for k in field_names]
data_df = event_df.select(*exprs)
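# Optional preview (an addition, not part of the original template) of the
# flattened, typed DataFrame.
data_df.show(5)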
# In[ ]:
(train_df, test_df) = data_df.randomSplit([0.9, 0.1])
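# Note (added): randomSplit is nondeterministic between runs; pass a seed,
# e.g. data_df.randomSplit([0.9, 0.1], seed=42), for a reproducible split.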
# In[ ]:
# Index the string target into a numeric "label", assemble the attr*
# columns into a feature vector, train a random forest, and map predictions
# back to the original string labels.
labelIndexer = StringIndexer(inputCol="target", outputCol="label").fit(train_df)
featureAssembler = VectorAssembler(inputCols=[x for x in field_names if x.startswith('attr')],
                                   outputCol="features")
rf = RandomForestClassifier(labelCol="label", featuresCol="features", numTrees=10)
labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
                               labels=labelIndexer.labels)
pipeline = Pipeline(stages=[featureAssembler, labelIndexer, rf, labelConverter])
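# Optional sketch (an addition, not part of the original template): tune the
# forest with cross-validation instead of the fixed numTrees=10. The grid
# values here are illustrative assumptions.
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
param_grid = (ParamGridBuilder()
              .addGrid(rf.numTrees, [10, 50])
              .addGrid(rf.maxDepth, [5, 10])
              .build())
cv = CrossValidator(estimator=pipeline,
                    estimatorParamMaps=param_grid,
                    evaluator=MulticlassClassificationEvaluator(
                        labelCol="label", predictionCol="prediction",
                        metricName="accuracy"),
                    numFolds=3)
# To use it, replace pipeline.fit(train_df) below with:
#   model = cv.fit(train_df).bestModel  # bestModel is a PipelineModel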
# In[ ]:
model = pipeline.fit(train_df)
# In[ ]:
predict_df = model.transform(test_df)
# In[ ]:
predict_df.select("predictedLabel", "target", "features").show(5)
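# Optional (added): a quick confusion view via plain DataFrame aggregation,
# counting actual vs. predicted label pairs.
predict_df.groupBy("target", "predictedLabel").count().show()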
# In[ ]:
evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predict_df)
print("Test Error = %g" % (1.0 - accuracy))
# In[ ]:
run_pio_workflow(model)