-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathexecution-time-per-benchmark.py
121 lines (103 loc) · 3.19 KB
/
execution-time-per-benchmark.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#! /usr/bin/env python3
# Plot per-benchmark execution-time slowdown (relative to native) for a
# set of WebAssembly executors: reads ./data.csv and writes
# execution-time-per-benchmark.pdf next to this script.
import matplotlib
# Embed fonts as Type 42 (TrueType) so text in the emitted PDF/PS stays
# selectable/editable — must be set before any figure is drawn.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Project-local display names for the two tools highlighted in the plot.
from toolnames import toolverified, toolhighperf
# Guard against python2, where 3 / 2 is integer division (== 1).
assert (3 / 2) * 2 == 3, "Use python3, not python2"
# Map raw executor IDs (as they appear in data.csv's 'Executor' column)
# to the display names used in the plot legend.  Only rows whose ID is a
# key here survive the filtering step below.  The bracket/brace
# decorations on some names ('[wasm3]', '{Wasmer}') appear to encode
# executor categories — presumably interpreter vs. JIT — TODO confirm
# against the legend conventions used in the paper.
allowed_executors = dict([
    ('native', 'native'),
    ('vWasm', toolverified),
    ('vWasm-nosbx', toolverified + '*'),
    ('rWasm', toolhighperf),
    ('WAMR-compiled-chkbounds', 'WAMR'),
    ('WAMR-interpreter', '[WAMR]'),
    ('wasm2c', 'wasm2c'),
    ('wasm3', '[wasm3]'),
    ('wasmer-jit', '{Wasmer}'),
    ('wasmtime', '{wasmtime}'),
    ('wavm-precompiled', 'WAVM'),
])

# Left-to-right ordering of the bars within each benchmark group.
# 'native' is deliberately absent: it is the normalization baseline, not
# a plotted series.
executor_ordering = [
    '[wasm3]',
    '[WAMR]',
    toolverified,
    toolverified + '*',
    '{Wasmer}',
    toolhighperf,
    'wasm2c',
    'WAMR',
    '{wasmtime}',
    'WAVM',
]
# Load the raw timings, keep only executors we know how to label, and
# attach each row's display name via a small lookup table.
data = pd.read_csv('./data.csv')
data = data[data['Executor'].isin(allowed_executors)]
data = data.rename(columns={'Executor': 'Executor-ID'})
name_table = pd.DataFrame(list(allowed_executors.items()),
                          columns=['Executor-ID', 'Executor'])
data = pd.merge(left=data,
                right=name_table,
                on='Executor-ID',
                validate='many_to_one')
data = data[["Test", "Executor", "Timing (s)"]]

# Per-benchmark baseline: mean wall-clock time of the native runs.
native_runs = data[data["Executor"] == 'native']
mean_native_timing = (
    native_runs.groupby('Test')['Timing (s)']
    .mean()
    .to_frame()
    .rename(columns={'Timing (s)': 'Mean Native Timing'})
)

# Mean timing of every (benchmark, executor) pair.
mean_timing = (
    data.groupby(['Test', 'Executor'])['Timing (s)']
    .mean()
    .to_frame()
    .rename(columns={'Timing (s)': 'Mean Timing'})
    .reset_index()
)

# Normalize each executor's mean time by the native baseline for the
# same benchmark.
normalized = pd.merge(left=mean_timing,
                      right=mean_native_timing,
                      on='Test',
                      validate='many_to_one')
normalized['Normalized Slowdown'] = (
    normalized['Mean Timing'] / normalized['Mean Native Timing'])
normalized = normalized[['Test', 'Executor', 'Normalized Slowdown']]

# Sanity checks: the data covers exactly the executors we expect, and
# the plotting order covers exactly the non-native ones.
assert set(normalized['Executor']) == set(allowed_executors.values())
assert set(executor_ordering) == set(allowed_executors.values()) - {'native'}

# Native is the baseline (slowdown 1.0 by construction); drop it from
# the plotted data.
normalized = normalized[normalized['Executor'] != 'native']
# Global styling: light grid, paper context with enlarged fonts.
sns.set_theme(style='whitegrid')
sns.set_context("paper", font_scale=2)

fig, ax = plt.subplots(figsize=(16, 8))

# Axis / legend titles as they should appear in the figure.
x_axis = 'Benchmark'
y_axis = 'Normalized Slowdown (Log Scale)'
hue_axis = 'Tool'
graph_data = normalized.rename(columns={'Test': x_axis,
                                        'Normalized Slowdown': y_axis,
                                        'Executor': hue_axis})

# Slowdowns span several orders of magnitude, so use a log y-axis.
ax.set_yscale('log')
ax.set_ylim(0.01, 200)

g = sns.barplot(data=graph_data,
                x=x_axis,
                y=y_axis,
                order=sorted(set(graph_data[x_axis])),
                hue=hue_axis,
                hue_order=executor_ordering,
                ax=ax)

# Rotate benchmark labels vertically; place the legend outside the axes
# at the top right.
plt.setp(ax.get_xticklabels(), rotation=90)
ax.legend(markerscale=2,
          bbox_to_anchor=(1, 1),
          loc='upper left')

# Emphasize the two tools under evaluation in the tick labels.
for label in ax.get_xticklabels():
    if label.get_text() in (toolverified, toolhighperf):
        label.set_fontweight('bold')

plt.tight_layout()
# sys.path[0] is the directory containing this script; fall back to '.'
# when it is empty (interactive use).
plt.savefig((sys.path[0] or '.') + '/execution-time-per-benchmark.pdf')