-
Notifications
You must be signed in to change notification settings - Fork 25
/
fabfile.py
163 lines (122 loc) · 5.14 KB
/
fabfile.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
#!/usr/bin/python
import os
import xml.etree.ElementTree as ETree
from fabric.api import env, local, run, sudo, execute, hosts
from fabric.context_managers import shell_env, lcd, cd
from fabric.colors import yellow, green
# configure fabric to talk to the VMs
temp_ssh_config = '.ssh_config'  # local scratch file holding `vagrant ssh-config` output
def vagrant():
    '''sets up fabric environment to work with vagrant VMs'''
    # Capture the vagrant-generated SSH config so fabric can reuse it.
    with open(temp_ssh_config, 'w') as config_file:
        config_file.write(local('vagrant ssh-config', capture=True))
    # Count the nodes reported by `vagrant status`; the other tasks use
    # this global to address node1..nodeN.
    global total_nodes
    node_count = local('vagrant status | grep node | wc -l', capture=True)
    total_nodes = int(node_count)
    env.user = 'vagrant'
    env.use_ssh_config = True
    env.ssh_config_path = temp_ssh_config
@hosts('node1')
def format_namenode():
    '''Formats namenode on node1'''
    format_cmd = '/opt/hadoop/bin/hdfs namenode -format vagrant -nonInteractive'
    # The hdfs CLI needs JAVA_HOME exported to find the JVM.
    with shell_env(JAVA_HOME='/usr/java/default'):
        sudo(format_cmd, warn_only=True)
def supervisorctl_start(process):
    '''Start a process managed by supervisor'''
    start_cmd = 'supervisorctl start {0}'.format(process)
    sudo(start_cmd)
def supervisorctl_stop(process):
    '''Stop a process managed by supervisor'''
    stop_cmd = 'supervisorctl stop {0}'.format(process)
    sudo(stop_cmd)
def postsetup():
    '''Perform post vagrant up tasks on cluster'''
    # The namenode must be formatted before any HDFS daemons start.
    execute(format_namenode)
    # Master daemons all live on node1, started in this order.
    for master_proc in ('namenode', 'resourcemanager', 'master'):
        execute(supervisorctl_start, master_proc, host='node1')
    # Worker daemons run on node2..nodeN.
    for node_num in range(2, total_nodes + 1):
        worker_host = 'node{0}'.format(node_num)
        for worker_proc in ('datanode', 'nodemanager', 'regionserver'):
            execute(supervisorctl_start, worker_proc, host=worker_host)
    execute(init_ip_whitelist, host='node1')
def supervisorctl_reread_update():
    '''Re-read supervisor config files and apply any changes on this host'''
    # reread only parses the config; update actually applies it.
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def update_supervisor():
    '''Reload supervisor config across every node in the cluster'''
    all_nodes = ['node{0}'.format(n) for n in range(1, total_nodes + 1)]
    execute(supervisorctl_reread_update, hosts=all_nodes)
def supervisorctl_status():
    '''Print the supervisor process table for this host'''
    sudo('supervisorctl status')
def status():
    '''Show supervisor status on every node in the cluster'''
    all_nodes = ['node{0}'.format(n) for n in range(1, total_nodes + 1)]
    execute(supervisorctl_status, hosts=all_nodes)
def init_ip_whitelist():
    '''Seed the HBase IP whitelist table from the bundled ruby script'''
    whitelist_script = '/vagrant/resources/opensoc/hbase_ip_whitelist.rb'
    run('/opt/hbase/bin/hbase shell ' + whitelist_script)
@hosts('node2')
def create_topic(topic, partitions=1, replication_factor=1):
    '''Create a kafka topic via the zookeeper running on node2'''
    create_cmd = '/opt/kafka/bin/kafka-topics.sh --zookeeper localhost --create --topic {0} --partitions {1} --replication-factor {2}'.format(
        topic,
        partitions,
        replication_factor
    )
    run(create_cmd)
def get_topologies(repo='../opensoc-streaming'):
    '''Build and fetch a new OpenSOC topology jar from repo (default: ../opensoc-streaming)

    Returns the jar filename (relative to resources/opensoc) to submit to
    storm. If a jar for the current version/revision already exists it is
    reused rather than rebuilt.
    '''
    # Read the maven project version from the repo's pom.xml.
    pom_file = os.path.join(repo, 'pom.xml')
    pom = ETree.parse(pom_file)
    version = pom.getroot().find('{http://maven.apache.org/POM/4.0.0}version').text
    # FIX: run `git log` inside the topology repo, not the cwd, so the jar
    # name carries the revision of the code that is actually built.
    with lcd(repo):
        rev = local("git log | head -1 | cut -d ' ' -f 2 | cut -c1-11", capture=True)
    topology_jar = os.path.join(
        repo,
        'OpenSOC-Topologies',
        'target',
        'OpenSOC-Topologies-{0}.jar'.format(version)
    )
    vagrant_jar = 'OpenSOC-Topologies-{0}-{1}.jar'.format(version, rev)
    vagrant_jar_path = os.path.join('resources/opensoc', vagrant_jar)
    # Skip the (slow) maven build when this version+rev jar already exists.
    # print(...) with a single argument works identically on py2 and py3.
    if os.path.exists(vagrant_jar_path):
        print(yellow('{0} already exists. Not building a new jar.'.format(vagrant_jar_path)))
        print(yellow('Remove the existing jar and run this command again to build a fresh jar.'))
        return vagrant_jar
    with lcd(repo):
        local('mvn clean package')
    local('cp {0} {1}'.format(
        topology_jar,
        vagrant_jar_path
    ))
    return vagrant_jar
@hosts('node1')
def start_topology(topology, repo=None, local_mode=False, config_path='/vagrant/opensoc/OpenSOC_Configs/', generator_spout=False):
    '''Builds and copies a fresh topology jar from a locally cloned opensoc-streaming and submits it to storm'''
    # Build (or reuse) the jar, honoring a custom repo path when given.
    if repo is None:
        jar = get_topologies()
    else:
        jar = get_topologies(repo)
    # The storm CLI expects literal 'true'/'false' strings for these flags.
    local_mode = 'true' if local_mode else 'false'
    generator_spout = 'true' if generator_spout else 'false'
    with cd('/vagrant/resources/opensoc/'):
        run('/opt/storm/bin/storm jar {0} {1} -local_mode {2} -config_path {3} -generator_spout {4}'.format(
            jar,
            topology,
            local_mode,
            config_path,
            generator_spout
        ))
def quickstart():
    '''Start OpenSOC with bro, snort, and pcap'''
    # run post setup tasks (format namenode, start hadoop/hbase daemons)
    postsetup()
    # clone opensoc-streaming if it's not here locally
    if not os.path.exists('../opensoc-streaming'):
        with lcd('../'):
            local('git clone https://github.com/OpenSOC/opensoc-streaming.git')
    else:
        # FIX: parenthesized print — identical on py2 (single arg), valid on py3.
        print(green('Found a copy of opensoc-streaming in ../opensoc-streaming.'))
    # One raw kafka topic + one storm topology per telemetry source.
    for top in ['bro', 'sourcefire', 'pcap']:
        topic = '{0}_raw'.format(top)
        # create kafka topic
        execute(create_topic, topic, host='node2')
        # launch topology
        topology = 'com.opensoc.topology.{0}'.format(top.capitalize())
        execute(start_topology, topology, config_path='config/')