// Go-is-is daemon

// +build linux

package main
import (
    "bytes"
    "encoding/hex"
    "flag"
    "net"
    "os"
    "os/signal"
    "runtime"
    "strconv"
    "strings"
    "sync"
    "syscall"

    pb "github.com/connorwstein/go-is-is/config"
    "github.com/golang/glog"
    "github.com/vishvananda/netlink"
    "golang.org/x/net/context"
    "golang.org/x/sys/unix"
    "google.golang.org/grpc"
    "google.golang.org/grpc/reflection"
)
var wg sync.WaitGroup
var l1_multicast []byte
var cfg *Config

const (
    GRPC_CFG_SERVER_PORT = "50051"
    RECV_LOG_PREFIX      = "RECV:"
    SEND_LOG_PREFIX      = "SEND:"
    CHAN_BUF_SIZE        = 1000
)
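// Config is the global configuration for this IS-IS node: the system ID plus the
// list of local interfaces, guarded by a single mutex.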
type Config struct {
    lock sync.Mutex
    sid  string // Format is 6 bytes in a hex encoded string, with a '.' between bytes 2-3 and 4-5
    // Keep adjacencies and interfaces separate in case we want to do multiple
    // IS-IS levels, in which case there would be a level-1 and level-2 adjacency
    // each pointing to the same interface
    interfaces []*Intf // Slice of local interfaces
}
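// Intf holds per-interface state: the local IPv4 prefix and mask, the kernel routes
// learned via netlink, a single adjacency, and the per-LSP flood flags keyed by LSP ID.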
type Intf struct {
    adj    *Adjacency
    name   string
    prefix net.IP
    mask   net.IPMask
    routes []*net.IPNet
    // Each interface has an SRM and SSN flag per LSP
    // Map where the keys are the LspIDs
    lock           sync.Mutex
    lspFloodStates map[uint64]*LspFloodState
}
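// LspFloodState tracks the flooding state of one LSP on one interface. In IS-IS the
// SRM (Send Routing Message) flag marks an LSP that still needs to be flooded out this
// interface, and the SSN (Send Sequence Numbers) flag marks an LSP that should be
// included in the next PSNP sent on this interface.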
type LspFloodState struct {
    LspIDKey uint64
    LspID    [8]byte
    SRM      bool
    SSN      bool
}

type Adjacency struct {
    state            string // Can be NEW, INITIALIZING or UP
    neighborSystemID []byte
    metric           uint32
    intfName         string
    neighborIP       net.IP
}
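// getAdjacency returns the UP adjacency whose neighbor system ID matches the given
// dotted hex string, or nil if no such adjacency exists.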
func getAdjacency(neighborSystemID string) *Adjacency {
    for i := range cfg.interfaces {
        if cfg.interfaces[i].adj.state == "UP" {
            if systemIDToString(cfg.interfaces[i].adj.neighborSystemID) == neighborSystemID {
                return cfg.interfaces[i].adj
            }
        }
    }
    return nil
}
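// systemIDToString renders a 6-byte system ID as a dotted hex string,
// e.g. []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11} -> "1111.1111.1111".
// An empty string is returned if the slice is not exactly 6 bytes.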
func systemIDToString(system_id []byte) string {
    // Byte slice should be 6 bytes
    if len(system_id) != 6 {
        return ""
    }
    result := ""
    for i := 0; i < 3; i++ {
        result += hex.EncodeToString(system_id[i*2 : i*2+2])
        if i != 2 {
            result += "."
        }
    }
    return result
}
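// systemIDToBytes parses a dotted hex system ID string (e.g. "1111.1111.1111") back
// into its fixed 6-byte form. Malformed input yields a zero or partially filled array.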
func systemIDToBytes(sid string) [6]byte {
    sid = strings.Replace(sid, ".", "", 6)
    sidBytes, _ := hex.DecodeString(sid)
    var fixed [6]byte
    copy(fixed[:], sidBytes)
    return fixed
}
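// getGID returns the ID of the calling goroutine by parsing the "goroutine N [...]"
// header emitted by runtime.Stack. Intended for debug logging only.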
func getGID() uint64 {
    b := make([]byte, 64)
    b = b[:runtime.Stack(b, false)]
    b = bytes.TrimPrefix(b, []byte("goroutine "))
    b = b[:bytes.IndexByte(b, ' ')]
    n, _ := strconv.ParseUint(string(b), 10, 64)
    return n
}

func cleanup() {
    glog.Infof("Cleanup")
}
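// server implements the gRPC Configure and State services generated from the config
// proto (see pb.RegisterConfigureServer / pb.RegisterStateServer in start_grpc).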
type server struct{}

func (s *server) ConfigureSystemID(ctx context.Context, in *pb.SystemIDCfgRequest) (*pb.SystemIDCfgReply, error) {
    cfg.lock.Lock()
    cfg.sid = in.Sid
    glog.Info("Got SID request, setting SID to " + cfg.sid)
    cfg.lock.Unlock()
    // Return a pointer to the system ID reply struct with a message acknowledging that
    // the SID was successfully configured.
    // Note that even though the proto defines the field in lowercase, it is converted
    // to uppercase so it can be exported Go-style.
    return &pb.SystemIDCfgReply{Ack: "SID " + in.Sid + " successfully configured"}, nil
}
func (s *server) GetSystemID(ctx context.Context, in *pb.SystemIDRequest) (*pb.SystemIDReply, error) {
    cfg.lock.Lock()
    var reply pb.SystemIDReply
    reply.Sid = cfg.sid
    cfg.lock.Unlock()
    return &reply, nil
}

func (s *server) GetIntf(ctx context.Context, in *pb.IntfRequest) (*pb.IntfReply, error) {
    cfg.lock.Lock()
    var reply pb.IntfReply
    reply.Intf = make([]string, len(cfg.interfaces))
    for i, intf := range cfg.interfaces {
        intf.lock.Lock()
        interfaces_string := ""
        if intf.adj.state != "UP" {
            interfaces_string += intf.prefix.String() + " " + intf.mask.String() + ", adjacency " + intf.adj.state
        } else {
            interfaces_string += intf.prefix.String() + " " + intf.mask.String() + ", adjacency " + intf.adj.state + " with " + systemIDToString(intf.adj.neighborSystemID)
        }
        reply.Intf[i] = interfaces_string
        intf.lock.Unlock()
    }
    cfg.lock.Unlock()
    return &reply, nil
}
func (s *server) GetLsp(ctx context.Context, in *pb.LspRequest) (*pb.LspReply, error) {
    cfg.lock.Lock()
    var reply pb.LspReply
    reply.Lsp = make([]string, 0)
    nodes := AvlGetAll(UpdateDB.Root)
    for _, node := range nodes {
        reply.Lsp = append(reply.Lsp, node.data.(*IsisLsp).String())
    }
    cfg.lock.Unlock()
    return &reply, nil
}

func (s *server) GetTopo(ctx context.Context, in *pb.TopoRequest) (*pb.TopoReply, error) {
    cfg.lock.Lock()
    var reply pb.TopoReply
    reply.Topo = make([]string, 0)
    reply.Topo = append(reply.Topo, cfg.sid)
    nodes := AvlGetAll(TopoDB.Root)
    for _, node := range nodes {
        reply.Topo = append(reply.Topo, node.data.(*Triple).String())
    }
    cfg.lock.Unlock()
    return &reply, nil
}
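// start_grpc brings up the gRPC configuration/state server on GRPC_CFG_SERVER_PORT.
// Server reflection is registered as well, so a generic client such as grpcurl can
// discover and call the services without needing the proto files.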
func start_grpc() {
    lis, err := net.Listen("tcp", ":"+GRPC_CFG_SERVER_PORT)
    if err != nil {
        glog.Fatalf("gRPC server failed to start listening: %v", err)
    }
    s := grpc.NewServer()
    pb.RegisterConfigureServer(s, &server{})
    pb.RegisterStateServer(s, &server{})
    // Register reflection service on gRPC server.
    reflection.Register(s)
    if err := s.Serve(lis); err != nil {
        glog.Fatalf("gRPC server failed to start serving: %v", err)
    }
}
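// initInterfaces populates the global config with every non-loopback interface that has
// an IPv4 address, seeding a NEW adjacency, an empty LSP flood-state map and the
// interface's kernel routes (fetched via netlink) for each one.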
func initInterfaces() {
    // Initialize the configuration of this IS-IS node
    // with the interface information and a NEW adjacency per
    // interface.
    ifaces, err := net.Interfaces()
    if err != nil {
        glog.Errorf("initInterfaces: %+v\n", err.Error())
        return
    }
    cfg.interfaces = make([]*Intf, 0, len(ifaces))
    for _, i := range ifaces {
        // Ignore loopback interfaces
        if i.Name == "lo" {
            continue
        }
        addrs, err := i.Addrs()
        if err != nil {
            glog.Errorf("initInterfaces: %+v\n", err.Error())
            continue
        }
        for _, a := range addrs {
            switch v := a.(type) {
            case *net.IPNet: // Checking whether this address is a pointer to a net.IPNet struct
                glog.V(1).Info("Found interface ", i.Name, ": ", v)
                // Only work with v4 addresses for now
                if v.IP.To4() != nil {
                    var new_intf Intf
                    new_intf.name = i.Name
                    new_intf.prefix = v.IP
                    new_intf.mask = v.Mask
                    new_intf.lock = sync.Mutex{}
                    var adj Adjacency
                    adj.state = "NEW"
                    adj.intfName = i.Name
                    new_intf.adj = &adj
                    // Initialize the flood state map on that interface
                    // Initially empty, it will grow as LSPs are learned/created
                    new_intf.lspFloodStates = make(map[uint64]*LspFloodState)
                    new_intf.routes = make([]*net.IPNet, 0)
                    // Obtain the routes for that interface
                    link, _ := netlink.LinkByName(i.Name)
                    // Just v4 routes for now, filter by AF_INET
                    routes, _ := netlink.RouteList(link, unix.AF_INET)
                    for _, route := range routes {
                        // IP prefix type: route.Dst
                        if route.Dst != nil {
                            new_intf.routes = append(new_intf.routes, route.Dst)
                        }
                    }
                    // Append rather than pre-size by interface count, so interfaces
                    // without an IPv4 address never leave nil entries in the slice.
                    cfg.interfaces = append(cfg.interfaces, &new_intf)
                } else {
                    // TODO: ipv6 support
                    glog.V(1).Info("IPV6 interface ", i.Name, " not supported")
                }
            default:
                glog.Errorf("Not an ip address %+v\n", v)
            }
        }
    }
}
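// initConfig allocates the global Config with an empty system ID; the SID is set later
// through the gRPC ConfigureSystemID call.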
func initConfig() {
    cfg = &Config{lock: sync.Mutex{}, sid: ""}
}
func main() {
    flag.Parse()
    glog.Info("Booting IS-IS node...")
    // This is a special multicast mac address
    l1_multicast = []byte{0x01, 0x80, 0xc2, 0x00, 0x00, 0x14}
    // Exit goroutine: run cleanup on SIGINT/SIGTERM
    c := make(chan os.Signal, 2)
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)
    go func() {
        <-c
        cleanup()
        os.Exit(1)
    }()
    // Determine the interfaces available on the container
    // and add that to the configuration
    initConfig()
    initInterfaces()
    ethernetInit()
    updateDBInit()
    topoDBInit()
    for _, intf := range cfg.interfaces {
        ethernetIntfInit(intf.name) // Creates send/recv raw sockets
    }
    // Start a couple of goroutines to communicate with other nodes
    // to establish adjacencies. Each goroutine can run
    // totally in parallel to establish adjacencies on each
    // interface.
    // Each goroutine blocks on the hello channel waiting for a hello pdu
    // from the recvPdus goroutine
    wg.Add(1) // Just need one of these because none of the goroutines should exit
    var helloChans, updateChans []chan []byte
    var sendChans []chan []byte
    for i := 0; i < len(cfg.interfaces); i++ {
        helloChans = append(helloChans, make(chan []byte))
        updateChans = append(updateChans, make(chan []byte))
        sendChans = append(sendChans, make(chan []byte))
    }
    triggerSPF := make(chan bool)
    for i, intf := range cfg.interfaces {
        // Waiting to compute topology based on update db
        go isisDecision(triggerSPF)
        // The updateInput goroutine is responsible for setting the SRM flag if required to trigger
        // the flooding
        go isisUpdateInput(intf, updateChans[i], triggerSPF)
        // Periodically check for SRMs on each interface
        go isisUpdate(intf, sendChans[i])
        // Periodically send hellos on each interface
        // 3-way handshake occurs in parallel on each interface
        go isisHelloSend(intf, sendChans[i])
        go isisHelloRecv(intf, helloChans[i], sendChans[i])
        // Each interface has a goroutine for sending and receiving PDUs
        // the recv PDU goroutine will forward the PDU to either the hello or update
        // chan for that interface
        go recvPdus(intf.name, helloChans[i], updateChans[i])
        go sendPdus(intf.name, sendChans[i])
    }
    // Start the gRPC server for accepting configuration (CLI commands)
    go start_grpc()
    wg.Wait()
}