func main() { quit = make(chan bool) update = make(chan *libovsdb.TableUpdates) cache = make(map[string]map[string]libovsdb.Row) // By default libovsdb connects to 127.0.0.0:6400. ovs, err := libovsdb.Connect("", 0) // If you prefer to connect to OVS in a specific location : // ovs, err := libovsdb.Connect("192.168.56.101", 6640) if err != nil { fmt.Println("Unable to Connect ", err) os.Exit(1) } var notifier Notifier ovs.Register(notifier) initial, _ := ovs.MonitorAll("Open_vSwitch", "") populateCache(*initial) fmt.Println(`Silly game of stopping this app when a Bridge with name "stop" is monitored !`) go play(ovs) <-quit }
func (o *OvsMonitor) StartMonitoring() error { ovsdb, err := libovsdb.Connect(o.Addr, o.Port) if err != nil { return err } o.OvsClient = &OvsClient{ovsdb: ovsdb} notifier := Notifier{monitor: o} ovsdb.Register(notifier) requests := make(map[string]libovsdb.MonitorRequest) err = o.setMonitorRequests("Bridge", &requests) if err != nil { return err } err = o.setMonitorRequests("Interface", &requests) if err != nil { return err } err = o.setMonitorRequests("Port", &requests) if err != nil { return err } updates, err := ovsdb.Monitor("Open_vSwitch", "", requests) if err != nil { return err } o.updateHandler(updates) return nil }
func NewDriver() (*Driver, error) { docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { return nil, fmt.Errorf("could not connect to docker: %s", err) } // initiate the ovsdb manager port binding var ovsdb *libovsdb.OvsdbClient retries := 3 for i := 0; i < retries; i++ { ovsdb, err = libovsdb.Connect(localhost, ovsdbPort) if err == nil { break } log.Errorf("could not connect to openvswitch on port [ %d ]: %s. Retrying in 5 seconds", ovsdbPort, err) time.Sleep(5 * time.Second) } if ovsdb == nil { return nil, fmt.Errorf("could not connect to open vswitch") } d := &Driver{ dockerer: dockerer{ client: docker, }, ovsdber: ovsdber{ ovsdb: ovsdb, }, networks: make(map[string]*NetworkState), } // Initialize ovsdb cache at rpc connection setup d.ovsdber.initDBCache() return d, nil }
func NewOpsManager(target string) (*OpsManager, error) { ops, err := ovsdb.Connect("", 0) if err != nil { return nil, err } opsUpdateCh := make(chan *ovsdb.TableUpdates) n := NewNotifier(opsUpdateCh) ops.Register(n) return &OpsManager{ ops: ops, opsCh: make(chan *OpsOperation, 1024), opsUpdateCh: opsUpdateCh, bgpReady: false, cache: make(map[string]map[string]ovsdb.Row), target: target, }, nil }
func NewOpsManager(grpcCh chan *server.GrpcRequest) (*OpsManager, error) { ops, err := ovsdb.Connect("", 0) if err != nil { return nil, err } gQueue := make([]*server.GrpcRequest, 0) opsUpdateCh := make(chan *ovsdb.TableUpdates) n := NewNotifier(opsUpdateCh) ops.Register(n) return &OpsManager{ ops: ops, grpcCh: grpcCh, opsCh: make(chan *OpsOperation, 1024), opsUpdateCh: opsUpdateCh, grpcQueue: gQueue, bgpReady: false, cache: make(map[string]map[string]ovsdb.Row), }, nil }
func New(version string, ctx *cli.Context) (Driver, error) { docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { return nil, fmt.Errorf("could not connect to docker: %s", err) } // initiate the ovsdb manager port binding ovsdb, err := libovsdb.Connect(localhost, ovsdbPort) if err != nil { return nil, fmt.Errorf("could not connect to openvswitch on port [ %d ]: %s", ovsdbPort, err) } // bind user defined flags to the plugin config if ctx.String("bridge-name") != "" { bridgeName = ctx.String("bridge-name") } // lower bound of v4 MTU is 68-bytes per rfc791 if ctx.Int("mtu") >= minMTU { defaultMTU = ctx.Int("mtu") } else { log.Fatalf("The MTU value passed [ %d ] must be greater then [ %d ] bytes per rfc791", ctx.Int("mtu"), minMTU) } // Parse the container subnet containerGW, containerCidr, err := net.ParseCIDR(ctx.String("bridge-subnet")) if err != nil { log.Fatalf("Error parsing cidr from the subnet flag provided [ %s ]: %s", FlagBridgeSubnet, err) } // Update the cli.go global var with the network if user provided bridgeSubnet = containerCidr.String() switch ctx.String("mode") { /* [ flat ] mode */ //Flat mode requires a gateway IP address is used just like any other //normal L2 domain. If no gateway is specified, we attempt to guess using //the first usable IP on the container subnet from the CLI argument. //Example "192.168.1.0/24" we guess at a gatway of "192.168.1.1". 
//Flat mode requires a bridge-subnet flag with a subnet from your existing network case modeFlat: ovsDriverMode = modeFlat if ctx.String("gateway") != "" { // bind the container gateway to the IP passed from the CLI cliGateway := net.ParseIP(ctx.String("gateway")) if cliGateway == nil { log.Fatalf("The IP passed with the [ gateway ] flag [ %s ] was not a valid address: %s", FlagGateway.Value, err) } containerGW = cliGateway } else { // if no gateway was passed, guess the first valid address on the container subnet containerGW = ipIncrement(containerGW) } /* [ nat ] mode */ //If creating a private network that will be NATed on the OVS bridge via IPTables //it is not required to pass a subnet since in a single host scenario it is hidden //from the network once it is masqueraded via IP tables. case modeNAT, "": ovsDriverMode = modeNAT if ctx.String("gateway") != "" { // bind the container gateway to the IP passed from the CLI cliGateway := net.ParseIP(ctx.String("gateway")) if cliGateway == nil { log.Fatalf("The IP passed with the [ gateway ] flag [ %s ] was not a valid address: %s", FlagGateway.Value, err) } containerGW = cliGateway } else { // if no gateway was passed, guess the first valid address on the container subnet containerGW = ipIncrement(containerGW) } default: log.Fatalf("Invalid ovs mode supplied [ %s ]. The plugin currently supports two modes: [ %s ] or [ %s ]", ctx.String("mode"), modeFlat, modeNAT) } pluginOpts := &pluginConfig{ mtu: defaultMTU, bridgeName: bridgeName, mode: ovsDriverMode, brSubnet: containerCidr, gatewayIP: containerGW, } // Leaving as info for now. Change to debug eventually log.Infof("Plugin configuration: \n %s", pluginOpts) ipAllocator := ipallocator.New() d := &driver{ dockerer: dockerer{ client: docker, }, ovsdber: ovsdber{ ovsdb: ovsdb, }, ipAllocator: ipAllocator, pluginConfig: *pluginOpts, version: version, } // Initialize ovsdb cache at rpc connection setup d.ovsdber.initDBCache() return d, nil }
// Based on RFC 7047, an empty condition should return all rows from a table. However,
// libovsdb does not seem to support that yet. This is the simplest, most common solution
// to it.
func noCondition() []interface{} {
	// "_uuid != _" matches every row: no real UUID ever equals the
	// placeholder string "_".
	return []interface{}{ovs.NewCondition("_uuid", "!=", ovs.UUID{GoUUID: "_"})}
}

// newCondition wraps a single OVSDB condition in the slice form expected by
// transact operations.
func newCondition(column, function string, value interface{}) []interface{} {
	return []interface{}{ovs.NewCondition(column, function, value)}
}

// Open creates a new Ovsdb connection.
// It's stored in a variable so we can mock it out for the unit tests.
// NOTE(review): the Client is built even when Connect fails — callers must
// check the returned error before using it.
var Open = func() (Client, error) {
	client, err := ovs.Connect("127.0.0.1", 6640)
	return Client{ovsdbClient{client}}, err
}

// Close destroys an Ovsdb connection created by Open.
func (ovsdb Client) Close() {
	ovsdb.disconnect()
}

// CreateLogicalSwitch creates a new logical switch in OVN.
// It first selects any existing Logical_Switch row with the given name;
// the remainder of this definition continues beyond the visible source.
func (ovsdb Client) CreateLogicalSwitch(lswitch string) error {
	check, err := ovsdb.transact("OVN_Northbound", ovs.Operation{
		Op:    "select",
		Table: "Logical_Switch",
		Where: newCondition("name", "==", lswitch),
	})
// connect to database server using IP and Port func Connect(ipAddr string, port int) (*libovsdb.OvsdbClient, error) { return libovsdb.Connect(ipAddr, port) }