// Release an interface for a select ip func Release(job *engine.Job) error { var ( id = job.Args[0] containerInterface = currentInterfaces.Get(id) ) if containerInterface == nil { return fmt.Errorf("No network information to release for %s", id) } for _, nat := range containerInterface.PortMappings { if err := portmapper.Unmap(nat); err != nil { logrus.Infof("Unable to unmap port %s: %s", nat, err) } } if err := ipAllocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil { logrus.Infof("Unable to release IPv4 %s", err) } if globalIPv6Network != nil { if err := ipAllocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil { logrus.Infof("Unable to release IPv6 %s", err) } } return nil }
func translatePullError(err error, ref reference.Named) error { switch v := err.(type) { case errcode.Errors: if len(v) != 0 { for _, extra := range v[1:] { logrus.Infof("Ignoring extra error returned from registry: %v", extra) } return translatePullError(v[0], ref) } case errcode.Error: var newErr error switch v.Code { case errcode.ErrorCodeDenied: // ErrorCodeDenied is used when access to the repository was denied newErr = errors.Errorf("repository %s not found: does not exist or no read access", ref.Name()) case v2.ErrorCodeManifestUnknown: newErr = errors.Errorf("manifest for %s not found", ref.String()) case v2.ErrorCodeNameUnknown: newErr = errors.Errorf("repository %s not found", ref.Name()) } if newErr != nil { logrus.Infof("Translating %q to %q", err, newErr) return newErr } case xfer.DoNotRetry: return translatePullError(v.Err, ref) } return err }
func (k *kubernetes) checkConnectionRetry(c *container, ipaddr, protocol string, port, delay, retries int) error { var protoStr string var err error err = nil if protocol == "udp" { protoStr = "-u" } logrus.Infof("Checking connection from %s to ip %s on port %d, delay: %d, retries: %d", c, ipaddr, port, delay, retries) for i := 0; i < retries; i++ { _, err = k.exec(c, fmt.Sprintf("nc -z -n -v -w 1 %s %s %v", protoStr, ipaddr, port)) if err == nil { logrus.Infof("Connection to ip %s on port %d SUCCEEDED, tries: %d", ipaddr, port, i+1) return nil } time.Sleep(2 * time.Second) } logrus.Errorf("Connection to ip %s on port %d FAILED %v", ipaddr, port, err) return err }
// setupTestServer creates a listener for the rest requests. func setupTestServer() { router := mux.NewRouter() // register handlers for cni t := router.Headers("Content-Type", "application/json").Methods("POST").Subrouter() t.HandleFunc(cniapi.EPAddURL, httpWrapper(stubAddPod)) t.HandleFunc(cniapi.EPDelURL, httpWrapper(stubDeletePod)) driverPath := cniapi.ContivCniSocket os.Remove(driverPath) os.MkdirAll(cniapi.PluginPath, 0700) go func() { l, err := net.ListenUnix("unix", &net.UnixAddr{Name: driverPath, Net: "unix"}) if err != nil { panic(err) } logger.Infof("k8s test plugin listening on %s", driverPath) http.Serve(l, router) l.Close() logger.Infof("k8s test plugin closing %s", driverPath) }() }
func main() { startTime := currentTimeMillis() transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory()) protocolFactory := thrift.NewTBinaryProtocolFactoryDefault() transport, err := thrift.NewTSocket(NETWORK_ADDR) if err != nil { logrus.Fatal(os.Stderr, "error resolving address:", err) } useTransport := transportFactory.GetTransport(transport) client := rpc.NewSessionManagerClientFactory(useTransport, protocolFactory) if err := transport.Open(); err != nil { logrus.Fatal(os.Stderr, "Error opening socket to "+NETWORK_ADDR, err) } defer transport.Close() // 开始调用服务的接口 ctx := rpc.NewSessionContext() sid, _ := client.CreateSession(ctx) logrus.Infof("创新新的会话id => %s", sid) ctx, _ = client.GetSession(sid) logrus.Infof("获取会话上下文 => %+v", ctx) endTime := currentTimeMillis() logrus.Infof("本次调用用时: %d 毫秒", endTime-startTime) }
func (p *Resource) AddNodesHandler(req *restful.Request, resp *restful.Response) { logrus.Infof("AddNodesHandler is called!") // Stub an repairpolicy to be populated from the body request := entity.AddNodeRequest{} // Populate the user data err := json.NewDecoder(req.Request.Body).Decode(&request) logrus.Infof("Username is %v", request.UserName) logrus.Infof("Cluster is %v", request.ClusterName) logrus.Infof("CreateNumber is %v", request.CreateNumber) if err != nil { logrus.Errorf("convert body to AddNodesRequest failed, error is %v", err) response.WriteStatusError(services.COMMON_ERROR_INVALIDATE, err, resp) return } servers, code, err := services.GetDeployService().CreateNode(request) var res response.Response if err != nil { errObj := response.Error{Code: code, ErrorMsg: fmt.Sprintf("%v", err)} res = response.Response{Success: true, Error: &errObj, Data: servers} } else { res = response.Response{Success: true, Data: servers} } resp.WriteEntity(res) return }
func (p *Resource) DeleteClusterHandler(req *restful.Request, resp *restful.Response) { logrus.Infof("DeleteClusterHandler is called!") // Stub an repairpolicy to be populated from the body request := entity.DeleteRequest{} // Populate the user data err := json.NewDecoder(req.Request.Body).Decode(&request) logrus.Infof("Username is %v", request.UserName) logrus.Infof("Cluster is %v", request.ClusterName) logrus.Infof("Servers is %v", request.Servers) if err != nil { logrus.Errorf("convert body to DeleteClusterRequest failed, error is %v", err) response.WriteStatusError(services.COMMON_ERROR_INVALIDATE, err, resp) return } code, err := services.GetDeployService().DeleteCluster(request.UserName, request.ClusterName, request.Servers) if err != nil { response.WriteStatusError(code, err, resp) return } res := response.Response{Success: true} resp.WriteEntity(res) return }
// Create a new RPC server func NewRpcServer(portNo uint16) (*rpc.Server, net.Listener) { server := rpc.NewServer() // Listens on a port l, e := net.Listen("tcp", fmt.Sprintf(":%d", portNo)) if e != nil { log.Fatal("listen error:", e) } log.Infof("RPC Server is listening on %s\n", l.Addr()) // run in background go func() { for { conn, err := l.Accept() if err != nil { // if listener closed, just exit the groutine if strings.Contains(err.Error(), "use of closed network connection") { return } log.Fatal(err) } log.Infof("Server accepted connection to %s from %s\n", conn.LocalAddr(), conn.RemoteAddr()) go server.ServeCodec(jsonrpc.NewServerCodec(conn)) } }() return server, l }
// Create a new client func dialRpcClient(servAddr string, portNo uint16) (*rpc.Client, net.Conn) { var client *rpc.Client var conn net.Conn var err error log.Infof("Connecting to RPC server: %s:%d", servAddr, portNo) // Retry connecting for 10sec for i := 0; i < 10; i++ { // Connect to the server conn, err = net.Dial("tcp", fmt.Sprintf("%s:%d", servAddr, portNo)) if err == nil { log.Infof("Connected to RPC server: %s:%d", servAddr, portNo) // Create an RPC client client = jsonrpc.NewClient(conn) break } log.Warnf("Error %v connecting to %s:%s. Retrying..", err, servAddr, portNo) // Sleep for a second and retry again time.Sleep(1 * time.Second) } // If we failed to connect, report error if client == nil { log.Errorf("Failed to connect to Rpc server %s:%d", servAddr, portNo) return nil, nil } return client, conn }
func (driver *driver) RequestAddress(a *ipamApi.RequestAddressRequest) (*ipamApi.RequestAddressResponse, error) { log.Debugf("Address Request request: %+v", a) if len(a.Address) > 0 { addr := fmt.Sprintf("%s/32", a.Address) if _, ok := driver.pool.allocatedIPs[addr]; ok { return nil, fmt.Errorf("%s already allocated", addr) } resp := &ipamApi.RequestAddressResponse{ Address: addr, } log.Infof("Addresse request response: %+v", resp) return resp, nil } again: // just generate a random address rand.Seed(time.Now().UnixNano()) ip := driver.pool.subnet.IP.To4() ip[3] = byte(rand.Intn(254)) netIP := fmt.Sprintf("%s/32", ip) log.Infof("ip:%s", netIP) _, ok := driver.pool.allocatedIPs[netIP] if ok { goto again } driver.pool.allocatedIPs[netIP] = true resp := &ipamApi.RequestAddressResponse{ Address: fmt.Sprintf("%s", netIP), } log.Infof("Addresse request response: %+v", resp) return resp, nil }
func pluginInstall(c *cli.Context) error { // Input validation pluginSource := c.String("source") if pluginSource == "" { log.Fatal("Missing required input: source") } pluginBinary := c.String("bin-source") pluginVersionTag := c.String("version") // Install if pluginVersionTag == "" { log.Infof("=> Installing plugin from (%s) with latest version...", pluginSource) } else { log.Infof("=> Installing plugin (%s) with version (%s)...", pluginSource, pluginVersionTag) } plugin, version, err := plugins.InstallPlugin(pluginSource, pluginBinary, pluginVersionTag) if err != nil { log.Fatalf("Failed to install plugin from (%s), error: %s", pluginSource, err) } fmt.Println() log.Infoln(colorstring.Greenf("Plugin (%s) with version (%s) installed ", plugin.Name, version)) if len(plugin.Description) > 0 { fmt.Println() fmt.Println(plugin.Description) fmt.Println() } return nil }
func (b *B2dUtils) CopyIsoToMachineDir(isoURL, machineName string) error { machinesDir := GetMachineDir() machineIsoPath := filepath.Join(machinesDir, machineName, b.isoFilename) // just in case the cache dir has been manually deleted, // check for it and recreate it if it's gone if _, err := os.Stat(b.imgCachePath); os.IsNotExist(err) { log.Infof("Image cache does not exist, creating it at %s...", b.imgCachePath) if err := os.Mkdir(b.imgCachePath, 0700); err != nil { return err } } // By default just copy the existing "cached" iso to // the machine's directory... if isoURL == "" { if err := b.copyDefaultIsoToMachine(machineIsoPath); err != nil { return err } } else { // But if ISO is specified go get it directly log.Infof("Downloading %s from %s...", b.isoFilename, isoURL) if err := b.DownloadISO(filepath.Join(machinesDir, machineName), b.isoFilename, isoURL); err != nil { return err } } return nil }
// Create schema func createSchema() { dbDir := filepath.Dir(*flags.DBPath) if _, err := os.Stat(dbDir); os.IsNotExist(err) { os.MkdirAll(dbDir, 0777) log.Infof("Created directory path=%s", dbDir) } db, _ := connection() defer db.Close() stmt, err := db.Prepare(` CREATE TABLE notes ( uid string, created string, updated string, tags string, content string, encrypted INTEGER DEFAULT 0, subject TEXT );`) if err != nil { if err.Error() != "table notes already exists" { log.Panicf("Unable to prepare schema path=%s, err=%v", *flags.DBPath, err) } return } _, err = stmt.Exec() if err != nil { log.Errorf("Unable to create schema err=%v", err) } log.Infof("Schema created path=%s", *flags.DBPath) }
// Verify does the actual check. func (v RSAPSSVerifier) Verify(key data.Key, sig []byte, msg []byte) error { algorithm := key.Algorithm() var pubKey crypto.PublicKey switch algorithm { case data.RSAx509Key: pemCert, _ := pem.Decode([]byte(key.Public())) if pemCert == nil { logrus.Infof("failed to decode PEM-encoded x509 certificate") return ErrInvalid } cert, err := x509.ParseCertificate(pemCert.Bytes) if err != nil { logrus.Infof("failed to parse x509 certificate: %s\n", err) return ErrInvalid } pubKey = cert.PublicKey case data.RSAKey: var err error pubKey, err = x509.ParsePKIXPublicKey(key.Public()) if err != nil { logrus.Infof("failed to parse public key: %s\n", err) return ErrInvalid } default: logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm) return ErrInvalid } digest := sha256.Sum256(msg) return verifyPSS(pubKey, digest[:], sig) }
// Terminate kills the application. It stops polling, deregisters
// services, runs the PreStop hook, then — if a managed process exists —
// sends SIGTERM and schedules a SIGKILL after StopTimeout seconds, or
// kills immediately when no timeout is configured or SIGTERM fails.
func (a *App) Terminate() {
	a.signalLock.Lock()
	defer a.signalLock.Unlock()
	a.stopPolling()
	a.forAllServices(deregisterService)

	// Run and wait for preStop command to exit (continues
	// unconditionally so we don't worry about returned errors here)
	commands.RunAndWait(a.PreStopCmd, log.Fields{"process": "PreStop"})
	if a.Command == nil || a.Command.Cmd == nil ||
		a.Command.Cmd.Process == nil {
		// Not managing the process, so don't do anything
		return
	}
	cmd := a.Command.Cmd // get the underlying process
	if a.StopTimeout > 0 {
		if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
			log.Warnf("Error sending SIGTERM to application: %s", err)
		} else {
			// SIGTERM delivered: arm an asynchronous hard kill that fires
			// after StopTimeout seconds, and return without waiting.
			time.AfterFunc(time.Duration(a.StopTimeout)*time.Second, func() {
				log.Infof("Killing Process %#v", cmd.Process)
				cmd.Process.Kill()
			})
			return
		}
	}
	// No timeout configured, or SIGTERM could not be sent: kill now.
	log.Infof("Killing Process %#v", a.Command.Cmd.Process)
	cmd.Process.Kill()
}
// handleConflict takes a candidate and adds it as conflict file. // If the conflict file already exists, it will be updated. func handleConflict(cnd candidate) error { log.Infof("Conflicting files: %s (own) <-> %s (remote)", cnd.ownPath, cnd.bobPath) bobOwner, err := cnd.bobStore.Owner() if err != nil { return err } bobFile, err := cnd.bobStore.fs.LookupFile(cnd.bobPath) if err != nil { return err } conflictPath := cnd.ownPath + "." + bobOwner.ID().User() + ".conflict" log.Infof("Creating conflict file: %s", conflictPath) bobHash := bobFile.Hash() bobSize := bobFile.Size() bobKey := bobFile.Key() _, err = stageFile( cnd.ownStore.fs, conflictPath, bobHash, bobKey, bobSize, bobOwner.ID(), ) if err == ErrNoChange { return nil } return err }
// helper function to log the stream of bytes from a reader while waiting on // the error channel. It returns on first error received on the channel func logOutputAndReturnStatus(r io.Reader, errCh chan error, cancelCh CancelChannel, cancelFunc context.CancelFunc, jobLogs io.Writer) error { // this can happen if an error occurred before the ansible could be run, // just return that error if r == nil { return <-errCh } // redirect read output to job logs t := io.TeeReader(r, jobLogs) s := bufio.NewScanner(t) ticker := time.Tick(50 * time.Millisecond) for { var err error select { case <-cancelCh: err = errJobCancelled cancelFunc() for s.Scan() { logrus.Infof("%s", s.Bytes()) } return err case err := <-errCh: for s.Scan() { logrus.Infof("%s", s.Bytes()) } return err case <-ticker: // scan any available output while waiting if s.Scan() { logrus.Infof("%s", s.Bytes()) } } } }
func verifyEPDel(t *testing.T, brName, epIP string) { flowList, err := ofctlFlowDump(brName) if err != nil { t.Errorf("Error getting flow entries. Err: %v", err) return } matchStr := fmt.Sprintf("tcp,nw_src=%s", epIP) if ofctlRawFlowMatch(flowList, matchStr) { t.Errorf("DNAT TCP flows still present %s", flowList) } else { log.Infof("DNAT TCP flows removed as expected") } matchStr = fmt.Sprintf("udp,nw_src=%s", epIP) if ofctlRawFlowMatch(flowList, matchStr) { t.Errorf("DNAT UDP flows still present %s", flowList) } else { log.Infof("DNAT UDP flows removed as expected") } matchStr = fmt.Sprintf("nw_dst=%s,tp_src=9600", epIP) if ofctlRawFlowMatch(flowList, matchStr) { t.Errorf("SNAT flows still present %s", flowList) } else { log.Infof("SNAT flows removed as expected") } }
func (p *Resource) CreateClusterHandler(req *restful.Request, resp *restful.Response) { logrus.Infof("CreateClusterHandler is called!") // Stub an repairpolicy to be populated from the body request := entity.Request{} // Populate the user data err := json.NewDecoder(req.Request.Body).Decode(&request) logrus.Infof("Request is %v", request) logrus.Infof("ProviderInfo is %v", request.ProviderInfo) logrus.Infof("AwsEC2Info is %v", request.ProviderInfo.AwsEC2Info) if err != nil { logrus.Errorf("convert body to request failed, error is %v", err) response.WriteStatusError(services.COMMON_ERROR_INVALIDATE, err, resp) return } servers, code, err := services.GetDeployService().CreateCluster(request) if err != nil { response.WriteStatusError(code, err, resp) return } res := response.Response{Success: true, Data: servers} resp.WriteEntity(res) return }
func verifyLB(t *testing.T, brName string, provIPs, provMacs []string) { flowList, err := ofctlFlowDump(brName) if err != nil { t.Errorf("Error getting flow entries. Err: %v", err) return } for _, provIP := range provIPs { matchStr := fmt.Sprintf("set_field:%s->ip_dst", provIP) if ofctlRawFlowMatch(flowList, matchStr) { log.Infof("DNAT to %s found", provIP) } else { t.Errorf("DNAT flows to %s NOT found %s", provIP, flowList) } matchStr = fmt.Sprintf("tcp,nw_src=%s", provIP) if ofctlRawFlowMatch(flowList, matchStr) { log.Infof("SNAT for %s found", provIP) } else { t.Errorf("SNAT flows for %s NOT found %s", provIP, flowList) } } for _, provMac := range provMacs { matchStr := fmt.Sprintf("set_field:%s->eth_dst", provMac) if ofctlRawFlowMatch(flowList, matchStr) { log.Infof("MAC rewrite to %s found", provMac) } else { t.Errorf("MAC rewrite flows to %s NOT found %s", provMac, flowList) } } }
func freeNetworkResources(stateDriver core.StateDriver, nwMasterCfg *NwConfig, nwCfg *drivers.OvsCfgNetworkState, gCfg *gstate.Cfg) (err error) { tempRm, err := resources.GetStateResourceManager() if err != nil { return err } rm := core.ResourceManager(tempRm) if nwCfg.PktTagType == "vlan" { err = gCfg.FreeVLAN(rm, uint(nwCfg.PktTag)) if err != nil { return err } } else if nwCfg.PktTagType == "vxlan" { log.Infof("freeing vlan %d vxlan %d", nwCfg.PktTag, nwCfg.ExtPktTag) err = gCfg.FreeVXLAN(rm, uint(nwCfg.ExtPktTag), uint(nwCfg.PktTag)) if err != nil { return err } } if nwMasterCfg.SubnetIP == "" { log.Infof("freeing subnet %s/%s", nwCfg.SubnetIP, nwCfg.SubnetLen) err = gCfg.FreeSubnet(rm, nwCfg.SubnetIP) if err != nil { return err } } return err }
func rawConnectionFromSerial() (net.Conn, error) { log.Info("opening ttyS0 for backchannel") f, err := os.OpenFile(pathPrefix+"/ttyS0", os.O_RDWR|os.O_SYNC|syscall.O_NOCTTY, backchannelMode) if err != nil { detail := fmt.Errorf("failed to open serial port for backchannel: %s", err) log.Error(detail) return nil, detail } // set the provided FDs to raw if it's a termial // 0 is the uninitialized value for Fd if f.Fd() != 0 && terminal.IsTerminal(int(f.Fd())) { log.Debug("setting terminal to raw mode") s, err := terminal.MakeRaw(int(f.Fd())) if err != nil { return nil, err } log.Infof("s = %#v", s) } var conn net.Conn log.Infof("creating raw connection from ttyS0 (fd=%d)", f.Fd()) conn, err = serial.NewFileConn(f) return conn, err }
// Stop halts a container by sending SIGTERM, waiting for the given // duration in seconds, and then calling SIGKILL and waiting for the // process to exit. If a negative duration is given, Stop will wait // for SIGTERM forever. If the container is not running Stop returns // immediately. func (container *Container) Stop(seconds int) error { if !container.IsRunning() { return nil } // 1. Send a SIGTERM if err := container.killPossiblyDeadProcess(int(syscall.SIGTERM)); err != nil { logrus.Infof("Failed to send SIGTERM to the process, force killing") if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil { return err } } // 2. Wait for the process to exit on its own if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3. If it doesn't, then send SIGKILL if err := container.Kill(); err != nil { container.WaitStop(-1 * time.Second) return err } } container.logEvent("stop") return nil }
func createdb(conn string, dbname string) error { db, err := sql.Open("mysql", conn) if err != nil { log.Errorf("Cannot open database(%s), err: %v", conn, err) return err } usedbSql := fmt.Sprintf("use %s", dbname) _, err = db.Exec(usedbSql) if err == nil { log.Infof("DB(%s) already exists, no need to create it.", dbname) return err } defer db.Close() dbCreateSql := fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", dbname) _, err = db.Exec(dbCreateSql) if err == nil { log.Infof("Succeed to create db(%s)", dbname) } usedbSql = fmt.Sprintf("use %s", dbname) _, err = db.Exec(usedbSql) if err == nil { log.Infof("DB(%s) already exists, no need to create it.", dbname) return err } return nil }
// supervise() listens for error notifications and triggers graceful restart
func (s *Supervisor) supervise() {
	for {
		select {
		case err := <-s.restartC:
			// This means graceful shutdown, do nothing and return
			if err == nil {
				log.Infof("watchErrors - graceful shutdown")
				s.stop()
				return
			}
			// Non-nil error: restart the proxy, sleeping retryPeriod
			// between attempts until init succeeds.
			for {
				s.options.Clock.Sleep(retryPeriod)
				log.Infof("supervise() restarting %s on error: %s", s.proxy, err)
				// We failed to initialize server, this error can not be recovered, so send an error and exit
				if err := s.init(); err != nil {
					log.Infof("Failed to initialize %s, will retry", err)
				} else {
					break
				}
			}
		case <-s.broadcastCloseC:
			// External close request: stop without graceful drain flag.
			s.Stop(false)
		}
	}
}
func (b *Bridge) setupBridge(externalPort string) error { la := netlink.NewLinkAttrs() la.Name = b.bridgeName bridge, _ := netlink.LinkByName(b.bridgeName) if bridge == nil { log.Debugf("Bridge %s does not exist ", b.bridgeName) out, err := exec.Command("ovs-vsctl", "add-br", b.bridgeName).CombinedOutput() if err != nil { log.Fatalf("Bridge %s creation failed been created. Resp: %s, err: %s", b.bridgeName, out, err) } log.Infof("Bridge %s has been created. Resp: %s", b.bridgeName, out) out, err = exec.Command("ovs-vsctl", "add-port", b.bridgeName, externalPort).CombinedOutput() if err != nil { log.Fatalf("Failed to add external port %s. Resp: %s, err: %s", externalPort, out, err) } log.Infof("External port %s has been added to %s. Resp: %s", externalPort, b.bridgeName, out) out, err = exec.Command("ifconfig", externalPort, "0.0.0.0").CombinedOutput() if err != nil { log.Fatalf("Failed to ip address of port %s. Resp: %s, err: %s", externalPort, out, err) } log.Infof("Ip address of port %s has been cleaned. Resp: %s", externalPort, out) return err } else { log.Debugf("Bridge %s already exsist", b.bridgeName) } return nil }
func ClientOK(endpoint string, test func() bool) error { backoff := util.Backoff{} defer backoff.Close() var err error retry := false for ok := range backoff.Start() { if !ok { err = fmt.Errorf("Timeout waiting for Docker at %s", endpoint) break } if test() { break } retry = true log.Infof("Waiting for Docker at %s", endpoint) } if err != nil { return err } if retry { log.Infof("Connected to Docker at %s", endpoint) } return nil }
func applyNetConf(link netlink.Link, netConf config.InterfaceConfig) error { if netConf.DHCP { log.Infof("Running DHCP on %s", link.Attrs().Name) cmd := exec.Command("dhcpcd", "-A4", "-e", "force_hostname=true", link.Attrs().Name) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Error(err) } } else if netConf.IPV4LL { if err := AssignLinkLocalIP(link); err != nil { log.Error("IPV4LL set failed") return err } } else if netConf.Address == "" { return nil } else { addr, err := netlink.ParseAddr(netConf.Address) if err != nil { return err } if err := netlink.AddrAdd(link, addr); err != nil { log.Error("addr add failed") return err } log.Infof("Set %s on %s", netConf.Address, link.Attrs().Name) } if netConf.MTU > 0 { if err := netlink.LinkSetMTU(link, netConf.MTU); err != nil { log.Error("set MTU Failed") return err } } if err := netlink.LinkSetUp(link); err != nil { log.Error("failed to setup link") return err } if netConf.Gateway != "" { gatewayIp := net.ParseIP(netConf.Gateway) if gatewayIp == nil { return errors.New("Invalid gateway address " + netConf.Gateway) } route := netlink.Route{ Scope: netlink.SCOPE_UNIVERSE, Gw: net.ParseIP(netConf.Gateway), } if err := netlink.RouteAdd(&route); err != nil { log.Error("gateway set failed") return err } log.Infof("Set default gateway %s", netConf.Gateway) } return nil }
func (c *controller) cleanupLocalEndpoints() { nl, err := c.getNetworksForScope(datastore.LocalScope) if err != nil { log.Warnf("Could not get list of networks during endpoint cleanup: %v", err) return } for _, n := range nl { epl, err := n.getEndpointsFromStore() if err != nil { log.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err) continue } for _, ep := range epl { log.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id) if err := ep.Delete(true); err != nil { log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) } } epl, err = n.getEndpointsFromStore() if err != nil { log.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err) continue } epCnt := n.getEpCnt().EndpointCnt() if epCnt != uint64(len(epl)) { log.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt) n.getEpCnt().setCnt(uint64(len(epl))) } } }
func (r *Router) handleQuery(serfClient *client.RPCClient, query client.QueryEventRecord) { r.Lock() defer r.Unlock() var ( response []byte handlerFunc ResponderFunc ok bool err error ) if f := r.findHandlerFunc(query.Name); f == nil { log.Infof("no handler for query: %q", query.Name) return } else if handlerFunc, ok = f.(ResponderFunc); !ok { log.Infof("no handler for query: %q", query.Name) return } if response, err = handlerFunc(query.Name, query); err != nil { log.Infof("query handler failed. Error: %s", err) // failure returned by handlers are not considered fatal // TODO: handle panics inside event handlers as well return } if err := serfClient.Respond(query.ID, response); err != nil { log.Errorf("responding to query failed. Response body: %v, Error: %s", response, err) } }