// DeleteEndpointID deletes an endpoint by ID.
func DeleteEndpointID(stateDriver core.StateDriver, epID string) error {
	epCfg := &drivers.OvsCfgEndpointState{}
	epCfg.StateDriver = stateDriver
	err := epCfg.Read(epID)
	if err != nil {
		return err
	}

	nwCfg := &drivers.OvsCfgNetworkState{}
	nwCfg.StateDriver = stateDriver
	err = nwCfg.Read(epCfg.NetID)
	if err != nil {
		return err
	}

	err = freeEndpointResources(epCfg, nwCfg)
	if err != nil {
		return err
	}

	err = epCfg.Clear()
	if err != nil {
		log.Errorf("error clearing ep config. Error: %s", err)
		return err
	}

	err = nwCfg.Write()
	if err != nil {
		log.Errorf("error writing nw config. Error: %s", err)
		return err
	}

	return nil
}
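// A minimal caller sketch for DeleteEndpointID, assuming the utils.GetStateDriver
// helper seen in ProcessDeletions below; the endpoint ID "default:ep1" is a
// hypothetical example value, not taken from the source.
func deleteEndpointExample() error {
	stateDriver, err := utils.GetStateDriver()
	if err != nil {
		return err
	}
	// Reads the endpoint state, frees its resources, and clears it in one call.
	return DeleteEndpointID(stateDriver, "default:ep1")
}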
// RemoveLocalIpv6Flow removes the local IPv6 flow for an endpoint.
func (self *Vrouter) RemoveLocalIpv6Flow(endpoint OfnetEndpoint) error {
	// Find the flow entry
	flowId := self.agent.getEndpointIdByIpVlan(endpoint.Ipv6Addr, endpoint.Vlan)
	ipv6Flow := self.flowDb[flowId]
	if ipv6Flow == nil {
		log.Errorf("Error finding the flow for endpoint: %+v", endpoint)
		return errors.New("flow not found")
	}

	// Delete the Fgraph entry
	err := ipv6Flow.Delete()
	if err != nil {
		log.Errorf("Error deleting the endpoint: %+v. Err: %v", endpoint, err)
	}

	// TODO: where do we add svcProxy endpoint? Do we need it for IPv6?
	// self.svcProxy.DelEndpoint(&endpoint)

	// Remove the endpoint from policy tables
	err = self.policyAgent.DelIpv6Endpoint(&endpoint)
	if err != nil {
		log.Errorf("Error deleting IPv6 endpoint from policy agent {%+v}. Err: %v", endpoint, err)
		return err
	}

	return nil
}
// getSize returns the real size & virtual size of the container.
func (daemon *Daemon) getSize(container *Container) (int64, int64) {
	var (
		sizeRw, sizeRootfs int64
		err                error
	)

	if err := daemon.Mount(container); err != nil {
		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
		return sizeRw, sizeRootfs
	}
	defer daemon.Unmount(container)

	sizeRw, err = container.rwlayer.Size()
	if err != nil {
		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
		// FIXME: GetSize should return an error. Not changing it now in case
		// there is a side-effect.
		sizeRw = -1
	}

	if parent := container.rwlayer.Parent(); parent != nil {
		sizeRootfs, err = parent.Size()
		if err != nil {
			sizeRootfs = -1
		} else if sizeRw != -1 {
			sizeRootfs += sizeRw
		}
	}
	return sizeRw, sizeRootfs
}
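// A hedged usage sketch for getSize: both return values are byte counts, and -1
// marks a size that could not be computed, so callers should check for it.
// daemon and container are assumed to be an initialized *Daemon and *Container.
func logContainerSize(daemon *Daemon, container *Container) {
	sizeRw, sizeRootfs := daemon.getSize(container)
	if sizeRw == -1 || sizeRootfs == -1 {
		logrus.Warnf("size of container %s is only partially known", container.ID)
	}
	logrus.Debugf("container %s: rw layer %d bytes, rootfs %d bytes", container.ID, sizeRw, sizeRootfs)
}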
// GetSize returns the real size and virtual size of the container.
func (container *Container) GetSize() (int64, int64) {
	var (
		sizeRw, sizeRootfs int64
		err                error
		driver             = container.daemon.driver
	)

	if err := container.Mount(); err != nil {
		log.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
		return sizeRw, sizeRootfs
	}
	defer container.Unmount()

	initID := fmt.Sprintf("%s-init", container.ID)
	sizeRw, err = driver.DiffSize(container.ID, initID)
	if err != nil {
		log.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
		// FIXME: GetSize should return an error. Not changing it now in case
		// there is a side-effect.
		sizeRw = -1
	}

	// compute the rootfs size only when basefs actually exists
	if _, err = os.Stat(container.basefs); err == nil {
		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
			sizeRootfs = -1
		}
	}
	return sizeRw, sizeRootfs
}
func (d *Dispatcher) findApplianceByID(conf *metadata.VirtualContainerHostConfigSpec) (*vm.VirtualMachine, error) {
	defer trace.End(trace.Begin(""))

	var err error
	var vmm *vm.VirtualMachine

	moref := new(types.ManagedObjectReference)
	if ok := moref.FromString(conf.ID); !ok {
		message := "Failed to get appliance VM mob reference"
		log.Errorf(message)
		return nil, errors.New(message)
	}
	ref, err := d.session.Finder.ObjectReference(d.ctx, *moref)
	if err != nil {
		if _, ok := err.(*find.NotFoundError); !ok {
			err = errors.Errorf("Failed to query appliance (%s): %s", moref, err)
			return nil, err
		}
		log.Debugf("Appliance is not found")
		return nil, nil
	}
	ovm, ok := ref.(*object.VirtualMachine)
	if !ok {
		// err is nil at this point, so build a real error instead of
		// returning a nil error alongside a nil VM
		err = errors.Errorf("Failed to find VM %s: object is not a VirtualMachine", moref)
		log.Errorf(err.Error())
		return nil, err
	}
	vmm = vm.NewVirtualMachine(d.ctx, d.session, ovm.Reference())
	return vmm, nil
}
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
	c.Lock()
	node := c.node
	if node == nil {
		c.Unlock()
		return
	}

	if c.isActiveManager() {
		active, reachable, unreachable, err := c.managerStats()
		if err == nil {
			singlenode := active && reachable == 1 && unreachable == 0
			if active && !singlenode && reachable-2 <= unreachable {
				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
			}
		}
	}
	c.cancelReconnect()
	c.Unlock()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := node.Stop(ctx); err != nil {
		logrus.Errorf("error cleaning up cluster: %v", err)
	}
	c.Lock()
	c.node = nil
	c.ready = false
	c.conn = nil
	c.Unlock()
}
func (c *Client) update() error {
	err := c.downloadTimestamp()
	if err != nil {
		logrus.Errorf("Client Update (Timestamp): %s", err.Error())
		return err
	}
	err = c.downloadSnapshot()
	if err != nil {
		logrus.Errorf("Client Update (Snapshot): %s", err.Error())
		return err
	}
	err = c.checkRoot()
	if err != nil {
		// In this instance the root has not expired based on time, but is
		// expired because the snapshot dictates that a new root has been produced.
		logrus.Debug(err)
		return tuf.ErrLocalRootExpired{}
	}
	// will always need top level targets at a minimum
	err = c.downloadTargets("targets")
	if err != nil {
		logrus.Errorf("Client Update (Targets): %s", err.Error())
		return err
	}
	return nil
}
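// A hedged caller sketch for update, assuming c is an initialized *Client:
// tuf.ErrLocalRootExpired is a sentinel meaning the snapshot dictates a newer
// root, so a caller would refresh the root (the mechanism is not shown in the
// source) and retry once, rather than treating it as a fatal failure.
func refreshClient(c *Client) error {
	if err := c.update(); err != nil {
		if _, ok := err.(tuf.ErrLocalRootExpired); ok {
			// fetch the new root here, then retry once
			return c.update()
		}
		return err
	}
	return nil
}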
// ProcessDeletions deletes the configuration passed from netmaster's statestore.
// This may result in generating Delete triggers for the netplugin.
func ProcessDeletions(allCfg *intent.Config) (err error) {
	stateDriver, err := utils.GetStateDriver()
	if err != nil {
		return err
	}

	for _, tenant := range allCfg.Tenants {
		err1 := DeleteEndpoints(stateDriver, &tenant)
		if err1 != nil {
			log.Errorf("error deleting endpoints: %s", err1)
			err = err1
			continue
		}

		err1 = DeleteNetworks(stateDriver, &tenant)
		if err1 != nil {
			log.Errorf("error deleting networks: %s", err1)
			err = err1
			continue
		}

		err1 = DeleteTenant(stateDriver, &tenant)
		if err1 != nil {
			log.Errorf("error deleting tenant: %s", err1)
			err = err1
			continue
		}
	}

	return
}
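// A minimal sketch of driving ProcessDeletions, assuming intent.Config carries a
// Tenants slice of intent.ConfigTenant values as the loop above implies; the
// tenant name "t1" is a hypothetical example.
func deleteTenantsExample() error {
	allCfg := &intent.Config{
		Tenants: []intent.ConfigTenant{
			{Name: "t1"},
		},
	}
	// per tenant: endpoints, then networks, then the tenant itself are deleted
	return ProcessDeletions(allCfg)
}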
// stubAddPod is the handler for testing pod additions
func stubAddPod(r *http.Request) (interface{}, error) {
	resp := cniapi.RspAddPod{}

	content, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Errorf("Failed to read request: %v", err)
		return resp, err
	}

	pInfo := cniapi.CNIPodAttr{}
	if err := json.Unmarshal(content, &pInfo); err != nil {
		return resp, err
	}

	// verify pod attributes are as expected
	if pInfo.Name == "utPod" && pInfo.K8sNameSpace == "utK8sNS" &&
		pInfo.InfraContainerID != "" && pInfo.IntfName != "" {
		_, err := nsToPID(pInfo.NwNameSpace)
		if err != nil {
			logger.Errorf("Failed to fetch pid from netns %s: %v", pInfo.NwNameSpace, err)
		} else {
			// respond with success
			resp.IPAddress = utPodIP
			resp.EndpointID = pInfo.InfraContainerID
			return resp, nil
		}
	}
	logger.Errorf("Failed pod %v", pInfo)
	return resp, fmt.Errorf("Failed to add pod")
}
// GetEndpointStats gets all endpoint stats from all ovs instances
func (d *OvsDriver) GetEndpointStats() ([]byte, error) {
	vxlanStats, err := d.switchDb["vxlan"].GetEndpointStats()
	if err != nil {
		log.Errorf("Error getting vxlan stats. Err: %v", err)
		return []byte{}, err
	}

	vlanStats, err := d.switchDb["vlan"].GetEndpointStats()
	if err != nil {
		log.Errorf("Error getting vlan stats. Err: %v", err)
		return []byte{}, err
	}

	// combine the maps
	for key, val := range vxlanStats {
		vlanStats[key] = val
	}

	jsonStats, err := json.Marshal(vlanStats)
	if err != nil {
		log.Errorf("Error encoding epstats. Err: %v", err)
		return jsonStats, err
	}

	return jsonStats, nil
}
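// A hedged decoding sketch for GetEndpointStats, assuming d is an initialized
// *OvsDriver: the method returns the merged vlan+vxlan map as JSON, so a generic
// map is used here because the concrete value type is not shown in the source.
func dumpEndpointStats(d *OvsDriver) error {
	jsonStats, err := d.GetEndpointStats()
	if err != nil {
		return err
	}
	var stats map[string]interface{}
	if err := json.Unmarshal(jsonStats, &stats); err != nil {
		return err
	}
	log.Infof("endpoint stats: %+v", stats)
	return nil
}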
// Init registers a remote ipam when its plugin is activated
func Init(cb ipamapi.Callback, l, g interface{}) error {
	newPluginHandler := func(name string, client *plugins.Client) {
		a := newAllocator(name, client)
		if cps, err := a.(*allocator).getCapabilities(); err == nil {
			if err := cb.RegisterIpamDriverWithCapabilities(name, a, cps); err != nil {
				logrus.Errorf("error registering remote ipam driver %s due to %v", name, err)
			}
		} else {
			logrus.Infof("remote ipam driver %s does not support capabilities", name)
			logrus.Debug(err)
			if err := cb.RegisterIpamDriver(name, a); err != nil {
				logrus.Errorf("error registering remote ipam driver %s due to %v", name, err)
			}
		}
	}

	// Unit test code is unaware of a true PluginStore. So we fall back to v1 plugins.
	handleFunc := plugins.Handle
	if pg := cb.GetPluginGetter(); pg != nil {
		handleFunc = pg.Handle
		activePlugins := pg.GetAllManagedPluginsByCap(ipamapi.PluginEndpointType)
		for _, ap := range activePlugins {
			newPluginHandler(ap.Name(), ap.Client())
		}
	}
	handleFunc(ipamapi.PluginEndpointType, newPluginHandler)

	return nil
}
func launch(c *cli.Context) {
	conf := config.Conf(c)
	resultChan := make(chan error)

	rClient, err := config.GetRancherClient(conf)
	if err != nil {
		log.Fatal(err)
	}
	kClient := kubernetesclient.NewClient(conf.KubernetesURL, true)

	rcHandler := kubernetesevents.NewHandler(rClient, kClient, kubernetesevents.RCKind)
	svcHandler := kubernetesevents.NewHandler(rClient, kClient, kubernetesevents.ServiceKind)
	handlers := []kubernetesevents.Handler{rcHandler, svcHandler}

	go func(rc chan error) {
		err := kubernetesevents.ConnectToEventStream(handlers, conf)
		log.Errorf("Kubernetes stream listener exited with error: %s", err)
		rc <- err
	}(resultChan)

	go func(rc chan error) {
		err := rancherevents.ConnectToEventStream(conf)
		log.Errorf("Rancher stream listener exited with error: %s", err)
		rc <- err
	}(resultChan)

	<-resultChan
	log.Info("Exiting.")
}
// RuleDelete deletes the rule within a policy
func (ac *APIController) RuleDelete(rule *contivModel.Rule) error {
	log.Infof("Received RuleDelete: %+v", rule)

	policyKey := rule.TenantName + ":" + rule.PolicyName

	// find the policy
	policy := contivModel.FindPolicy(policyKey)
	if policy == nil {
		log.Errorf("Error finding policy %s", policyKey)
		return core.Errorf("Policy not found")
	}

	// unlink the rule from policy
	modeldb.RemoveLinkSet(&policy.LinkSets.Rules, rule)
	err := policy.Write()
	if err != nil {
		return err
	}

	// Trigger policyDB Update
	err = master.PolicyDelRule(policy, rule)
	if err != nil {
		log.Errorf("Error deleting rule %s from policy %s. Err: %v", rule.Key, policy.Key, err)
		return err
	}

	return nil
}
// EndpointGroupDelete deletes an endpoint group
func (ac *APIController) EndpointGroupDelete(endpointGroup *contivModel.EndpointGroup) error {
	log.Infof("Received EndpointGroupDelete: %+v", endpointGroup)

	// delete the endpoint group state
	err := master.DeleteEndpointGroup(endpointGroup.EndpointGroupID)
	if err != nil {
		log.Errorf("Error deleting endpoint group %+v. Err: %v", endpointGroup, err)
	}

	// Detach the endpoint group from the Policies
	for _, policyName := range endpointGroup.Policies {
		policyKey := endpointGroup.TenantName + ":" + policyName

		// find the policy
		policy := contivModel.FindPolicy(policyKey)
		if policy == nil {
			log.Errorf("Could not find policy %s", policyName)
			continue
		}

		// detach policy from epg
		err := master.PolicyDetach(endpointGroup, policy)
		if err != nil && err != master.EpgPolicyExists {
			log.Errorf("Error detaching policy %s from epg %s", policyName, endpointGroup.Key)
		}

		// Remove links
		modeldb.RemoveLinkSet(&policy.LinkSets.EndpointGroups, endpointGroup)
		modeldb.RemoveLinkSet(&endpointGroup.LinkSets.Policies, policy)
		policy.Write()
	}

	return nil
}
// authorizationMiddleware performs authorization on the request.
func (s *Server) authorizationMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		// FIXME: fill in when authN gets in
		// User and UserAuthNMethod are taken from AuthN plugins
		// Currently tracked in https://github.com/docker/docker/pull/13994
		user := ""
		userAuthNMethod := ""
		authCtx := authorization.NewCtx(s.authZPlugins, user, userAuthNMethod, r.Method, r.RequestURI)

		if err := authCtx.AuthZRequest(w, r); err != nil {
			logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err)
			return err
		}

		rw := authorization.NewResponseModifier(w)

		if err := handler(ctx, rw, r, vars); err != nil {
			logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err)
			return err
		}

		if err := authCtx.AuthZResponse(rw, r); err != nil {
			logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err)
			return err
		}
		return nil
	}
}
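// A hedged wiring sketch for authorizationMiddleware, assuming s is an
// initialized *Server: the middleware wraps a handler so AuthZRequest runs
// before it and AuthZResponse after it, each able to veto with an error.
// The ping handler below is hypothetical, not taken from the source.
func (s *Server) wrappedPing() httputils.APIFunc {
	ping := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		_, err := w.Write([]byte("OK"))
		return err
	}
	return s.authorizationMiddleware(ping)
}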
func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)
	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
	if ExecConfig.OpenStdin {
		if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
		}
	}
	if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
	}
	if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
	}
	if ExecConfig.ProcessConfig.Terminal != nil {
		if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.execCommands.Delete(ExecConfig.ID)
	return err
}
// useDirperm checks whether the dirperm1 mount option can be used with the
// current version of aufs.
func useDirperm() bool {
	enableDirpermLock.Do(func() {
		base, err := ioutil.TempDir("", "docker-aufs-base")
		if err != nil {
			logrus.Errorf("error checking dirperm1: %v", err)
			return
		}
		defer os.RemoveAll(base)

		union, err := ioutil.TempDir("", "docker-aufs-union")
		if err != nil {
			logrus.Errorf("error checking dirperm1: %v", err)
			return
		}
		defer os.RemoveAll(union)

		opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
		if err := mount("none", union, "aufs", 0, opts); err != nil {
			return
		}
		enableDirperm = true
		if err := Unmount(union); err != nil {
			logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
		}
	})
	return enableDirperm
}
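// A hedged sketch of how the probe result might be consumed, mirroring the opts
// string format used inside useDirperm; mountRoot is a hypothetical branch path.
func aufsMountOpts(mountRoot string) string {
	opts := fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", mountRoot)
	if useDirperm() {
		// only request dirperm1 when the probe confirmed the kernel supports it
		opts = fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", mountRoot)
	}
	return opts
}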
func (m *MessagesController) likeOrUnlike(ctx *gin.Context, action string, message models.Message, topic models.Topic, user models.User) {
	isReadAccess := topic.IsUserReadAccess(user)
	if !isReadAccess {
		ctx.AbortWithError(http.StatusInternalServerError, errors.New("No Read Access to topic "+message.Topics[0]))
		return
	}

	info := ""
	if action == "like" {
		err := message.Like(user)
		if err != nil {
			log.Errorf("Error while liking a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = "like added"
	} else if action == "unlike" {
		err := message.Unlike(user)
		if err != nil {
			log.Errorf("Error while unliking a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = "like removed"
	} else {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid action : " + action})
		return
	}
	go models.WSMessage(&models.WSMessageJSON{Action: action, Username: user.Username, Message: message})
	ctx.JSON(http.StatusCreated, gin.H{"info": info})
}
func copyDir(src, dst string) error {
	// ensure the parent path of src exists
	if err := os.MkdirAll(filepath.Dir(src), 0777); err != nil {
		log.Errorf("Error when creating parent directory for src: %s", err.Error())
		return err
	}

	// ensure the parent path of dst exists
	cmd := exec.Command("mkdir", "-p", filepath.Dir(dst))
	if _, err := cmd.Output(); err != nil {
		return err
	}

	// cp options:
	// -a: preserve attributes
	// -R: copy recursively
	cmd = exec.Command("cp", "-aR", src, dst)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Errorf("Error when cp: %s", err.Error())
		return err
	}
	log.Printf("The cp output is: %s", out)
	return nil
}
func (m *MessagesController) addOrRemoveLabel(ctx *gin.Context, messageIn *messageJSON, message models.Message, user models.User) {
	if messageIn.Text == "" {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("Invalid Text for label"))
		return
	}

	info := gin.H{}
	if messageIn.Action == "label" {
		addedLabel, err := message.AddLabel(messageIn.Text, messageIn.Option)
		if err != nil {
			log.Errorf("Error while adding a label to a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = gin.H{"info": fmt.Sprintf("label %s added to message", addedLabel.Text), "label": addedLabel, "message": message}
	} else if messageIn.Action == "unlabel" {
		err := message.RemoveLabel(messageIn.Text)
		if err != nil {
			log.Errorf("Error while removing a label from a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = gin.H{"info": fmt.Sprintf("label %s removed from message", messageIn.Text), "message": message}
	} else {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("Invalid action : "+messageIn.Action))
		return
	}

	go models.WSMessage(&models.WSMessageJSON{Action: messageIn.Action, Username: user.Username, Message: message})
	ctx.JSON(http.StatusCreated, info)
}
func (u *URLFetcher) FetchAuthToken(url *url.URL) (*Token, error) {
	defer trace.End(trace.Begin(url.String()))

	data, err := u.Fetch(context.Background(), url, false, nil)
	if err != nil {
		log.Errorf("Download failed: %v", err)
		return nil, err
	}

	token := &Token{}
	err = json.Unmarshal([]byte(data), token)
	if err != nil {
		log.Errorf("Incorrect token format: %v", err)
		return nil, err
	}

	if token.ExpiresIn == 0 {
		token.Expires = time.Now().Add(DefaultTokenExpirationDuration)
	} else {
		token.Expires = time.Now().Add(time.Duration(token.ExpiresIn) * time.Second)
	}

	return token, nil
}
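// A hedged caching sketch built on the Expires field set above: reuse a token
// until its computed expiry passes, then fetch a fresh one. u and authURL are
// assumed to be an initialized *URLFetcher and a token-endpoint URL.
func currentToken(u *URLFetcher, authURL *url.URL, cached *Token) (*Token, error) {
	if cached != nil && time.Now().Before(cached.Expires) {
		return cached, nil
	}
	return u.FetchAuthToken(authURL)
}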
func (m *MessagesController) addOrRemoveTag(ctx *gin.Context, messageIn *messageJSON, message models.Message, user models.User) {
	if !user.IsSystem {
		ctx.JSON(http.StatusForbidden, gin.H{"error": "Invalid Action for non-system user"})
		return
	}

	if messageIn.Text == "" {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("Invalid Text for tag"))
		return
	}

	if messageIn.Action == "tag" {
		err := message.AddTag(messageIn.Text)
		if err != nil {
			log.Errorf("Error while adding a tag to a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
	} else if messageIn.Action == "untag" {
		err := message.RemoveTag(messageIn.Text)
		if err != nil {
			log.Errorf("Error while removing a tag from a message %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
	} else {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("Invalid action : "+messageIn.Action))
		return
	}

	go models.WSMessage(&models.WSMessageJSON{Action: messageIn.Action, Username: user.Username, Message: message})
	ctx.JSON(http.StatusCreated, "")
}
func (clnt *client) Restore(containerID string, options ...CreateOption) error {
	w := clnt.getOrCreateExitNotifier(containerID)
	defer w.close()
	cont, err := clnt.getContainerdContainer(containerID)
	if err == nil && cont.Status != "stopped" {
		clnt.lock(cont.Id)
		container := clnt.newContainer(cont.BundlePath)
		container.systemPid = systemPid(cont)
		clnt.appendContainer(container)
		clnt.unlock(cont.Id)

		if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
			logrus.Errorf("error sending sigterm to %v: %v", containerID, err)
		}
		select {
		case <-time.After(10 * time.Second):
			if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
				logrus.Errorf("error sending sigkill to %v: %v", containerID, err)
			}
			select {
			case <-time.After(2 * time.Second):
			case <-w.wait():
				return nil
			}
		case <-w.wait():
			return nil
		}
	}
	return clnt.setExited(containerID)
}
func (m *MessagesController) addOrRemoveTask(ctx *gin.Context, messageIn *messageJSON, message models.Message, user models.User, topic models.Topic) {
	info := ""
	if messageIn.Action == "task" {
		err := message.AddToTasks(user, topic)
		if err != nil {
			log.Errorf("Error while adding a message to tasks %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = fmt.Sprintf("New Task created in %s", models.GetPrivateTopicTaskName(user))
	} else if messageIn.Action == "untask" {
		err := message.RemoveFromTasks(user, topic)
		if err != nil {
			log.Errorf("Error while removing a message from tasks %s", err)
			ctx.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		info = fmt.Sprintf("Task removed from %s", models.GetPrivateTopicTaskName(user))
	} else {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("Invalid action : "+messageIn.Action))
		return
	}
	go models.WSMessage(&models.WSMessageJSON{Action: messageIn.Action, Username: user.Username, Message: message})
	ctx.JSON(http.StatusCreated, gin.H{"info": info})
}
func (d *Dispatcher) configIso(conf *metadata.VirtualContainerHostConfigSpec, vm *vm.VirtualMachine) (object.VirtualDeviceList, error) {
	defer trace.End(trace.Begin(""))

	var devices object.VirtualDeviceList
	var err error

	vmDevices, err := vm.Device(d.ctx)
	if err != nil {
		log.Errorf("Failed to get vm devices for appliance: %s", err)
		return nil, err
	}
	ide, err := vmDevices.FindIDEController("")
	if err != nil {
		log.Errorf("Failed to find IDE controller for appliance: %s", err)
		return nil, err
	}
	cdrom, err := devices.CreateCdrom(ide)
	if err != nil {
		log.Errorf("Failed to create Cdrom device for appliance: %s", err)
		return nil, err
	}
	cdrom = devices.InsertIso(cdrom, fmt.Sprintf("[%s] %s/appliance.iso", conf.ImageStores[0].Host, d.vmPathName))
	devices = append(devices, cdrom)
	return devices, nil
}
func (c *controller) acceptClientConnections(sock string, l net.Listener) {
	for {
		conn, err := l.Accept()
		if err != nil {
			if _, err1 := os.Stat(sock); os.IsNotExist(err1) {
				logrus.Debugf("Unix socket %s doesn't exist. cannot accept client connections", sock)
				return
			}
			logrus.Errorf("Error accepting connection %v", err)
			continue
		}
		go func() {
			defer conn.Close()

			err := c.processExternalKey(conn)
			ret := success
			if err != nil {
				ret = err.Error()
			}

			_, err = conn.Write([]byte(ret))
			if err != nil {
				logrus.Errorf("Error returning to the client %v", err)
			}
		}()
	}
}
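// A minimal wiring sketch for acceptClientConnections, assuming c is an
// initialized *controller; the socket path is supplied by the caller. The
// accept loop runs on its own goroutine and exits once the socket file is
// removed, which the loop above detects via os.Stat.
func (c *controller) startKeyListener(sock string) error {
	l, err := net.Listen("unix", sock)
	if err != nil {
		return err
	}
	go c.acceptClientConnections(sock, l)
	return nil
}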
func (daemon *Daemon) releaseNetwork(container *Container) {
	if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
		return
	}

	sid := container.NetworkSettings.SandboxID
	networks := container.NetworkSettings.Networks
	for n := range networks {
		networks[n] = &network.EndpointSettings{}
	}

	container.NetworkSettings = &network.Settings{Networks: networks}

	if sid == "" || len(networks) == 0 {
		return
	}

	sb, err := daemon.netController.SandboxByID(sid)
	if err != nil {
		logrus.Errorf("error locating sandbox id %s: %v", sid, err)
		return
	}

	if err := sb.Delete(); err != nil {
		logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
	}
}
// deletePod is the handler for pod deletes
func deletePod(r *http.Request) (interface{}, error) {
	resp := cniapi.RspAddPod{}

	logEvent("del pod")

	content, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.Errorf("Failed to read request: %v", err)
		return resp, err
	}

	pInfo := cniapi.CNIPodAttr{}
	if err := json.Unmarshal(content, &pInfo); err != nil {
		return resp, err
	}

	// Get labels from the kube api server
	epReq, err := getEPSpec(&pInfo)
	if err != nil {
		log.Errorf("Error getting labels. Err: %v", err)
		setErrorResp(&resp, "Error getting labels", err)
		return resp, err
	}

	netPlugin.DeleteHostAccPort(epReq.EndpointID)
	err = epCleanUp(epReq)
	resp.Result = 0
	resp.EndpointID = pInfo.InfraContainerID
	return resp, err
}
// test adding vlans
func setupVlans() error {
	for i := 0; i < NUM_AGENT; i++ {
		log.Infof("Index %d", i)
		for j := 1; j < 5; j++ {
			log.Infof("Index %d", j)
			//log.Infof("Adding Vlan %d on %s", j, localIpList[i])
			err := vrtrAgents[i].AddNetwork(uint16(j), uint32(j), "", "tenant1")
			if err != nil {
				log.Errorf("Error adding vlan %d to vrtrAgent. Err: %v", j, err)
				return err
			}
			err = vxlanAgents[i].AddNetwork(uint16(j), uint32(j), "", "default")
			if err != nil {
				log.Errorf("Error adding vlan %d to vxlanAgent. Err: %v", j, err)
				return err
			}
			err = vlanAgents[i].AddNetwork(uint16(j), uint32(j), "", "default")
			if err != nil {
				log.Errorf("Error adding vlan %d to vlanAgent. Err: %v", j, err)
				return err
			}
		}
	}
	for i := 0; i < NUM_VLRTR_AGENT; i++ {
		err := vlrtrAgents[i].AddNetwork(uint16(1), uint32(1),
			fmt.Sprintf("10.10.%d.%d", 1, 1), "default")
		if err != nil {
			log.Errorf("Error adding vlan 1 to vlrtrAgent. Err: %v", err)
			return err
		}
	}
	return nil
}
func validateTenantConfig(tenant *intent.ConfigTenant) error {
	if tenant.Name == "" {
		return core.Errorf("invalid tenant name")
	}

	err := checkPktTagType(tenant.DefaultNetType)
	if err != nil {
		return err
	}

	if tenant.SubnetPool != "" {
		_, _, err = net.ParseCIDR(tenant.SubnetPool)
		if err != nil {
			return err
		}
	}

	if tenant.VLANs != "" {
		_, err = netutils.ParseTagRanges(tenant.VLANs, "vlan")
		if err != nil {
			log.Errorf("error parsing vlan range '%s'. Error: %s", tenant.VLANs, err)
			return err
		}
	}

	if tenant.VXLANs != "" {
		_, err = netutils.ParseTagRanges(tenant.VXLANs, "vxlan")
		if err != nil {
			log.Errorf("error parsing vxlan range '%s'. Error: %s", tenant.VXLANs, err)
			return err
		}
	}

	return nil
}
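// A hedged example of the inputs validateTenantConfig accepts, using only the
// fields the function reads; every value below is hypothetical, and "vlan" is
// assumed to be a packet-tag type that checkPktTagType accepts.
func validateExampleTenant() error {
	tenant := &intent.ConfigTenant{
		Name:           "t1",
		DefaultNetType: "vlan",        // assumed valid for checkPktTagType
		SubnetPool:     "10.1.0.0/16", // must parse as a CIDR
		VLANs:          "100-200",     // parsed as a vlan tag range
		VXLANs:         "10000-10100", // parsed as a vxlan tag range
	}
	return validateTenantConfig(tenant)
}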