// NewClient returns a new Client to handle requests to the
// service at the other end of the connection.
// A watcher is used to get updates about service endpoints.
func NewClient(serviceName string, watcher watch.Watcher) *Client {
	// Dial with no addresses means adding no endpoints
	client, err := Dial()
	if err != nil {
		glog.Fatal(err)
	}

	// Handle updates about service endpoints
	serviceChannel := make(chan watch.ServiceUpdate)
	go util.Forever(func() {
		serviceUpdate := <-serviceChannel
		switch serviceUpdate.Op {
		case watch.ADD:
			if err := client.endpoints.add(serviceUpdate.Value); err != nil {
				glog.Error(err)
			}
		case watch.REMOVE:
			if err := client.endpoints.del(serviceUpdate.Value); err != nil {
				glog.Error(err)
			}
		default:
			glog.Warning("zrpc: unknown service update op")
		}
	}, time.Second)

	// Register as a listener for the passed service name
	watcher.Watch(serviceName, serviceChannel)
	return client
}
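// NewClient above only assumes that a watch.Watcher pushes ServiceUpdate
// values onto the registered channel. A hypothetical in-memory watcher for
// tests follows; it is a sketch, not part of the original package, and it
// assumes ServiceUpdate carries an Op and an endpoint address in Value.
type staticWatcher struct {
	endpoints []string
}

func (w *staticWatcher) Watch(serviceName string, ch chan watch.ServiceUpdate) {
	go func() {
		// Replay a fixed endpoint list as ADD updates.
		for _, ep := range w.endpoints {
			ch <- watch.ServiceUpdate{Op: watch.ADD, Value: ep}
		}
	}()
}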
// Load [...]
func (g *Graph) Load(csvFile io.Reader) error {
	startT := time.Now()
	defer func() {
		log.Info("Graph.Load", time.Since(startT))
	}()

	csvReader := csv.NewReader(csvFile)
	csvReader.TrailingComma = true // deprecated no-op in modern encoding/csv; kept for compatibility
	for {
		fields, err := csvReader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Error(err)
			continue
		}
		if len(fields) != 3 {
			log.Error("expected 3 fields, got ", fields)
			continue
		}
		if fields[0] == SPEMPTY || fields[1] == SPEMPTY {
			log.Error("Invalid line ", fields)
			continue
		}
		g.Add(fields[0], fields[1], fields[2])
	}
	return nil
}
func (client *Client) HandleSaveAndEnqueueGroup(sae *SAEMessage) {
	if sae.msg == nil {
		log.Error("sae msg is nil")
		return
	}
	if sae.msg.cmd != MSG_GROUP_IM {
		log.Error("sae msg cmd:", sae.msg.cmd)
		return
	}

	appid := sae.appid
	gid := sae.receiver

	// Ensure group messages are sent out in increasing msgid order.
	t := make(chan int64)
	f := func() {
		msgid := storage.SaveGroupMessage(appid, gid, sae.device_id, sae.msg)
		s := FindGroupClientSet(appid, gid)
		for c := range s {
			log.Info("publish group message")
			am := &AppMessage{appid: appid, receiver: gid, msgid: msgid, device_id: sae.device_id, msg: sae.msg}
			m := &Message{cmd: MSG_PUBLISH_GROUP, body: am}
			c.wt <- m
		}
		if len(s) == 0 {
			log.Infof("can't publish group message:%d", gid)
		}
		t <- msgid
	}

	c := GetGroupChan(gid)
	c <- f
	msgid := <-t

	result := &MessageResult{}
	result.status = 0
	buffer := new(bytes.Buffer)
	binary.Write(buffer, binary.BigEndian, msgid)
	result.content = buffer.Bytes()
	msg := &Message{cmd: MSG_RESULT, body: result}
	SendMessage(client.conn, msg)

	group := group_manager.FindGroup(gid)
	if group != nil {
		members := group.Members()
		off_members := make([]int64, 0)
		im := sae.msg.body.(*IMMessage)
		for uid := range members {
			if !IsGroupUserOnline(appid, gid, uid) {
				off_members = append(off_members, uid)
			}
		}
		if len(off_members) > 0 {
			client.PublishGroupMessage(appid, off_members, im)
		}
	}
}
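// GetGroupChan is not shown in this snippet. The ordering guarantee above
// relies on a per-group worker draining a chan func(), so closures for the
// same gid run sequentially and msgids are handed out in increasing order.
// A hypothetical minimal sketch of such a dispatcher (the names, locking, and
// buffer size here are assumptions, not the original implementation):
var (
	groupChanMu sync.Mutex
	groupChans  = make(map[int64]chan func())
)

func GetGroupChan(gid int64) chan func() {
	groupChanMu.Lock()
	defer groupChanMu.Unlock()
	c, ok := groupChans[gid]
	if !ok {
		c = make(chan func(), 64)
		groupChans[gid] = c
		// One worker per group serializes all closures for that gid.
		go func() {
			for f := range c {
				f()
			}
		}()
	}
	return c
}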
func waitInitReady(ctx *VmContext) {
	conn, err := utils.UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Waiting for init messages...")
	msg, err := ReadVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.Code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.Code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.Code),
		}
		conn.Close()
	}
}
func deleteAll(fm kubeFramework, ns string, services []*kube_api.Service, rcs []*kube_api.ReplicationController) {
	var err error
	for _, rc := range rcs {
		if err = fm.DeleteRC(ns, rc); err != nil {
			glog.Error(err)
		}
	}

	for _, service := range services {
		if err = fm.DeleteService(ns, service); err != nil {
			glog.Error(err)
		}
	}

	if err = removeDockerImage(*heapsterImage); err != nil {
		glog.Error(err)
	}
	if err = removeDockerImage(*influxdbImage); err != nil {
		glog.Error(err)
	}
	if err = removeDockerImage(*grafanaImage); err != nil {
		glog.Error(err)
	}

	var nodes []string
	if nodes, err = fm.GetNodes(); err == nil {
		for _, node := range nodes {
			cleanupRemoteHost(node)
		}
	} else {
		glog.Errorf("failed to cleanup nodes - %v", err)
	}
}
// syncNetworkStatus updates the network state, ensuring that the network is
// configured correctly if the kubelet is set to configure cbr0:
//   * handshake flannel helper if the flannel experimental overlay is being used.
//   * ensure that iptables masq rules are setup
//   * reconcile cbr0 with the pod CIDR
func (kl *Kubelet) syncNetworkStatus() {
	var err error
	if kl.configureCBR0 {
		if kl.flannelExperimentalOverlay {
			podCIDR, err := kl.flannelHelper.Handshake()
			if err != nil {
				glog.Infof("Flannel server handshake failed %v", err)
				return
			}
			kl.updatePodCIDR(podCIDR)
		}
		if err := ensureIPTablesMasqRule(kl.iptClient, kl.nonMasqueradeCIDR); err != nil {
			err = fmt.Errorf("Error on adding ip table rules: %v", err)
			glog.Error(err)
			kl.runtimeState.setNetworkState(err)
			return
		}
		podCIDR := kl.runtimeState.podCIDR()
		if len(podCIDR) == 0 {
			err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set. Will not configure CBR0 right now")
			glog.Warning(err)
		} else if err = kl.reconcileCBR0(podCIDR); err != nil {
			err = fmt.Errorf("Error configuring cbr0: %v", err)
			glog.Error(err)
		}
		if err != nil {
			kl.runtimeState.setNetworkState(err)
			return
		}
	}

	kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
}
func (self *ProtoProc) procClientID(cmd protocol.Cmd, session *libnet.Session) error {
	glog.Info("procClientID")
	var err error
	ID := cmd.GetArgs()[0]
	sessionStoreData := storage.NewSessionStoreData(ID, session.Conn().RemoteAddr().String(),
		self.msgServer.cfg.LocalIP, strconv.FormatUint(session.Id(), 10))
	glog.Info(sessionStoreData)

	args := make([]string, 0)
	args = append(args, ID)
	CCmd := protocol.NewCmdInternal(protocol.STORE_SESSION_CMD, args, sessionStoreData)
	glog.Info(CCmd)

	if self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS] != nil {
		_, err = self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS].Channel.Broadcast(libnet.Json(CCmd))
		if err != nil {
			glog.Error(err.Error())
			return err
		}
	}

	self.msgServer.sessions[ID] = session
	self.msgServer.sessions[ID].State = base.NewSessionState(true, ID)

	err = self.procOfflineMsg(session, ID)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	return nil
}
func (lc *LibvirtContext) RemoveNic(ctx *hypervisor.VmContext, n *hypervisor.InterfaceCreated, callback hypervisor.VmEvent) {
	if lc.domain == nil {
		glog.Error("Cannot find domain")
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: nil,
		}
		return
	}

	nicXml, err := nicXml(n.Bridge, n.HostDevice, n.MacAddr, n.PCIAddr, ctx.Boot)
	if err != nil {
		glog.Error("generate detach-nic-xml failed, ", err.Error())
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: callback,
		}
		return
	}

	err = lc.domain.DetachDeviceFlags(nicXml, libvirtgo.VIR_DOMAIN_DEVICE_MODIFY_LIVE)
	if err != nil {
		glog.Error("detach nic failed, ", err.Error())
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: callback,
		}
		return
	}

	ctx.Hub <- callback
}
func (lc *LibvirtContext) Launch(ctx *hypervisor.VmContext) {
	domainXml, err := lc.domainXml(ctx)
	if err != nil {
		glog.Error("Fail to get domain xml configuration:", err)
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
		return
	}
	glog.V(3).Infof("domainXML: %v", domainXml)

	var domain libvirtgo.VirDomain
	if ctx.Boot.BootFromTemplate {
		domain, err = lc.driver.conn.DomainCreateXML(domainXml, libvirtgo.VIR_DOMAIN_START_PAUSED)
	} else {
		domain, err = lc.driver.conn.DomainCreateXML(domainXml, libvirtgo.VIR_DOMAIN_NONE)
	}
	if err != nil {
		glog.Error("Fail to launch domain ", err)
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
		return
	}
	lc.domain = &domain

	err = lc.domain.SetMemoryStatsPeriod(1, 0)
	if err != nil {
		glog.Errorf("SetMemoryStatsPeriod failed for domain %v", ctx.Id)
	}
}
func StoragePageData(path string, cfg conf.Config) error {
	rh := NewReqHttp(path, "GET", cfg.Spider.CrawlTimeout)
	rh.AddHeader("User-agent", USER_AGENT)

	httpRes, err := rh.DoGetData()
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	body, err := ioutil.ReadAll(httpRes.Body)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	encodeStr := base64.StdEncoding.EncodeToString([]byte(path))
	fout, err := os.Create(cfg.Spider.OutputDirectory + "/" + encodeStr)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	defer fout.Close()

	fout.Write(body)
	return nil
}
func (lc *LibvirtContext) AddNic(ctx *hypervisor.VmContext, host *hypervisor.HostNicInfo, guest *hypervisor.GuestNicInfo, result chan<- hypervisor.VmEvent) {
	if lc.domain == nil {
		glog.Error("Cannot find domain")
		result <- &hypervisor.DeviceFailed{
			Session: nil,
		}
		return
	}

	nicXml, err := nicXml(host.Bridge, host.Device, host.Mac, guest.Busaddr, ctx.Boot)
	if err != nil {
		glog.Error("generate attach-nic-xml failed, ", err.Error())
		result <- &hypervisor.DeviceFailed{
			Session: nil,
		}
		return
	}
	glog.V(3).Infof("nicxml: %s", nicXml)

	err = lc.domain.AttachDeviceFlags(nicXml, libvirtgo.VIR_DOMAIN_DEVICE_MODIFY_LIVE)
	if err != nil {
		glog.Error("attach nic failed, ", err.Error())
		result <- &hypervisor.DeviceFailed{
			Session: nil,
		}
		return
	}

	result <- &hypervisor.NetDevInsertedEvent{
		Index:      guest.Index,
		DeviceName: guest.Device,
		Address:    guest.Busaddr,
	}
}
// inflate decodes and decompresses the data generated by codegen
func inflate(data string) (string, error) {
	t := trackTime("inflate")
	defer t.finish()

	// fix some url-safeness that codegen does...
	var fixed string
	fixed = strings.Replace(data, "-", "+", -1)
	fixed = strings.Replace(fixed, "_", "/", -1)

	decoded, err := base64.StdEncoding.DecodeString(fixed)
	if err != nil {
		glog.Error(err)
		return "", err
	}

	r, err := zlib.NewReader(bytes.NewReader(decoded))
	if err != nil {
		glog.Error(err)
		return "", err
	}
	defer r.Close()

	var buf bytes.Buffer
	if _, err := buf.ReadFrom(r); err != nil {
		glog.Error(err)
		return "", err
	}
	return buf.String(), nil
}
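// Note: the character fixup above converts the URL-safe base64 alphabet back
// to the standard one. Assuming the input is padded (which the Replace-based
// fixup also assumes), decoding with the URL-safe codec directly is
// equivalent and skips the string rewrites; a sketch, not the original code:
func decodeCodegen(data string) ([]byte, error) {
	// base64.URLEncoding uses '-' and '_' natively, matching codegen's output.
	return base64.URLEncoding.DecodeString(data)
}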
func (c *channel) run() {
	head := &EventHeader{Id: uuid.NewV4().String(), Version: PROTOCAL_VERSION}
	req := &ServerResponse{head, "_zpc_hb", make([]interface{}, 0)}
	o, err := req.MarshalMsg(nil)
	if err != nil {
		glog.Error(err)
		return
	}
	for {
		select {
		case <-c.ticker.C:
			// Send one heartbeat per tick; release the lock before tearing
			// down so the codec is never nil while still locked.
			c.codec.mutex.Lock()
			_, err = c.codec.zsock.SendMessageDontwait(c.identity, o)
			c.codec.mutex.Unlock()
			if err != nil {
				c.ticker.Stop()
				c.codec = nil
				c.counter = 0
				glog.Error(err)
				return
			}
		case <-c.closed:
			c.ticker.Stop()
			c.codec = nil
			c.ticker = nil
			return
		}
	}
}
func (storage *Storage) LoadOfflineMessage(appid int64, uid int64) []*EMessage {
	log.Infof("load offline message appid:%d uid:%d\n", appid, uid)
	c := make([]*EMessage, 0, 10)

	start := fmt.Sprintf("%d_%d_1", appid, uid)
	end := fmt.Sprintf("%d_%d_9223372036854775807", appid, uid)
	r := &util.Range{Start: []byte(start), Limit: []byte(end)}
	iter := storage.db.NewIterator(r, nil)
	for iter.Next() {
		value := iter.Value()
		msgid, err := strconv.ParseInt(string(value), 10, 64)
		if err != nil {
			log.Error("parseint err:", err)
			continue
		}
		log.Info("offline msgid:", msgid)
		msg := storage.LoadMessage(msgid)
		if msg == nil {
			log.Error("can't load offline message:", msgid)
			continue
		}
		c = append(c, &EMessage{msgid: msgid, msg: msg})
	}
	iter.Release()
	err := iter.Error()
	if err != nil {
		log.Warning("iterator err:", err)
	}

	log.Info("offline count:", len(c))
	return c
}
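// The scan above appears to assume one leveldb entry per offline message,
// keyed "appid_uid_msgid" with the msgid stored as a decimal string value.
// A hypothetical helper showing that inferred layout (not part of the
// original code); note that leveldb compares keys lexicographically, so the
// range spans msgid 1 through MaxInt64 but does not visit entries in strict
// numeric order:
func offlineKey(appid, uid, msgid int64) []byte {
	return []byte(fmt.Sprintf("%d_%d_%d", appid, uid, msgid))
}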
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if !restore {
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	if !restore {
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) {
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
	if err != nil {
		message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
		glog.Error(message)
		return "", message, err
	}

	// Create pod logs directory
	err = m.osInterface.MkdirAll(podSandboxConfig.GetLogDirectory(), 0755)
	if err != nil {
		message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
		glog.Error(message)
		return "", message, err
	}

	podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig)
	if err != nil {
		message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
		glog.Error(message)
		return "", message, err
	}

	return podSandBoxID, "", nil
}
// connect to zookeeper, blocks on the initial call to doConnect()
func (zkc *Client) connect() {
	select {
	case <-zkc.shouldStop:
		return
	default:
		zkc.connectOnce.Do(func() {
			if zkc.stateChange(disconnectedState, connectionRequestedState) {
				if err := zkc.doConnect(); err != nil {
					log.Error(err)
					zkc.errorHandler(zkc, err)
				}
			}
			go func() {
				for {
					select {
					case <-zkc.shouldStop:
						zkc.connLock.Lock()
						defer zkc.connLock.Unlock()
						if zkc.conn != nil {
							zkc.conn.Close()
						}
						return
					case <-zkc.shouldReconn:
						if err := zkc.reconnect(); err != nil {
							log.Error(err)
							zkc.errorHandler(zkc, err)
						}
					}
				}
			}()
		})
	}
}
// Start local tcp service
func (s *UMServer) Start() {
	if s.Addr == "" {
		s.Addr = ":8681"
	}
	glog.Infof("UMServer.Start starting (%s)...", s.Addr)
	s.Message = make(chan string)

	go func() {
		ln, err := net.Listen("tcp", s.Addr)
		if err != nil {
			glog.Error("UMServer.Start Listen error: ", err.Error())
			return
		}
		defer func() {
			if ln != nil {
				ln.Close()
			}
		}()
		for {
			conn, err := ln.Accept()
			if err != nil {
				glog.Error("UMServer.Start Error accepting: ", err.Error())
				return
			}
			buf := make([]byte, 1024)
			if reqLen, err := conn.Read(buf); err == nil || reqLen > 0 {
				s.Message <- string(buf[:reqLen])
			}
			conn.Close()
		}
	}()
}
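// A hypothetical client for exercising UMServer.Start: connect, write one
// message, close. The helper name is an assumption and it is not part of the
// server code; the server reads at most 1024 bytes per connection.
func sendUM(addr, msg string) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte(msg))
	return err
}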
// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	for i := 1; i <= NumServiceAccountRemoveReferenceRetries; i++ {
		if _, err := e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name); err != nil {
			if apierrors.IsConflict(err) && i < NumServiceAccountRemoveReferenceRetries {
				// wait.Jitter treats a maxFactor <= 0.0 as 1.0, so this sleeps
				// between 100ms and 200ms before retrying on conflict.
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}
			glog.Error(err)
			break
		}
		break
	}
}
func processProxy(token string, user_name string, text string) {
	cfg, ok := config.ProxyList[token]
	if !ok {
		glog.Errorf("token %s does not exist", token)
		return
	}

	msg := fmt.Sprintf("(%s) %s", cfg.FromNetworkName, text)
	data := OutgoingContent{user_name, msg}
	jsonBytes, err := json.Marshal(data)
	if err != nil {
		glog.Error("json.Marshal failed")
		return
	}

	req, err := http.NewRequest(
		"POST",
		cfg.OutgoingUrl,
		bytes.NewBuffer(jsonBytes),
	)
	if err != nil {
		glog.Error("http.NewRequest failed")
		return
	}
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		glog.Error(err)
		return
	}
	defer resp.Body.Close()
}
func (self *ProtoProc) procOfflineMsg(session *libnet.Session, ID string) error {
	exist, err := self.msgServer.offlineMsgStore.IsKeyExist(ID)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	if exist.(int64) == 0 {
		return nil
	}

	omrd, err := common.GetOfflineMsgFromOwnerName(self.msgServer.offlineMsgStore, ID)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	for _, v := range omrd.MsgList {
		resp := protocol.NewCmdSimple(protocol.RESP_MESSAGE_P2P_CMD)
		resp.AddArg(v.Msg)
		resp.AddArg(v.FromID)

		if self.msgServer.sessions[ID] != nil {
			err = self.msgServer.sessions[ID].Send(libnet.Json(resp))
			if err != nil {
				glog.Error(err.Error())
				return err
			}
		}
	}
	omrd.ClearMsg()
	self.msgServer.offlineMsgStore.Set(omrd)

	return nil
}
// launchQemu runs qemu and waits for it to quit
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)
	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pid, err := utils.ExecInDaemon(qemu, append([]string{"qemu-system-x86_64"}, args...))
	if err != nil {
		// failed to daemonize
		glog.Errorf("%v", err)
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
func waitConsoleOutput(ctx *VmContext) {
	conn, err := utils.UnixSocketConnect(ctx.ConsoleSockName)
	if err != nil {
		glog.Error("failed to connect to ", ctx.ConsoleSockName, " ", err.Error())
		return
	}
	glog.V(1).Info("connected to ", ctx.ConsoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.ConsoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.ConsoleSockName)

	cout := make(chan string, 128)
	go TtyLiner(tc, cout)

	for {
		line, ok := <-cout
		if ok {
			glog.V(1).Info("[console] ", line)
		} else {
			glog.Info("console output end")
			break
		}
	}
}
func TestKubernetesROService(c *client.Client) bool {
	svc := api.ServiceList{}
	err := c.Get().
		Namespace("default").
		AbsPath("/api/v1beta1/proxy/services/kubernetes-ro/api/v1beta1/services").
		Do().
		Into(&svc)
	if err != nil {
		glog.Errorf("unexpected error listing services using ro service: %v", err)
		return false
	}
	var foundRW, foundRO bool
	for i := range svc.Items {
		if svc.Items[i].Name == "kubernetes" {
			foundRW = true
		}
		if svc.Items[i].Name == "kubernetes-ro" {
			foundRO = true
		}
	}
	if !foundRW {
		glog.Error("no RW service found")
	}
	if !foundRO {
		glog.Error("no RO service found")
	}
	if !foundRW || !foundRO {
		return false
	}
	return true
}
func (oc *OvsController) watchNetworks(ready chan<- bool, start <-chan string) {
	nsevent := make(chan *api.NamespaceEvent)
	stop := make(chan bool)
	go oc.subnetRegistry.WatchNamespaces(nsevent, ready, start, stop)
	for {
		select {
		case ev := <-nsevent:
			switch ev.Type {
			case api.Added:
				err := oc.assignVNID(ev.Name)
				if err != nil {
					log.Errorf("Error assigning Net ID: %v", err)
					continue
				}
			case api.Deleted:
				err := oc.revokeVNID(ev.Name)
				if err != nil {
					log.Errorf("Error revoking Net ID: %v", err)
					continue
				}
			}
		case <-oc.sig:
			log.Error("Signal received. Stopping watching of namespaces.")
			stop <- true
			return
		}
	}
}
func main() {
	version()
	fmt.Printf("built on %s\n", BuildTime())
	flag.Parse()

	cfg := NewRouterConfig(*InputConfFile)
	err := cfg.LoadConfig()
	if err != nil {
		glog.Error(err.Error())
		return
	}

	p := link.PacketN(2, link.BigEndianBO, link.LittleEndianBF)
	server, err := link.Listen(cfg.TransportProtocols, cfg.Listen, p)
	if err != nil {
		glog.Error(err.Error())
		return
	}
	glog.Info("server start: ", server.Listener().Addr().String())

	r := NewRouter(cfg)
	go r.subscribeChannels()

	server.AcceptLoop(func(session *link.Session) {
	})
}
// createServiceAccountIfNeeded creates a ServiceAccount with the given name in the given namespace if:
//   * the named ServiceAccount does not already exist
//   * the specified namespace exists
//   * the specified namespace is in the ACTIVE phase
func (e *ServiceAccountsController) createServiceAccountIfNeeded(name, namespace string) {
	serviceAccount, err := e.getServiceAccount(name, namespace)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount != nil {
		// If service account already exists, it doesn't need to be created
		return
	}

	namespaceObj, err := e.getNamespace(namespace)
	if err != nil {
		glog.Error(err)
		return
	}
	if namespaceObj == nil {
		// If namespace does not exist, no service account is needed
		return
	}
	if namespaceObj.Status.Phase != api.NamespaceActive {
		// If namespace is not active, we shouldn't try to create anything
		return
	}

	e.createServiceAccount(name, namespace)
}
func Error(ctx context.Context, args ...interface{}) {
	if ctx == nil || !hasTraceKey(ctx) {
		glog.Error(args...)
		return
	}
	glog.Error(prependParam(args, ctx)...)
}
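// hasTraceKey and prependParam are not shown in this snippet. A minimal
// hypothetical sketch of what they might look like, assuming the trace id is
// stored under an unexported context key (the names and key type here are
// assumptions, not the original API):
type traceKey struct{}

func hasTraceKey(ctx context.Context) bool {
	_, ok := ctx.Value(traceKey{}).(string)
	return ok
}

func prependParam(args []interface{}, ctx context.Context) []interface{} {
	// Prefix the log arguments with the trace id from the context.
	id, _ := ctx.Value(traceKey{}).(string)
	return append([]interface{}{"[" + id + "]"}, args...)
}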
func (s *MemcacheStats) updateItemsStats() {
	if s.items == nil {
		return
	}
	s.readStats("items", func(sKey, sValue string) {
		ival, err := strconv.ParseInt(sValue, 10, 64)
		if err != nil {
			log.Error(err)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		subkey, slabid, err := parseItemKey(sKey)
		if err != nil {
			log.Error(err)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		m, ok := s.items[subkey]
		if !ok {
			log.Errorf("Unknown memcache items stats %v %v: %v", subkey, slabid, ival)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		m[slabid] = ival
	})
}
func (ka *allowTestAuthorizer) Authorize(a authorizer.Attributes) (string, error) {
	var (
		tenantName string
		ns         *api.Namespace
		err        error
	)

	if authorizer.IsWhiteListedUser(a.GetUserName()) {
		return "", nil
	}
	if !a.IsReadOnly() && a.GetResource() == "tenants" {
		return "", errors.New("only admin can write tenant")
	}

	if a.GetNamespace() != "" {
		ns, err = ka.kubeClient.Namespaces().Get(a.GetNamespace())
		if err != nil {
			glog.Error(err)
			return "", err
		}
		tenantName = ns.Tenant
	} else if a.GetTenant() != "" {
		te, err := ka.kubeClient.Tenants().Get(a.GetTenant())
		if err != nil {
			glog.Error(err)
			return "", err
		}
		tenantName = te.Name
	}

	if tenantName == "" || tenantName == TenantTest {
		return TenantTest, nil
	}
	return "", errors.New("Keystone authorization failed")
}