func loadMyApp() { timer := time.NewTicker(time.Second) for { <-timer.C glog.Info("Checking time ", time.Now().Unix()) fname := fmt.Sprintf("%d.so", time.Now().Unix()) if _, err := os.Stat(fname); os.IsNotExist(err) { continue } p, err := plugin.Open(fname) if err != nil { log.Println(err) continue } glog.Info("Plugin loaded: ", fname) f, err := p.Lookup("MyAppHandler") if err != nil { log.Println(err) continue } glog.Info("Func loaded") myApp.app = p h := f.(func(http.ResponseWriter, *http.Request)) myApp.Lock() myApp.handler = &h myApp.Unlock() } }
func (client *Client) HandleAuthToken(login *AuthenticationToken) { appid, uid, err := client.AuthToken(login.token) if err != nil { log.Info("auth token err:", err) msg := &Message{cmd: MSG_AUTH_STATUS, body: &AuthenticationStatus{1, 0}} client.wt <- msg return } if uid == 0 || appid == 0 { log.Info("auth token appid==0, uid==0") msg := &Message{cmd: MSG_AUTH_STATUS, body: &AuthenticationStatus{1, 0}} client.wt <- msg return } client.tm = time.Now() client.uid = uid client.appid = appid log.Infof("auth appid:%d uid:%d\n", appid, uid) msg := &Message{cmd: MSG_AUTH_STATUS, body: &AuthenticationStatus{0, client.public_ip}} client.wt <- msg client.SendLoginPoint() client.AddClient() }
func NewCompany(d *display.Display, pt complex128, dir complex128, nSoldiers int, f Formation) (c *Company) { c = new(Company) c.s = make([]*Soldier, nSoldiers) c.f = f _, cols := c.f.RowCols(len(c.s)) for i, _ := range c.s { if i == 0 { c.s[i] = NewSoldier(d, fmt.Sprintf("Sol %v ", i), pt, dir) c.alignBy = c.s[0] glog.Info(c.s[i].GetName(), c.s[i].Pt) } else { c.s[i] = NewRandSoldier(d, fmt.Sprintf("Sol %v ", i)) c.s[0].AddToForm(c.s[i], 1, cols) c.s[i].Pt = c.s[i].refPt() c.s[i].PastPt = c.s[i].Pt c.s[i].Dir = dir glog.Info(c.s[i].GetName(), c.s[i].Pt) } } for _, s := range c.s { s.Color() s.C = c } return c }
func (r *SrsServer) Serve() error { // too many open files will thows a panic. addr, err := net.ResolveTCPAddr("tcp", r.addr) if err != nil { glog.Errorf("resolve listen address failed, err=%v", err) return fmt.Errorf("resolve listen address failed, err=%v", err) } var listener *net.TCPListener listener, err = net.ListenTCP("tcp", addr) if err != nil { glog.Errorf("listen failed, err=%v", err) return fmt.Errorf("listen failed, err=%v", err) } defer listener.Close() for { glog.Info("listener ready to accept client") conn, err := listener.AcceptTCP() if err != nil { glog.Errorf("accept client failed, err=%v", err) return fmt.Errorf("accept client failed, err=%v", err) } glog.Info("TCP Connected") go r.serve(conn) } }
func (es *e2eService) stop() { glog.Info("Stopping e2e services...") es.getLogFiles() // TODO(random-liu): Use a loop to stop all services after introducing service interface. // Stop namespace controller if es.nsController != nil { if err := es.nsController.Stop(); err != nil { glog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err) } } // Stop apiserver if es.apiServer != nil { if err := es.apiServer.Stop(); err != nil { glog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err) } } for _, s := range es.services { if err := s.kill(); err != nil { glog.Errorf("Failed to stop %v: %v", s.name, err) } } // Stop etcd if es.etcdServer != nil { if err := es.etcdServer.Stop(); err != nil { glog.Errorf("Failed to stop %q: %v", es.etcdServer.Name(), err) } } for _, d := range es.rmDirs { err := os.RemoveAll(d) if err != nil { glog.Errorf("Failed to delete directory %s.\n%v", d, err) } } glog.Info("E2E services stopped.") }
func (self *ProtoProc) procSendMessageTopic(cmd protocol.Cmd, session *libnet.Session) error { glog.Info("procSendMessageTopic") var err error topicName := cmd.GetArgs()[0] send2Msg := cmd.GetArgs()[1] glog.Info(send2Msg) glog.Info(topicName) if self.msgServer.topics[topicName] == nil { glog.Warning(topicName + " is not exist") } else { resp := protocol.NewCmdSimple(protocol.RESP_MESSAGE_TOPIC_CMD) resp.AddArg(topicName) resp.AddArg(send2Msg) resp.AddArg(session.State.(*base.SessionState).ClientID) _, err = self.msgServer.topics[topicName].Channel.Broadcast(libnet.Json(resp)) if err != nil { glog.Error(err.Error()) return err } } return err }
func Startcollect(port int, device string, timesignal <-chan time.Time) { handle, err = pcap.OpenLive(device, snapshotLen, promiscuous, timeout) if err != nil { glog.Info(err.Error()) } defer handle.Close() packetSource := gopacket.NewPacketSource(handle, handle.LinkType()) templocalip, err := checkLocalip(device) localip = templocalip if glog.V(0) { glog.Info(localip) } httpinstancelist = list.New() if err != nil { glog.Info(err.Error()) } A: for packet := range packetSource.Packets() { select { case <-timesignal: break A default: processPacketInfo(packet) } } }
// ReadMessage reads one message record from file, verifying the MAGIC
// marker both before and after the payload. Returns nil on any read error
// or magic mismatch (file position is then mid-record).
func (storage *Storage) ReadMessage(file *os.File) *Message {
	// Verify the magic value at the start of the message.
	var magic int32
	err := binary.Read(file, binary.BigEndian, &magic)
	if err != nil {
		log.Info("read file err:", err)
		return nil
	}
	if magic != MAGIC {
		log.Warning("magic err:", magic)
		return nil
	}
	msg := ReceiveMessage(file)
	if msg == nil {
		return msg
	}
	// A trailing magic guards against truncated or corrupted records.
	err = binary.Read(file, binary.BigEndian, &magic)
	if err != nil {
		log.Info("read file err:", err)
		return nil
	}
	if magic != MAGIC {
		log.Warning("magic err:", magic)
		return nil
	}
	return msg
}
func (storage *Storage) LoadOfflineMessage(appid int64, uid int64) []*EMessage { log.Infof("load offline message appid:%d uid:%d\n", appid, uid) c := make([]*EMessage, 0, 10) start := fmt.Sprintf("%d_%d_1", appid, uid) end := fmt.Sprintf("%d_%d_9223372036854775807", appid, uid) r := &util.Range{Start: []byte(start), Limit: []byte(end)} iter := storage.db.NewIterator(r, nil) for iter.Next() { value := iter.Value() msgid, err := strconv.ParseInt(string(value), 10, 64) if err != nil { log.Error("parseint err:", err) continue } log.Info("offline msgid:", msgid) msg := storage.LoadMessage(msgid) if msg == nil { log.Error("can't load offline message:", msgid) continue } c = append(c, &EMessage{msgid: msgid, msg: msg}) } iter.Release() err := iter.Error() if err != nil { log.Warning("iterator err:", err) } log.Info("offline count:", len(c)) return c }
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c *client.Client) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { pods, err := c.Pods(f.Namespace.Name).List(api.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } notready = make([]string, 0) for _, pod := range pods.Items { ready := false for _, c := range pod.Status.Conditions { if c.Type == api.PodReady && c.Status == api.ConditionTrue { ready = true } } if !ready { notready = append(notready, pod.Name) } } if len(notready) == 0 { glog.Infof("All pods ready") return nil } glog.Infof("Some pods are not ready yet: %v", notready) } glog.Info("Timeout on waiting for pods being ready") glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces")) glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json")) // Some pods are still not running. return fmt.Errorf("Some pods are still not running: %v", notready) }
func main() { flag.Parse() ok := true glog.Info("Processing code generator request") res, err := convertFrom(os.Stdin) if err != nil { ok = false if res == nil { message := fmt.Sprintf("Failed to read input: %v", err) res = &plugin.CodeGeneratorResponse{ Error: &message, } } } glog.Info("Serializing code generator response") data, err := proto.Marshal(res) if err != nil { glog.Fatal("Cannot marshal response", err) } _, err = os.Stdout.Write(data) if err != nil { glog.Fatal("Failed to write response", err) } if ok { glog.Info("Succeeded to process code generator request") } else { glog.Info("Failed to process code generator but successfully sent the error to protoc") os.Exit(1) } }
func ReadMessage(b []byte) *Message { input, err := simplejson.NewJson(b) if err != nil { log.Info("json decode fail") return nil } cmd, err := input.Get("cmd").Int() if err != nil { log.Info("json decode cmd fail") return nil } seq, err := input.Get("seq").Int() if err != nil { log.Info("json decode seq fail") return nil } msg := new(Message) msg.cmd = cmd msg.seq = seq if msg.FromJson(input) { return msg } return nil }
func main() { flag.Parse() if err := embd.InitGPIO(); err != nil { panic(err) } defer embd.CloseGPIO() pin, err := embd.NewDigitalPin(7) if err != nil { panic(err) } defer pin.Close() fluidSensor := watersensor.New(pin) for { wet, err := fluidSensor.IsWet() if err != nil { panic(err) } if wet { glog.Info("bot is dry") } else { glog.Info("bot is Wet") } time.Sleep(500 * time.Millisecond) } }
// statePreparing handles events that arrive while the pod is being prepared
// but the VM has not been launched yet (lazy-launch path).
func statePreparing(ctx *VmContext, ev VmEvent) {
	switch ev.Event() {
	case EVENT_VM_EXIT, ERROR_INTERRUPTED:
		glog.Info("VM exited before start...")
	case COMMAND_SHUTDOWN, COMMAND_RELEASE:
		glog.Info("got shutdown or release command, not started yet")
		ctx.reportVmShutdown()
		ctx.Become(nil, StateNone)
	case COMMAND_EXEC:
		ctx.execCmd(ev.(*ExecCommand))
	case COMMAND_WINDOWSIZE:
		cmd := ev.(*WindowSizeCommand)
		ctx.setWindowSize(cmd.ClientTag, cmd.Size)
	case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
		glog.Info("got spec, prepare devices")
		// Lazily prepare devices, then launch the VM and give it 60s to
		// reach the starting state.
		// NOTE(review): COMMAND_REPLACE_POD is also asserted to
		// *RunPodCommand — confirm both events carry that struct.
		if ok := ctx.lazyPrepareDevice(ev.(*RunPodCommand)); ok {
			ctx.startSocks()
			ctx.DCtx.(LazyDriverContext).LazyLaunch(ctx)
			ctx.setTimeout(60)
			ctx.Become(stateStarting, StateStarting)
		} else {
			glog.Warning("Fail to prepare devices, quit")
			ctx.Become(nil, StateNone)
		}
	case GENERIC_OPERATION:
		ctx.handleGenericOperation(ev.(*GenericOperation))
	default:
		unexpectedEventHandler(ctx, ev, "pod initiating")
	}
}
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed // attach-detach annotation on a new node and the existing node, returning // whether the existing node must be updated. func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool { var ( existingCMAAnnotation = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation] ) if newCMAAnnotation == existingCMAAnnotation { return false } // If the just-constructed node and the existing node do // not have the same value, update the existing node with // the correct value of the annotation. if !newSet { glog.Info("Controller attach-detach setting changed to false; updating existing Node") delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation) } else { glog.Info("Controller attach-detach setting changed to true; updating existing Node") if existingNode.Annotations == nil { existingNode.Annotations = make(map[string]string) } existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation } return true }
// doGenCommitters regenerates the committers file from users with push
// access, then rewrites the whitelist to hold pull-access users that are
// not already committers.
// NOTE(review): despite the error return type, every failure path calls
// glog.Fatalf (process exit), so this can only ever return nil — consider
// returning the errors instead if callers are meant to handle them.
func (sq *SubmitQueue) doGenCommitters(config *github_util.Config) error {
	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}

	// Users with push access become the committers file.
	pushSet := sets.NewString()
	for _, user := range pushUsers {
		pushSet.Insert(*user.Login)
	}
	pullSet := sets.NewString()
	for _, user := range pullUsers {
		pullSet.Insert(*user.Login)
	}

	if err = writeWhitelist(sq.Committers, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions should go in the whitelist", pushSet); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")

	existingWhitelist, err := loadWhitelist(sq.Whitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}

	// Whitelist = (existing ∪ pull-access users) minus committers.
	neededInWhitelist := existingWhitelist.Union(pullSet)
	neededInWhitelist = neededInWhitelist.Difference(pushSet)
	if err = writeWhitelist(sq.Whitelist, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions may be added by hand", neededInWhitelist); err != nil {
		glog.Fatalf("Unable to write additional user whitelist: %v", err)
	}
	// NOTE(review): message reads "Successfully update" — likely a typo for
	// "updated"; left unchanged since it is a runtime log string.
	glog.Info("Successfully update whitelist file.")
	return nil
}
func (self *ProtoProc) procClientID(cmd protocol.Cmd, session *libnet.Session) error { glog.Info("procClientID") var err error ID := cmd.GetArgs()[0] sessionStoreData := storage.NewSessionStoreData(cmd.GetArgs()[0], session.Conn().RemoteAddr().String(), self.msgServer.cfg.LocalIP, strconv.FormatUint(session.Id(), 10)) glog.Info(sessionStoreData) args := make([]string, 0) args = append(args, cmd.GetArgs()[0]) CCmd := protocol.NewCmdInternal(protocol.STORE_SESSION_CMD, args, sessionStoreData) glog.Info(CCmd) if self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS] != nil { _, err = self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS].Channel.Broadcast(libnet.Json(CCmd)) if err != nil { glog.Error(err.Error()) return err } } self.msgServer.sessions[cmd.GetArgs()[0]] = session self.msgServer.sessions[cmd.GetArgs()[0]].State = base.NewSessionState(true, cmd.GetArgs()[0]) err = self.procOfflineMsg(session, ID) if err != nil { glog.Error(err.Error()) return err } return nil }
func (r *Rest) Return(str string) { glog.Info("Response received: ", str) restreq := <-outstanding io.WriteString(restreq.ReplyTo, str) glog.Info("Response sent") }
// Run starts listening for RPC and HTTP requests, // and blocks until it the process gets a signal. // It may also listen on a secure port, or on a unix socket. func Run() { onRunHooks.Fire() ServeRPC() l, err := proc.Listen(fmt.Sprintf("%v", *Port)) if err != nil { log.Fatal(err) } host, err := netutil.FullyQualifiedHostname() if err != nil { host, err = os.Hostname() if err != nil { log.Fatalf("os.Hostname() failed: %v", err) } } ListeningURL = url.URL{ Scheme: "http", Host: fmt.Sprintf("%v:%v", host, *Port), Path: "/", } go http.Serve(l, nil) serveSecurePort() serveSocketFile() proc.Wait() l.Close() log.Info("Entering lameduck mode") go onTermHooks.Fire() time.Sleep(*LameduckPeriod) log.Info("Shutting down") Close() }
func (cs *CustomerService) RunOnce() bool { c, err := redis.Dial("tcp", config.redis_address) if err != nil { log.Info("dial redis error:", err) return false } psc := redis.PubSubConn{c} psc.Subscribe("application_update") cs.Clear() for { switch v := psc.Receive().(type) { case redis.Message: if v.Channel == "application_update" { cs.HandleUpdate(string(v.Data)) } else { log.Infof("%s: message: %s\n", v.Channel, v.Data) } case redis.Subscription: log.Infof("%s: %s %d\n", v.Channel, v.Kind, v.Count) case error: log.Info("error:", v) return true } } }
func waitInitReady(ctx *VmContext) { conn, err := utils.UnixSocketConnect(ctx.HyperSockName) if err != nil { glog.Error("Cannot connect to hyper socket ", err.Error()) ctx.Hub <- &InitFailedEvent{ Reason: "Cannot connect to hyper socket " + err.Error(), } return } glog.Info("Wating for init messages...") msg, err := ReadVmMessage(conn.(*net.UnixConn)) if err != nil { glog.Error("read init message failed... ", err.Error()) ctx.Hub <- &InitFailedEvent{ Reason: "read init message failed... " + err.Error(), } conn.Close() } else if msg.Code == INIT_READY { glog.Info("Get init ready message") ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)} go waitCmdToInit(ctx, conn.(*net.UnixConn)) } else { glog.Warningf("Get init message %d", msg.Code) ctx.Hub <- &InitFailedEvent{ Reason: fmt.Sprintf("Get init message %d", msg.Code), } conn.Close() } }
func doGenCommitters(client *github_api.Client) { c, err := github.UsersWithCommit(client, org, project) if err != nil { glog.Fatalf("Unable to read committers from github: %v", err) } if err = writeWhitelist(*committers, "# auto-generated by "+os.Args[0]+" -gen-committers; manual additions should go in the whitelist", c); err != nil { glog.Fatalf("Unable to write committers: %v", err) } glog.Info("Successfully updated committers file.") users, err := loadWhitelist(*userWhitelist) if err != nil { glog.Fatalf("error loading whitelist; it will not be updated: %v", err) } existing := util.NewStringSet(c...) newUsers := []string{} for _, u := range users { if existing.Has(u) { glog.Infof("%v is a dup, or already a committer. Will remove from whitelist.", u) continue } existing.Insert(u) newUsers = append(newUsers, u) } if err = writeWhitelist(*userWhitelist, "# remove dups with "+os.Args[0]+" -gen-committers", newUsers); err != nil { glog.Fatalf("Unable to write de-duped whitelist: %v", err) } glog.Info("Successfully de-duped whitelist.") os.Exit(0) }
func (es *e2eService) start() error { glog.Info("Starting e2e services...") err := es.startEtcd() if err != nil { return err } err = es.startApiServer() if err != nil { return err } s, err := es.startKubeletServer() if err != nil { return err } es.services = append(es.services, s) err = es.startNamespaceController() if err != nil { return nil } glog.Info("E2E services started.") return nil }
func SendSystemMessage(w http.ResponseWriter, req *http.Request) { body, err := ioutil.ReadAll(req.Body) if err != nil { WriteHttpError(400, err.Error(), w) return } m, _ := url.ParseQuery(req.URL.RawQuery) appid, err := strconv.ParseInt(m.Get("appid"), 10, 64) if err != nil { log.Info("error:", err) WriteHttpError(400, "invalid query param", w) return } uid, err := strconv.ParseInt(m.Get("uid"), 10, 64) if err != nil { log.Info("error:", err) WriteHttpError(400, "invalid query param", w) return } sys := &SystemMessage{string(body)} msg := &Message{cmd: MSG_SYSTEM, body: sys} _, err = SaveMessage(appid, uid, 0, msg) if err != nil { WriteHttpError(500, "internal server error", w) } else { w.WriteHeader(200) } }
// Push a private msg
// private_msg stores a private message addressed to r.FormValue("to_id"),
// owned by the authenticated user u, under the Private+owner topic.
func private_msg(w http.ResponseWriter, r *http.Request, u *user) {
	glog.Info("Add a private msg.")
	msg := newMsg()
	r.ParseForm()
	msg.To_id = r.FormValue("to_id")
	glog.Infof("To_id is :%v", msg.To_id)
	// Optional expiry timestamp; a parse error leaves Expired at zero.
	if s := r.FormValue("expired"); s != "" {
		msg.Expired, _ = strconv.ParseInt(s, 10, 64)
	}
	var err error
	// NOTE(review): ParseForm above may consume a form-encoded request
	// body, in which case this ReadAll would see an empty body — confirm
	// callers send the payload outside the form encoding.
	if msg.Body, err = ioutil.ReadAll(r.Body); err != nil {
		glog.Errorf("push private msg error%v\n", err)
		return
	}
	if store.Manager.IsUserExist(msg.To_id, u.ID) {
		msg.Owner = u.ID
		// msg.Msg_id = get_uuid()
		msg.Topic = Private + msg.Owner
		if err = write_msg(msg); err != nil {
			glog.Error(err)
			badReaquest(w, `{"status":"fail"}`)
			return
		}
		io.WriteString(w, `{"status":"success"}`)
		u.isOK = true
	} else {
		glog.Info("push private msg error: user not exist.")
		badReaquest(w, `{"status":"fail"}`)
	}
}
// state machine func commonStateHandler(ctx *VmContext, ev VmEvent, hasPod bool) bool { processed := true switch ev.Event() { case EVENT_VM_EXIT: glog.Info("Got VM shutdown event, go to cleaning up") ctx.unsetTimeout() if closed := ctx.onVmExit(hasPod); !closed { ctx.Become(stateDestroying, StateDestroying) } case ERROR_INTERRUPTED: interruptEv := ev.(*Interrupted) glog.Info("Connection interrupted: %s, quit...", interruptEv.Reason) ctx.exitVM(true, fmt.Sprintf("connection to VM broken: %s", interruptEv.Reason), false, false) if hasPod { ctx.reclaimDevice() } case COMMAND_SHUTDOWN: glog.Info("got shutdown command, shutting down") ctx.exitVM(false, "", hasPod, ev.(*ShutdownCommand).Wait) case GENERIC_OPERATION: ctx.handleGenericOperation(ev.(*GenericOperation)) default: processed = false } return processed }
func (client *Client) Read() { for { client.conn.SetDeadline(time.Now().Add(CLIENT_TIMEOUT * time.Second)) msg := ReceiveMessage(client.conn) if msg == nil { client.wt <- nil client.RemoveClient() break } log.Info("msg:", msg.cmd) if msg.cmd == MSG_AUTH { client.HandleAuth(msg.body.(*Authentication)) } else if msg.cmd == MSG_AUTH_TOKEN { client.HandleAuthToken(msg.body.(*AuthenticationToken)) } else if msg.cmd == MSG_HEARTBEAT { } else if msg.cmd == MSG_PING { client.HandlePing() } else if msg.cmd == MSG_VOIP_CONTROL { client.HandleVOIPControl(msg.body.(*VOIPControl)) } else { log.Info("unknown msg:", msg.cmd) } } }
func Info(ctx context.Context, args ...interface{}) { if ctx == nil || !hasTraceKey(ctx) { glog.Info(args) return } glog.Info(prependParam(args, ctx)...) }
func BenchmarkRegressionSplitter(b *testing.B) { flag.Parse() forestConfig := &pb.ForestConfig{ NumWeakLearners: proto.Int64(int64(*numTrees)), SplittingConstraints: &pb.SplittingConstraints{ MaximumLevels: proto.Int64(int64(*numLevels)), }, LossFunctionConfig: &pb.LossFunctionConfig{ LossFunction: pb.LossFunction_LOGIT.Enum(), }, Algorithm: pb.Algorithm_BOOSTING.Enum(), } glog.Info(forestConfig.String()) generator, err := NewForestGenerator(forestConfig) if err != nil { glog.Fatal(err) } examples := constructBenchmarkExamples(b.N, *numFeatures, 0) glog.Infof("Starting with %v examples", len(examples)) b.ResetTimer() forest := generator.ConstructForest(examples) res, err := json.MarshalIndent(forest, "", " ") if err != nil { glog.Fatalf("Error: %v", err) } glog.Info(res) }
// Run manages all the channels. func (h *hub) Run(queue string) { for { select { case c := <-h.register: glog.Info(fmt.Sprintf("new connection from %s", c.uname)) h.connections[c.uname] = append(h.connections[c.uname], c) case c := <-h.unregister: conns := h.connections[c.uname] newConns := []*connection{} for _, cn := range conns { if cn.conn == c.conn { c.close() glog.Info(fmt.Sprintf("close 1 connection from %s", cn.uname)) } else { newConns = append(newConns, cn) } } h.connections[c.uname] = newConns case m := <-h.outgoing: if err := rdsPool.Enqueue(queue, m); err != nil { glog.Error(err) } } } }