func Do_push(raw_data []byte) error {
	msg := &XPush.CommonMsg{}
	err := proto.Unmarshal(raw_data, msg)
	if err != nil {
		log.Warnf("Do_push | proto Unmarshal failed, err:%s", err.Error())
		return err
	}
	ios_msg := msg.GetIosMsg()
	appid := ios_msg.GetAppid()
	log.Infof("Do_push | appid:%s", appid)
	// Pack the message to be sent.
	m, err := pkg_apns_msg(msg)
	if err != nil {
		log.Warnf("Do_push | pkg_apns_msg err:%s", err.Error())
		return err
	}
	log.Infof("Do_push | msg:%s", m)
	// Push the message to APNs.
	// Application environment: 1 for production, 2 for development.
	env := ios_msg.GetEnvironment()
	push_to_apns(appid, env, m)
	return nil
}
func newRedisStorage(server string, pass string, maxActive, maxIdle, idleTimeout, retry, cto, rto, wto int) *RedisStorage {
	return &RedisStorage{
		pool: &redis.Pool{
			MaxActive:   maxActive,
			MaxIdle:     maxIdle,
			IdleTimeout: time.Duration(idleTimeout) * time.Second,
			Dial: func() (redis.Conn, error) {
				//c, err := redis.Dial("tcp", server)
				c, err := redis.DialTimeout(
					"tcp",
					server,
					time.Duration(cto)*time.Second,
					time.Duration(rto)*time.Second,
					time.Duration(wto)*time.Second)
				if err != nil {
					log.Warnf("failed to connect Redis, (%s)", err)
					return nil, err
				}
				if pass != "" {
					if _, err := c.Do("AUTH", pass); err != nil {
						log.Warnf("failed to auth Redis, (%s)", err)
						return nil, err
					}
				}
				//log.Debugf("connected with Redis (%s)", server)
				return c, err
			},
			TestOnBorrow: func(c redis.Conn, t time.Time) error {
				_, err := c.Do("PING")
				return err
			},
		},
		retry: retry,
	}
}
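// Usage sketch (hypothetical address and values, not from the original
// source): the integer arguments are maxActive, maxIdle, then idleTimeout,
// retry count, and connect/read/write timeouts in seconds, matching the
// time.Duration(n)*time.Second conversions inside newRedisStorage.
func exampleRedisStorage() *RedisStorage {
	return newRedisStorage("127.0.0.1:6379", "", 100, 10, 240, 3, 2, 2, 2)
}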
func (c *memcacheCacher) doFetch(sessId string) (u *User, cacheHit bool, err error) {
	it, err := mc.Get(sessId)
	if err != nil && err != memcache.ErrCacheMiss {
		// actual error
		log.Warnf("[Auth] Token cache fetch error for '%s': %v", sessId, err)
		return nil, false, err
	}
	if err == memcache.ErrCacheMiss {
		// not found - not an error though
		log.Trace("[Auth] Token cache - miss")
		return nil, false, nil
	}
	if bytes.Equal(it.Value, []byte(invalidPlaceholder)) {
		// cached invalid
		log.Tracef("[Auth] Token cache - invalid placeholder in cache for %s", sessId)
		return nil, true, nil
	}
	u, err = FromSessionToken(sessId, string(it.Value))
	if err != nil {
		// found, but we can't decode - treat as not found
		log.Warnf("[Auth] Token cache decode error: %v", err)
		return nil, false, nil
	}
	return u, true, nil
}
func (c *ec2MetadataClientImpl) ReadResource(path string) ([]byte, error) {
	endpoint := c.ResourceServiceUrl(path)
	var err error
	var resp *http.Response
	utils.RetryNWithBackoff(utils.NewSimpleBackoff(metadataRetryStartDelay, metadataRetryMaxDelay, metadataRetryDelayMultiple, 0.2), metadataRetries, func() error {
		resp, err = c.client.Get(endpoint)
		if err == nil && resp.StatusCode == 200 {
			return nil
		}
		if resp != nil && resp.Body != nil {
			resp.Body.Close()
		}
		if err == nil {
			seelog.Warnf("Error accessing the EC2 Metadata Service; non-200 response: %v", resp.StatusCode)
			return fmt.Errorf("Error contacting EC2 Metadata service; non-200 response: %v", resp.StatusCode)
		}
		seelog.Warnf("Error accessing the EC2 Metadata Service; retrying: %v", err)
		return err
	})
	if resp != nil && resp.Body != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(resp.Body)
}
// checkMissingAndDepreciated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal and
// warn. Fatal will result in an error being returned; warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDepreciated() error {
	cfgElem := reflect.ValueOf(cfg).Elem()
	cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
	fatalFields := []string{}
	for i := 0; i < cfgElem.NumField(); i++ {
		cfgField := cfgElem.Field(i)
		if utils.ZeroOrNil(cfgField.Interface()) {
			missingTag := cfgStructField.Field(i).Tag.Get("missing")
			if len(missingTag) == 0 {
				continue
			}
			switch missingTag {
			case "warn":
				seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
			case "fatal":
				seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
				fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
			default:
				seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag)
			}
		} else {
			// present
			deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
			if len(deprecatedTag) == 0 {
				continue
			}
			seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag)
		}
	}
	if len(fatalFields) > 0 {
		return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
	}
	return nil
}
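// Illustration only (not from the original source): a hypothetical config
// struct showing the `missing` and `deprecated` tags that
// checkMissingAndDepreciated inspects via reflection.
type exampleTaggedConfig struct {
	Cluster    string `missing:"fatal"`             // zero value -> collected into the returned error
	LogLevel   string `missing:"warn"`              // zero value -> warning logged only
	OldSetting string `deprecated:"use NewSetting"` // non-zero value -> deprecation warning logged
}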
func (this *LetvAuth) Auth(token string) (bool, string) {
	url := fmt.Sprintf("%s/%s", this.url, token)
	//log.Infof("letv auth: url(%s)", url)
	res, err := http.Get(url)
	if err != nil {
		log.Warnf("http get failed: %s", err)
		return false, ""
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Warnf("ioutil readall failed: %s", err)
		res.Body.Close()
		return false, ""
	}
	res.Body.Close()
	//log.Infof("sso response (%s)", body)
	var tr tokenResult
	err = json.Unmarshal(body, &tr)
	if err != nil {
		log.Warnf("json unmarshal failed: %s (%s)", err, body)
		return false, ""
	}
	if tr.Status != "1" || tr.ErrCode != "0" {
		log.Infof("sso result failed: (%s) (%s)", tr.Status, tr.ErrCode)
		return false, ""
	}
	// Use checked type assertions so a malformed response cannot panic.
	m, ok := tr.Bean.(map[string]interface{})
	if !ok {
		log.Warnf("'bean' is not an object")
		return false, ""
	}
	result, ok := m["result"]
	if !ok {
		log.Infof("missing 'bean.result'")
		return false, ""
	}
	uid, ok := result.(string)
	if !ok {
		log.Warnf("'bean.result' is not a string")
		return false, ""
	}
	return true, "letv_" + uid
}
// getContainerMetricsForTask gets all container metrics for a task arn.
func (engine *DockerStatsEngine) getContainerMetricsForTask(taskArn string) ([]*ecstcs.ContainerMetric, error) {
	engine.containersLock.Lock()
	defer engine.containersLock.Unlock()
	containerMap, taskExists := engine.tasksToContainers[taskArn]
	if !taskExists {
		return nil, fmt.Errorf("Task not found")
	}
	var containerMetrics []*ecstcs.ContainerMetric
	for _, container := range containerMap {
		// Get CPU stats set.
		cpuStatsSet, err := container.statsQueue.GetCPUStatsSet()
		if err != nil {
			seelog.Warnf("Error getting cpu stats, err: %v, container: %v", err, container.containerMetadata)
			continue
		}
		// Get memory stats set.
		memoryStatsSet, err := container.statsQueue.GetMemoryStatsSet()
		if err != nil {
			seelog.Warnf("Error getting memory stats, err: %v, container: %v", err, container.containerMetadata)
			continue
		}
		containerMetrics = append(containerMetrics, &ecstcs.ContainerMetric{
			CpuStatsSet:    cpuStatsSet,
			MemoryStatsSet: memoryStatsSet,
		})
	}
	return containerMetrics, nil
}
func (container *StatsContainer) collect() {
	dockerID := container.containerMetadata.DockerID
	for {
		select {
		case <-container.ctx.Done():
			seelog.Debugf("Stopping stats collection for container %s", dockerID)
			return
		default:
			seelog.Debugf("Collecting stats for container %s", dockerID)
			dockerStats, err := container.client.Stats(dockerID, container.ctx)
			if err != nil {
				seelog.Warnf("Error retrieving stats for container %s: %v", dockerID, err)
				continue
			}
			for rawStat := range dockerStats {
				stat, err := dockerStatsToContainerStats(rawStat)
				if err == nil {
					container.statsQueue.Add(stat)
				} else {
					seelog.Warnf("Error converting stats for container %s: %v", dockerID, err)
				}
			}
			seelog.Debugf("Disconnected from docker stats for container %s", dockerID)
		}
	}
}
func controlDevice(w rest.ResponseWriter, r *rest.Request) {
	type ControlParam struct {
		Token   string `json:"token"`
		Service string `json:"service"`
		Cmd     string `json:"cmd"`
	}
	devId := r.PathParam("devid")
	body := r.Env["body"]
	if body == nil {
		rest.Error(w, "Empty body", http.StatusBadRequest)
		return
	}
	b := body.([]byte)
	param := ControlParam{}
	if err := json.Unmarshal(b, &param); err != nil {
		log.Warnf("Error decode body: %s", err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if !checkAuthz(param.Token, devId) {
		log.Warnf("Auth failed. token: %s, device_id: %s", param.Token, devId)
		rest.Error(w, "Authorization failed", http.StatusForbidden)
		return
	}
	stats.Cmd(param.Service)
	resp := cloud.ApiResponse{}
	result, err := rpcClient.Control(devId, param.Service, param.Cmd)
	if err != nil {
		if _, ok := err.(*mq.NoDeviceError); ok {
			stats.CmdOffline(param.Service)
			rest.NotFound(w, r)
			return
		} else if _, ok := err.(*mq.TimeoutError); ok {
			stats.CmdTimeout(param.Service)
			resp.ErrNo = cloud.ERR_CMD_TIMEOUT
			resp.ErrMsg = fmt.Sprintf("recv response timeout [%s]", devId)
		} else if _, ok := err.(*mq.InvalidServiceError); ok {
			stats.CmdInvalidService(param.Service)
			resp.ErrNo = cloud.ERR_CMD_INVALID_SERVICE
			resp.ErrMsg = fmt.Sprintf("Device [%s] has no service [%s]", devId, param.Service)
		} else if _, ok := err.(*mq.SdkError); ok {
			stats.CmdOtherError(param.Service)
			resp.ErrNo = cloud.ERR_CMD_SDK_ERROR
			resp.ErrMsg = fmt.Sprintf("Error when calling service [%s] on [%s]", param.Service, devId)
		} else {
			stats.CmdOtherError(param.Service)
			resp.ErrNo = cloud.ERR_CMD_OTHER
			resp.ErrMsg = err.Error()
		}
	} else {
		stats.CmdSuccess(param.Service)
		resp.ErrNo = cloud.ERR_NOERROR
		resp.Data = result
	}
	w.WriteJson(resp)
}
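// Illustrative request body for controlDevice, derived from the ControlParam
// json tags above. The values and the exact route are hypothetical; only the
// "devid" path parameter is implied by r.PathParam("devid"):
//
//	POST /devices/{devid}/control
//	{"token": "t0ken", "service": "player", "cmd": "pause"}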
/*
** reply result codes:
** 1: invalid JSON
** 2: missing 'appid' or 'regid'
** 3: unknown 'appid'
** 4: storage I/O failed
** 10: unknown 'regid'
*/
func handleUnsubscribe(conn *net.TCPConn, client *Client, header *Header, body []byte) int {
	log.Debugf("%s: RECV Unsubscribe (%s)", client.devId, body)
	var request UnsubscribeMessage
	var reply UnsubscribeReplyMessage
	onReply := func(result int, appId string) {
		reply.Result = result
		reply.AppId = appId
		sendReply(client, MSG_UNSUBSCRIBE_REPLY, header.Seq, &reply)
	}
	if err := json.Unmarshal(body, &request); err != nil {
		log.Warnf("%s: json decode failed: (%v)", client.devId, err)
		onReply(1, request.AppId)
		return 0
	}
	if request.AppId == "" || request.RegId == "" {
		log.Warnf("%s: appid or regid is empty", client.devId)
		onReply(2, request.AppId)
		return 0
	}
	// unknown AppId
	regapp, ok := client.RegApps[request.AppId]
	if !ok {
		log.Warnf("%s: unknown AppId %s", client.devId, request.AppId)
		onReply(3, request.AppId)
		return 0
	}
	// unknown RegId
	if regapp.RegId != request.RegId {
		log.Warnf("%s: unknown RegId %s", client.devId, request.RegId)
		onReply(10, request.AppId)
		return 0
	}
	index := -1
	for n, item := range regapp.Topics {
		if item == request.Topic {
			index = n
		}
	}
	if index >= 0 {
		topics := append(regapp.Topics[:index], regapp.Topics[index+1:]...)
		info := regapp.AppInfo
		info.Topics = topics
		if ok := AMInstance.UpdateAppInfo(client.devId, request.RegId, &info); !ok {
			onReply(4, request.AppId)
			return 0
		}
		regapp.Topics = topics
	}
	reply.Result = 0
	reply.AppId = request.AppId
	reply.RegId = request.RegId
	sendReply(client, MSG_UNSUBSCRIBE_REPLY, header.Seq, &reply)
	return 0
}
func (t *rabbitTransport) Send(req message.Request, _timeout time.Duration) (message.Response, error) {
	id := req.Id()
	if id == "" {
		_uuid, err := uuid.NewV4()
		if err != nil {
			log.Errorf("[Typhon:RabbitTransport] Failed to generate request uuid: %v", err)
			return nil, err
		}
		req.SetId(_uuid.String())
	}

	rspQueue := req.Id()
	defer func() {
		t.inflightReqsM.Lock()
		delete(t.inflightReqs, rspQueue)
		t.inflightReqsM.Unlock()
	}()
	rspChan := make(chan message.Response, 1)
	t.inflightReqsM.Lock()
	t.inflightReqs[rspQueue] = rspChan
	t.inflightReqsM.Unlock()

	timeout := time.NewTimer(_timeout)
	defer timeout.Stop()

	headers := req.Headers()
	headers["Content-Encoding"] = "request"
	headers["Service"] = req.Service()
	headers["Endpoint"] = req.Endpoint()

	select {
	case <-t.Ready():
	case <-timeout.C:
		log.Warnf("[Typhon:RabbitTransport] Timed out after %s waiting for ready", _timeout.String())
		return nil, transport.ErrTimeout
	}

	if err := t.connection().Publish(Exchange, req.Service(), amqp.Publishing{
		CorrelationId: req.Id(),
		Timestamp:     time.Now().UTC(),
		Body:          req.Payload(),
		ReplyTo:       t.replyQueue,
		Headers:       headersToTable(headers),
	}); err != nil {
		log.Errorf("[Typhon:RabbitTransport] Failed to publish: %v", err)
		return nil, err
	}

	select {
	case rsp := <-rspChan:
		return rsp, nil
	case <-timeout.C:
		log.Warnf("[Typhon:RabbitTransport] Timed out after %s waiting for response to %s", _timeout.String(), req.Id())
		return nil, transport.ErrTimeout
	}
}
// getContainerMetricsForTask gets all container metrics for a task arn.
func (engine *DockerStatsEngine) getContainerMetricsForTask(taskArn string) ([]*ecstcs.ContainerMetric, error) {
	engine.containersLock.Lock()
	defer engine.containersLock.Unlock()
	containerMap, taskExists := engine.tasksToContainers[taskArn]
	if !taskExists {
		return nil, fmt.Errorf("Task not found")
	}
	var containerMetrics []*ecstcs.ContainerMetric
	for _, container := range containerMap {
		dockerID := container.containerMetadata.DockerID
		// Check if the container is terminal. If it is, make sure that it is
		// cleaned up properly. We might sometimes miss events from the docker
		// task engine and this helps in reconciling the state. The tcs client's
		// GetInstanceMetrics probe is used as the trigger for this.
		terminal, err := container.terminal()
		if err != nil {
			// Error determining if the container is terminal. This means that the container
			// id could not be resolved to a container that is being tracked by the
			// docker task engine. If the docker task engine has already removed
			// the container from its state, there's no point in the stats engine tracking the
			// container. So, clean up anyway.
			seelog.Warnf("Error determining if the container %s is terminal, cleaning up and skipping: %v", dockerID, err)
			engine.doRemoveContainer(container, taskArn)
			continue
		} else if terminal {
			// Container is in a known terminal state. Stop collecting metrics.
			seelog.Infof("Container %s is terminal, cleaning up and skipping", dockerID)
			engine.doRemoveContainer(container, taskArn)
			continue
		}
		// Container is not terminal. Get CPU stats set.
		cpuStatsSet, err := container.statsQueue.GetCPUStatsSet()
		if err != nil {
			seelog.Warnf("Error getting cpu stats, err: %v, container: %v", err, dockerID)
			continue
		}
		// Get memory stats set.
		memoryStatsSet, err := container.statsQueue.GetMemoryStatsSet()
		if err != nil {
			seelog.Warnf("Error getting memory stats, err: %v, container: %v", err, dockerID)
			continue
		}
		containerMetrics = append(containerMetrics, &ecstcs.ContainerMetric{
			CpuStatsSet:    cpuStatsSet,
			MemoryStatsSet: memoryStatsSet,
		})
	}
	return containerMetrics, nil
}
// reloadSlas loads timeouts from the discovery service for all services we know about (have tried to call)
func (t *Timeout) reloadSlas() {
	replacement := make(map[string]map[string]time.Duration)
	for service := range t.endpoints {
		// load from discovery service
		log.Debugf("[Client] Loading SLAs from discovery service for %v...", service)
		req, err := NewRequest("com.hailocab.kernel.discovery", "endpoints", &eps.Request{
			Service: proto.String(service),
		})
		if err != nil {
			log.Warnf("[Client] Failed to create proto request to get endpoints for service: %s", service)
			continue
		}
		rsp := &eps.Response{}
		// explicitly define timeout since we're in no rush
		if err := t.client.Req(req, rsp, Options{"retries": 0, "timeout": time.Second * 5}); err != nil {
			log.Warnf("[Client] Trouble getting endpoint response back from discovery-service for service: %s", service)
			continue
		}
		for _, ep := range rsp.GetEndpoints() {
			endpoint := strings.TrimLeft(strings.TrimPrefix(ep.GetFqName(), service), ".")
			if _, ok := replacement[service]; !ok {
				replacement[service] = make(map[string]time.Duration)
			}
			replacement[service][endpoint] = msToDuration(ep.GetUpper95())
		}
	}
	// double-check we have all the things we started with -- if not, put back the "last known" (probably defaults)
	for service, serviceEndpoints := range t.endpoints {
		for endpoint, timeout := range serviceEndpoints {
			if _, ok := replacement[service]; !ok {
				replacement[service] = make(map[string]time.Duration)
			}
			if _, ok := replacement[service][endpoint]; !ok {
				log.Debugf("[Client] Failed to find SLA for %s.%s, falling back to %v", service, endpoint, timeout)
				replacement[service][endpoint] = timeout
			}
		}
	}
	// SLAs changed? if not, don't bother switching+logging
	if hashSlas(replacement) == t.hashEndpoints() {
		return
	}
	t.Lock()
	defer t.Unlock()
	t.endpoints = replacement
	log.Infof("[Client] Loaded new SLAs from discovery service: %v", t.endpoints)
}
func zk_unreginster(path string, c *zk.Conn, exit chan os.Signal) {
	for sig := range exit {
		log.Warnf("zk_unreginster | received ctrl+c(%v)\n", sig)
		err := c.Delete(path, -1)
		log.Infof("zk_unreginster | path :%+v", path)
		if err != nil {
			log.Warnf("zk_unreginster | Delete returned: %+v", err)
		}
		os.Exit(0)
	}
}
func main() {
	defer seelog.Flush()
	// Capture and install the configured logger; the original discarded the
	// return values of LoggerFromConfigAsString, so the config never took effect.
	if logger, err := seelog.LoggerFromConfigAsString(`formatid="debug"`); err == nil {
		seelog.ReplaceLogger(logger)
	}
	flag.Parse()
	cfg := FtpCfg{*host, *user, *pw, *port}
	fClient, err := NewFtpClient(cfg)
	if err != nil {
		panic(err)
	}
	iClient, err := NewInfluxClient(*surl, *db)
	if err != nil {
		panic(err)
	}
	files := make([]*FtpToInflux, 0)
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		seelog.Tracef("Handle line '%s'", line)
		if strings.HasPrefix(line, commentPrefix) {
			// Comment
			continue
		}
		splittedLine := strings.Split(line, space)
		if len(splittedLine) != 2 {
			seelog.Warnf("Line '%s' does not contain exactly one space", line)
			continue
		}
		data := &FtpToInflux{splittedLine[0], strings.Split(splittedLine[1], sep)}
		files = append(files, data)
	}
	for _, f := range files {
		seelog.Tracef("Start with file '%s'!", f.Filename)
		buf, err := fClient.Download(f.Filename)
		if err != nil {
			seelog.Warnf("Error downloading file '%s': %v", f.Filename, err)
			continue
		}
		datas := Transform(buf)
		err = iClient.Write(datas, f.Measurements)
		if err != nil {
			seelog.Warnf("Error writing Data: %v", err)
			continue
		}
		seelog.Tracef("File '%s' downloaded and written to %d measurements!", f.Filename, len(f.Measurements))
	}
}
// Normalize all auth types into a uniform 'dockerAuths' type.
// On error, any appropriate information will be logged and an empty dockerAuths will be returned.
func parseAuthData(authType string, authData json.RawMessage) dockerAuths {
	intermediateAuthData := make(dockerAuths)
	switch authType {
	case "docker":
		err := json.Unmarshal(authData, &intermediateAuthData)
		if err != nil {
			seelog.Warn("Could not parse 'docker' type auth config")
			return dockerAuths{}
		}
	case "dockercfg":
		var base64dAuthInfo dockercfgData
		err := json.Unmarshal(authData, &base64dAuthInfo)
		if err != nil {
			seelog.Warn("Could not parse 'dockercfg' type auth config")
			return dockerAuths{}
		}
		for registry, auth := range base64dAuthInfo {
			data, err := base64.StdEncoding.DecodeString(auth.Auth)
			if err != nil {
				seelog.Warnf("Malformed auth data for registry %v", registry)
				continue
			}
			usernamePass := strings.SplitN(string(data), ":", 2)
			if len(usernamePass) != 2 {
				seelog.Warnf("Malformed auth data for registry %v; must contain ':'", registry)
				continue
			}
			intermediateAuthData[registry] = docker.AuthConfiguration{
				Username: usernamePass[0],
				Password: usernamePass[1],
			}
		}
	case "":
		// not set; no warning needed
		return dockerAuths{}
	default:
		seelog.Warnf("Unknown auth configuration: %v", authType)
		return dockerAuths{}
	}
	// Normalize the intermediate registry keys by stripping the URL schema
	output := make(dockerAuths)
	for key, val := range intermediateAuthData {
		output[stripRegistrySchema(key)] = val
	}
	return output
}
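// Illustrative inputs for the two supported auth types (registry names and
// credentials are made up; "dXNlcjpwYXNz" is base64("user:pass")):
//
//	"docker" type    : {"registry.example.com": {"username": "user", "password": "pass"}}
//	"dockercfg" type : {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}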
// publishMetricsOnce is invoked by the ticker to periodically publish metrics to the backend.
func (cs *clientServer) publishMetricsOnce() {
	// Get the list of objects to send to the backend.
	requests, err := cs.metricsToPublishMetricRequests()
	if err != nil {
		seelog.Warnf("Error getting instance metrics: %v", err)
	}
	// Make the publish metrics request to the backend.
	for _, request := range requests {
		err = cs.MakeRequest(request)
		if err != nil {
			seelog.Warnf("Error publishing metrics: %v. Request: %v", err, request)
		}
	}
}
func (r *RedisStorage) HashGetAll(db string) ([]string, error) {
	ret, err := r.Do("HGETALL", db)
	if err != nil {
		log.Warnf("redis: HGETALL failed (%s)", err)
		return nil, err
	}
	if ret != nil {
		strs, err := redis.Strings(ret, nil)
		if err != nil {
			log.Warnf("redis: convert to strings failed (%s)", err)
		}
		return strs, err
	}
	return nil, nil
}
/*
** reply result codes:
** 1: invalid JSON
** 2: missing 'appid' or 'regid'
** 4: unknown 'appid'
** 10: unknown 'regid'
*/
func handleStats(conn *net.TCPConn, client *Client, header *Header, body []byte) int {
	log.Debugf("%s: RECV Stats (%s)", client.devId, body)
	var request StatsMessage
	var reply StatsReplyMessage
	onReply := func(result int, appId string) {
		reply.Result = result
		reply.AppId = appId
		sendReply(client, MSG_STATS_REPLY, header.Seq, &reply)
	}
	if err := json.Unmarshal(body, &request); err != nil {
		log.Warnf("%s: json decode failed: (%v)", client.devId, err)
		onReply(1, request.AppId)
		return 0
	}
	if request.AppId == "" || request.RegId == "" {
		log.Warnf("%s: appid or regid is empty", client.devId)
		onReply(2, request.AppId)
		return 0
	}
	// unknown AppId
	regapp, ok := client.RegApps[request.AppId]
	if !ok {
		log.Warnf("%s: unknown AppId %s", client.devId, request.AppId)
		onReply(4, request.AppId)
		return 0
	}
	// unknown RegId
	if regapp.RegId != request.RegId {
		log.Warnf("%s: unknown RegId %s", client.devId, request.RegId)
		onReply(10, request.AppId)
		return 0
	}
	if request.Click {
		storage.Instance.MsgStatsClick(request.MsgId)
		storage.Instance.AppStatsClick(request.AppId)
	}
	reply.Result = 0
	reply.AppId = request.AppId
	reply.RegId = request.RegId
	sendReply(client, MSG_STATS_REPLY, header.Seq, &reply)
	return 0
}
// Stats returns a channel of *docker.Stats entries for the container.
func (dg *dockerGoClient) Stats(id string, ctx context.Context) (<-chan *docker.Stats, error) {
	client, err := dg.dockerClient()
	if err != nil {
		return nil, err
	}
	stats := make(chan *docker.Stats)
	cancel := make(chan bool)
	options := docker.StatsOptions{
		ID:                id,
		Stats:             stats,
		Stream:            true,
		Done:              cancel,
		InactivityTimeout: statsInactivityTimeout,
	}
	statsComplete := make(chan struct{})
	go func() {
		statsErr := client.Stats(options)
		if statsErr != nil {
			seelog.Warnf("Error retrieving stats for container %s: %v", id, statsErr)
		}
		close(statsComplete)
	}()
	go func() {
		select {
		case <-ctx.Done():
			cancel <- true
		case <-statsComplete:
		}
	}()
	return stats, nil
}
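// Consumption sketch (illustrative caller, not from the original source):
// go-dockerclient closes the stats channel when the stream ends, so ranging
// over it terminates once the passed context is cancelled.
//
//	statsChan, err := dg.Stats(containerID, ctx)
//	if err == nil {
//		for stat := range statsChan {
//			// process *docker.Stats
//		}
//	}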
func (r *RedisStorage) IncReplyTooLate() error {
	_, err := redis.Int(r.Do("INCR", "stats_reply_too_late"))
	if err != nil {
		log.Warnf("redis: INCR failed, (%s)", err)
	}
	return err
}
func (r *RedisStorage) IncQueryDeviceInfo() error {
	_, err := redis.Int(r.Do("INCR", "stats_query_device_info"))
	if err != nil {
		log.Warnf("redis: INCR failed, (%s)", err)
	}
	return err
}
func (r *RedisStorage) IncQueryOnlineDevices() error {
	_, err := redis.Int(r.Do("INCR", "stats_query_online_devices"))
	if err != nil {
		log.Warnf("redis: INCR failed, (%s)", err)
	}
	return err
}
func (r *RedisStorage) InitDevices(serverName string) error {
	_, err := redis.Int(r.Do("DEL", "comet:"+serverName))
	if err != nil {
		log.Warnf("redis: DEL failed, (%s)", err)
	}
	return err
}
func (r *RedisStorage) initStatsService(service string) error {
	_, err := redis.Int(r.Do("SADD", "stats_service", service))
	if err != nil {
		log.Warnf("redis: SADD failed, (%s)", err)
	}
	return err
}
func (r *RedisStorage) RefreshDevices(serverName string, timeout int) error {
	_, err := redis.Int(r.Do("EXPIRE", "comet:"+serverName, timeout))
	if err != nil {
		log.Warnf("redis: EXPIRE failed, (%s)", err)
	}
	return err
}
func (this *AppManager) RegisterApp2(devId string, regId string, appId string, userId string) *RegApp {
	this.lock.RLock()
	if _, ok := this.regappMap[regId]; ok {
		log.Errorf("%s: regid %s in memory already", devId, regId)
		this.lock.RUnlock()
		return nil
	}
	this.lock.RUnlock()
	key := fmt.Sprintf("db_app_%s", appId)
	val, err := storage.Instance.HashGet(key, regId)
	if err != nil {
		return nil
	}
	if val == nil {
		return nil
	}
	var info AppInfo
	if err := json.Unmarshal(val, &info); err != nil {
		log.Warnf("invalid app info from storage") //TODO: need to replace it
		return nil
	}
	regapp := this.AddApp(devId, regId, &info)
	return regapp
}
func (this *Agent) LoadRegIds() {
	fileName := "RegIds." + this.deviceId
	file, err := os.Open(fileName)
	if err != nil {
		log.Warnf("Open error: %s", err.Error())
		return
	}
	// Close the file on every return path; the original leaked the handle on success.
	defer file.Close()
	buf := make([]byte, 1024)
	n, err := file.Read(buf)
	if err != nil {
		log.Errorf("Read file error: %s", err.Error())
		return
	}
	//log.Debugf("content in %s: %s", fileName, buf)
	err = json.Unmarshal(buf[:n], &this.RegIds)
	if err != nil {
		log.Errorf("Unmarshal %s error: %s", fileName, err.Error())
		return
	}
	log.Debugf("RegIds: %s", this.RegIds)
}
func (stormClient *StormClient) getOffsetsForPartition(consumerGroup string, partition int, partitionPath string) {
	stateStr, zkNodeStat, err := stormClient.conn.Get(partitionPath)
	switch {
	case err == nil:
		offset, topic, errConversion := parseStormSpoutStateJson(string(stateStr))
		switch {
		case errConversion == nil:
			log.Debugf("About to sync Storm offset: [%s,%s,%v]::[%v,%v]\n", consumerGroup, topic, partition, offset, zkNodeStat.Mtime)
			partitionOffset := &PartitionOffset{
				Cluster:   stormClient.cluster,
				Topic:     topic,
				Partition: int32(partition),
				Group:     consumerGroup,
				Timestamp: int64(zkNodeStat.Mtime), // note: this is millis
				Offset:    int64(offset),
			}
			timeoutSendOffset(stormClient.app.Storage.offsetChannel, partitionOffset, 1)
		default:
			log.Errorf("Something is very wrong! Cannot parse state json for partition %v of consumer group %s in ZK path %s: %s. Error: %v", partition, consumerGroup, partitionPath, stateStr, errConversion)
		}
	default:
		log.Warnf("Failed to read data for partition %v of consumer group %s in ZK path %s. Error: %v", partition, consumerGroup, partitionPath, err)
	}
}
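// Illustrative Storm spout state JSON (field names assumed from Kafka-spout
// conventions; only "offset" and "topic" are consumed by
// parseStormSpoutStateJson here):
//
//	{"offset": 12345, "topic": "events", "partition": 0}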
func main() {
	logConfigFile := flag.String("l", "./conf/log.xml", "Log config file")
	configFile := flag.String("c", "./conf/conf.json", "Config file")
	flag.Parse()
	logger, err := log.LoggerFromConfigAsFile(*logConfigFile)
	if err != nil {
		fmt.Printf("Load log config failed: (%s)\n", err)
		os.Exit(1)
	}
	log.ReplaceLogger(logger)
	err = conf.LoadConfig(*configFile)
	if err != nil {
		log.Warnf("LoadConfig (%s) failed: (%s)\n", *configFile, err)
		os.Exit(1)
	}
	http.HandleFunc("/api/v1/notify", postNotify)
	err = http.ListenAndServe(conf.Config.Notify.Addr, nil)
	if err != nil {
		log.Warnf("failed to ListenAndServe: (%s)", err)
		os.Exit(1)
	}
}