func (zkClient *ZookeeperClient) getOffsetForPartition(consumerGroup string, topic string, partition string) {
	zkPath := zkClient.app.Config.Kafka[zkClient.cluster].ZookeeperPath + "/consumers/" + consumerGroup + "/offsets/" + topic + "/" + partition
	offsetStr, zkNodeStat, err := zkClient.conn.Get(zkPath)
	if err != nil {
		log.Warnf("Failed to read partition %s:%v for group %s in cluster %s: %s", topic, partition, consumerGroup, zkClient.cluster, err)
		return
	}
	partitionNum, err := strconv.ParseInt(partition, 10, 32)
	if err != nil {
		log.Errorf("Partition (%s) for topic %s for group %s in cluster %s is not an integer", partition, topic, consumerGroup, zkClient.cluster)
		return
	}
	offset, err := strconv.ParseInt(string(offsetStr), 10, 64)
	if err != nil {
		log.Errorf("Offset value (%s) for partition %s:%v for group %s in cluster %s is not an integer", string(offsetStr), topic, partition, consumerGroup, zkClient.cluster)
		return
	}
	partitionOffset := &PartitionOffset{
		Cluster:   zkClient.cluster,
		Topic:     topic,
		Partition: int32(partitionNum),
		Group:     consumerGroup,
		Timestamp: zkNodeStat.Mtime,
		Offset:    offset,
	}
	timeoutSendOffset(zkClient.app.Storage.offsetChannel, partitionOffset, 1)
}
func (imageManager *dockerImageManager) deleteImage(imageID string, imageState *image.ImageState) {
	if imageID == "" {
		seelog.Errorf("Image ID to be deleted is empty")
		return
	}
	seelog.Infof("Removing Image: %s", imageID)
	err := imageManager.client.RemoveImage(imageID, removeImageTimeout)
	if err != nil {
		if err.Error() == imageNotFoundForDeletionError {
			// The image is already gone; fall through and clean up its state.
			seelog.Errorf("Image already removed from the instance: %v", err)
		} else {
			seelog.Errorf("Error removing Image %v - %v", imageID, err)
			delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID)
			return
		}
	}
	seelog.Infof("Image removed: %v", imageID)
	imageState.RemoveImageName(imageID)
	if len(imageState.Image.Names) == 0 {
		delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID)
		imageManager.removeImageState(imageState)
		imageManager.state.RemoveImageState(imageState)
		imageManager.saver.Save()
	}
}
/**
 * When the allocation changes, update the slot states and the slot info maps.
 */
func HandleAllocationChange(oldAllocations, newAllocations *SlotAllocation, slotinfoMaps *SlotInfoMaps, zkHelper *utils.ZkHelper) {
	isChanged := false
	// build a new slotinfo map
	newSlotInfoMap := make(map[string]*SlotInfo)
	for i := 0; i < oldAllocations.SlotCount; i++ {
		idx := strconv.Itoa(i)
		oldNodeId := oldAllocations.Allocations[idx]
		newNodeId := newAllocations.Allocations[idx]
		newSlotInfoMap[idx] = slotinfoMaps.GetSlotInfoMap()[idx].Clone()
		if oldNodeId != newNodeId {
			isChanged = true
			log.Infof("The slot %d's node changed to %d from %d.", i, newNodeId, oldNodeId)
			newSlotInfoMap[idx].MigrateState = MigStateMigrating
			newSlotInfoMap[idx].NodeId = strconv.Itoa(newNodeId)
			newSlotInfoMap[idx].SrcNodeId = strconv.Itoa(oldNodeId)
			newSlotInfoMap[idx].TargetNodeId = strconv.Itoa(newNodeId)
			// write the new slotinfo to zk
			jsonStr, err := utils.ToJson(newSlotInfoMap[idx])
			if err != nil {
				log.Errorf("Can not convert obj [%s] to a json string", newSlotInfoMap[idx])
			} else {
				_, err = zkHelper.CoverCreate("/yundis/ids/"+idx, []byte(jsonStr), 0, zk.WorldACL(zk.PermAll))
				if err != nil {
					log.Errorf("Changing the value of /yundis/ids/%d failed, err: %s.", i, err)
				}
			}
		}
	}
	if isChanged {
		log.Info("Update the slotinfoMaps.")
		slotinfoMaps.SetSlotInfoMap(newSlotInfoMap)
	}
}
/**
 * CNAME -> A: follow the CNAME chain, then return the first A record.
 */
func dnss(host, dnsServer string) *net.IP {
	addrs, err := Lookup("CNAME", host, dnsServer)
	if err != nil {
		seelog.Errorf("dns cname fail with the host[%s]. error: [%s]", host, err.Error())
		return nil
	}
	// follow the CNAME chain until there are no more answers
	for len(addrs.Answer) > 0 {
		cname, ok := addrs.Answer[0].(*dns.CNAME)
		if !ok {
			break
		}
		host = cname.Target
		addrs, err = Lookup("CNAME", host, dnsServer)
		if err != nil {
			seelog.Errorf("dns cname fail with the host[%s]. error: [%s]", host, err.Error())
			return nil
		}
	}
	addrs, err = Lookup("A", host, dnsServer)
	if err != nil {
		seelog.Errorf("dns a fail with the host[%s]. error: [%s]", host, err.Error())
		return nil
	}
	for _, answer := range addrs.Answer {
		if a, ok := answer.(*dns.A); ok && a.A != nil {
			return &a.A
		}
	}
	return nil
}
func New(endpoint []string, path string) *opt {
	cfg := client.Config{
		Endpoints:               endpoint,
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	etcdClient, err := client.New(cfg)
	if err != nil {
		log.Errorf("new etcd client error: %v", err)
		log.Flush()
		panic(0)
	}
	api := client.NewKeysAPI(etcdClient)
	resp, err := api.Get(context.Background(), "/swarm/docker/swarm/leader", nil)
	if err != nil {
		log.Errorf("get swarm leader error: %v", err)
		log.Flush()
		panic(0)
	}
	return &opt{
		Client:   etcdClient,
		Endpoint: endpoint,
		Path:     path,
		Api:      api,
		Leader:   fmt.Sprintf("http://%s", resp.Node.Value),
	}
}
func QueryTokenByUidClientID(uid int, clientID string) (token PGToken, err error) {
	err = db.Ping()
	if err != nil {
		seelog.Errorf("uid %v clientID %v ping mysql %v", uid, clientID, err)
		return
	}
	qrySql := "select oauth_token,client_id,expires,scope,uid from pg_token where uid=? and client_id=?"
	rs, err := db.Query(qrySql, uid, clientID)
	if err != nil {
		seelog.Errorf("uid %v clientID %v query sql %v", uid, clientID, err)
		return
	}
	defer rs.Close()
	if rs.Next() {
		if err = rs.Scan(&token.OauthToken, &token.ClientId, &token.Expires, &token.Scope, &token.Uid); err != nil {
			seelog.Errorf("uid %v clientID %v read rows %v", uid, clientID, err)
			return
		}
	} else {
		err = pgpub.ErrNotExist
	}
	return
}
// publishError publishes an event when a handler returns an error
func publishError(req *Request, e errors.Error) {
	if !PublishErrors {
		return
	}
	stacktrace := ""
	if e.MultiStack() != nil {
		stacktrace = e.MultiStack().String()
	}
	msg := map[string]interface{}{
		"created":     time.Now(),
		"service":     Name,
		"version":     Version,
		"azName":      az,
		"hostname":    hostname,
		"instanceId":  InstanceID,
		"error":       e.Error(),
		"type":        e.Type(),
		"code":        e.Code(),
		"description": e.Description(),
		"httpCode":    e.HttpCode(),
		"context":     e.Context(),
		"traceId":     req.TraceID(),
		"stacktrace":  stacktrace,
	}
	payload, err := json.Marshal(msg)
	if err != nil {
		log.Errorf("[Server] Failed to JSON encode error event: %v", err)
		return
	}
	if err = nsq.Publish(errorTopic, payload); err != nil {
		log.Errorf("[Server] Failed to publish error event: %v", err)
	}
}
// handleSingleMessage processes a single refresh credentials message.
func (refreshHandler *refreshCredentialsHandler) handleSingleMessage(message *ecsacs.IAMRoleCredentialsMessage) error {
	// Validate fields in the message
	err := validateIAMRoleCredentialsMessage(message)
	if err != nil {
		seelog.Errorf("Error validating credentials message: %v", err)
		return err
	}
	taskArn := aws.StringValue(message.TaskArn)
	messageId := aws.StringValue(message.MessageId)
	task, ok := refreshHandler.taskEngine.GetTaskByArn(taskArn)
	if !ok {
		seelog.Errorf("Task not found in the engine for the arn in credentials message, arn: %s, messageId: %s", taskArn, messageId)
		return fmt.Errorf("Task not found in the engine for the arn in credentials message, arn: %s", taskArn)
	}
	taskCredentials := credentials.TaskIAMRoleCredentials{
		ARN:                taskArn,
		IAMRoleCredentials: credentials.IAMRoleCredentialsFromACS(message.RoleCredentials),
	}
	err = refreshHandler.credentialsManager.SetTaskCredentials(taskCredentials)
	if err != nil {
		seelog.Errorf("Error updating credentials, err: %v messageId: %s", err, messageId)
		return fmt.Errorf("Error updating credentials %v", err)
	}
	task.SetCredentialsId(aws.StringValue(message.RoleCredentials.CredentialsId))
	// Acknowledge the message asynchronously.
	go func() {
		response := &ecsacs.IAMRoleCredentialsAckRequest{
			Expiration:    message.RoleCredentials.Expiration,
			MessageId:     message.MessageId,
			CredentialsId: message.RoleCredentials.CredentialsId,
		}
		refreshHandler.ackRequest <- response
	}()
	return nil
}
// ServeHttp serves IAM Role Credentials for Tasks being managed by the agent.
func ServeHttp(credentialsManager credentials.Manager, containerInstanceArn string, cfg *config.Config) {
	// Create and initialize the audit log
	// TODO: Use seelog's programmatic configuration instead of xml.
	logger, err := log.LoggerFromConfigAsString(audit.AuditLoggerConfig(cfg))
	if err != nil {
		log.Errorf("Error initializing the audit log: %v", err)
		// If the logger cannot be initialized, use the provided dummy seelog.LoggerInterface, seelog.Disabled.
		logger = log.Disabled
	}
	auditLogger := audit.NewAuditLog(containerInstanceArn, cfg, logger)
	server := setupServer(credentialsManager, auditLogger)
	for {
		utils.RetryWithBackoff(utils.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error {
			// TODO: make this cancellable and use the passed in context
			err := server.ListenAndServe()
			if err != nil {
				log.Errorf("Error running http api: %v", err)
			}
			return err
		})
	}
}
func StartTcpServer(host, port string, service ConnectorService) {
	if service == nil {
		connService = &MqService{}
	} else {
		connService = service
	}
	connService.InitService()
	addrStr := host + ":" + port
	addr, err := net.ResolveTCPAddr("tcp", addrStr)
	if err != nil {
		logger.Errorf("Can not resolve tcp address for server. host: %s. port: %s. address string: %s", host, port, addrStr)
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		logger.Criticalf("Can not start tcp server on address: %s. Error: %s", addrStr, err)
		panic(err)
	}
	defer listener.Close()
	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			logger.Errorf("Create tcp connection error. Err: %s", err)
			continue
		}
		// Handle each connection concurrently so the accept loop keeps running.
		go handleConnection(conn)
	}
}
func (this *Orchestrator) handleAction(action Action) {
	var err error
	ocSideOnly := false
	ocSide, ocSideOk := action.(OrchestratorSideAction)
	action.SetTriggeredTime(time.Now())
	log.Debugf("action %s is executable on the orchestrator side: %t", action, ocSideOk)
	if ocSideOk {
		ocSideOnly = ocSide.OrchestratorSideOnly()
		log.Debugf("action %s is executable on only the orchestrator side: %t", action, ocSideOnly)
		err = ocSide.ExecuteOnOrchestrator()
		if err != nil {
			log.Errorf("ignoring an error that occurred in ExecuteOnOrchestrator: %s", err)
		}
	}
	if !ocSideOnly {
		// pass to the inspector handler.
		entity := GetTransitionEntity(action.EntityID())
		if entity == nil {
			err = fmt.Errorf("could not find entity %s for %s", action.EntityID(), action)
			log.Errorf("ignoring an error: %s", err)
		} else {
			log.Debugf("Main[%s]->Handler: sending an action %s", entity.ID, action)
			entity.ActionFromMain <- action
			log.Debugf("Main[%s]->Handler: sent an action %s", entity.ID, action)
		}
	}
	// record the action sequence for tracing
	if this.collectTrace {
		this.actionSequence = append(this.actionSequence, action)
	}
}
func HandlePush(agent *Agent, pkt *packet.Pkt) {
	dataMsg := packet.PktDataMessage{}
	err := packet.Unpack(pkt, &dataMsg)
	if err != nil {
		log.Errorf("Error unpacking push msg: %s", err.Error())
		return
	}
	log.Infof("Received push message: MsgId: %d, Type: %d, Appid: %s, Msg: %s\n",
		dataMsg.MsgId, dataMsg.MsgType, dataMsg.AppId, dataMsg.Msg)
	dataAck := packet.PktDataACK{
		MsgId: dataMsg.MsgId,
		AppId: dataMsg.AppId,
		RegId: agent.RegIds[dataMsg.AppId],
	}
	pktAck, err := packet.Pack(packet.PKT_ACK, 0, dataAck)
	if err != nil {
		log.Errorf("Pack error: %s", err.Error())
		return
	}
	agent.SendPkt(pktAck)
	if agent.OnReceiveMsg != nil {
		agent.OnReceiveMsg(dataMsg.AppId, dataMsg.MsgId, dataMsg.MsgType, dataMsg.Msg)
	}
}
func (migrator *Migrator) migrateAttachments(c *desk.Case, requester *zendesk.Customer) ([]zendesk.Comment, error) {
	comments := make([]zendesk.Comment, 0)
	for _, attachment := range c.Attachments {
		err := migrator.deskClient.DownloadFile(&attachment, fmt.Sprintf("%s/attachments/", migrator.path))
		if err != nil {
			log.Errorf("Can't download attachment: %v. Ignoring.", err)
			continue
		}
		token, err := migrator.migrateAttachment(&attachment, fmt.Sprintf("%s/attachments/", migrator.path))
		if err != nil {
			log.Errorf("Can't migrate attachment: %v. Ignoring.", err)
			continue
		}
		if token != "" {
			comments = append(comments, zendesk.Comment{
				AuthorId: requester.Id,
				Value:    attachment.FileName,
				Uploads:  []string{token},
			})
		}
	}
	return comments, nil
}
/**
 * Watch the slot list for changes.
 */
func (self *SlotInfoMaps) WatchSlotInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().GetW("/yundis/ids")
	if err != nil {
		log.Errorf("Can not watch path /yundis/ids, err: %s", err)
		return
	}
	go func() {
		for {
			event := <-ch
			log.Infof("Slotinfo list changed event, %+v", event)
			data, _, ch1, err1 := self.zk.GetZkConn().GetW("/yundis/ids")
			if err1 == nil {
				ch = ch1 // re-arm the watch
				// handle the slot list change event
				log.Infof("Slotinfo list changed: %s", data)
				infoMap := self.GetSlotInfoMapFromZk()
				// refresh the slotinfo map with the new zk data
				self.SetSlotInfoMap(infoMap)
				log.Info("Refresh slotinfo map by new zk data.")
			} else {
				log.Errorf("Can not watch the children of /yundis/ids, err: %s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
/**
 * Watch the node list for changes.
 */
func (self *NodeInfoMaps) WatchNodeInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
	if err != nil {
		log.Errorf("Can not watch path /yundis/nodes, err: %s", err)
		return
	}
	go func() {
		for {
			event := <-ch
			log.Infof("node list change, %+v", event)
			children, _, ch1, err1 := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
			if err1 == nil {
				ch = ch1 // re-arm the watch
				// handle the node list change event
				log.Infof("node list changed: %s", children)
				infoMap := self.getNodeInfoMapFromZk()
				log.Info("The node list changed, begin to change the affected slots' info.")
				// refresh the nodeinfo map with the new zk data
				self.SetNodeInfoMap(infoMap)
				self.ModifySlotState(infoMap)
				log.Info("Refresh nodeinfo map by new zk data.")
			} else {
				log.Errorf("Can not watch the children of /yundis/nodes, err: %s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
func runFileChmod(base_path, path string) bool {
	listener := utils.GetListenerFromDir(base_path)
	rel_path := utils.GetRelativePath(listener, path)
	dbItem, err := datastore.GetOne(base_path, rel_path)
	if err != nil {
		log.Errorf("Error occurred trying to get %s from DB\nError: %s", rel_path, err.Error())
		return false
	}
	fsItem, err := utils.GetFileInfo(path)
	if err != nil {
		log.Errorf("Could not find item on filesystem: %s\nError: %s", path, err.Error())
		return false
	}
	if dbItem.Perms == fsItem.Perms {
		log.Info("File modes are correct, changing nothing")
		return false
	}
	iPerm, err := strconv.Atoi(dbItem.Perms)
	if err != nil {
		log.Errorf("Could not parse stored file mode %s: %s", dbItem.Perms, err.Error())
		return false
	}
	if _, err := os.Stat(path); os.IsNotExist(err) {
		log.Infof("File no longer exists; returning")
		return true
	}
	if err := os.Chmod(path, os.FileMode(iPerm)); err != nil {
		log.Errorf("Error occurred changing file modes: %s", err.Error())
		return false
	}
	return true
}
func CreateRecursivePath(zooPath string) error {
	log.Debugf("Create zoo path [%s]", zooPath)
	if zooPath == "/" {
		return nil
	}
	bExist, _, err := gZKConn.Exists(zooPath)
	if err != nil {
		log.Errorf("Zookeeper execute Exists error=%s", err.Error())
		return errors.New("Zookeeper Exists path error")
	}
	if bExist {
		return nil
	}
	// create the parent path first
	parentDir := path.Dir(zooPath)
	if err := CreateRecursivePath(parentDir); err != nil {
		return err
	}
	// then create the node itself
	flags := int32(0)
	acl := zk.WorldACL(zk.PermAll)
	_, err = gZKConn.Create(zooPath, []byte("1"), flags, acl)
	if err != nil {
		log.Errorf("Zookeeper create path [%s] error=%s", zooPath, err.Error())
		return errors.New("Zookeeper create path error")
	}
	return nil
}
func queryUser(colName string, val string) (pguser PGUser, err error) {
	err = db.Ping()
	if err != nil {
		seelog.Errorf("queryUser(%v, %v) open db: %v", colName, val, err)
		return
	}
	qrySql := "select " + strings.Join(getStructCols(pguser), ",") + " from pg_user where " + colName + "=?"
	seelog.Debugf("queryUser(%v, %v) sql: %v", colName, val, qrySql)
	rs, err := db.Query(qrySql, val)
	if err != nil {
		seelog.Errorf("queryUser(%v, %v) qry %v %v", colName, val, qrySql, err)
		return
	}
	defer rs.Close()
	if rs.Next() {
		if err = rs.Err(); err != nil {
			seelog.Errorf("queryUser(%v, %v) read rows %v", colName, val, err)
			return
		}
		if err = Scan2Struct(rs, &pguser); err != nil {
			seelog.Error(err)
			return
		}
	} else {
		err = pgpub.ErrNotExist
	}
	return
}
func (slack *SlackNotifier) postToSlack(slackMessage *SlackMessage) error {
	data, err := json.Marshal(slackMessage)
	if err != nil {
		log.Errorf("Unable to marshal slack payload: %+v", err)
		return err
	}
	log.Debugf("struct = %+v, json = %s", slackMessage, string(data))
	b := bytes.NewBuffer(data)
	req, err := http.NewRequest("POST", slack.Url, b)
	if err != nil {
		log.Errorf("Unable to build slack request: %+v", err)
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	res, err := slack.HttpClient.Do(req)
	if err != nil {
		log.Errorf("Unable to send data to slack: %+v", err)
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		body, _ := ioutil.ReadAll(res.Body)
		log.Errorf("Unable to notify slack: %s", string(body))
		return errors.New("Send to Slack failed")
	}
	log.Debug("Slack notification sent")
	return nil
}
func (r *RabbitConnection) Consume(serverName string) (<-chan amqp.Delivery, *RabbitChannel, error) {
	consumerChannel, err := NewRabbitChannel(r.Connection)
	if err != nil {
		log.Errorf("[Rabbit] Failed to create new channel: %s", err.Error())
		return nil, nil, err
	}
	err = consumerChannel.DeclareQueue(serverName)
	if err != nil {
		log.Errorf("[Rabbit] Failed to declare queue %s: %s", serverName, err.Error())
		return nil, nil, err
	}
	deliveries, err := consumerChannel.ConsumeQueue(serverName)
	if err != nil {
		log.Errorf("[Rabbit] Failed to consume from queue %s: %s", serverName, err.Error())
		return nil, nil, err
	}
	err = consumerChannel.BindQueue(serverName, Exchange)
	if err != nil {
		log.Errorf("[Rabbit] Failed to bind %s to %s exchange: %s", serverName, Exchange, err.Error())
		return nil, nil, err
	}
	return deliveries, consumerChannel, nil
}
func realMain(mkafka MKafka) {
	consumers, err := GetConsumsers(mkafka.ZNode, mkafka.zkc)
	if err != nil {
		log.Errorf("get consumers %v error: %v", mkafka.ZNode, err)
	} else {
		for _, consumer := range consumers {
			topics, err := GetTopicsWithConsumser(mkafka.ZNode, mkafka.zkc, consumer)
			if err != nil {
				log.Errorf("get topics of consumer %v:%v failed, error: %v", mkafka.ZNode, consumer, err)
			} else {
				for _, topic := range topics {
					job := ConsumerTopic{
						Topic:    topic,
						Consumer: consumer,
					}
					mkafka.JobList = append(mkafka.JobList, job)
				}
			}
		}
	}
	// limit the request concurrency
	doRequest(mkafka)
}
func (fs *GDriveFileSystem) Put(p string, bytes io.ReadCloser) webdav.StatusCode {
	defer bytes.Close()
	parent := path.Dir(p)
	base := path.Base(p)
	parentId := fs.getFileId(parent, true)
	if parentId == "" {
		log.Errorf("ERROR: Parent %s not found", parent)
		return webdav.StatusCode(http.StatusConflict) // 409
	}
	parentRef := &drive.ParentReference{
		Id:     parentId,
		IsRoot: parent == "/",
	}
	f := &drive.File{
		Title:   base,
		Parents: []*drive.ParentReference{parentRef},
	}
	_, err := fs.client.Files.Insert(f).Media(bytes).Do()
	if err != nil {
		log.Errorf("can't put: %v", err)
		return webdav.StatusCode(http.StatusInternalServerError)
	}
	fs.invalidatePath(p)
	fs.invalidatePath(parent)
	return webdav.StatusCode(http.StatusCreated)
}
func (this *Agent) LoadRegIds() {
	fileName := "RegIds." + this.deviceId
	file, err := os.Open(fileName)
	if err != nil {
		log.Warnf("Open error: %s", err.Error())
		return
	}
	defer file.Close()
	buf := make([]byte, 1024)
	n, err := file.Read(buf)
	if err != nil {
		log.Errorf("Read file error: %s", err.Error())
		return
	}
	err = json.Unmarshal(buf[:n], &this.RegIds)
	if err != nil {
		log.Errorf("Unmarshal %s error: %s", fileName, err.Error())
		return
	}
	log.Debugf("RegIds: %s", this.RegIds)
}
func fileConfig() Config {
	config_file := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), "/etc/ecs_container_agent/config.json")
	file, err := os.Open(config_file)
	if err != nil {
		return Config{}
	}
	defer file.Close()
	data, err := ioutil.ReadAll(file)
	if err != nil {
		seelog.Errorf("Unable to read config file, err %v", err)
		return Config{}
	}
	if strings.TrimSpace(string(data)) == "" {
		// empty file, not an error
		return Config{}
	}
	config := Config{}
	err = json.Unmarshal(data, &config)
	if err != nil {
		seelog.Errorf("Error reading config json data, err %v", err)
	}
	// Handle any deprecated keys correctly here
	if utils.ZeroOrNil(config.Cluster) && !utils.ZeroOrNil(config.ClusterArn) {
		config.Cluster = config.ClusterArn
	}
	return config
}
func (t *rabbitTransport) handleRspDelivery(delivery amqp.Delivery) {
	logId := t.logId(delivery)
	// comma-ok assertion: a missing or non-string header falls through to the default case
	enc, _ := delivery.Headers["Content-Encoding"].(string)
	switch enc {
	case "response":
		rsp := message.NewResponse()
		t.deliveryToMessage(delivery, rsp)
		t.inflightReqsM.Lock()
		rspChan, ok := t.inflightReqs[rsp.Id()]
		delete(t.inflightReqs, rsp.Id())
		t.inflightReqsM.Unlock()
		if !ok {
			log.Warnf("[Typhon:RabbitTransport] Could not match response %s to channel", logId)
			return
		}
		timeout := time.NewTimer(chanSendTimeout)
		defer timeout.Stop()
		select {
		case rspChan <- rsp:
		case <-timeout.C:
			log.Errorf("[Typhon:RabbitTransport] Could not deliver response %s after %s: receiving channel is full", logId, chanSendTimeout.String())
		}
	default:
		log.Errorf("[Typhon:RabbitTransport] Cannot handle Content-Encoding \"%s\" as response for %s", enc, logId)
	}
}
func (t *rabbitTransport) Send(req message.Request, _timeout time.Duration) (message.Response, error) {
	id := req.Id()
	if id == "" {
		_uuid, err := uuid.NewV4()
		if err != nil {
			log.Errorf("[Typhon:RabbitTransport] Failed to generate request uuid: %v", err)
			return nil, err
		}
		req.SetId(_uuid.String())
	}

	// Register an in-flight response channel, cleaned up on return.
	rspQueue := req.Id()
	defer func() {
		t.inflightReqsM.Lock()
		delete(t.inflightReqs, rspQueue)
		t.inflightReqsM.Unlock()
	}()
	rspChan := make(chan message.Response, 1)
	t.inflightReqsM.Lock()
	t.inflightReqs[rspQueue] = rspChan
	t.inflightReqsM.Unlock()

	timeout := time.NewTimer(_timeout)
	defer timeout.Stop()

	headers := req.Headers()
	headers["Content-Encoding"] = "request"
	headers["Service"] = req.Service()
	headers["Endpoint"] = req.Endpoint()

	select {
	case <-t.Ready():
	case <-timeout.C:
		log.Warnf("[Typhon:RabbitTransport] Timed out after %s waiting for ready", _timeout.String())
		return nil, transport.ErrTimeout
	}

	if err := t.connection().Publish(Exchange, req.Service(), amqp.Publishing{
		CorrelationId: req.Id(),
		Timestamp:     time.Now().UTC(),
		Body:          req.Payload(),
		ReplyTo:       t.replyQueue,
		Headers:       headersToTable(headers),
	}); err != nil {
		log.Errorf("[Typhon:RabbitTransport] Failed to publish: %v", err)
		return nil, err
	}

	select {
	case rsp := <-rspChan:
		return rsp, nil
	case <-timeout.C:
		log.Warnf("[Typhon:RabbitTransport] Timed out after %s waiting for response to %s", _timeout.String(), req.Id())
		return nil, transport.ErrTimeout
	}
}
func NewRpcClient(amqpURI, exchange string) (*RpcClient, error) {
	client := &RpcClient{
		exchange:     exchange,
		requestId:    0,
		rpcTimeout:   10,
		requestTable: make(map[uint32]chan []byte),
		lock:         new(sync.RWMutex),
	}

	var err error
	client.conn, err = amqp.Dial(amqpURI)
	if err != nil {
		log.Errorf("Dial: %s", err)
		return nil, err
	}

	log.Infof("Got Connection to %s, getting Channel", amqpURI)
	client.channel, err = client.conn.Channel()
	if err != nil {
		log.Errorf("Channel: %s", err)
		return nil, err
	}

	log.Infof("Got Channel, declaring %q Exchange (%q)", rpcExchangeType, exchange)
	if err := client.channel.ExchangeDeclare(
		exchange,        // name
		rpcExchangeType, // type
		true,            // durable
		false,           // auto-deleted
		false,           // internal
		false,           // noWait
		nil,             // arguments
	); err != nil {
		log.Errorf("Exchange Declare: %s", err)
		return nil, err
	}

	callbackQueue, err := client.channel.QueueDeclare(
		"",    // name
		false, // durable
		true,  // autoDelete
		true,  // exclusive
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Errorf("callbackQueue Declare error: %s", err)
		return nil, err
	}
	client.callbackQueue = callbackQueue.Name
	log.Infof("declared callback queue [%s]", client.callbackQueue)

	go client.handleResponse()
	return client, nil
}
func initPool() {
	configs, err := goconfig.ReadConfigFile(configFileName)
	if err != nil {
		logger.Criticalf("Can not read nsq configs from %s. Error: %s", configFileName, err)
		panic(err)
	}
	options, err := configs.GetOptions(nsqdConfigSection)
	if err != nil {
		logger.Criticalf("Can not read nsqd config in %s. Error: %s", configFileName, err)
		panic(err)
	}
	addrs := make([]string, 0, len(options))
	for _, option := range options {
		value, err := configs.GetString(nsqdConfigSection, option)
		if err != nil {
			logger.Errorf("Get error when reading section %s option %s in %s. Error: %s", nsqdConfigSection, option, configFileName, err)
			continue
		}
		addrs = append(addrs, value)
	}
	if len(addrs) == 0 {
		logger.Criticalf("Read 0 configs for nsqd address in %s.", configFileName)
		panic("Read 0 configs for nsqd address in config file " + configFileName)
	}
	pool = make(map[string]*gonsq.Producer)
	lostConns = make([]string, 0)
	for _, addr := range addrs {
		config := gonsq.NewConfig()
		producer, err := gonsq.NewProducer(addr, config)
		if err != nil {
			logger.Errorf("Can not create nsq producer for address: %s. Error: %s", addr, err)
			continue
		}
		err = producer.Ping()
		if err != nil {
			logger.Errorf("Can not connect to address %s. Error: %s", addr, err)
			lostConns = append(lostConns, addr)
		}
		pool[addr] = producer
	}
	go autoReconnect()
}
func calcDailyBytesServed(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) {
	bytesToTerabytes := 1000000000.00
	sampleTimeSecs := 60.00
	bitsToBytes := 8.00
	queryString := fmt.Sprintf(`select mean(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by time(1m), cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
	log.Infof("queryString = %v\n", queryString)
	res, err := queryDB(client, queryString, "cache_stats")
	if err != nil {
		log.Error("An error occurred getting max bandwidth!\n")
		return
	}
	if len(res) > 0 && len(res[0].Series) > 0 {
		for _, row := range res[0].Series {
			bytesServed := float64(0)
			cdn := row.Tags["cdn"]
			for _, record := range row.Values {
				if record[1] != nil {
					value, err := record[1].(json.Number).Float64()
					if err != nil {
						log.Errorf("Couldn't parse value from record %v\n", record)
						continue
					}
					bytesServed += value * sampleTimeSecs / bitsToBytes
				}
			}
			bytesServedTB := bytesServed / bytesToTerabytes
			log.Infof("TBytes served for cdn %v = %v", cdn, bytesServedTB)
			// write to Traffic Ops
			var statsSummary traffic_ops.StatsSummary
			statsSummary.CDNName = cdn
			statsSummary.DeliveryService = "all"
			statsSummary.StatName = "daily_bytesserved"
			statsSummary.StatValue = strconv.FormatFloat(bytesServedTB, 'f', 2, 64)
			statsSummary.SummaryTime = time.Now().Format(time.RFC3339)
			statsSummary.StatDate = startTime.Format("2006-01-02")
			go writeSummaryStats(config, statsSummary)
			// write to InfluxDB
			tags := map[string]string{"cdn": cdn, "deliveryservice": "all"}
			fields := map[string]interface{}{
				"value": bytesServedTB, // converted to TB
			}
			pt, err := influx.NewPoint(
				"daily_bytesserved",
				tags,
				fields,
				startTime,
			)
			if err != nil {
				log.Errorf("error creating data point for max Gbps...%v\n", err)
				continue
			}
			bp.AddPoint(pt)
		}
		config.BpsChan <- bp
	}
}
// doRecoverSession is the meat and veg for RecoverSession
func (s *realScope) doRecoverSession(sessId string) (*User, error) {
	// Check cache; ignore errors (will have impact on service performance, but not functionality)
	queryLogin := false
	u, hit, err := s.userCache.Fetch(sessId)
	if err != nil {
		log.Warnf("[Auth] Error fetching session from cache (will call login service): %v", err)
		queryLogin = true
	} else if u != nil && u.ExpiryTs.Before(time.Now()) && u.CanAutoRenew() {
		// Cached token has expired
		log.Infof("[Auth] Cache-recovered token has expired (%s); will call login service", u.ExpiryTs.String())
		queryLogin = true
	} else {
		queryLogin = u == nil && !hit
	}

	if queryLogin {
		cl := multiclient.New().DefaultScopeFrom(s.getRpcScope())
		rsp := &sessreadproto.Response{}
		cl.AddScopedReq(&multiclient.ScopedReq{
			Uid:      "readsess",
			Service:  loginService,
			Endpoint: readSessionEndpoint,
			Req: &sessreadproto.Request{
				SessId: proto.String(sessId),
			},
			Rsp: rsp,
		})

		if cl.Execute().AnyErrorsIgnoring([]string{errors.ErrorNotFound}, nil) {
			err := cl.Succeeded("readsess")
			log.Errorf("[Auth] Auth scope recovery error [%s: %s] %v", err.Type(), err.Code(), err.Description())
			return nil, err
		}

		// found a session?
		if rsp.GetSessId() == "" && rsp.GetToken() == "" {
			log.Debugf("[Auth] Session '%s' not found (not valid) when trying to recover from login service", sessId)
			// @todo we could cache this (at least for a short time) to prevent repeated hammering of login service
		} else {
			u, err = FromSessionToken(rsp.GetSessId(), rsp.GetToken())
			if err != nil {
				log.Errorf("[Auth] Error getting user from session: %v", err)
			} else {
				log.Tracef("[Auth] Auth scope - recovered user '%s' from session '%s'", u.Id, rsp.GetSessId())
			}
		}

		// ignore errors; just means we have no user
		if u != nil {
			s.userCache.Store(u)
		}
	}

	return u, nil
}