Example #1
func TestSTreeMod(t *testing.T) {

	defer log.Flush()

	Convey("Test clone\n", t, func() {

		s, err := NewSTreeJson(strings.NewReader(`{"key1": "val1", "key.2": 1234, "key3": {"key4": true, "key5": -12.34}}`))
		So(err, ShouldBeNil)

		c, err := s.clone()
		So(err, ShouldBeNil)
		s["key1"] = "valMod"

		s3, err := s.STreeVal(".key3")
		So(err, ShouldBeNil)
		s3["key4"] = false

		log.Debugf("Test clone - s: %v", s)
		log.Debugf("Test clone - c: %v", c)

		v1, err := c.StrVal(".key1")
		So(err, ShouldBeNil)
		So(v1, ShouldEqual, "val1")

		v2, err := c.BoolVal(".key3.key4")
		So(err, ShouldBeNil)
		So(v2, ShouldBeTrue)
	})
}
Example #2
// Rescind should be called to indicate you no longer wish to be the leader
func (rl *regionLeader) Rescind() {
	rl.cleanup.Do(func() {
		log.Debugf("[Sync:RegionLeader] Cleaning up leadership of '%v'...", rl.lockNode)
		close(rl.rescinded)
		// keep trying to delete the ZK node (to release leadership) until we're sure it doesn't exist
		for {
			err := zookeeper.Delete(rl.lockNode, -1)
			if err == nil || err == gozk.ErrNoNode {
				log.Debugf("[Sync:RegionLeader] Have deleted leadership node '%v'", rl.lockNode)
				inst.Counter(1.0, "sync.regionleader.rescinded")
				break
			}
			log.Warnf("[Sync:RegionLeader] Failed to cleanup/rescind leadership (will retry): %v", err)
			time.Sleep(cleanupDelay)
		}

		// Unregister region leader
		mu.Lock()
		for i := 0; i < len(rls); i++ {
			if rls[i] == rl {
				rls = append(rls[:i], rls[i+1:]...)
				break
			}
		}
		mu.Unlock()
	})
}
Example #3
func (stormClient *StormClient) getOffsetsForPartition(consumerGroup string, partition int, partitionPath string) {
	stateStr, zkNodeStat, err := stormClient.conn.Get(partitionPath)
	switch {
	case err == nil:
		offset, topic, errConversion := parseStormSpoutStateJson(string(stateStr))
		if (stormClient.app.Storage.topicBlacklist != nil) && stormClient.app.Storage.topicBlacklist.MatchString(topic) {
			log.Debugf("Skip checking Storm offsets for topic %s from group %s in cluster %s as topic has been blacklisted", topic, consumerGroup, stormClient.cluster)
			return
		}
		switch {
		case errConversion == nil:
			log.Debugf("About to sync Storm offset: [%s,%s,%v]::[%v,%v]\n", consumerGroup, topic, partition, offset, zkNodeStat.Mtime)
			partitionOffset := &PartitionOffset{
				Cluster:   stormClient.cluster,
				Topic:     topic,
				Partition: int32(partition),
				Group:     consumerGroup,
				Timestamp: int64(zkNodeStat.Mtime), // note: this is millis
				Offset:    int64(offset),
			}
			timeoutSendOffset(stormClient.app.Storage.offsetChannel, partitionOffset, 1)
		default:
			log.Errorf("Something is very wrong! Cannot parse state json for partition %v of consumer group %s in ZK path %s: %s. Error: %v",
				partition, consumerGroup, partitionPath, stateStr, errConversion)
		}
	default:
		log.Warnf("Failed to read data for partition %v of consumer group %s in ZK path %s. Error: %v", partition, consumerGroup, partitionPath, err)
	}
}
Example #4
func validateClientScheme(pBuffer []byte, scheme int) (result bool, schem int, challenge []byte, digest []byte) {
	digest_offset := -1
	challenge_offset := -1
	if scheme == 0 {
		digest_offset = getDigestOffset0(pBuffer)
		challenge_offset = getDHOffset0(pBuffer)
	} else if scheme == 1 {
		digest_offset = getDigestOffset1(pBuffer)
		challenge_offset = getDHOffset1(pBuffer)
	}
	p1 := pBuffer[:digest_offset]
	digest = pBuffer[digest_offset : digest_offset+32]
	p2 := pBuffer[digest_offset+32:]
	buf := new(bytes.Buffer)
	buf.Write(p1)
	buf.Write(p2)
	p := buf.Bytes()
	log.Debugf("Scheme: {%v} client digest offset: {%v}", scheme, digest_offset)
	tempHash, _ := HMACsha256(p, GENUINE_FP_KEY[:30])
	log.Debugf("Temp: {%0X}", tempHash)
	log.Debugf("Dig : {%0X}", digest)
	result = bytes.Equal(digest, tempHash)
	challenge = pBuffer[challenge_offset : challenge_offset+128]
	schem = scheme
	return
}
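In RTMP handshake validation the client's scheme is not known up front, so a caller typically probes scheme 0 and falls back to scheme 1. A minimal sketch against the function above; handshakeBytes standing in for the client's 1536-byte C1 block is an assumption, not part of the source:

// Probe scheme 0 first, then fall back to scheme 1.
ok, scheme, challenge, digest := validateClientScheme(handshakeBytes, 0)
if !ok {
	ok, scheme, challenge, digest = validateClientScheme(handshakeBytes, 1)
}
if !ok {
	log.Errorf("Could not validate client digest with scheme 0 or 1")
	return
}
log.Debugf("Validated client: scheme=%d, challenge=%d bytes, digest=%d bytes", scheme, len(challenge), len(digest))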
Example #5
func (d *DocWalker) read_file(path string, info os.FileInfo, err error) error {

	if info.Mode().IsRegular() {
		file := filepath.Base(path)

		log.Debugf("Trying file %s", file)

		matched, err := regexp.MatchString(d.filepattern, file)
		log.Debugf("File match: %v, error: %v", matched, err)
		if matched && err == nil {

			fr := new(filereader.TrecFileReader)
			fr.Init(path)

			go func() {
				for doc := range fr.ReadAll() {
					d.output <- doc
				}
				d.workers <- fr.Path()
			}()

			d.worker_count += 1
			/*log.Errorf("Now have %d workers", d.worker_count)*/
		}
	}
	return nil
}
Example #6
// FIXME: support qos = 2
func HandlePublish(mqtt *Mqtt, conn *net.Conn, client **ClientRep) {
	if *client == nil {
		panic("client_resp is nil, that means we don't have ClientRep for this client sending PUBLISH")
	}

	client_id := (*client).Mqtt.ClientId
	client_rep := *client
	client_rep.UpdateLastTime()
	topic := mqtt.TopicName
	payload := string(mqtt.Data)
	qos := mqtt.FixedHeader.QosLevel
	retain := mqtt.FixedHeader.Retain
	message_id := mqtt.MessageId
	timestamp := time.Now().Unix()
	log.Debugf("Handling PUBLISH, client_id: %s, topic:(%s), payload:(%s), qos=%d, retain=%t, message_id=%d",
		client_id, topic, payload, qos, retain, message_id)

	// Create new MQTT message
	mqtt_msg := CreateMqttMessage(topic, payload, client_id, qos, message_id, timestamp, retain)
	msg_internal_id := mqtt_msg.InternalId
	log.Debugf("Created new MQTT message, internal id:(%s)", msg_internal_id)

	PublishMessage(mqtt_msg)

	// Send PUBACK if QOS is 1
	if qos == 1 {
		SendPuback(message_id, conn, client_rep.WriteLock)
		log.Debugf("PUBACK sent to client(%s)", client_id)
	}
}
Example #7
func RetryDeliver(sleep uint64, dest_client_id string, qos uint8, msg *MqttMessage) {
	defer func() {
		if r := recover(); r != nil {
			log.Debugf("got panic, will print stack")
			debug.PrintStack()
			panic(r)
		}
	}()

	if sleep > 3600*4 {
		log.Debugf("too long retry delay(%d), abort retry deliver", sleep)
		return
	}

	time.Sleep(time.Duration(sleep) * time.Second)

	if G_redis_client.IsFlyingMessagePendingAck(dest_client_id, msg.MessageId) {
		DeliverMessage(dest_client_id, qos, msg)
		log.Debugf("Retried delivering message %s:%d, will sleep %d seconds before next attempt",
			dest_client_id, msg.MessageId, sleep*2)
		RetryDeliver(sleep*2, dest_client_id, qos, msg)
	} else {
		log.Debugf("message (%s:%d) is not pending ACK, stop retry delivering",
			dest_client_id, msg.MessageId)
	}
}
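RetryDeliver doubles the sleep on every recursive call and gives up once the delay exceeds four hours (3600*4 seconds), so the caller only needs to fire the first attempt in a goroutine. A sketch, assuming destClientId, qos and msg are already in scope; the 20-second initial delay is an illustrative choice, not taken from the source:

// First retry after 20s, then 40s, 80s, ... until the delay passes four hours
// or the message is no longer pending ACK.
go RetryDeliver(20, destClientId, qos, msg)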
Example #8
func (tz *BadXMLTokenizer) Tokens() <-chan *Token {

	token_channel := make(chan *Token)
	log.Debugf("Created channel %v as part of Tokens(), with"+
		" Scanner = %v", token_channel, tz)

	go func(ret chan *Token, tz *BadXMLTokenizer) {
		for {
			log.Tracef("Scanner calling Next()")
			tok, err := tz.Next()
			log.Tracef("scanner.Next() returned %s, %v", tok, err)
			switch err {
			case nil:
				log.Debugf("Pushing %s into token channel %v",
					tok, ret)
				ret <- tok
			case io.EOF:
				log.Debugf("received EOF, closing channel")
				close(ret)
				log.Debugf("Closed.")
				log.Flush()
				return
			}
		}
	}(token_channel, tz)

	return token_channel
}
Example #9
func (this *Orchestrator) handleAction(action Action) {
	var err error
	ocSideOnly := false
	ocSide, ocSideOk := action.(OrchestratorSideAction)
	action.SetTriggeredTime(time.Now())
	log.Debugf("action %s is executable on the orchestrator side: %t", action, ocSideOk)
	if ocSideOk {
		ocSideOnly = ocSide.OrchestratorSideOnly()
		log.Debugf("action %s is executable on only the orchestrator side: %t", action, ocSideOnly)
		err = ocSide.ExecuteOnOrchestrator()
		if err != nil {
			log.Errorf("ignoring an error that occurred during ExecuteOnOrchestrator: %s", err)
		}
	}

	if !ocSideOnly {
		// pass to the inspector handler.
		entity := GetTransitionEntity(action.EntityID())
		if entity == nil {
			err = fmt.Errorf("could not find entity %s for %s", action.EntityID(), action)
			log.Errorf("ignoring an error: %s", err)
		} else {
			log.Debugf("Main[%s]->Handler: sending an action %s", entity.ID, action)
			entity.ActionFromMain <- action
			log.Debugf("Main[%s]->Handler: sent an action %s", entity.ID, action)
		}
	}

	// make sequence for tracing
	if this.collectTrace {
		this.actionSequence = append(this.actionSequence, action)
	}
}
Example #10
func Boot(client *docker.Client, opt *docker.CreateContainerOptions,
	exitCh chan error) (*docker.Container, error) {
	log.Debugf("Creating container for image %s", opt.Config.Image)
	container, err := client.CreateContainer(*opt)
	if err != nil {
		return container, err
	}

	log.Debugf("Starting container %s", container.ID)
	go func() {
		exitCh <- dockerpty.Start(client, container, opt.HostConfig)
	}()

	trial := 0
	for {
		container, err = client.InspectContainer(container.ID)
		if err != nil {
			return container, err
		}
		if container.State.StartedAt.Unix() > 0 {
			break
		}
		if trial > 30 {
			return container, fmt.Errorf("container %s seems not started. state=%#v", container.ID, container.State)
		}
		trial += 1
		time.Sleep(time.Duration(trial*100) * time.Millisecond)
	}
	log.Debugf("container state=%#v", container.State)
	return container, nil
}
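A sketch of driving Boot with the fsouza/go-dockerclient types it accepts; the endpoint, image name and error handling here are illustrative assumptions, not part of the source:

client, err := docker.NewClient("unix:///var/run/docker.sock") // assumed local docker endpoint
if err != nil {
	log.Errorf("cannot create docker client: %v", err)
	return
}
opt := &docker.CreateContainerOptions{
	Config:     &docker.Config{Image: "busybox"}, // illustrative image
	HostConfig: &docker.HostConfig{},
}
exitCh := make(chan error, 1)
container, err := Boot(client, opt, exitCh)
if err != nil {
	log.Errorf("boot failed: %v", err)
	return
}
log.Debugf("container %s is running", container.ID)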
Example #11
// getBytesWithTTL - get the path, and cache in the session
// return from cache if found and the TTL isn't expired, otherwise fetch it and
// store it in cache
func (to *Session) getBytesWithTTL(path string, ttl int64) ([]byte, error) {
	var body []byte
	var err error
	getFresh := false
	if cacheEntry, ok := to.Cache[path]; ok {
		if cacheEntry.Entered > time.Now().Unix()-ttl {
			seelog.Debugf("Cache HIT for %s%s", to.URL, path)
			body = cacheEntry.Bytes
		} else {
			seelog.Debugf("Cache HIT but EXPIRED for %s%s", to.URL, path)
			getFresh = true
		}
	} else {
		to.Cache = make(map[string]CacheEntry)
		seelog.Debugf("Cache MISS for %s%s", to.URL, path)
		getFresh = true
	}

	if getFresh {
		body, err = to.getBytes(path)
		if err != nil {
			return nil, err
		}

		newEntry := CacheEntry{
			Entered: time.Now().Unix(),
			Bytes:   body,
		}
		to.Cache[path] = newEntry
	}

	return body, nil
}
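A minimal usage sketch of the TTL cache above; the path and the 60-second TTL are hypothetical values, not taken from the source:

// First call fetches from the server and populates to.Cache; a second call
// within 60 seconds is served from the cache.
body, err := to.getBytesWithTTL("/api/1.2/servers.json", 60)
if err != nil {
	seelog.Errorf("request failed: %v", err)
	return
}
seelog.Debugf("got %d bytes", len(body))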
Example #12
func (m *MetricsManager) listenForMetrics() {
	if m.statsdEnabled {
		defer m.statsBuffer.Close()
	}

	var metric *Metric
	for {
		metric = <-m.metricsChannel
		log.Debugf("Received metric: %s - %v", metric.Name, metric.Value)

		if m.statsdEnabled {
			log.Debugf("Logging metrics")
			switch metric.Type {
			case "counter":
				m.statsBuffer.Incr(metric.Name, metric.Value)
			case "guage":
				m.statsBuffer.Gauge(metric.Name, metric.Value)
			case "timing":
				m.statsBuffer.Timing(metric.Name, metric.Value)
			default:
				log.Errorf("Unknown metric type received: %s", metric.Type)
			}
		}

		stringToPublish := fmt.Sprintf("%s:%d", metric.Name, metric.Value)
		messageHeaders := make(map[string]string)
		messageBody := []byte(stringToPublish)

		metricMessage := message.NewMessage(&messageHeaders, &messageBody)
		m.queueManager.Publish(metricsQueueName, metricMessage)
	}
}
Example #13
func (r *RabbitConnection) Connect(connected chan bool) {
	for {
		log.Debug("[Rabbit] Attempting to connect…")
		if err := r.tryToConnect(); err != nil {
			sleepFor := time.Second
			log.Debugf("[Rabbit] Failed to connect, sleeping %s…", sleepFor.String())
			time.Sleep(sleepFor)
			continue
		}
		connected <- true
		r.connected = true
		notifyClose := make(chan *amqp.Error)
		r.Connection.NotifyClose(notifyClose)

		// Block until we get disconnected, or shut down
		select {
		case err := <-notifyClose:
			r.connected = false
			log.Debugf("[Rabbit] AMQP connection closed (notifyClose): %s", err.Error())
			return

		case <-r.closeChan:
			// Shut down connection
			log.Debug("[Rabbit] Closing AMQP connection (closeChan closed)…")
			if err := r.Connection.Close(); err != nil {
				log.Errorf("Failed to close AMQP connection: %v", err)
			}
			r.connected = false
			return
		}
	}
}
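Connect only signals on the connected channel after the first successful attempt, so callers can block on it. A sketch, assuming rc is an already-constructed *RabbitConnection; the 30-second timeout is illustrative:

connected := make(chan bool)
go rc.Connect(connected)

select {
case <-connected:
	log.Debug("[Rabbit] Connection established")
case <-time.After(30 * time.Second):
	log.Errorf("[Rabbit] Timed out waiting for AMQP connection")
}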
Example #14
func (container *StatsContainer) collect() {
	dockerID := container.containerMetadata.DockerID
	for {
		select {
		case <-container.ctx.Done():
			seelog.Debugf("Stopping stats collection for container %s", dockerID)
			return
		default:
			seelog.Debugf("Collecting stats for container %s", dockerID)
			dockerStats, err := container.client.Stats(dockerID, container.ctx)
			if err != nil {
				seelog.Warnf("Error retrieving stats for container %s: %v", dockerID, err)
				continue
			}
			for rawStat := range dockerStats {
				stat, err := dockerStatsToContainerStats(rawStat)
				if err == nil {
					container.statsQueue.Add(stat)
				} else {
					seelog.Warnf("Error converting stats for container %s: %v", dockerID, err)
				}
			}
			seelog.Debugf("Disconnected from docker stats for container %s", dockerID)
		}
	}
}
Example #15
File: comet.go Project: houcy/push
func pushMessage(appId string, app *RegApp, rawMsg *storage.RawMessage, header *Header, body []byte) bool {
	//if len(app.SendIds) != 0 {
	// regapp with sendids
	log.Infof("msgid %d: before push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	if rawMsg.SendId != "" {
		found := false
		for _, sendid := range app.SendIds {
			if sendid == rawMsg.SendId {
				found = true
				break
			}
		}
		if !found {
			log.Debugf("msgid %d: check sendid (%s) failed", rawMsg.MsgId, rawMsg.SendId)
			return false
		}
	}

	x := DevicesMap.Get(app.DevId)
	if x == nil {
		log.Debugf("msgid %d: device %s offline", rawMsg.MsgId, app.DevId)
		return false
	}
	client := x.(*Client)
	client.SendMessage2(header, body)
	log.Infof("msgid %d: after push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	storage.Instance.MsgStatsSend(rawMsg.MsgId)
	storage.Instance.AppStatsSend(rawMsg.AppId)
	return true
}
Example #16
func FetchEmailTasksFromRedis() []interface{} {
	now := time.Now().Unix()
	emailTasks := make([]interface{}, 0)
	key := "email-task-set"
	conn := RedisPool.Get()
	if conn != nil {
		defer conn.Close()
		conn.Send("MULTI")
		conn.Send("ZRANGEBYSCORE", key, 0, now)
		conn.Send("ZREMRANGEBYSCORE", key, 0, now)
		queued, err := conn.Do("EXEC")
		if err == nil && queued != nil {
			jsonStrs, err := redis.Strings(queued.([]interface{})[0], nil)
			if err == nil {
				for _, jsonStr := range jsonStrs {
					seelog.Debugf("[Receive EmailTask From Redis] [Json : %v]", jsonStr)
					if emailTask, err := BuildEmailTaskFromJson(jsonStr); err == nil && emailTask != nil {
						if nt, err := strconv.Atoi(emailTask.NotificationTime); err == nil {
							/* delay sending by at most one hour */
							delta := now - int64(nt)
							if delta < int64(time.Hour.Seconds()) {
								emailTasks = append(emailTasks, emailTask)
							} else {
								seelog.Debugf("[EmailTask Too Late] [Delta Seconds : %v][EmailTask : %v]", delta, *emailTask)
							}
						}
					}
				}
			}
		}
	}
	return emailTasks
}
Example #17
func (s *server) handle(trans transport.Transport, req_ tmsg.Request) {
	req := mercury.FromTyphonRequest(req_)
	req, rsp := s.applyRequestMiddleware(req)

	if rsp == nil {
		if ep, ok := s.Endpoint(req.Endpoint()); !ok {
			log.Warnf("[Mercury:Server] Received request %s for unknown endpoint %s", req.Id(), req.Endpoint())
			rsp = ErrorResponse(req, errEndpointNotFound)
		} else {
			if rsp_, err := ep.Handle(req); err != nil {
				log.Debugf("[Mercury:Server] Got error from endpoint %s for request %s: %v", ep.Name, req.Id(), err)
				rsp = ErrorResponse(req, err)
				// @todo happy to remove this verbose logging once we have tracing... For now it will allow us to debug things
				log.Debugf("[Mercury:Server] Full request: %+v", req.Body())
				log.Debugf("[Mercury:Server] Full error: %+v", rsp.Body())
			} else if rsp_ == nil {
				rsp = req.Response(nil)
			} else {
				rsp = rsp_
			}
		}
	}
	rsp = s.applyResponseMiddleware(rsp, req)
	if rsp != nil {
		trans.Respond(req, rsp)
	}
}
Example #18
func (client *ecrClient) GetAuthorizationToken(registryId string) (*ecrapi.AuthorizationData, error) {
	cachedToken, found := client.tokenCache.Get(registryId)
	if found {
		cachedAuthData := cachedToken.(*ecrapi.AuthorizationData)

		if client.IsTokenValid(cachedAuthData) {
			return cachedAuthData, nil
		} else {
			log.Debugf("Token found, but expires at %s", aws.TimeValue(cachedAuthData.ExpiresAt))
		}
	}

	log.Debugf("Calling GetAuthorizationToken for %q", registryId)

	output, err := client.sdkClient.GetAuthorizationToken(&ecrapi.GetAuthorizationTokenInput{
		RegistryIds: []*string{aws.String(registryId)},
	})

	if err != nil {
		return nil, err
	}

	if len(output.AuthorizationData) != 1 {
		return nil, fmt.Errorf("Unexpected number of results in AuthorizationData (%d)", len(output.AuthorizationData))
	}
	authData := output.AuthorizationData[0]
	client.tokenCache.Set(registryId, authData)

	return authData, nil
}
Example #19
// newAwsMgr returns a new AWS manager instance.
// configPath determines where the awsAccounts config is stored in the config service with respect to your service.
// For example hailo/service/foo/awsAccounts.
func newAwsMgr(configPath ...string) *AwsMgr {

	m := &AwsMgr{
		Accounts: loadAccConfig(configPath...),
		sts:      sts.NewSTSConnectionManager(),
	}

	ch := config.SubscribeChanges()
	hash, _ := config.LastLoaded()
	// Launch our config updater
	go func() {
		for range ch {
			newHash, _ := config.LastLoaded()
			if hash != newHash {
				hash = newHash
				accs := loadAccConfig(configPath...)
				m.Lock()
				if len(accs) > 0 {
					m.Accounts = accs
					log.Debugf("[AWS Manager] Updating AWS Accounts:%v", m.Accounts)
				}
				m.Unlock()
			}
		}
	}()
	log.Debugf("[AWS Manager] Accounts: %v", m.Accounts)
	return m

}
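A usage sketch based on the example path in the doc comment; whether loadAccConfig expects the path as separate segments or a single string is an assumption here:

// Assumed: the segments form the config path hailo/service/foo/awsAccounts.
mgr := newAwsMgr("hailo", "service", "foo", "awsAccounts")
log.Debugf("[AWS Manager] %d accounts loaded", len(mgr.Accounts))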
Example #20
func TestTrecFileWriter(t *testing.T) {
	filename := "/tmp/test_file_123"
	fw := new(TrecFileWriter)
	fw.Init(filename)
	go fw.WriteAllTokens()

	// Write these words to disk via the writer channel
	words := [3]string{"word1", "word2", "word3"}
	for i := range words {
		log.Debugf("Adding %s to writer chan", words[i])
		fw.StringChan <- &words[i]
	}

	close(fw.StringChan)
	log.Debugf("Writer channel closed")

	// Verify file contents
	if file, err := os.Open(filename); err != nil {
		panic(fmt.Sprintf("Unable to open %s due to error: %s\n", filename, err))
	} else {
		scanner := bufio.NewScanner(file)
		ctr := 0
		for scanner.Scan() {
			if words[ctr] != scanner.Text() {
				t.Errorf("%s found, should have been %s", scanner.Text(), words[ctr])
			}
			ctr++
		}

		file.Close()
	}

}
Example #21
func (engine *DockerTaskEngine) pullContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Pulling container", "task", task, "container", container)
	seelog.Debugf("Attempting to obtain ImagePullDeleteLock to pull image - %s", container.Image)

	ImagePullDeleteLock.Lock()
	seelog.Debugf("Obtained ImagePullDeleteLock to pull image - %s", container.Image)
	defer seelog.Debugf("Released ImagePullDeleteLock after pulling image - %s", container.Image)
	defer ImagePullDeleteLock.Unlock()

	// If a task is blocked here for some time, and before it starts pulling image,
	// the task's desired status is set to stopped, then don't pull the image
	if task.GetDesiredStatus() == api.TaskStopped {
		seelog.Infof("Task desired status is stopped, skip pull container: %v, task %v", container, task)
		container.SetDesiredStatus(api.ContainerStopped)
		return DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
	}

	metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	err := engine.imageManager.AddContainerReferenceToImageState(container)
	if err != nil {
		seelog.Errorf("Error adding container reference to image state: %v", err)
	}
	imageState := engine.imageManager.GetImageStateFromImageName(container.Image)
	engine.state.AddImageState(imageState)
	engine.saver.Save()
	return metadata
}
Example #22
// TraceLookup retrieves a trace from the persistence layer
func TraceLookup(w http.ResponseWriter, r *http.Request) {
	traceId := r.URL.Query().Get("traceId")
	if traceId == "" {
		errorResponse(r, w, http.StatusBadRequest, errors.New("traceId param not provided"))
		return
	}

	log.Debugf("Trace lookup - TraceId: %s", traceId)
	t, err := DefaultStore.ReadTrace(traceId)
	if err != nil {
		log.Errorf("Trace lookup failed: %s", err)
		errorResponse(r, w, http.StatusInternalServerError, fmt.Errorf("could not load trace: %s", err))
		return
	}

	// If we don't find the trace return 404
	if t == nil {
		log.Debugf("Trace not found: %s", traceId)
		errorResponse(r, w, http.StatusNotFound, errors.New("traceId not found"))
		return
	}

	// Return trace
	response(
		r,
		w,
		map[string]interface{}{
			"trace": prettyFormatTrace(t),
		},
	)
}
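TraceLookup has the standard http.HandlerFunc signature, so it can be registered directly on a mux; the route and port below are illustrative, not from the source:

http.HandleFunc("/trace", TraceLookup)
if err := http.ListenAndServe(":8080", nil); err != nil {
	log.Errorf("HTTP server stopped: %v", err)
}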
Example #23
func (fr *TrecFileReader) read_to_chan(count int) (i int) {
	//Catch and log panics
	defer func() {
		if x := recover(); x != nil {
			log.Criticalf("Error in document %d of %s: %v", fr.docCounter, fr.filename, x)
			log.Flush()
		}
	}()

	for i = 0; i < count || count == -1; i++ {
		log.Debugf("Reading document %d from %s", i, fr.filename)
		doc, err := fr.read_next_doc()

		switch err {

		case io.EOF:
			log.Debugf("Got EOF for file %s", fr.filename)
			close(fr.documents)
			return i

		case nil:
			log.Debugf("Successfully read document %s", doc.Identifier())
			fr.documents <- doc

		default:
			log.Criticalf("Oh f**k...%v", err)
			panic(err)

		}
	}
	log.Infof("Returning")
	return i
}
Example #24
// removeContainer deletes the container from the map of containers being watched.
// It also stops the periodic usage data collection for the container.
func (engine *DockerStatsEngine) removeContainer(dockerID string) {
	engine.containersLock.Lock()
	defer engine.containersLock.Unlock()

	// Make sure that this container belongs to a task.
	task, err := engine.resolver.ResolveTask(dockerID)
	if err != nil {
		seelog.Debugf("Could not map container to task, ignoring, err: %v, id: %s", err, dockerID)
		return
	}

	_, taskExists := engine.tasksToContainers[task.Arn]
	if !taskExists {
		seelog.Debugf("Container not being watched, id: %s", dockerID)
		return
	}

	// task arn exists in map.
	container, containerExists := engine.tasksToContainers[task.Arn][dockerID]
	if !containerExists {
		// container arn does not exist in map.
		seelog.Debugf("Container not being watched, id: %s", dockerID)
		return
	}

	engine.doRemoveContainer(container, task.Arn)
}
Example #25
func (zkOffsetClient *ZooKeeperOffsetClient) getOffsets(paths []string) {
	log.Debugf("Start to refresh ZK based offsets stored in Kafka base paths: %s", paths)

	for _, path := range paths {
		consumerGroupPath := path + "/consumers"
		consumerGroups, _, err := zkOffsetClient.conn.Children(consumerGroupPath)

		switch {
		case err == nil:
			for _, consumerGroup := range consumerGroups {
				if !zkOffsetClient.isConsumerGroupBlacklisted(consumerGroup) {
					go zkOffsetClient.getOffsetsForConsumerGroup(consumerGroup, consumerGroupPath+"/"+consumerGroup)
				} else {
					log.Debugf("Skipping consumer group %s as it is blacklisted", consumerGroup)
				}
			}

		case err == zk.ErrNoNode:
			// don't tolerate mis-configuration, let's bail out
			panic("Failed to read consumer groups in ZK path: " + consumerGroupPath)

		default:
			// if we cannot even read the top level directory to get the list of all consumer groups, let's bail out
			panic(err)
		}
	}
}
Example #26
func (client *RedisClient) Fetch(key string, value interface{}) int {
	log.Debugf("acquiring g_redis_lock, fetch key=(%s)", key)
	g_redis_lock.Lock()
	defer g_redis_lock.Unlock()
	log.Debugf("acquired g_redis_lock, fetch key=(%s)", key)

	return client.FetchNoLock(key, value)
}
Example #27
func (client *RedisClient) Store(key string, value interface{}) {
	log.Debugf("acquiring g_redis_lock, store key=(%s)", key)
	g_redis_lock.Lock()
	defer g_redis_lock.Unlock()
	log.Debugf("acquired g_redis_lock, store key=(%s)", key)

	client.StoreNoLock(key, value)
}
Example #28
// locked
func (this *ActionQueue) Put(action Action) {
	log.Debugf("ActionQueue[%s]: Putting %s", this.EntityID, action)
	this.actionsLock.Lock()
	oldLen := len(this.actions)
	this.actions = append(this.actions, action)
	this.actionsLock.Unlock()
	this.actionsUpdatedCh <- true
	log.Debugf("ActionQueue[%s]: Put(%d->%d) %s", this.EntityID, oldLen, oldLen+1, action)
}
Example #29
// GetInstanceMetrics gets all task metrics and instance metadata from stats engine.
func (engine *DockerStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) {
	var taskMetrics []*ecstcs.TaskMetric
	idle := engine.isIdle()
	metricsMetadata := &ecstcs.MetricsMetadata{
		Cluster:           aws.String(engine.cluster),
		ContainerInstance: aws.String(engine.containerInstanceArn),
		Idle:              aws.Bool(idle),
		MessageId:         aws.String(uuid.NewRandom().String()),
	}

	if idle {
		seelog.Debug("Instance is idle. No task metrics to report")
		fin := true
		metricsMetadata.Fin = &fin
		return metricsMetadata, taskMetrics, nil
	}

	for taskArn := range engine.tasksToContainers {
		containerMetrics, err := engine.getContainerMetricsForTask(taskArn)
		if err != nil {
			seelog.Debugf("Error getting container metrics for task: %s, err: %v", taskArn, err)
			continue
		}

		if len(containerMetrics) == 0 {
			seelog.Debugf("Empty containerMetrics for task, ignoring, task: %s", taskArn)
			continue
		}

		taskDef, exists := engine.tasksToDefinitions[taskArn]
		if !exists {
			seelog.Debugf("Could not map task to definition, task: %s", taskArn)
			continue
		}

		metricTaskArn := taskArn
		taskMetric := &ecstcs.TaskMetric{
			TaskArn:               &metricTaskArn,
			TaskDefinitionFamily:  &taskDef.family,
			TaskDefinitionVersion: &taskDef.version,
			ContainerMetrics:      containerMetrics,
		}
		taskMetrics = append(taskMetrics, taskMetric)
	}

	if len(taskMetrics) == 0 {
		// Not idle. Expect taskMetrics to be there.
		return nil, nil, fmt.Errorf("No task metrics to report")
	}

	// Reset current stats. Retaining older stats results in incorrect utilization stats
	// until they are removed from the queue.
	engine.resetStats()
	return metricsMetadata, taskMetrics, nil
}
Example #30
func (self *defaultClient) GetCredentials(registry, image string) (string, string, error) {
	log.Debugf("GetCredentials for %s", registry)

	cachedEntry := self.credentialCache.Get(registry)

	if cachedEntry != nil {
		if cachedEntry.IsValid(time.Now()) {
			log.Debugf("Using cached token for %s", registry)
			return extractToken(cachedEntry.AuthorizationToken)
		} else {
			log.Debugf("Cached token is no longer valid. RequestAt: %s, ExpiresAt: %s", cachedEntry.RequestedAt, cachedEntry.ExpiresAt)
		}
	}

	log.Debugf("Calling ECR.GetAuthorizationToken for %s", registry)

	input := &ecr.GetAuthorizationTokenInput{
		RegistryIds: []*string{aws.String(registry)},
	}

	output, err := self.ecrClient.GetAuthorizationToken(input)

	if err != nil || output == nil {
		if err == nil {
			err = fmt.Errorf("Missing AuthorizationData in ECR response for %s", registry)
		}

		// if we have a cached token, fall back to avoid failing the request. This may result in an expired token
		// being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an
		// old token. We invalidate tokens prior to their expiration date to help mitigate this scenario.
		if cachedEntry != nil {
			log.Infof("Got error fetching authorization token. Falling back to cached token. Error was: %s", err)
			return extractToken(cachedEntry.AuthorizationToken)
		}

		return "", "", err
	}
	for _, authData := range output.AuthorizationData {
		if authData.ProxyEndpoint != nil &&
			strings.HasPrefix(proxyEndpointScheme+image, aws.StringValue(authData.ProxyEndpoint)) &&
			authData.AuthorizationToken != nil {
			authEntry := cache.AuthEntry{
				AuthorizationToken: aws.StringValue(authData.AuthorizationToken),
				RequestedAt:        time.Now(),
				ExpiresAt:          aws.TimeValue(authData.ExpiresAt),
				ProxyEndpoint:      aws.StringValue(authData.ProxyEndpoint),
			}

			self.credentialCache.Set(registry, &authEntry)
			return extractToken(aws.StringValue(authData.AuthorizationToken))
		}
	}
	return "", "", fmt.Errorf("No AuthorizationToken found for %s", registry)
}