Example #1
// ServeHttp serves IAM Role Credentials for Tasks being managed by the agent.
func ServeHttp(credentialsManager credentials.Manager, containerInstanceArn string, cfg *config.Config) {
	// Create and initialize the audit log
	// TODO Use seelog's programmatic configuration instead of xml.
	logger, err := log.LoggerFromConfigAsString(audit.AuditLoggerConfig(cfg))
	if err != nil {
		log.Errorf("Error initializing the audit log: %v", err)
		// If the logger cannot be initialized, use the provided dummy seelog.LoggerInterface, seelog.Disabled.
		logger = log.Disabled
	}

	auditLogger := audit.NewAuditLog(containerInstanceArn, cfg, logger)

	server := setupServer(credentialsManager, auditLogger)

	for {
		utils.RetryWithBackoff(utils.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error {
			// TODO, make this cancellable and use the passed in context; for
			// now, not critical if this gets interrupted
			err := server.ListenAndServe()
			if err != nil {
				log.Errorf("Error running http api: %v", err)
			}
			return err
		})
	}
}
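
The setupServer call above is not defined in this excerpt. A minimal sketch of a plausible shape, mirroring the server construction in Example #2; the route path, the credentialsV1RequestHandlerMaker helper, the audit.AuditLogger type name, and the AgentCredentialsPort constant are illustrative assumptions, not taken from the source:

// Hypothetical setupServer for the credentials endpoint; the path,
// handler maker, and port constant below are assumptions.
func setupServer(credentialsManager credentials.Manager, auditLogger audit.AuditLogger) *http.Server {
	serverMux := http.NewServeMux()
	serverMux.HandleFunc("/v1/credentials", credentialsV1RequestHandlerMaker(credentialsManager, auditLogger))

	// Log all requests, then pass through to serverMux (same pattern as Example #2).
	loggingServeMux := http.NewServeMux()
	loggingServeMux.Handle("/", LoggingHandler{serverMux})

	return &http.Server{
		Addr:         ":" + strconv.Itoa(config.AgentCredentialsPort),
		Handler:      loggingServeMux,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
	}
}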
Example #2
func ServeHttp(containerInstanceArn *string, taskEngine engine.TaskEngine, cfg *config.Config) {
	serverFunctions := map[string]func(w http.ResponseWriter, r *http.Request){
		"/v1/metadata": MetadataV1RequestHandlerMaker(containerInstanceArn, cfg),
		"/v1/tasks":    TasksV1RequestHandlerMaker(taskEngine),
		"/license":     LicenseHandler,
	}

	paths := make([]string, 0, len(serverFunctions))
	for path := range serverFunctions {
		paths = append(paths, path)
	}
	availableCommands := &RootResponse{paths}
	// Autogenerated list of the above serverFunctions paths; marshaling a
	// struct of string slices cannot fail, so the error is safely ignored.
	availableCommandResponse, _ := json.Marshal(availableCommands)

	defaultHandler := func(w http.ResponseWriter, r *http.Request) {
		w.Write(availableCommandResponse)
	}

	serverMux := http.NewServeMux()
	serverMux.HandleFunc("/", defaultHandler)
	for key, fn := range serverFunctions {
		serverMux.HandleFunc(key, fn)
	}

	// Log all requests and then pass through to serverMux
	loggingServeMux := http.NewServeMux()
	loggingServeMux.Handle("/", LoggingHandler{serverMux})

	server := http.Server{
		Addr:         ":" + strconv.Itoa(config.AGENT_INTROSPECTION_PORT),
		Handler:      loggingServeMux,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
	}

	for {
		once := sync.Once{}
		utils.RetryWithBackoff(utils.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error {
			// TODO, make this cancellable and use the passed in context; for
			// now, not critical if this gets interrupted
			err := server.ListenAndServe()
			once.Do(func() {
				log.Error("Error running http api", "err", err)
			})
			return err
		})
	}
}
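
LoggingHandler, wrapped around serverMux above, is also not defined in this excerpt. A minimal sketch consistent with the positional construction LoggingHandler{serverMux}; which request fields get logged is an assumption:

// Sketch of the pass-through logging handler: log each request, then
// delegate to the wrapped mux. The logged fields are assumptions.
type LoggingHandler struct {
	handler http.Handler
}

func (h LoggingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Debug("Handling http request", "method", r.Method, "from", r.RemoteAddr, "uri", r.RequestURI)
	h.handler.ServeHTTP(w, r)
}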
// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit() {
	if engine.client != nil {
		return
	}

	errorOnce := sync.Once{}
	taskEngineConnectBackoff := utils.NewSimpleBackoff(200*time.Millisecond, 2*time.Second, 0.20, 1.5)
	utils.RetryWithBackoff(taskEngineConnectBackoff, func() error {
		err := engine.Init()
		if err != nil {
			errorOnce.Do(func() {
				log.Error("Could not connect to docker daemon", "err", err)
			})
		}
		return err
	})
}
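
Every retry loop in these examples leans on utils.RetryWithBackoff with a utils.NewSimpleBackoff(min, max, jitter, multiple). Neither helper is shown here; a plausible sketch, assuming the loop stops early only when an error implements the RetriableError interface used above and reports Retry() == false:

// Sketch of the assumed retry helpers; the real utils package may differ.
type Backoff interface {
	Reset()
	Duration() time.Duration // next delay: grows by 'multiple', jittered, capped at max
}

// RetryWithBackoff calls fn until it returns nil or a non-retriable
// error, sleeping backoff.Duration() between attempts.
func RetryWithBackoff(backoff Backoff, fn func() error) error {
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if retriable, ok := err.(RetriableError); ok && !retriable.Retry() {
			return err
		}
		time.Sleep(backoff.Duration())
	}
}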
// ServeHttp serves information about this agent / containerInstance and tasks
// running on it.
func ServeHttp(containerInstanceArn *string, taskEngine engine.TaskEngine, cfg *config.Config) {
	// Is this the right level to type assert, assuming we'd abstract multiple task engines here?
	// Revisit if we ever add another type.
	dockerTaskEngine := taskEngine.(*engine.DockerTaskEngine)

	server := setupServer(containerInstanceArn, dockerTaskEngine, cfg)
	for {
		once := sync.Once{}
		utils.RetryWithBackoff(utils.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error {
			// TODO, make this cancellable and use the passed in context; for
			// now, not critical if this gets interrupted
			err := server.ListenAndServe()
			once.Do(func() {
				log.Error("Error running http api", "err", err)
			})
			return err
		})
	}
}
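
Once either variant is listening, the introspection API can be smoke-tested from Go. The literal port below assumes AGENT_INTROSPECTION_PORT resolves to 51678, and the AvailableCommands field name is an assumption about RootResponse:

// Hypothetical smoke test; port and expected JSON shape are assumptions.
resp, err := http.Get("http://localhost:51678/")
if err != nil {
	log.Error("Introspection api unreachable", "err", err)
	return
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
// Expect something like {"AvailableCommands":["/license","/v1/metadata","/v1/tasks"]}
fmt.Println(string(body))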
// SubmitTaskEvents continuously retries sending the queued events until the
// list is empty, sleeping between each attempt.
func SubmitTaskEvents(events *eventList, client api.ECSClient) {
	backoff := utils.NewSimpleBackoff(1*time.Second, 30*time.Second, 0.20, 1.3)

	// Mirror events.sending, but without the need to lock since this is local
	// to our goroutine
	done := false

	for !done {
		// If we looped back up here, we successfully submitted an event, but
		// we haven't emptied the list so we should keep submitting
		backoff.Reset()
		utils.RetryWithBackoff(backoff, func() error {
			// Lock and unlock within this function, allowing the list to be added
			// to while we're not actively sending an event
			log.Debug("Waiting on semaphore to send...")
			handler.submitSemaphore.Wait()
			defer handler.submitSemaphore.Post()

			log.Debug("Aquiring lock for sending event...")
			events.Lock()
			defer events.Unlock()
			log.Debug("Aquired lock!")

			var err utils.RetriableError

			if events.Len() == 0 {
				log.Debug("No events left; not retrying more")

				events.sending = false
				done = true
				return nil
			}

			eventToSubmit := events.Front()
			event := eventToSubmit.Value.(*sendableEvent)
			llog := log.New("event", event)

			if event.containerShouldBeSent() {
				llog.Info("Sending container change", "change", event.containerChange)
				err = client.SubmitContainerStateChange(event.containerChange)
				if err == nil || !err.Retry() {
					// submitted or can't be retried; ensure we don't retry it
					event.containerSent = true
					if event.containerChange.SentStatus != nil {
						*event.containerChange.SentStatus = event.containerChange.Status
					}
					statesaver.Save()
					if err != nil {
						llog.Error("Unretriable error submitting container state change", "err", err)
					} else {
						llog.Debug("Submitted container")
					}
					events.Remove(eventToSubmit)
				} // else, leave event on and retry it next loop through
			} else if event.taskShouldBeSent() {
				llog.Info("Sending task change", "change", event.taskChange)
				err = client.SubmitTaskStateChange(event.taskChange)
				if err == nil || !err.Retry() {
					// submitted or can't be retried; ensure we don't retry it
					event.taskSent = true
					if event.taskChange.SentStatus != nil {
						*event.taskChange.SentStatus = event.taskChange.Status
					}
					statesaver.Save()
					if err != nil {
						llog.Error("Unretriable error submitting container state change", "err", err)
					} else {
						llog.Debug("Submitted container")
						backoff.Reset()
					}
					events.Remove(eventToSubmit)
				}
			} else {
				// Shouldn't be sent as either a task or container change event; must have been already sent
				llog.Info("Not submitting redundant event; just removing")
				events.Remove(eventToSubmit)
			}

			if events.Len() == 0 {
				llog.Debug("Removed the last element, no longer sending")
				events.sending = false
				done = true
				return nil
			}

			return err
		})
	}
}
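
SubmitTaskEvents also depends on pieces not shown here: handler.submitSemaphore (a counting semaphore bounding concurrent submitters), statesaver (which persists the updated SentStatus), and the eventList/sendableEvent pair. A structural sketch of the latter two, grounded only in the members exercised above; everything else about them is an assumption:

// Sketch of the supporting types; only the fields and methods used in
// SubmitTaskEvents are grounded in this excerpt.
type eventList struct {
	sync.Mutex   // events.Lock()/Unlock() guard the list and the flag
	list.List    // embedded container/list: Len(), Front(), Remove()
	sending bool // true while a SubmitTaskEvents goroutine owns the list
}

type sendableEvent struct {
	containerSent   bool // set once the container change is submitted or unretriable
	containerChange api.ContainerStateChange
	taskSent        bool
	taskChange      api.TaskStateChange
}

// containerShouldBeSent and taskShouldBeSent are assumed to report whether
// the corresponding change is populated and has not been sent yet.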