Example #1
func (db *serviceClient) Cells(logger lager.Logger) (models.CellSet, error) {
	kvPairs, _, err := db.consulClient.KV().List(CellSchemaRoot(), nil)
	if err != nil {
		bbsErr := models.ConvertError(convertConsulError(err))
		if bbsErr.Type != models.Error_ResourceNotFound {
			return nil, bbsErr
		}
	}

	// A missing key prefix simply means no cells are registered, so fall
	// through and return an empty set rather than an error.
	if kvPairs == nil {
		err = consuladapter.NewPrefixNotFoundError(CellSchemaRoot())
		bbsErr := models.ConvertError(convertConsulError(err))
		if bbsErr.Type != models.Error_ResourceNotFound {
			return nil, bbsErr
		}
	}

	cellPresences := models.NewCellSet()
	for _, kvPair := range kvPairs {
		if kvPair.Session == "" {
			continue
		}

		cell := kvPair.Value
		presence := new(models.CellPresence)
		err := models.FromJSON(cell, presence)
		if err != nil {
			logger.Error("failed-to-unmarshal-cells-json", err)
			continue
		}
		cellPresences.Add(presence)
	}

	return cellPresences, nil
}
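
A point worth isolating from this example: entries that fail to unmarshal are logged and skipped, so one corrupt record cannot poison the whole cell set. A minimal self-contained sketch of that tolerant-decode loop, using an illustrative CellPresence shape rather than the real BBS schema:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// CellPresence is an illustrative stand-in for models.CellPresence; the
// field names here are assumptions, not the real BBS schema.
type CellPresence struct {
	CellID string `json:"cell_id"`
	Zone   string `json:"zone"`
}

// decodePresences mirrors the loop in Cells: malformed entries are logged
// and skipped so a single bad record cannot fail the whole read.
func decodePresences(raw [][]byte) map[string]*CellPresence {
	set := map[string]*CellPresence{}
	for _, value := range raw {
		presence := new(CellPresence)
		if err := json.Unmarshal(value, presence); err != nil {
			log.Printf("failed-to-unmarshal-cells-json: %v", err)
			continue
		}
		set[presence.CellID] = presence
	}
	return set
}

func main() {
	raw := [][]byte{
		[]byte(`{"cell_id":"cell-1","zone":"z1"}`),
		[]byte(`not-json`), // logged and skipped, not fatal
	}
	fmt.Println(len(decodePresences(raw))) // 1
}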
Example #2
func (db *SQLDB) completeTask(logger lager.Logger, task *models.Task, failed bool, failureReason, result string, tx *sql.Tx) error {
	now := db.clock.Now().UnixNano()
	_, err := db.update(logger, tx, tasksTable,
		SQLAttributes{
			"failed":             failed,
			"failure_reason":     failureReason,
			"result":             result,
			"state":              models.Task_Completed,
			"first_completed_at": now,
			"updated_at":         now,
			"cell_id":            "",
		},
		"guid = ?", task.TaskGuid,
	)
	if err != nil {
		logger.Error("failed-updating-tasks", err)
		return db.convertSQLError(err)
	}

	task.State = models.Task_Completed
	task.UpdatedAt = now
	task.FirstCompletedAt = now
	task.Failed = failed
	task.FailureReason = failureReason
	task.Result = result
	task.CellId = ""

	return nil
}
Example #3
func (h *TaskHandler) commonTaskByGuid(logger lager.Logger, w http.ResponseWriter, req *http.Request, version format.Version) {
	var err error
	logger = logger.Session("task-by-guid", lager.Data{"revision": 0})

	request := &models.TaskByGuidRequest{}
	response := &models.TaskResponse{}

	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		logger.Error("failed-parsing-request", err)
		response.Error = models.ConvertError(err)
		return
	}

	response.Task, err = h.controller.TaskByGuid(logger, request.TaskGuid)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	if response.Task.TaskDefinition != nil {
		response.Task = response.Task.VersionDownTo(version)
	}
}
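
The two defers here are ordered deliberately: defers run last-in-first-out, so writeResponse (registered second) fires before exitIfUnrecoverable inspects response.Error. A small runnable sketch of that shape, with hypothetical response fields:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type response struct {
	Error string `json:"error,omitempty"`
	Body  string `json:"body,omitempty"`
}

// handler mirrors the structure of commonTaskByGuid: the response object is
// populated as the handler runs, and a deferred write flushes it exactly
// once no matter which early return fires. Defers run LIFO, so the encode
// (registered last) happens before the deferred post-write bookkeeping.
func handler(w http.ResponseWriter, r *http.Request) {
	resp := &response{}
	defer func() { fmt.Println("post-write bookkeeping, error =", resp.Error) }()
	defer func() { json.NewEncoder(w).Encode(resp) }()

	if r.URL.Query().Get("guid") == "" {
		resp.Error = "missing guid"
		return // the deferred encode still writes the error response
	}
	resp.Body = "task-" + r.URL.Query().Get("guid")
}

func main() {
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest("GET", "/task?guid=abc", nil))
	fmt.Print(rec.Body.String()) // {"body":"task-abc"}
}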
Example #4
func (n *networker) Destroy(log lager.Logger, handle string) error {
	cfg, err := load(n.configStore, handle)
	if err != nil {
		log.Error("no-properties-for-container-skipping-destroy-network", err)
		return nil
	}

	if err := n.configurer.DestroyIPTablesRules(log, cfg); err != nil {
		return err
	}

	if err := n.subnetPool.Release(cfg.Subnet, cfg.ContainerIP); err != nil && err != subnets.ErrReleasedUnallocatedSubnet {
		log.Error("release-failed", err)
		return err
	}

	if ports, ok := n.configStore.Get(handle, gardener.MappedPortsKey); ok {
		mappings, err := portsFromJson(ports)
		if err != nil {
			return err
		}

		for _, m := range mappings {
			n.portPool.Release(m.HostPort)
		}
	}

	err = n.subnetPool.RunIfFree(cfg.Subnet, func() error {
		return n.configurer.DestroyBridge(log, cfg)
	})

	return err
}
Example #5
func (db *SQLDB) DesireTask(logger lager.Logger, taskDef *models.TaskDefinition, taskGuid, domain string) error {
	logger = logger.Session("desire-task", lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	taskDefData, err := db.serializeModel(logger, taskDef)
	if err != nil {
		logger.Error("failed-serializing-task-definition", err)
		return err
	}

	return db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		now := db.clock.Now().UnixNano()

		_, err = db.insert(logger, tx, tasksTable,
			SQLAttributes{
				"guid":               taskGuid,
				"domain":             domain,
				"created_at":         now,
				"updated_at":         now,
				"first_completed_at": 0,
				"state":              models.Task_Pending,
				"task_definition":    taskDefData,
			},
		)
		if err != nil {
			logger.Error("failed-inserting-task", err)
			return db.convertSQLError(err)
		}

		return nil
	})
}
Example #6
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)

	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
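
The deferred closure guarantees the convergence duration metric is emitted on every return path, including the early error return. A stripped-down sketch of that timing pattern; sendMetric is a hypothetical stand-in for convergeLRPDuration.Send:

package main

import (
	"fmt"
	"time"
)

// sendMetric stands in for a real metrics sender such as
// convergeLRPDuration.Send; it is an assumption for this sketch.
func sendMetric(name string, d time.Duration) error {
	fmt.Printf("%s = %s\n", name, d.Round(time.Millisecond))
	return nil
}

// converge shows the pattern used in ConvergeLRPs: capture the start time
// up front, then emit the elapsed duration in a deferred closure so every
// return path is measured.
func converge() {
	start := time.Now()
	defer func() {
		if err := sendMetric("converge-duration", time.Since(start)); err != nil {
			fmt.Println("failed-sending-metric:", err)
		}
	}()

	time.Sleep(10 * time.Millisecond) // the actual convergence work
}

func main() { converge() }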
Example #7
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		actualKey := actualLRP.ActualLRPKey

		logger = logger.Session("restart-crash", lager.Data{
			"process_guid": actualKey.ProcessGuid,
			"index":        actualKey.Index,
		})

		if actualLRP.State != models.ActualLRPStateCrashed {
			logger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		logger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		_, err := db.unclaimActualLRP(logger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey)
		if err != nil {
			logger.Error("failed-unclaiming-crash", err)
			return
		}
		logger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(logger, &actualKey)
	}
}
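
Instead of acting immediately, this function returns a closure that captures the LRP and the shared starts accumulator, so convergence can collect many such units of work and execute them later, for example through a worker pool. A toy version of that pattern:

package main

import "fmt"

type lrp struct {
	guid  string
	state string
}

// makeRestarter mirrors resolveRestartableCrashedActualLRPS: it returns a
// closure that captures the LRP and the shared accumulator, so callers can
// batch the work and run it later.
func makeRestarter(l *lrp, starts *[]string) func() {
	return func() {
		if l.state != "CRASHED" {
			fmt.Println("skip:", l.guid, "not crashed")
			return
		}
		l.state = "UNCLAIMED"
		*starts = append(*starts, l.guid)
	}
}

func main() {
	lrps := []*lrp{{"a", "CRASHED"}, {"b", "RUNNING"}}
	var starts []string
	var work []func()
	for _, l := range lrps {
		work = append(work, makeRestarter(l, &starts))
	}
	for _, w := range work { // a worker pool could run these concurrently
		w()
	}
	fmt.Println(starts) // [a]
}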
Example #8
func (db *SQLDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	convergeStart := db.clock.Now()
	convergeLRPRunsCounter.Increment()
	logger.Info("starting")
	defer logger.Info("completed")

	defer func() {
		err := convergeLRPDuration.Send(time.Since(convergeStart))
		if err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	now := db.clock.Now()

	db.pruneDomains(logger, now)
	db.pruneEvacuatingActualLRPs(logger, now)

	domainSet, err := db.domainSet(logger)
	if err != nil {
		return nil, nil, nil
	}

	db.emitDomainMetrics(logger, domainSet)

	converge := newConvergence(db)
	converge.staleUnclaimedActualLRPs(logger, now)
	converge.actualLRPsWithMissingCells(logger, cellSet)
	converge.lrpInstanceCounts(logger, domainSet)
	converge.orphanedActualLRPs(logger)
	converge.crashedActualLRPs(logger, now)

	return converge.result(logger)
}
Example #9
func (db *SQLDB) transact(logger lager.Logger, f func(logger lager.Logger, tx *sql.Tx) error) error {
	var err error

	for attempts := 0; attempts < 3; attempts++ {
		err = func() error {
			tx, err := db.db.Begin()
			if err != nil {
				return err
			}
			defer tx.Rollback()

			err = f(logger, tx)
			if err != nil {
				return err
			}

			return tx.Commit()
		}()

		if attempts >= 2 || db.convertSQLError(err) != models.ErrDeadlock {
			break
		} else {
			logger.Error("deadlock-transaction", err, lager.Data{"attempts": attempts})
			time.Sleep(500 * time.Millisecond)
		}
	}

	return err
}
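
This retry wrapper is the backbone of every SQLDB write above: the transaction body runs in an inner closure whose deferred Rollback becomes a no-op once Commit succeeds, and only deadlocks earn a retry (at most three attempts). A compilable sketch against database/sql; errDeadlock is a hypothetical sentinel standing in for models.ErrDeadlock:

package main

import (
	"database/sql"
	"errors"
	"fmt"
	"time"
)

// errDeadlock is a hypothetical sentinel; a real implementation would map
// driver-specific error codes (e.g. MySQL 1213) onto it.
var errDeadlock = errors.New("deadlock")

// transact mirrors the SQLDB helper: run f inside a transaction, rolling
// back on error (the deferred Rollback is a no-op after a successful
// Commit), and retry up to three times when the failure is a deadlock.
func transact(db *sql.DB, f func(tx *sql.Tx) error) error {
	var err error
	for attempts := 0; attempts < 3; attempts++ {
		err = func() error {
			tx, err := db.Begin()
			if err != nil {
				return err
			}
			defer tx.Rollback()
			if err := f(tx); err != nil {
				return err
			}
			return tx.Commit()
		}()
		if attempts >= 2 || !errors.Is(err, errDeadlock) {
			break
		}
		fmt.Println("deadlock, retrying; attempt", attempts)
		time.Sleep(500 * time.Millisecond)
	}
	return err
}

func main() {
	// Wiring a real driver is out of scope for this sketch; the surrounding
	// codebase exercises this via db.transact(logger, ...).
	_ = transact
}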
Example #10
// Destroy deletes the container and the bundle directory
func (c *Containerizer) Destroy(log lager.Logger, handle string) error {
	log = log.Session("destroy", lager.Data{"handle": handle})

	log.Info("started")
	defer log.Info("finished")

	state, err := c.runtime.State(log, handle)
	if err != nil {
		log.Info("state-failed-skipping-delete", lager.Data{"error": err.Error()})
		return nil
	}

	log.Info("state", lager.Data{
		"state": state,
	})

	if state.Status == runrunc.CreatedStatus || state.Status == runrunc.StoppedStatus {
		if err := c.runtime.Delete(log, handle); err != nil {
			log.Error("delete-failed", err)
			return err
		}
	}

	return nil
}
Example #11
// Unclaim Actual LRPs that have missing cells (not in the cell set passed to
// convergence) and add them to the list of start requests.
func (c *convergence) actualLRPsWithMissingCells(logger lager.Logger, cellSet models.CellSet) {
	logger = logger.Session("actual-lrps-with-missing-cells")

	keysWithMissingCells := make([]*models.ActualLRPKeyWithSchedulingInfo, 0)

	rows, err := c.selectLRPsWithMissingCells(logger, c.db, cellSet)
	if err != nil {
		logger.Error("failed-query", err)
		return
	}

	for rows.Next() {
		var index int32
		schedulingInfo, err := c.fetchDesiredLRPSchedulingInfoAndMore(logger, rows, &index)
		if err == nil {
			keysWithMissingCells = append(keysWithMissingCells, &models.ActualLRPKeyWithSchedulingInfo{
				Key: &models.ActualLRPKey{
					ProcessGuid: schedulingInfo.ProcessGuid,
					Domain:      schedulingInfo.Domain,
					Index:       index,
				},
				SchedulingInfo: schedulingInfo,
			})
		}
	}

	if rows.Err() != nil {
		logger.Error("failed-getting-next-row", rows.Err())
	}

	c.keysWithMissingCells = keysWithMissingCells
}
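
The row loop above follows the standard database/sql discipline: per-row decode failures are tolerated, and rows.Err() is consulted after the loop because Next returning false can mean either exhaustion or a broken iteration. A sketch of that shape (it needs a live *sql.Rows to do real work):

package main

import (
	"database/sql"
	"fmt"
)

// collectIndexes shows the rows-scanning discipline: loop on rows.Next,
// tolerate per-row scan errors when partial results are acceptable, and
// always consult rows.Err afterwards.
func collectIndexes(rows *sql.Rows) ([]int32, error) {
	defer rows.Close()

	var indexes []int32
	for rows.Next() {
		var index int32
		if err := rows.Scan(&index); err != nil {
			fmt.Println("skipping unscannable row:", err)
			continue
		}
		indexes = append(indexes, index)
	}
	if err := rows.Err(); err != nil {
		return indexes, err
	}
	return indexes, nil
}

func main() {
	_ = collectIndexes // needs a live *sql.Rows; shown for shape only
}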
Example #12
func initializeServer(logger lager.Logger, uploaderConfig config.UploaderConfig) ifrit.Runner {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   ccUploadDialTimeout,
			KeepAlive: ccUploadKeepAlive,
		}).Dial,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: uploaderConfig.SkipCertVerify,
		},
		TLSHandshakeTimeout: ccUploadTLSHandshakeTimeout,
	}

	pollerHttpClient := cfhttp.NewClient()
	pollerHttpClient.Transport = transport

	uploader := ccclient.NewUploader(logger, &http.Client{Transport: transport})
	poller := ccclient.NewPoller(logger, pollerHttpClient, time.Duration(uploaderConfig.CCJobPollingInterval))

	ccUploaderHandler, err := handlers.New(uploader, poller, logger)
	if err != nil {
		logger.Error("router-building-failed", err)
		os.Exit(1)
	}

	return http_server.New(uploaderConfig.ListenAddress, ccUploaderHandler)
}
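
The transport here is the conventional way to get explicit dial and TLS-handshake timeouts plus an opt-in escape hatch for self-signed Cloud Controller certificates. A self-contained sketch; the timeout values are illustrative, not the upstream constants:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"
)

// newUploaderClient mirrors initializeServer's transport setup: proxy
// support from the environment, bounded dial and handshake times, and an
// optional InsecureSkipVerify toggle for self-signed certificates.
func newUploaderClient(skipCertVerify bool) *http.Client {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   10 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: skipCertVerify,
		},
		TLSHandshakeTimeout: 10 * time.Second,
	}
	return &http.Client{Transport: transport}
}

func main() {
	client := newUploaderClient(false)
	fmt.Printf("%T\n", client.Transport) // *http.Transport
}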
Example #13
func initializeDropsonde(logger lager.Logger, uploaderConfig config.UploaderConfig) {
	dropsondeDestination := fmt.Sprint("localhost:", uploaderConfig.DropsondePort)
	err := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)
	if err != nil {
		logger.Error("failed to initialize dropsonde: %v", err)
	}
}
Example #14
func (p *ExternalImageManager) Metrics(log lager.Logger, _, rootfs string) (garden.ContainerDiskStat, error) {
	log = log.Session("image-plugin-metrics", lager.Data{"rootfs": rootfs})
	log.Debug("start")
	defer log.Debug("end")

	imagePath := filepath.Dir(rootfs)
	cmd := exec.Command(p.binPath, "stats", imagePath)
	cmd.Stderr = lagregator.NewRelogger(log)
	outBuffer := bytes.NewBuffer([]byte{})
	cmd.Stdout = outBuffer

	if err := p.commandRunner.Run(cmd); err != nil {
		logData := lager.Data{"action": "stats", "stderr": outBuffer.String()}
		log.Error("external-image-manager-result", err, logData)
		return garden.ContainerDiskStat{}, fmt.Errorf("external image manager metrics failed: %s (%s)", outBuffer.String(), err)
	}

	var metrics map[string]map[string]uint64
	if err := json.NewDecoder(outBuffer).Decode(&metrics); err != nil {
		return garden.ContainerDiskStat{}, fmt.Errorf("parsing metrics: %s", err)
	}

	return garden.ContainerDiskStat{
		TotalBytesUsed:     metrics["disk_usage"]["total_bytes_used"],
		ExclusiveBytesUsed: metrics["disk_usage"]["exclusive_bytes_used"],
	}, nil
}
Example #15
func (db *SQLDB) failExpiredPendingTasks(logger lager.Logger, expirePendingTaskDuration time.Duration) int64 {
	logger = logger.Session("fail-expired-pending-tasks")

	now := db.clock.Now()

	result, err := db.update(logger, db.db, tasksTable,
		SQLAttributes{
			"failed":             true,
			"failure_reason":     "not started within time limit",
			"result":             "",
			"state":              models.Task_Completed,
			"first_completed_at": now.UnixNano(),
			"updated_at":         now.UnixNano(),
		},
		"state = ? AND created_at < ?", models.Task_Pending, now.Add(-expirePendingTaskDuration).UnixNano())
	if err != nil {
		logger.Error("failed-query", err)
		return 0
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		logger.Error("failed-rows-affected", err)
		return 0
	}
	return rowsAffected
}
Example #16
func (db *ETCDDB) UnclaimActualLRP(logger lager.Logger, key *models.ActualLRPKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	actualLRP, modifiedIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		return nil, nil, bbsErr
	}
	beforeActualLRP := *actualLRP

	if actualLRP.State == models.ActualLRPStateUnclaimed {
		logger.Debug("already-unclaimed")
		return nil, nil, models.ErrActualLRPCannotBeUnclaimed
	}

	actualLRP.State = models.ActualLRPStateUnclaimed
	actualLRP.ActualLRPKey = *key
	actualLRP.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	actualLRP.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	actualLRP.Since = db.clock.Now().UnixNano()
	actualLRP.ModificationTag.Increment()

	data, err := db.serializeModel(logger, actualLRP)
	if err != nil {
		return nil, nil, err
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), data, 0, modifiedIndex)
	if err != nil {
		logger.Error("failed-compare-and-swap", err)
		return nil, nil, ErrorFromEtcdError(logger, err)
	}

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: actualLRP}, nil
}
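
CompareAndSwap is what makes the unclaim safe under concurrency: the write lands only if the record is still at the modification index the reader saw, so a racing writer forces the loser to re-read. A toy in-memory model of that read-modify-CAS protocol:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// kv is a toy store exposing the compare-and-swap primitive the etcd client
// provides: a write succeeds only if the stored version still matches what
// the caller read, so concurrent writers cannot silently clobber each other.
type kv struct {
	mu      sync.Mutex
	value   string
	version uint64
}

var errStale = errors.New("compare failed: stale version")

func (s *kv) get() (string, uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.value, s.version
}

func (s *kv) compareAndSwap(newValue string, expectedVersion uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.version != expectedVersion {
		return errStale
	}
	s.value = newValue
	s.version++
	return nil
}

func main() {
	store := &kv{value: "CLAIMED", version: 7}

	// Read-modify-write, as UnclaimActualLRP does against etcd: fetch the
	// value and its version, mutate a copy, then CAS on the version read.
	_, version := store.get()
	if err := store.compareAndSwap("UNCLAIMED", version); err != nil {
		fmt.Println("lost the race:", err)
		return
	}
	fmt.Println(store.value) // UNCLAIMED
}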
Example #17
func (db *ETCDDB) batchDeleteTasks(taskGuids []string, logger lager.Logger) {
	if len(taskGuids) == 0 {
		return
	}

	works := []func(){}

	for _, taskGuid := range taskGuids {
		taskGuid := taskGuid
		works = append(works, func() {
			_, err := db.client.Delete(taskGuid, true)
			if err != nil {
				logger.Error("failed-to-delete", err, lager.Data{
					"task_guid": taskGuid,
				})
			}
		})
	}

	throttler, err := workpool.NewThrottler(db.convergenceWorkersSize, works)
	if err != nil {
		logger.Error("failed-to-create-throttler", err)
		return
	}

	throttler.Work()
}
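
workpool.NewThrottler bounds how many delete closures run at once. An approximation of that behavior using a buffered channel as a counting semaphore; runThrottled is a hypothetical stand-in, not the workpool API:

package main

import (
	"fmt"
	"sync"
)

// runThrottled runs every job, but never more than maxInFlight at once,
// using a buffered channel as a counting semaphore.
func runThrottled(maxInFlight int, jobs []func()) {
	sem := make(chan struct{}, maxInFlight)
	var wg sync.WaitGroup
	for _, job := range jobs {
		wg.Add(1)
		sem <- struct{}{} // blocks while maxInFlight jobs are running
		go func(job func()) {
			defer wg.Done()
			defer func() { <-sem }()
			job()
		}(job)
	}
	wg.Wait()
}

func main() {
	var mu sync.Mutex
	deleted := []string{}
	jobs := []func(){}
	for _, guid := range []string{"t1", "t2", "t3", "t4"} {
		guid := guid // capture the loop variable, as batchDeleteTasks does
		jobs = append(jobs, func() {
			mu.Lock()
			deleted = append(deleted, guid)
			mu.Unlock()
		})
	}
	runThrottled(2, jobs)
	fmt.Println(len(deleted)) // 4
}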
Example #18
func (db *ETCDDB) FailActualLRP(logger lager.Logger, key *models.ActualLRPKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "error_message": errorMessage})
	logger.Info("starting")
	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, err
	}
	beforeActualLRP := *lrp

	if lrp.State != models.ActualLRPStateUnclaimed {
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	lrp.ModificationTag.Increment()
	lrp.PlacementError = errorMessage
	lrp.Since = db.clock.Now().UnixNano()

	lrpData, serialErr := db.serializeModel(logger, lrp)
	if serialErr != nil {
		return nil, nil, serialErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
}
Example #19
func (db *ETCDDB) batchDeleteNodes(keys []string, logger lager.Logger) {
	if len(keys) == 0 {
		return
	}

	works := []func(){}

	for _, key := range keys {
		key := key
		works = append(works, func() {
			logger.Info("deleting", lager.Data{"key": key})
			_, err := db.client.Delete(key, true)
			if err != nil {
				logger.Error("failed-to-delete", err, lager.Data{
					"key": key,
				})
			}
		})
	}

	throttler, err := workpool.NewThrottler(db.convergenceWorkersSize, works)
	if err != nil {
		logger.Error("failed-to-create-throttler", err)
		return
	}

	throttler.Work()
}
Example #20
func (db *ETCDDB) rawActualLRPGroupByProcessGuidAndIndex(logger lager.Logger, processGuid string, index int32) (*models.ActualLRPGroup, error) {
	node, err := db.fetchRecursiveRaw(logger, ActualLRPIndexDir(processGuid, index))
	if err != nil {
		return nil, err
	}

	group := models.ActualLRPGroup{}
	for _, instanceNode := range node.Nodes {
		var lrp models.ActualLRP
		deserializeErr := db.deserializeModel(logger, instanceNode, &lrp)
		if deserializeErr != nil {
			logger.Error("failed-parsing-actual-lrp", deserializeErr, lager.Data{"key": instanceNode.Key})
			return nil, deserializeErr
		}

		if isInstanceActualLRPNode(instanceNode) {
			group.Instance = &lrp
		}

		if isEvacuatingActualLRPNode(instanceNode) {
			group.Evacuating = &lrp
		}
	}

	if group.Evacuating == nil && group.Instance == nil {
		return nil, models.ErrResourceNotFound
	}

	return &group, nil
}
Example #21
func initializeDropsonde(logger lager.Logger) {
	dropsondeDestination := fmt.Sprint("localhost:", *dropsondePort)
	err := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)
	if err != nil {
		logger.Error("failed-to-initialize-dropsonde", err)
	}
}
Example #22
func (db *ETCDDB) createActualLRP(logger lager.Logger, desiredLRP *models.DesiredLRP, index int32) error {
	logger = logger.Session("create-actual-lrp")
	var err error
	if index >= desiredLRP.Instances {
		err = models.NewError(models.Error_InvalidRecord, "Index too large")
		logger.Error("actual-lrp-index-too-large", err, lager.Data{"actual_index": index, "desired_instances": desiredLRP.Instances})
		return err
	}

	guid, err := uuid.NewV4()
	if err != nil {
		return err
	}

	actualLRP := &models.ActualLRP{
		ActualLRPKey: models.NewActualLRPKey(
			desiredLRP.ProcessGuid,
			index,
			desiredLRP.Domain,
		),
		State: models.ActualLRPStateUnclaimed,
		Since: db.clock.Now().UnixNano(),
		ModificationTag: models.ModificationTag{
			Epoch: guid.String(),
			Index: 0,
		},
	}

	return db.createRawActualLRP(logger, actualLRP)
}
Example #23
func (p *externalBinaryNetworker) exec(log lager.Logger, action, handle string,
	inputData interface{}, outputData interface{}) error {

	stdinBytes, err := json.Marshal(inputData)
	if err != nil {
		return err
	}

	args := append(p.extraArg, "--action", action, "--handle", handle)
	cmd := exec.Command(p.path, args...)
	stdout := &bytes.Buffer{}
	cmd.Stdout = stdout
	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr
	cmd.Stdin = bytes.NewReader(stdinBytes)

	err = p.commandRunner.Run(cmd)

	logData := lager.Data{"action": action, "stdin": string(stdinBytes), "stderr": stderr.String(), "stdout": stdout.String()}
	if err != nil {
		log.Error("external-networker-result", err, logData)
		return fmt.Errorf("external networker %s: %s", action, err)
	}

	if outputData != nil {
		err = json.Unmarshal(stdout.Bytes(), outputData)
		if err != nil {
			log.Error("external-networker-result", err, logData)
			return fmt.Errorf("unmarshaling result from external networker: %s", err)
		}
	}

	log.Debug("external-networker-result", logData)
	return nil
}
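
The exec helper defines a small plugin protocol: JSON in on stdin, JSON out on stdout, diagnostics on stderr. A runnable sketch of the same convention; it uses cat as the "plugin" (so it assumes a Unix system), which simply echoes the input back:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

type netConfig struct {
	Handle string `json:"handle"`
	IP     string `json:"ip"`
}

// execJSON mirrors the external-networker call convention: marshal the
// input onto the child's stdin, capture stdout and stderr separately, and
// decode stdout back into outputData.
func execJSON(path string, args []string, input, output interface{}) error {
	stdin, err := json.Marshal(input)
	if err != nil {
		return err
	}

	cmd := exec.Command(path, args...)
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	cmd.Stdin = bytes.NewReader(stdin)
	cmd.Stdout = stdout
	cmd.Stderr = stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("plugin failed: %s (stderr: %s)", err, stderr.String())
	}
	if output != nil {
		return json.Unmarshal(stdout.Bytes(), output)
	}
	return nil
}

func main() {
	in := netConfig{Handle: "container-1", IP: "10.0.0.2"}
	var out netConfig
	if err := execJSON("cat", nil, in, &out); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(out.Handle, out.IP) // container-1 10.0.0.2
}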
Example #24
func (db *SQLDB) fetchActualLRPForUpdate(logger lager.Logger, processGuid string, index int32, evacuating bool, tx *sql.Tx) (*models.ActualLRP, error) {
	expireTime := db.clock.Now().Round(time.Second).UnixNano()
	wheres := "process_guid = ? AND instance_index = ? AND evacuating = ?"
	bindings := []interface{}{processGuid, index, evacuating}
	if evacuating {
		wheres += " AND expire_time > ?"
		bindings = append(bindings, expireTime)
	}

	rows, err := db.all(logger, tx, actualLRPsTable,
		actualLRPColumns, LockRow, wheres, bindings...)
	if err != nil {
		logger.Error("failed-query", err)
		return nil, db.convertSQLError(err)
	}
	groups, err := db.scanAndCleanupActualLRPs(logger, tx, rows)
	if err != nil {
		return nil, db.convertSQLError(err)
	}

	if len(groups) == 0 {
		return nil, models.ErrResourceNotFound
	}

	actualLRP, _ := groups[0].Resolve()

	return actualLRP, nil
}
Example #25
func (db *SQLDB) FailTask(logger lager.Logger, taskGuid, failureReason string) (*models.Task, error) {
	logger = logger.Session("fail-task", lager.Data{"task_guid": taskGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	var task *models.Task

	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		var err error
		task, err = db.fetchTaskForUpdate(logger, taskGuid, tx)
		if err != nil {
			logger.Error("failed-locking-task", err)
			return err
		}

		// Failing a task that is still pending is allowed even though
		// Pending -> Completed does not pass the normal transition check.
		if err = task.ValidateTransitionTo(models.Task_Completed); err != nil {
			if task.State != models.Task_Pending {
				logger.Error("failed-to-transition-task-to-completed", err)
				return err
			}
		}

		return db.completeTask(logger, task, true, failureReason, "", tx)
	})

	return task, err
}
Example #26
// The stager calls this when it wants to claim a completed task. This ensures
// that only one stager ever attempts to handle a given completed task.
func (db *ETCDDB) ResolvingTask(logger lager.Logger, taskGuid string) error {
	logger = logger.WithData(lager.Data{"task_guid": taskGuid})

	logger.Info("starting")
	defer logger.Info("finished")

	task, index, err := db.taskByGuidWithIndex(logger, taskGuid)
	if err != nil {
		logger.Error("failed-getting-task", err)
		return err
	}

	err = task.ValidateTransitionTo(models.Task_Resolving)
	if err != nil {
		logger.Error("invalid-state-transition", err)
		return err
	}

	task.UpdatedAt = db.clock.Now().UnixNano()
	task.State = models.Task_Resolving

	value, err := db.serializeModel(logger, task)
	if err != nil {
		return err
	}

	_, err = db.client.CompareAndSwap(TaskSchemaPathByGuid(taskGuid), value, NO_TTL, index)
	if err != nil {
		return ErrorFromEtcdError(logger, err)
	}
	return nil
}
Example #27
func (h *TaskHandler) commonTasks(logger lager.Logger, w http.ResponseWriter, req *http.Request, version format.Version) {
	var err error
	logger = logger.Session("tasks", lager.Data{"revision": 0})

	request := &models.TasksRequest{}
	response := &models.TasksResponse{}

	defer func() { exitIfUnrecoverable(logger, h.exitChan, response.Error) }()
	defer writeResponse(w, response)

	err = parseRequest(logger, req, request)
	if err != nil {
		logger.Error("failed-parsing-request", err)
		response.Error = models.ConvertError(err)
		return
	}

	filter := models.TaskFilter{Domain: request.Domain, CellID: request.CellId}
	response.Tasks, err = h.controller.Tasks(logger, filter.Domain, filter.CellID)
	if err != nil {
		response.Error = models.ConvertError(err)
		return
	}

	for i := range response.Tasks {
		task := response.Tasks[i]
		if task.TaskDefinition == nil {
			continue
		}
		response.Tasks[i] = task.VersionDownTo(version)
	}
}
Example #28
func (client *localClient) Unmount(logger lager.Logger, driverId string, volumeName string) error {
	logger = logger.Session("unmount")
	logger.Info("start")
	defer logger.Info("end")
	logger.Debug("unmounting-volume", lager.Data{"volumeName": volumeName})

	unmountStart := client.clock.Now()

	defer func() {
		sendUnmountDurationMetrics(logger, time.Since(unmountStart), driverId)
	}()

	driver, found := client.driverRegistry.Driver(driverId)
	if !found {
		err := errors.New("Driver '" + driverId + "' not found in list of known drivers")
		logger.Error("mount-driver-lookup-error", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	env := driverhttp.NewHttpDriverEnv(logger, context.TODO())

	if response := driver.Unmount(env, voldriver.UnmountRequest{Name: volumeName}); response.Err != "" {
		err := errors.New(response.Err)
		logger.Error("unmount-failed", err)
		volmanUnmountErrorsCounter.Increment()
		return err
	}

	return nil
}
Example #29
func HandleCompletedTask(logger lager.Logger, httpClient *http.Client, taskDB db.TaskDB, task *models.Task) {
	logger = logger.Session("handle-completed-task", lager.Data{"task_guid": task.TaskGuid})

	if task.CompletionCallbackUrl != "" {
		modelErr := taskDB.ResolvingTask(logger, task.TaskGuid)
		if modelErr != nil {
			logger.Error("marking-task-as-resolving-failed", modelErr)
			return
		}

		logger = logger.WithData(lager.Data{"callback_url": task.CompletionCallbackUrl})

		payload, err := json.Marshal(&models.TaskCallbackResponse{
			TaskGuid:      task.TaskGuid,
			Failed:        task.Failed,
			FailureReason: task.FailureReason,
			Result:        task.Result,
			Annotation:    task.Annotation,
			CreatedAt:     task.CreatedAt,
		})
		if err != nil {
			logger.Error("marshalling-task-failed", err)
			return
		}

		var statusCode int

		for i := 0; i < MAX_CB_RETRIES; i++ {
			request, err := http.NewRequest("POST", task.CompletionCallbackUrl, bytes.NewReader(payload))
			if err != nil {
				logger.Error("building-request-failed", err)
				return
			}

			request.Header.Set("Content-Type", "application/json")
			response, err := httpClient.Do(request)
			if err != nil {
				matched, _ := regexp.MatchString("Client.Timeout|use of closed network connection", err.Error())
				if matched {
					continue
				}
				logger.Error("doing-request-failed", err)
				return
			}
			statusCode = response.StatusCode
			// Close eagerly: a deferred Close inside this retry loop would not
			// run until HandleCompletedTask returns.
			response.Body.Close()
			if shouldResolve(statusCode) {
				modelErr := taskDB.DeleteTask(logger, task.TaskGuid)
				if modelErr != nil {
					logger.Error("delete-task-failed", modelErr)
				}
				return
			}
		}

		logger.Info("callback-failed", lager.Data{"status_code": statusCode})
	}
}
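
The essentials of the callback loop are bounded retries, a fresh request per attempt, and closing the response body inside the loop. A self-contained sketch of that retry shape against an httptest server; shouldResolve is approximated here by a plain 2xx check:

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

const maxRetries = 3

// postWithRetries mirrors the completion-callback loop: build a fresh
// request each attempt, close the body before the next iteration, and stop
// as soon as the status code says the callback landed.
func postWithRetries(client *http.Client, url string, payload []byte) (int, error) {
	var statusCode int
	for i := 0; i < maxRetries; i++ {
		resp, err := client.Post(url, "application/json", bytes.NewReader(payload))
		if err != nil {
			return 0, err
		}
		statusCode = resp.StatusCode
		resp.Body.Close()
		if statusCode >= 200 && statusCode < 300 {
			return statusCode, nil
		}
	}
	return statusCode, fmt.Errorf("callback failed after %d attempts", maxRetries)
}

func main() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	code, err := postWithRetries(server.Client(), server.URL, []byte(`{"task_guid":"t1"}`))
	fmt.Println(code, err) // 200 <nil>
}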
Example #30
// RemoveDesiredLRP deletes the DesiredLRPSchedulingInfo and the DesiredLRPRunInfo
// from the database. We delete DesiredLRPSchedulingInfo first because the system
// uses it to determine whether the LRP is present. In the event that only the
// RunInfo fails to delete, the orphaned DesiredLRPRunInfo will be garbage
// collected later by convergence.
func (db *ETCDDB) RemoveDesiredLRP(logger lager.Logger, processGuid string) error {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	_, schedulingInfoErr := db.client.Delete(DesiredLRPSchedulingInfoSchemaPath(processGuid), true)
	schedulingInfoErr = ErrorFromEtcdError(logger, schedulingInfoErr)
	if schedulingInfoErr != nil && schedulingInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-scheduling-info", schedulingInfoErr)
		return schedulingInfoErr
	}

	_, runInfoErr := db.client.Delete(DesiredLRPRunInfoSchemaPath(processGuid), true)
	runInfoErr = ErrorFromEtcdError(logger, runInfoErr)
	if runInfoErr != nil && runInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-run-info", runInfoErr)
		return runInfoErr
	}

	if schedulingInfoErr == models.ErrResourceNotFound && runInfoErr == models.ErrResourceNotFound {
		// If neither component of the desired LRP exists, don't bother trying to delete running instances
		return models.ErrResourceNotFound
	}

	return nil
}