Example #1
func bind(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		bindingID := vars["binding_id"]

		logger := logger.Session(bindLogKey, lager.Data{
			instanceIDLogKey: instanceID,
			bindingIDLogKey:  bindingID,
		})

		var details BindDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			logger.Error(invalidBindDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		logger = logger.WithData(lager.Data{
			bindDetailsLogKey: details,
		})

		bindingResponse, err := serviceBroker.Bind(instanceID, bindingID, details)
		if err != nil {
			switch err {
			case ErrInstanceDoesNotExist:
				logger.Error(instanceMissingErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrBindingAlreadyExists:
				logger.Error(bindingAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, ErrorResponse{
					Description: err.Error(),
				})
			case ErrAppGUIDRequired:
				logger.Error(bindingAppGUIDRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "RequiresApp",
					Description: err.Error(),
				})
			case ErrInstanceNotBindable:
				logger.Error(instanceNotBindableErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		respond(w, http.StatusCreated, bindingResponse)
	}
}
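
All of the broker handlers in this collection follow the same lager pattern: create a request-scoped child logger with Session, then enrich it with WithData once the payload is parsed. A minimal, self-contained sketch of that pattern, assuming the code.cloudfoundry.org/lager import path; the component name, session name, and data keys are illustrative, not taken from the broker code:

package main

import (
	"os"

	"code.cloudfoundry.org/lager"
)

func main() {
	// Component logger: every message is prefixed with "example-broker.".
	logger := lager.NewLogger("example-broker")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	// Session appends a task segment to the message and attaches the
	// given data (plus a session counter) to every subsequent line.
	session := logger.Session("bind", lager.Data{"instance_id": "abc-123"})

	// WithData returns a new logger carrying extra fields, but unlike
	// Session it does not extend the message.
	session = session.WithData(lager.Data{"binding_id": "def-456"})

	session.Info("starting") // message: "example-broker.bind.starting"
}
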
Example #2
func HandleCompletedTask(logger lager.Logger, httpClient *http.Client, taskDB db.TaskDB, task *models.Task) {
	logger = logger.Session("handle-completed-task", lager.Data{"task_guid": task.TaskGuid})

	if task.CompletionCallbackUrl != "" {
		modelErr := taskDB.ResolvingTask(logger, task.TaskGuid)
		if modelErr != nil {
			logger.Error("marking-task-as-resolving-failed", modelErr)
			return
		}

		logger = logger.WithData(lager.Data{"callback_url": task.CompletionCallbackUrl})

		payload, err := json.Marshal(&models.TaskCallbackResponse{
			TaskGuid:      task.TaskGuid,
			Failed:        task.Failed,
			FailureReason: task.FailureReason,
			Result:        task.Result,
			Annotation:    task.Annotation,
			CreatedAt:     task.CreatedAt,
		})
		if err != nil {
			logger.Error("marshalling-task-failed", err)
			return
		}

		var statusCode int

		for i := 0; i < MAX_CB_RETRIES; i++ {
			request, err := http.NewRequest("POST", task.CompletionCallbackUrl, bytes.NewReader(payload))
			if err != nil {
				logger.Error("building-request-failed", err)
				return
			}

			request.Header.Set("Content-Type", "application/json")
			response, err := httpClient.Do(request)
			if err != nil {
				matched, _ := regexp.MatchString("Client.Timeout|use of closed network connection", err.Error())
				if matched {
					continue
				}
				logger.Error("doing-request-failed", err)
				return
			}
			statusCode = response.StatusCode
			// Close the body immediately: a defer here would accumulate one
			// pending Close per retry until the function returns.
			response.Body.Close()

			if shouldResolve(statusCode) {
				modelErr := taskDB.DeleteTask(logger, task.TaskGuid)
				if modelErr != nil {
					logger.Error("delete-task-failed", modelErr)
				}
				return
			}
		}

		logger.Info("callback-failed", lager.Data{"status_code": statusCode})
	}
}
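
HandleCompletedTask decides whether a failed callback POST is retryable by regexp-matching the error text, which is brittle across Go and net/http versions. A hedged alternative sketch, assuming the transient failures are timeouts and closed-connection errors; isRetryable is an illustrative helper, not part of the original, and net.ErrClosed needs Go 1.16+ (imports: errors, net):

// isRetryable reports whether a callback POST failure looks transient.
// Assumption: only timeouts and closed-connection errors are retried,
// mirroring the regexp in HandleCompletedTask above.
func isRetryable(err error) bool {
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		return true // covers "Client.Timeout exceeded" style failures
	}
	return errors.Is(err, net.ErrClosed) // "use of closed network connection"
}
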
Example #3
func provision(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		acceptsIncomplete := req.URL.Query().Get("accepts_incomplete") == "true"

		logger := logger.Session(provisionLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		var details ProvisionDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			logger.Error(invalidProvisionDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		logger = logger.WithData(lager.Data{
			provisionDetailsLogKey: details,
		})

		provisioningResponse, asynch, err := serviceBroker.Provision(instanceID, details, acceptsIncomplete)
		if err != nil {
			switch err {
			case ErrInstanceAlreadyExists:
				logger.Error(instanceAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, EmptyResponse{})
			case ErrInstanceLimitMet:
				logger.Error(instanceLimitReachedErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrAsyncRequired:
				logger.Error(instanceAsyncRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "AsyncRequired",
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		if asynch {
			respond(w, http.StatusAccepted, provisioningResponse)
			return
		}

		respond(w, http.StatusCreated, provisioningResponse)
	}
}
Example #4
func (s *Scheduler) TriggerImmediately(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) (db.Build, Waiter, error) {
	logger = logger.Session("trigger-immediately", lager.Data{
		"job": job.Name,
	})

	build, err := s.PipelineDB.CreateJobBuild(job.Name)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return db.Build{}, nil, err
	}

	logger = logger.WithData(lager.Data{"build-id": build.ID, "build-name": build.Name})

	jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
	if err != nil {
		logger.Error("failed-to-get-job-service", err)
		return db.Build{}, nil, err
	}

	wg := new(sync.WaitGroup)
	wg.Add(1)

	// do not block request on scanning input versions
	go func() {
		defer wg.Done()
		s.ScheduleAndResumePendingBuild(logger, nil, build, job, resources, resourceTypes, jobService)
	}()

	return build, wg, nil
}
Example #5
func (db *ETCDDB) FailActualLRP(logger lager.Logger, key *models.ActualLRPKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "error_message": errorMessage})
	logger.Info("starting")
	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, err
	}
	beforeActualLRP := *lrp

	if lrp.State != models.ActualLRPStateUnclaimed {
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	lrp.ModificationTag.Increment()
	lrp.PlacementError = errorMessage
	lrp.Since = db.clock.Now().UnixNano()

	lrpData, serialErr := db.serializeModel(logger, lrp)
	if serialErr != nil {
		return nil, nil, serialErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, models.ErrActualLRPCannotBeFailed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
}
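
FailActualLRP, like StartActualLRP, CrashActualLRP, and ClaimActualLRP further down, follows one shape: read the record together with its etcd index, mutate a copy, and CompareAndSwap against the previous index, so a concurrent writer makes the swap fail instead of being silently overwritten. A minimal sketch of that optimistic-concurrency step against a hypothetical key-value client; kvClient and casUpdate are illustrative stand-ins, not the real etcd client API (uses fmt from the standard library):

// kvClient is a hypothetical store with indexed reads and CAS writes.
type kvClient interface {
	Get(key string) (value []byte, index uint64, err error)
	CompareAndSwap(key string, value []byte, prevIndex uint64) error
}

// casUpdate re-writes key only if nobody modified it since the read;
// on a conflict the caller can re-read and retry, or surface the error.
func casUpdate(kv kvClient, key string, mutate func([]byte) []byte) error {
	value, prevIndex, err := kv.Get(key)
	if err != nil {
		return err
	}
	if err := kv.CompareAndSwap(key, mutate(value), prevIndex); err != nil {
		return fmt.Errorf("concurrent update of %s: %w", key, err)
	}
	return nil
}
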
Example #6
// RemoveDesiredLRP deletes the DesiredLRPSchedulingInfo and the DesiredLRPRunInfo
// from the database. We delete DesiredLRPSchedulingInfo first because the system
// uses it to determine whether the LRP is present. In the event that only the
// RunInfo fails to delete, the orphaned DesiredLRPRunInfo will be garbage
// collected later by convergence.
func (db *ETCDDB) RemoveDesiredLRP(logger lager.Logger, processGuid string) error {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	_, schedulingInfoErr := db.client.Delete(DesiredLRPSchedulingInfoSchemaPath(processGuid), true)
	schedulingInfoErr = ErrorFromEtcdError(logger, schedulingInfoErr)
	if schedulingInfoErr != nil && schedulingInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-scheduling-info", schedulingInfoErr)
		return schedulingInfoErr
	}

	_, runInfoErr := db.client.Delete(DesiredLRPRunInfoSchemaPath(processGuid), true)
	runInfoErr = ErrorFromEtcdError(logger, runInfoErr)
	if runInfoErr != nil && runInfoErr != models.ErrResourceNotFound {
		logger.Error("failed-deleting-run-info", runInfoErr)
		return runInfoErr
	}

	if schedulingInfoErr == models.ErrResourceNotFound && runInfoErr == models.ErrResourceNotFound {
		// If neither component of the desired LRP exists, don't bother trying to delete running instances
		return models.ErrResourceNotFound
	}

	return nil
}
Example #7
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, versions *algorithm.VersionsDB, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) Waiter {
	logger = logger.Session("try-next-pending")

	wg := new(sync.WaitGroup)

	wg.Add(1)
	go func() {
		defer wg.Done()

		build, found, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		if err != nil {
			logger.Error("failed-to-get-next-pending-build", err)
			return
		}

		if !found {
			return
		}

		logger = logger.WithData(lager.Data{"build-id": build.ID, "build-name": build.Name})

		jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
		if err != nil {
			logger.Error("failed-to-get-job-service", err)
			return
		}

		s.ScheduleAndResumePendingBuild(logger, versions, build, job, resources, resourceTypes, jobService)
	}()

	return wg
}
Example #8
func (db *SQLDB) DesireLRP(logger lager.Logger, desiredLRP *models.DesiredLRP) error {
	logger = logger.WithData(lager.Data{"process_guid": desiredLRP.ProcessGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	return db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		routesData, err := json.Marshal(desiredLRP.Routes)
		if err != nil {
			logger.Error("failed-to-marshal-routes", err)
			return err
		}

		runInfo := desiredLRP.DesiredLRPRunInfo(db.clock.Now())

		runInfoData, err := db.serializeModel(logger, &runInfo)
		if err != nil {
			logger.Error("failed-to-serialize-model", err)
			return err
		}

		volumePlacement := &models.VolumePlacement{}
		volumePlacement.DriverNames = []string{}
		for _, mount := range desiredLRP.VolumeMounts {
			volumePlacement.DriverNames = append(volumePlacement.DriverNames, mount.Driver)
		}

		volumePlacementData, err := db.serializeModel(logger, volumePlacement)
		if err != nil {
			logger.Error("failed-to-serialize-model", err)
			return err
		}

		guid, err := db.guidProvider.NextGUID()
		if err != nil {
			logger.Error("failed-to-generate-guid", err)
			return models.ErrGUIDGeneration
		}

		desiredLRP.ModificationTag = &models.ModificationTag{Epoch: guid, Index: 0}

		_, err = db.insert(logger, tx, desiredLRPsTable,
			SQLAttributes{
				"process_guid":           desiredLRP.ProcessGuid,
				"domain":                 desiredLRP.Domain,
				"log_guid":               desiredLRP.LogGuid,
				"annotation":             desiredLRP.Annotation,
				"instances":              desiredLRP.Instances,
				"memory_mb":              desiredLRP.MemoryMb,
				"disk_mb":                desiredLRP.DiskMb,
				"rootfs":                 desiredLRP.RootFs,
				"volume_placement":       volumePlacementData,
				"modification_tag_epoch": desiredLRP.ModificationTag.Epoch,
				"modification_tag_index": desiredLRP.ModificationTag.Index,
				"routes":                 routesData,
				"run_info":               runInfoData,
			},
		)
		if err != nil {
			logger.Error("failed-inserting-desired", err)
			return db.convertSQLError(err)
		}
		return nil
	})
}
Example #9
func Explain(logger lager.Logger, conn Conn, timeout time.Duration) Conn {
	return &explainConn{
		Conn: conn,

		timeout: timeout,
		logger: logger.WithData(lager.Data{
			"timeout": timeout,
		}),
	}
}
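
Example #9 shows a third placement for WithData besides per-request sessions: seed a logger with static fields at construction time and store it in the wrapper, so every later method call inherits them. A small sketch of the same idea; timedQueries and its methods are hypothetical, not Concourse's explainConn (imports: time, lager):

// timedQueries stores a logger pre-seeded with its configuration so
// each log line carries the timeout without repeating it at call sites.
type timedQueries struct {
	timeout time.Duration
	logger  lager.Logger
}

func newTimedQueries(logger lager.Logger, timeout time.Duration) *timedQueries {
	return &timedQueries{
		timeout: timeout,
		logger:  logger.WithData(lager.Data{"timeout": timeout.String()}),
	}
}

func (t *timedQueries) observe(query string, took time.Duration) {
	if took > t.timeout {
		// "timeout" is already on every line; only the specifics vary.
		t.logger.Info("slow-query", lager.Data{"query": query, "took": took.String()})
	}
}
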
Example #10
func (db *ETCDDB) StartActualLRP(logger lager.Logger, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, netInfo *models.ActualLRPNetInfo) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{
		"actual_lrp_key":          key,
		"actual_lrp_instance_key": instanceKey,
		"net_info":                netInfo,
	})

	logger.Info("starting")
	defer logger.Info("completed")

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		if bbsErr.Type == models.Error_ResourceNotFound {
			lrp, err := db.createRunningActualLRP(logger, key, instanceKey, netInfo)
			return nil, &models.ActualLRPGroup{Instance: lrp}, err
		}
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, err
	}
	beforeActualLRP := *lrp

	if lrp.ActualLRPKey.Equal(key) &&
		lrp.ActualLRPInstanceKey.Equal(instanceKey) &&
		lrp.ActualLRPNetInfo.Equal(netInfo) &&
		lrp.State == models.ActualLRPStateRunning {
		lrpGroup := &models.ActualLRPGroup{Instance: lrp}
		return lrpGroup, lrpGroup, nil
	}

	if !lrp.AllowsTransitionTo(key, instanceKey, models.ActualLRPStateRunning) {
		logger.Error("failed-to-transition-actual-lrp-to-started", nil)
		return nil, nil, models.ErrActualLRPCannotBeStarted
	}

	lrp.ModificationTag.Increment()
	lrp.State = models.ActualLRPStateRunning
	lrp.Since = db.clock.Now().UnixNano()
	lrp.ActualLRPInstanceKey = *instanceKey
	lrp.ActualLRPNetInfo = *netInfo
	lrp.PlacementError = ""

	lrpData, serializeErr := db.serializeModel(logger, lrp)
	if serializeErr != nil {
		return nil, nil, serializeErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, models.ErrActualLRPCannotBeStarted
	}

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
}
Example #11
func (db *ETCDDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"filter": filter})
	logger.Info("start")
	defer logger.Info("complete")

	desireds, _, err := db.desiredLRPs(logger, filter)
	if err != nil {
		logger.Error("failed", err)
	}
	return desireds, err
}
Example #12
func (db *ETCDDB) CrashActualLRP(logger lager.Logger, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (*models.ActualLRPGroup, *models.ActualLRPGroup, bool, error) {
	logger = logger.WithData(lager.Data{"actual_lrp_key": key, "actual_lrp_instance_key": instanceKey})
	logger.Info("starting")

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, key.ProcessGuid, key.Index)
	if err != nil {
		logger.Error("failed-to-get-actual-lrp", err)
		return nil, nil, false, err
	}
	beforeActualLRP := *lrp

	latestChangeTime := time.Duration(db.clock.Now().UnixNano() - lrp.Since)

	var newCrashCount int32
	if latestChangeTime > models.CrashResetTimeout && lrp.State == models.ActualLRPStateRunning {
		newCrashCount = 1
	} else {
		newCrashCount = lrp.CrashCount + 1
	}

	logger.Debug("retrieved-lrp")
	if !lrp.AllowsTransitionTo(key, instanceKey, models.ActualLRPStateCrashed) {
		logger.Error("failed-to-transition-to-crashed", nil, lager.Data{"from_state": lrp.State, "same_instance_key": lrp.ActualLRPInstanceKey.Equal(instanceKey)})
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	lrp.State = models.ActualLRPStateCrashed
	lrp.Since = db.clock.Now().UnixNano()
	lrp.CrashCount = newCrashCount
	lrp.ActualLRPInstanceKey = models.ActualLRPInstanceKey{}
	lrp.ActualLRPNetInfo = models.EmptyActualLRPNetInfo()
	lrp.ModificationTag.Increment()
	lrp.CrashReason = errorMessage

	var immediateRestart bool
	if lrp.ShouldRestartImmediately(models.NewDefaultRestartCalculator()) {
		lrp.State = models.ActualLRPStateUnclaimed
		immediateRestart = true
	}

	lrpData, serializeErr := db.serializeModel(logger, lrp)
	if serializeErr != nil {
		return nil, nil, false, serializeErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(key.ProcessGuid, key.Index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, false, models.ErrActualLRPCannotBeCrashed
	}

	logger.Info("succeeded")
	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, immediateRestart, nil
}
Example #13
func (db *SQLDB) DesiredLRPByProcessGuid(logger lager.Logger, processGuid string) (*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Debug("starting")
	defer logger.Debug("complete")

	row := db.one(logger, db.db, desiredLRPsTable,
		desiredLRPColumns, NoLockRow,
		"process_guid = ?", processGuid,
	)
	return db.fetchDesiredLRP(logger, row)
}
Example #14
func deprovision(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		acceptsIncomplete := req.URL.Query().Get("accepts_incomplete") == "true"

		logger := logger.Session(deprovisionLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		details := DeprovisionDetails{
			ServiceID: req.FormValue("service_id"),
			PlanID:    req.FormValue("plan_id"),
		}

		logger = logger.WithData(lager.Data{
			deprovisionDetailsLogKey: details,
		})

		asynch, err := serviceBroker.Deprovision(instanceID, details, acceptsIncomplete)
		if err != nil {
			switch err {
			case ErrInstanceDoesNotExist:
				logger.Error(instanceMissingErrorKey, err)
				respond(w, http.StatusGone, EmptyResponse{})
			case ErrAsyncRequired:
				logger.Error(instanceAsyncRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "AsyncRequired",
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		if asynch {
			respond(w, http.StatusAccepted, EmptyResponse{})
			return
		}

		respond(w, http.StatusOK, EmptyResponse{})
	}
}
Example #15
func (db *ETCDDB) RemoveActualLRP(logger lager.Logger, processGuid string, index int32, instanceKey *models.ActualLRPInstanceKey) error {
	logger = logger.WithData(lager.Data{"process_guid": processGuid, "index": index, "instance_key": instanceKey})

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, processGuid, index)
	if err != nil {
		return err
	}

	if instanceKey != nil && !lrp.ActualLRPInstanceKey.Equal(instanceKey) {
		logger.Debug("not-found", lager.Data{"actual_lrp_instance_key": lrp.ActualLRPInstanceKey, "instance_key": instanceKey})
		return models.ErrResourceNotFound
	}

	return db.removeActualLRP(logger, lrp, prevIndex)
}
Example #16
func (db *SQLDB) UpdateDesiredLRP(logger lager.Logger, processGuid string, update *models.DesiredLRPUpdate) (*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	var beforeDesiredLRP *models.DesiredLRP
	err := db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		var err error
		row := db.one(logger, tx, desiredLRPsTable,
			desiredLRPColumns, LockRow,
			"process_guid = ?", processGuid,
		)
		beforeDesiredLRP, err = db.fetchDesiredLRP(logger, row)
		if err != nil {
			logger.Error("failed-lock-desired", err)
			return err
		}

		updateAttributes := SQLAttributes{"modification_tag_index": beforeDesiredLRP.ModificationTag.Index + 1}

		if update.Annotation != nil {
			updateAttributes["annotation"] = *update.Annotation
		}

		if update.Instances != nil {
			updateAttributes["instances"] = *update.Instances
		}

		if update.Routes != nil {
			routeData, err := json.Marshal(update.Routes)
			if err != nil {
				logger.Error("failed-marshalling-routes", err)
				return models.ErrBadRequest
			}
			updateAttributes["routes"] = routeData
		}

		_, err = db.update(logger, tx, desiredLRPsTable, updateAttributes, `process_guid = ?`, processGuid)
		if err != nil {
			logger.Error("failed-executing-query", err)
			return db.convertSQLError(err)
		}

		return nil
	})

	return beforeDesiredLRP, err
}
Example #17
func (db *ETCDDB) ClaimActualLRP(logger lager.Logger, processGuid string, index int32, instanceKey *models.ActualLRPInstanceKey) (*models.ActualLRPGroup, *models.ActualLRPGroup, error) {
	logger = logger.WithData(lager.Data{"process_guid": processGuid, "index": index, "actual_lrp_instance_key": instanceKey})
	logger.Info("starting")

	lrp, prevIndex, err := db.rawActualLRPByProcessGuidAndIndex(logger, processGuid, index)
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, err
	}
	beforeActualLRP := *lrp

	if !lrp.AllowsTransitionTo(&lrp.ActualLRPKey, instanceKey, models.ActualLRPStateClaimed) {
		return nil, nil, models.ErrActualLRPCannotBeClaimed
	}

	if lrp.State == models.ActualLRPStateClaimed && lrp.ActualLRPInstanceKey.Equal(instanceKey) {
		return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
	}

	lrp.PlacementError = ""
	lrp.State = models.ActualLRPStateClaimed
	lrp.ActualLRPInstanceKey = *instanceKey
	lrp.ActualLRPNetInfo = models.ActualLRPNetInfo{}
	lrp.ModificationTag.Increment()
	lrp.Since = db.clock.Now().UnixNano()

	err = lrp.Validate()
	if err != nil {
		logger.Error("failed", err)
		return nil, nil, models.NewError(models.Error_InvalidRecord, err.Error())
	}

	lrpData, serializeErr := db.serializeModel(logger, lrp)
	if serializeErr != nil {
		return nil, nil, serializeErr
	}

	_, err = db.client.CompareAndSwap(ActualLRPSchemaPath(processGuid, index), lrpData, 0, prevIndex)
	if err != nil {
		logger.Error("compare-and-swap-failed", err)
		return nil, nil, models.ErrActualLRPCannotBeClaimed
	}
	logger.Info("succeeded")

	return &models.ActualLRPGroup{Instance: &beforeActualLRP}, &models.ActualLRPGroup{Instance: lrp}, nil
}
Example #18
func provision(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]

		logger := logger.Session(provisionLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		var serviceDetails ServiceDetails
		if err := json.NewDecoder(req.Body).Decode(&serviceDetails); err != nil {
			logger.Error(invalidServiceDetailsErrorKey, err)
			respond(w, statusUnprocessableEntity, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		logger = logger.WithData(lager.Data{
			instanceDetailsLogKey: serviceDetails,
		})

		if err := serviceBroker.Provision(instanceID, serviceDetails); err != nil {
			switch err {
			case ErrInstanceAlreadyExists:
				logger.Error(instanceAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, EmptyResponse{})
			case ErrInstanceLimitMet:
				logger.Error(instanceLimitReachedErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		respond(w, http.StatusCreated, ProvisioningResponse{})
	}
}
Example #19
func unbind(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		bindingID := vars["binding_id"]

		logger := logger.Session(unbindLogKey, lager.Data{
			instanceIDLogKey: instanceID,
			bindingIDLogKey:  bindingID,
		})

		details := UnbindDetails{
			ServiceID: req.FormValue("service_id"),
			PlanID:    req.FormValue("plan_id"),
		}

		logger = logger.WithData(lager.Data{
			unbindDetailsLogKey: details,
		})

		if err := serviceBroker.Unbind(instanceID, bindingID, details); err != nil {
			switch err {
			case ErrInstanceDoesNotExist:
				logger.Error(instanceMissingErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrBindingDoesNotExist:
				logger.Error(bindingMissingErrorKey, err)
				respond(w, http.StatusGone, EmptyResponse{})
			default:
				logger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		respond(w, http.StatusOK, EmptyResponse{})
	}
}
Example #20
func (db *SQLDB) RemoveDesiredLRP(logger lager.Logger, processGuid string) error {
	logger = logger.WithData(lager.Data{"process_guid": processGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	return db.transact(logger, func(logger lager.Logger, tx *sql.Tx) error {
		err := db.lockDesiredLRPByGuidForUpdate(logger, processGuid, tx)
		if err != nil {
			logger.Error("failed-lock-desired", err)
			return err
		}

		_, err = db.delete(logger, tx, desiredLRPsTable, "process_guid = ?", processGuid)
		if err != nil {
			logger.Error("failed-deleting-from-db", err)
			return db.convertSQLError(err)
		}

		return nil
	})
}
Example #21
func (db *SQLDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) {
	logger = logger.WithData(lager.Data{"filter": filter})
	logger.Debug("start")
	defer logger.Debug("complete")

	var wheres []string
	var values []interface{}

	if filter.Domain != "" {
		wheres = append(wheres, "domain = ?")
		values = append(values, filter.Domain)
	}

	rows, err := db.all(logger, db.db, desiredLRPsTable,
		desiredLRPColumns, NoLockRow,
		strings.Join(wheres, " AND "), values...,
	)
	if err != nil {
		logger.Error("failed-query", err)
		return nil, db.convertSQLError(err)
	}
	defer rows.Close()

	results := []*models.DesiredLRP{}
	for rows.Next() {
		desiredLRP, err := db.fetchDesiredLRP(logger, rows)
		if err != nil {
			logger.Error("failed-reading-row", err)
			continue
		}
		results = append(results, desiredLRP)
	}

	if rows.Err() != nil {
		logger.Error("failed-fetching-row", rows.Err())
		return nil, db.convertSQLError(rows.Err())
	}

	return results, nil
}
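
Example #21 assembles its WHERE clause from optional filter fields before handing it to the internal db.all helper. The same technique with nothing but database/sql and strings looks roughly like this; the table and column names are illustrative:

// queryDesired filters a table by optional fields, adding one
// "column = ?" clause per populated filter (imports: database/sql, strings).
func queryDesired(db *sql.DB, domain string) (*sql.Rows, error) {
	var wheres []string
	var values []interface{}

	if domain != "" {
		wheres = append(wheres, "domain = ?")
		values = append(values, domain)
	}

	query := "SELECT process_guid, domain FROM desired_lrps"
	if len(wheres) > 0 {
		query += " WHERE " + strings.Join(wheres, " AND ")
	}
	return db.Query(query, values...)
}

With an empty domain the query has no WHERE clause at all; each populated filter adds exactly one clause and one bound value.
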
Example #22
func (db *ETCDDB) DesiredLRPSchedulingInfos(logger lager.Logger, filter models.DesiredLRPFilter) ([]*models.DesiredLRPSchedulingInfo, error) {
	logger = logger.WithData(lager.Data{"filter": filter})
	logger.Info("start")
	defer logger.Info("complete")

	root, err := db.fetchRecursiveRaw(logger, DesiredLRPSchedulingInfoSchemaRoot)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		if bbsErr.Type == models.Error_ResourceNotFound {
			return []*models.DesiredLRPSchedulingInfo{}, nil
		}
		return nil, err
	}

	schedulingInfoMap, _ := db.deserializeScheduleInfos(logger, root.Nodes, filter)

	schedulingInfos := make([]*models.DesiredLRPSchedulingInfo, 0, len(schedulingInfoMap))
	for _, schedulingInfo := range schedulingInfoMap {
		schedulingInfos = append(schedulingInfos, schedulingInfo)
	}
	return schedulingInfos, nil
}
Example #23
func (watcher *Watcher) handleEvent(logger lager.Logger, event models.Event) {
	if changed, ok := event.(*models.ActualLRPChangedEvent); ok {
		after, _ := changed.After.Resolve()

		if after.Domain == cc_messages.AppLRPDomain {
			before, _ := changed.Before.Resolve()

			if after.CrashCount > before.CrashCount {
				logger.Info("app-crashed", lager.Data{
					"process-guid": after.ProcessGuid,
					"index":        after.Index,
				})

				guid := after.ProcessGuid
				appCrashed := cc_messages.AppCrashedRequest{
					Instance:        before.InstanceGuid,
					Index:           int(after.Index),
					Reason:          "CRASHED",
					ExitDescription: after.CrashReason,
					CrashCount:      int(after.CrashCount),
					CrashTimestamp:  after.Since,
				}

				watcher.pool.Submit(func() {
					logger := logger.WithData(lager.Data{
						"process-guid": guid,
						"index":        appCrashed.Index,
					})
					logger.Info("recording-app-crashed")
					err := watcher.ccClient.AppCrashed(guid, appCrashed, logger)
					if err != nil {
						logger.Error("failed-recording-app-crashed", err)
					}
				})
			}
		}
	}
}
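
Note how Example #23 copies after.ProcessGuid into a local guid before handing the closure to the worker pool: the closure runs later, on another goroutine, so it must capture stable values rather than shared loop state, and it derives its own child logger so each task's log lines stay attributable. A stripped-down sketch of that capture-then-submit pattern; the pool is reduced to a bare goroutine and submitCrashReports is purely illustrative (assumes the lager import):

func submitCrashReports(logger lager.Logger, guids []string, report func(string, lager.Logger) error) {
	for _, g := range guids {
		g := g // pre-Go 1.22: copy the loop variable so each goroutine sees its own value
		go func() {
			// Each task gets its own logger with its own data.
			taskLogger := logger.WithData(lager.Data{"process-guid": g})
			taskLogger.Info("recording-app-crashed")
			if err := report(g, taskLogger); err != nil {
				taskLogger.Error("failed-recording-app-crashed", err)
			}
		}()
	}
}
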
Example #24
func (p *ordinaryLRPProcessor) Process(logger lager.Logger, container executor.Container) {
	logger = logger.Session("ordinary-lrp-processor", lager.Data{
		"container-guid":  container.Guid,
		"container-state": container.State,
	})
	logger.Debug("starting")
	defer logger.Debug("finished")

	lrpKey, err := rep.ActualLRPKeyFromTags(container.Tags)
	if err != nil {
		logger.Error("failed-to-generate-lrp-key", err)
		return
	}
	logger = logger.WithData(lager.Data{"lrp-key": lrpKey})

	instanceKey, err := rep.ActualLRPInstanceKeyFromContainer(container, p.cellID)
	if err != nil {
		logger.Error("failed-to-generate-instance-key", err)
		return
	}
	logger = logger.WithData(lager.Data{"lrp-instance-key": instanceKey})

	lrpContainer := newLRPContainer(lrpKey, instanceKey, container)
	switch lrpContainer.Container.State {
	case executor.StateReserved:
		p.processReservedContainer(logger, lrpContainer)
	case executor.StateInitializing:
		p.processInitializingContainer(logger, lrpContainer)
	case executor.StateCreated:
		p.processCreatedContainer(logger, lrpContainer)
	case executor.StateRunning:
		p.processRunningContainer(logger, lrpContainer)
	case executor.StateCompleted:
		p.processCompletedContainer(logger, lrpContainer)
	default:
		p.processInvalidContainer(logger, lrpContainer)
	}
}
Example #25
// DesireLRP creates a DesiredLRPSchedulingInfo and a DesiredLRPRunInfo. In order
// to ensure that the complete model is available and there are no races in
// Desired Watches, DesiredLRPRunInfo is created before DesiredLRPSchedulingInfo.
func (db *ETCDDB) DesireLRP(logger lager.Logger, desiredLRP *models.DesiredLRP) error {
	logger = logger.WithData(lager.Data{"process_guid": desiredLRP.ProcessGuid})
	logger.Info("starting")
	defer logger.Info("complete")

	schedulingInfo, runInfo := desiredLRP.CreateComponents(db.clock.Now())

	err := db.createDesiredLRPRunInfo(logger, &runInfo)
	if err != nil {
		return err
	}

	schedulingErr := db.createDesiredLRPSchedulingInfo(logger, &schedulingInfo)
	if schedulingErr != nil {
		logger.Info("deleting-orphaned-run-info")
		_, err = db.client.Delete(DesiredLRPRunInfoSchemaPath(desiredLRP.ProcessGuid), true)
		if err != nil {
			logger.Error("failed-deleting-orphaned-run-info", err)
		}
		return schedulingErr
	}

	return nil
}
Example #26
func (s *Scheduler) ScheduleAndResumePendingBuild(
	logger lager.Logger,
	versions *algorithm.VersionsDB,
	build db.Build,
	job atc.JobConfig,
	resources atc.ResourceConfigs,
	resourceTypes atc.ResourceTypes,
	jobService JobService,
) engine.Build {
	lease, acquired, err := s.BuildsDB.LeaseBuildScheduling(build.ID, 10*time.Second)
	if err != nil {
		logger.Error("failed-to-get-lease", err)
		return nil
	}

	if !acquired {
		return nil
	}

	defer lease.Break()

	logger = logger.WithData(lager.Data{"build": build.ID})

	buildPrep, found, err := s.BuildsDB.GetBuildPreparation(build.ID)
	if err != nil {
		logger.Error("failed-to-get-build-prep", err, lager.Data{"build-id": build.ID})
		return nil
	}

	if !found {
		logger.Debug("failed-to-find-build-prep", lager.Data{"build-id": build.ID})
		return nil
	}

	inputs, canBuildBeScheduled, reason, err := jobService.CanBuildBeScheduled(logger, build, buildPrep, versions)
	if err != nil {
		logger.Error("failed-to-schedule-build", err, lager.Data{
			"build-id": build.ID,
			"reason":   reason,
		})

		if reason == "failed-to-scan" {
			err = s.BuildsDB.ErrorBuild(build.ID, err)
			if err != nil {
				logger.Error("failed-to-mark-build-as-errored", err)
			}
		}
		return nil
	}

	if !s.updateBuildToScheduled(logger, canBuildBeScheduled, build.ID, reason) {
		return nil
	}

	plan, err := s.Factory.Create(job, resources, resourceTypes, inputs)
	if err != nil {
		// Don't use ErrorBuild because it logs a build event, and this build hasn't started
		err := s.BuildsDB.FinishBuild(build.ID, db.StatusErrored)
		if err != nil {
			logger.Error("failed-to-mark-build-as-errored", err)
		}
		return nil
	}

	createdBuild, err := s.Engine.CreateBuild(logger, build, plan)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return nil
	}

	if createdBuild != nil {
		logger.Info("building")
		go createdBuild.Resume(logger)
	}

	return createdBuild
}
Example #27
					Ω(testSink.Logs()[1].Data["session"]).Should(Equal("2"))
					Ω(testSink.Logs()[2].Data["session"]).Should(Equal("2"))
					Ω(testSink.Logs()[3].Data["session"]).Should(Equal("2"))
				})

				It("logs with the task added to the message", func() {
					Ω(testSink.Logs()[0].Message).Should(Equal("my-component.next-sub-action.some-debug-action"))
					Ω(testSink.Logs()[1].Message).Should(Equal("my-component.next-sub-action.some-info-action"))
					Ω(testSink.Logs()[2].Message).Should(Equal("my-component.next-sub-action.some-error-action"))
					Ω(testSink.Logs()[3].Message).Should(Equal("my-component.next-sub-action.some-fatal-action"))
				})
			})

			Describe("WithData", func() {
				BeforeEach(func() {
					session = logger.WithData(lager.Data{"foo": "bar"})
				})

				It("returns a new logger with the given data", func() {
					Ω(testSink.Logs()[0].Data["foo"]).Should(Equal("bar"))
					Ω(testSink.Logs()[1].Data["foo"]).Should(Equal("bar"))
					Ω(testSink.Logs()[2].Data["foo"]).Should(Equal("bar"))
					Ω(testSink.Logs()[3].Data["foo"]).Should(Equal("bar"))
				})

				It("does not append to the logger's task", func() {
					Ω(testSink.Logs()[0].Message).Should(Equal("my-component.some-debug-action"))
				})
			})

			Context("with a nested session", func() {
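
The test excerpt above pins down the WithData contract: the returned logger carries the extra fields on every line but leaves the message (the "task") untouched, unlike Session. A quick sketch of checking this with the lagertest sink that ships alongside lager (names per the lagertest package; verifying output is left to fmt here rather than Gomega):

logger := lagertest.NewTestLogger("my-component")
child := logger.WithData(lager.Data{"foo": "bar"})
child.Info("some-info-action")

log := logger.TestSink.Logs()[0]
// The data is attached...
fmt.Println(log.Data["foo"]) // "bar"
// ...but the message gains no extra session segment.
fmt.Println(log.Message) // "my-component.some-info-action"
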
Example #28
func (s *Scheduler) BuildLatestInputs(logger lager.Logger, versions *algorithm.VersionsDB, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) error {
	logger = logger.Session("build-latest")

	inputs := config.JobInputs(job)

	if len(inputs) == 0 {
		// no inputs; no-op
		return nil
	}

	latestInputs, found, err := s.PipelineDB.GetNextInputVersions(versions, job.Name, inputs)
	if err != nil {
		logger.Error("failed-to-get-latest-input-versions", err)
		return err
	}

	if !found {
		logger.Debug("no-input-versions-available")
		return nil
	}

	checkInputs := []db.BuildInput{}
	for _, input := range latestInputs {
		for _, ji := range inputs {
			if ji.Name == input.Name {
				if ji.Trigger {
					checkInputs = append(checkInputs, input)
				}

				break
			}
		}
	}

	if len(checkInputs) == 0 {
		logger.Debug("no-triggered-input-versions")
		return nil
	}

	existingBuild, found, err := s.PipelineDB.GetJobBuildForInputs(job.Name, checkInputs)
	if err != nil {
		logger.Error("could-not-determine-if-inputs-are-already-used", err)
		return err
	}

	if found {
		logger.Debug("build-already-exists-for-inputs", lager.Data{
			"existing-build": existingBuild.ID,
		})

		return nil
	}

	build, created, err := s.PipelineDB.CreateJobBuildForCandidateInputs(job.Name)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return err
	}

	if !created {
		logger.Debug("waiting-for-existing-build-to-determine-inputs", lager.Data{
			"existing-build": build.ID,
		})
		return nil
	}

	logger = logger.WithData(lager.Data{"build-id": build.ID, "build-name": build.Name})

	logger.Info("created-build")

	jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
	if err != nil {
		logger.Error("failed-to-get-job-service", err)
		return nil
	}

	// NOTE: this is intentionally serial within a scheduler tick, so that
	// multiple ATCs don't do redundant work to determine a build's inputs.

	s.ScheduleAndResumePendingBuild(logger, versions, build, job, resources, resourceTypes, jobService)

	return nil
}
Example #29
func (s *Scheduler) scheduleAndResumePendingBuild(logger lager.Logger, build db.Build, job atc.JobConfig, resources atc.ResourceConfigs) engine.Build {
	logger = logger.WithData(lager.Data{"build": build.ID})

	scheduled, err := s.PipelineDB.ScheduleBuild(build.ID, job)
	if err != nil {
		logger.Error("failed-to-schedule-build", err)
		return nil
	}

	if !scheduled {
		logger.Debug("build-could-not-be-scheduled")
		return nil
	}

	buildInputs := job.Inputs()

	for _, input := range buildInputs {
		scanLog := logger.Session("scan", lager.Data{
			"input":    input.Name,
			"resource": input.Resource,
		})

		err := s.Scanner.Scan(scanLog, input.Resource)
		if err != nil {
			scanLog.Error("failed-to-scan", err)

			err := s.BuildsDB.ErrorBuild(build.ID, err)
			if err != nil {
				logger.Error("failed-to-mark-build-as-errored", err)
			}

			return nil
		}

		scanLog.Info("done")
	}

	inputs, err := s.PipelineDB.GetLatestInputVersions(job.Name, buildInputs)
	if err != nil {
		logger.Error("failed-to-get-latest-input-versions", err)
		return nil
	}

	err = s.PipelineDB.UseInputsForBuild(build.ID, inputs)
	if err != nil {
		logger.Error("failed-to-use-inputs-for-build", err)
		return nil
	}

	plan, err := s.Factory.Create(job, resources, inputs)
	if err != nil {
		logger.Error("failed-to-create-build-plan", err)
		return nil
	}

	createdBuild, err := s.Engine.CreateBuild(build, plan)
	if err != nil {
		logger.Error("failed-to-create-build", err)
		return nil
	}

	if createdBuild != nil {
		logger.Info("building")
		go createdBuild.Resume(logger)
	}

	return createdBuild
}
Example #30
func (p V1Process) Deliver(job *gobble.Job, logger lager.Logger) error {
	var delivery Delivery
	err := job.Unmarshal(&delivery)
	if err != nil {
		metrics.NewMetric("counter", map[string]interface{}{
			"name": "notifications.worker.panic.json",
		}).Log()

		p.deliveryFailureHandler.Handle(job, logger)
		return nil
	}

	logger = logger.WithData(lager.Data{
		"message_id":      delivery.MessageID,
		"vcap_request_id": delivery.VCAPRequestID,
	})

	if p.dbTrace {
		p.database.TraceOn("", gorpCompatibleLogger{logger})
	}

	err = p.receiptsRepo.CreateReceipts(p.database.Connection(), []string{delivery.UserGUID}, delivery.ClientID, delivery.Options.KindID)
	if err != nil {
		p.deliveryFailureHandler.Handle(job, logger)
		return nil
	}

	if delivery.Email == "" {
		var token string

		token, err = p.tokenLoader.Load(p.uaaHost)
		if err != nil {
			p.deliveryFailureHandler.Handle(job, logger)
			return nil
		}

		users, err := p.userLoader.Load([]string{delivery.UserGUID}, token)
		if err != nil || len(users) < 1 {
			p.deliveryFailureHandler.Handle(job, logger)
			return nil
		}

		emails := users[delivery.UserGUID].Emails
		if len(emails) > 0 {
			delivery.Email = emails[0]
		}
	}

	logger = logger.WithData(lager.Data{
		"recipient": delivery.Email,
	})

	if p.shouldDeliver(delivery, logger) {
		status := p.deliver(delivery, logger)

		if status != StatusDelivered {
			p.deliveryFailureHandler.Handle(job, logger)
			return nil
		}

		metrics.NewMetric("counter", map[string]interface{}{
			"name": "notifications.worker.delivered",
		}).Log()
	} else {
		metrics.NewMetric("counter", map[string]interface{}{
			"name": "notifications.worker.unsubscribed",
		}).Log()
	}

	return nil
}