Code Example #1
File: skyring.go  Project: skyrings/skyring
//Middleware to create the logging context
func (a *App) LoggingContext(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	session, err := Store.Get(r, "session-key")
	if err != nil {
		logger.Get().Error("Error Getting the session. error: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	var username string
	if val, ok := session.Values["username"]; ok {
		username = val.(string)
	}

	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	loggingContext := fmt.Sprintf("%v:%v", username, reqId.String())

	context.Set(r, LoggingCtxt, loggingContext)

	defer context.Clear(r)
	next(w, r)
}
Code Example #2
File: manager.go  Project: skyrings/skyring-common
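// Run builds a Task with a freshly generated UUID, registers it with the manager and starts it;
// a companion goroutine waits on DoneCh/StopCh and, on a stop request, marks the task as timed out.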
func (manager *Manager) Run(owner string, name string, f func(t *Task), startedFunc func(t *Task), completedFunc func(t *Task), statusFunc func(t *Task, s *models.Status)) (uuid.UUID, error) {
	if id, err := uuid.New(); err == nil {
		task := Task{
			Mutex:            &sync.Mutex{},
			ID:               *id,
			Owner:            owner,
			Name:             name,
			DoneCh:           make(chan bool, 1),
			StatusList:       []models.Status{},
			StopCh:           make(chan bool, 0),
			Func:             f,
			StartedCbkFunc:   startedFunc,
			CompletedCbkFunc: completedFunc,
			StatusCbkFunc:    statusFunc,
		}
		task.Run()
		manager.tasks[*id] = &task
		go func() {
			select {
			case <-task.DoneCh:
				return
			case <-task.StopCh:
				task.UpdateStatus("Force Stop. Task: %v explicitly stopped due to timeout.", task.ID)
				task.Done(models.TASK_STATUS_TIMED_OUT)
				task.StopCh <- true
				return
			}
		}()
		return *id, nil
	} else {
		return uuid.UUID{}, err
	}
}
Code Example #3
File: object-utils.go  Project: krishnasrinivas/minio
// mustGetUUID - get a random UUID.
func mustGetUUID() string {
	uuid, err := uuid.New()
	if err != nil {
		panic(fmt.Sprintf("Random UUID generation failed. Error: %s", err))
	}

	return uuid.String()
}
Code Example #4
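// NewScheduler creates a Scheduler with a new UUID and an unbuffered channel,
// registers it in the schedules map and returns it.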
func NewScheduler() (Scheduler, error) {
	id, err := uuid.New()
	if err != nil {
		return Scheduler{}, err
	}
	scheduler := Scheduler{Channel: make(chan string), Id: *id}
	schedules[*id] = scheduler
	return scheduler, nil
}
Code Example #5
File: skyring.go  Project: skyrings/skyring
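// check_task_status finds incomplete top-level tasks whose last update is stale (per check_TaskLastUpdate)
// and force-stops them, first stopping any pending sub-tasks through the owning provider.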
func check_task_status(params map[string]interface{}) {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())
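	// id keeps its zero value so the query below matches only top-level tasks (parentid equal to the zero UUID)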
	var id uuid.UUID
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var tasks []models.AppTask
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	if err := collection.Find(bson.M{"completed": false, "parentid": id}).All(&tasks); err != nil {
		logger.Get().Warning("%s-%v", ctxt, err.Error())
		return
	}
	application := GetApp()
	for _, task := range tasks {
		if ok := check_TaskLastUpdate(task); ok {
			// Fetch the child tasks for this task id
			var subTasks []models.AppTask
			if err := collection.Find(bson.M{"completed": false, "parentid": task.Id}).All(&subTasks); err != nil && err != mgo.ErrNotFound {
				logger.Get().Warning("%s-%v", ctxt, err.Error())
				return
			}
			if len(subTasks) == 0 {
				//Stopping parent task
				stop_ParentTask(task, ctxt)
			} else {
				var result models.RpcResponse
				var stoppedSubTasksCount = 0
				for _, subTask := range subTasks {
					if ok := check_TaskLastUpdate(subTask); ok {
						provider := application.getProviderFromClusterType(subTask.Owner)
						vars := make(map[string]string)
						vars["task-id"] = subTask.Id.String()
						err := provider.Client.Call(fmt.Sprintf("%s.%s",
							provider.Name, "StopTask"),
							models.RpcRequest{RpcRequestVars: vars, RpcRequestData: []byte{}, RpcRequestContext: ctxt},
							&result)
						if err != nil || (result.Status.StatusCode != http.StatusOK) {
							logger.Get().Warning(fmt.Sprintf(":%s-Error stopping sub task: %v. error:%v", ctxt, subTask.Id, err))
							continue
						}
						stoppedSubTasksCount++
					}
				}
				if stoppedSubTasksCount == len(subTasks) {
					// Stopping parent task
					stop_ParentTask(task, ctxt)
				}
			}
		}
	}
}
Code Example #6
File: auth.go  Project: skyrings/skyring
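// login authenticates the user from the JSON request body via the configured auth provider,
// logs an audit event for both failed and successful attempts, and reports the result to the client.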
func (a *App) login(rw http.ResponseWriter, req *http.Request) {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())

	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		logger.Get().Error("Error parsing http request body:%s", err)
		HandleHttpError(rw, err)
		return
	}
	var m map[string]interface{}
	if err = json.Unmarshal(body, &m); err != nil {
		logger.Get().Error("Unable to Unmarshall the data:%s", err)
		HandleHttpError(rw, err)
		return
	}
	if err := GetAuthProvider().Login(rw, req, m["username"].(string), m["password"].(string)); err != nil {
		logger.Get().Error("Unable to login User:%s", err)
		bytes, _ := json.Marshal(apiError{Error: err.Error()})
		if err := logAuditEvent(EventTypes["USER_LOGGED_IN"],
			fmt.Sprintf("Log in failed for user: %s", m["username"]),
			fmt.Sprintf("Log in failed for user: %s .Error: %s", m["username"], err),
			nil,
			nil,
			models.NOTIFICATION_ENTITY_USER,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log User event. Error: %v", ctxt, err)
		}
		rw.WriteHeader(http.StatusUnauthorized)
		rw.Write(bytes)
		return
	}
	message := fmt.Sprintf("User: %s logged in", m["username"])
	if err := logAuditEvent(
		EventTypes["USER_LOGGED_IN"],
		message,
		message,
		nil,
		nil,
		models.NOTIFICATION_ENTITY_USER,
		nil,
		false,
		ctxt); err != nil {
		logger.Get().Error("%s- Unable to log User event. Error: %v", ctxt, err)
	}

	bytes := []byte(`{"message": "Logged in"}`)
	rw.Write(bytes)
}
Code Example #7
File: object-utils.go  Project: hackintoshrao/minio
// getUUID() - get a unique uuid.
func getUUID() (uuidStr string) {
	for {
		uuid, err := uuid.New()
		if err != nil {
			errorIf(err, "Unable to initialize uuid")
			continue
		}
		uuidStr = uuid.String()
		break
	}
	return uuidStr
}
Code Example #8
File: util.go  Project: skyrings/skyring
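// logAuditEvent builds an AppEvent with a newly generated event id and the supplied details,
// persists it via common_event.AuditLog and, when deletionAudit is set, dismisses all existing events for the entity.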
func logAuditEvent(
	eventtype string,
	message string,
	description string,
	entityId *uuid.UUID,
	clusterId *uuid.UUID,
	notificationEntity models.NotificationEntity,
	taskId *uuid.UUID,
	deletionAudit bool,
	ctxt string) error {
	var event models.AppEvent
	eventId, err := uuid.New()
	if err != nil {
		logger.Get().Error("%s-Uuid generation for the event failed for event: %s. error: %v", ctxt, event.Name, err)
		return err
	}
	event.EventId = *eventId
	if entityId != nil {
		event.EntityId = *entityId
	}
	if clusterId != nil {
		event.ClusterId = *clusterId
	}
	event.NotificationEntity = notificationEntity
	event.Timestamp = time.Now()
	event.Notify = false
	event.Name = eventtype
	event.Message = message
	event.Description = description
	event.Severity = models.ALARM_STATUS_CLEARED
	if taskId != nil {
		event.Tags = map[string]string{
			"Task Id": (*taskId).String(),
		}
	}

	if err := common_event.AuditLog(ctxt, event, GetDbProvider()); err != nil {
		logger.Get().Error("%s- Error logging the event: %s. Error:%v", ctxt, event.Name, err)
		return err
	}

	if deletionAudit {
		if err := common_event.DismissAllEventsForEntity(event.EntityId, event, ctxt); err != nil {
			logger.Get().Error("%s-Error while dismissing events for entity: %v. Error: %v", ctxt, event.EntityId, err)
		}
	}

	return nil
}
Code Example #9
File: skyring.go  Project: skyrings/skyring
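// PostInitApplication starts the background machinery after startup: the cluster sync schedule,
// monitoring schedules, resource and summary fetchers, and the periodic maintenance routines
// (task checks, node re-initialization, session refresh, etc.).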
func (a *App) PostInitApplication(sysConfig conf.SkyringCollection) error {
	// Initialize the scheduler
	schedule.InitShechuleManager()

	// Create syncing schedule
	scheduler, err := schedule.NewScheduler()
	if err != nil {
		logger.Get().Error("Error scheduling clusters syncing")
	} else {
		if sysConfig.ScheduleConfig.ClustersSyncInterval == 0 {
			sysConfig.ScheduleConfig.ClustersSyncInterval = 86400 // 24hrs
		}
		go scheduler.Schedule(
			time.Duration(sysConfig.ScheduleConfig.ClustersSyncInterval)*time.Second,
			a.SyncClusterDetails,
			nil)
	}

	// Create monitoring schedule
	go InitMonitoringSchedules()

	// First time sync of the cluster details while startup
	go a.SyncClusterDetails(nil)

	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return fmt.Errorf("Error Creating the RequestId. error: %v", err)
	}

	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())
	if err := schedulePhysicalResourceStatsFetch(sysConfig.SummaryConfig, map[string]interface{}{"ctxt": ctxt}); err != nil {
		logger.Get().Error("%s - Failed to schedule fetching node resource utilizations.Error %v", ctxt, err)
		return err
	}

	if err := scheduleSummaryMonitoring(sysConfig.SummaryConfig); err != nil {
		logger.Get().Error("%s - Failed to schedule fetching summary.Error %v", ctxt, err)
		return err
	}
	schedule_task_check(ctxt)
	node_Reinitialize()
	cleanupTasks()
	initializeAbout(ctxt)
	schedule_archive_activities(ctxt)
	FailStillCreatingClusters(ctxt)
	schedule_session_refresh(ctxt)
	return nil
}
Code Example #10
File: format-config-v1.go  Project: yrashk/minio
// initFormatXL - save XL format configuration on all disks.
func initFormatXL(storageDisks []StorageAPI) (err error) {
	var (
		jbod             = make([]string, len(storageDisks))
		formats          = make([]*formatConfigV1, len(storageDisks))
		saveFormatErrCnt = 0
	)
	for index, disk := range storageDisks {
		if err = disk.MakeVol(minioMetaBucket); err != nil {
			if err != errVolumeExists {
				saveFormatErrCnt++
				// Check for write quorum.
				if saveFormatErrCnt <= len(storageDisks)-(len(storageDisks)/2+3) {
					continue
				}
				return errXLWriteQuorum
			}
		}
		var u *uuid.UUID
		u, err = uuid.New()
		if err != nil {
			saveFormatErrCnt++
			// Check for write quorum.
			if saveFormatErrCnt <= len(storageDisks)-(len(storageDisks)/2+3) {
				continue
			}
			return err
		}
		formats[index] = &formatConfigV1{
			Version: "1",
			Format:  "xl",
			XL: &xlFormat{
				Version: "1",
				Disk:    u.String(),
			},
		}
		jbod[index] = formats[index].XL.Disk
	}
	for index, disk := range storageDisks {
		formats[index].XL.JBOD = jbod
		formatBytes, err := json.Marshal(formats[index])
		if err != nil {
			return err
		}
		if err = disk.AppendFile(minioMetaBucket, formatConfigFile, formatBytes); err != nil {
			return err
		}
	}
	return nil
}
Code Example #11
File: monitoring.go  Project: skyrings/skyring-common
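// InitializeClusterSummary assembles a ClusterSummary (most used storages, SLU counts, usage,
// storage count, etc.) for the given cluster and upserts it into the cluster summary collection.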
func InitializeClusterSummary(cluster models.Cluster) {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}

	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())

	cSummary := models.ClusterSummary{}

	if mostUsedStorages, err := GetTopStorageUsage(bson.M{"clusterid": cluster.ClusterId}); err == nil {
		cSummary.MostUsedStorages = mostUsedStorages
	}

	sluStatusWiseCounts, err := ComputeSluStatusWiseCount(
		bson.M{"clusterid": cluster.ClusterId},
		bson.M{"utilizationtype": monitoring.SLU_UTILIZATION, "clusterid": cluster.ClusterId})
	if err == nil {
		cSummary.SLUCount = sluStatusWiseCounts
	}

	cSummary.Usage = cluster.Usage

	cSummary.ObjectCount = cluster.ObjectCount

	cSummary.Utilizations = cluster.Utilizations

	storageCount, err := GetStorageCount(bson.M{"clusterid": cluster.ClusterId})
	if err != nil {
		logger.Get().Error("%s - Failed to fetch storage status wise counts for cluster %v.Error %v", ctxt, cluster.Name, err)
	}
	cSummary.StorageCount = storageCount

	cSummary.ClusterId = cluster.ClusterId
	cSummary.Type = cluster.Type
	cSummary.Name = cluster.Name
	cSummary.MonitoringPlugins = cluster.Monitoring.Plugins
	cSummary.UpdatedAt = time.Now().String()

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_CLUSTER_SUMMARY)
	if _, err := coll.Upsert(bson.M{"clusterid": cluster.ClusterId}, cSummary); err != nil {
		logger.Get().Error("%s - Error persisting the cluster summary.Error %v", ctxt, err)
	}
}
Code Example #12
File: skyring.go  Project: skyrings/skyring
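// cleanupTasks marks every task still recorded as incomplete as failed, with a forced-stop status entry;
// it is invoked from PostInitApplication during startup.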
func cleanupTasks() {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	s := []models.Status{{time.Now(), "Force Stop. Task explicitly stopped due to timeout."}}
	if _, err := collection.UpdateAll(bson.M{"completed": false}, bson.M{"$set": bson.M{"completed": true,
		"status": models.TASK_STATUS_FAILURE, "statuslist": s}}); err != nil {
		logger.Get().Debug("%s-%v", ctxt, err.Error())
	}
}
Code Example #13
File: monitoring.go  Project: skyrings/skyring-common
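// InitializeSystemSummary computes system-wide counts (SLUs, storages, clusters, storage-profile
// utilization, most used storages) and upserts the result into the skyring utilization collection.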
func InitializeSystemSummary() {
	var system models.System

	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}

	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())

	system.Name = monitoring.SYSTEM

	_, clusterFetchError := GetClusters(nil)
	if clusterFetchError != nil {
		if clusterFetchError == mgo.ErrNotFound {
			return
		}
		logger.Get().Error("%s - Failed to fetch clusters.Err %v", ctxt, clusterFetchError)
	}

	sluStatusWiseCounts, err := ComputeSluStatusWiseCount(nil, bson.M{"utilizationtype": monitoring.SLU_UTILIZATION, "thresholdseverity": models.CRITICAL})
	system.SLUCount = sluStatusWiseCounts

	storageCount, err := GetStorageCount(nil)
	system.StorageCount = storageCount

	clustersCount, err := ComputeClustersStatusWiseCounts()
	system.ClustersCount = clustersCount
	systemthresholds := monitoring.GetSystemDefaultThresholdValues()

	net_storage_profile_utilization, err := ComputeStorageProfileUtilization(nil, systemthresholds[monitoring.STORAGE_PROFILE_UTILIZATION].Configs)
	system.StorageProfileUsage = net_storage_profile_utilization

	system.UpdatedAt = time.Now().String()

	mostUsedStorages, err := GetTopStorageUsage(nil)
	system.MostUsedStorages = mostUsedStorages

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_SKYRING_UTILIZATION)
	if _, err := coll.Upsert(bson.M{"name": monitoring.SYSTEM}, system); err != nil {
		logger.Get().Error("%s - Error persisting the system.Error %v", ctxt, err)
	}
}
Code Example #14
File: listener.go  Project: skyrings/skyring
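// PushNodeStartEvent is the RPC handler for node start events; it validates the node name and
// timestamp, triggers handle_node_start_event for valid events, and reports acceptance through ack.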
func (l *Listener) PushNodeStartEvent(args *NodeStartEventArgs, ack *bool) error {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return err
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())
	timestamp := args.Timestamp
	node := strings.TrimSpace(args.Node)
	if node == "" || timestamp.IsZero() {
		*ack = false
		return nil
	}
	handle_node_start_event(node, ctxt)
	*ack = true
	return nil
}
Code Example #15
File: salt_node_manager.go  Project: skyrings/skyring
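// SyncStorageDisks refreshes the disk inventory of a node: already known disks keep their stored
// details, while newly discovered disks get a fresh UUID and a storage profile before the node record is updated.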
func (a SaltNodeManager) SyncStorageDisks(node string, sProfiles []models.StorageProfile, ctxt string) (bool, error) {
	disks, err := salt_backend.GetNodeDisk(node, ctxt)
	if err != nil {
		return false, err
	}
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var storage_node models.Node
	var updated_disks []models.Disk

	if err := coll.Find(bson.M{"hostname": node}).One(&storage_node); err != nil {
		logger.Get().Error("%s-Error updating the disk details for node: %s. error: %v", ctxt, node, err)
		return false, err
	}
	var present bool
	for _, disk := range disks {
		present = false
		for _, stored_disk := range storage_node.StorageDisks {
			if disk.DevName == stored_disk.DevName {
				present = true
				updated_disks = append(updated_disks, stored_disk)
				break
			}
		}
		if !present {
			dId, err := uuid.New()
			if err != nil {
				logger.Get().Error(fmt.Sprintf("%s-Unable to generate uuid for disk : %s. error: %v", ctxt, disk.DevName, err))
				return false, err
			}
			disk.DiskId = *dId
			applyStorageProfile(&disk, sProfiles)
			updated_disks = append(updated_disks, disk)
		}
	}
	if err := coll.Update(bson.M{"hostname": node}, bson.M{"$set": bson.M{"storagedisks": updated_disks}}); err != nil {
		logger.Get().Error("%s-Error updating the disk details for node: %s. error: %v", ctxt, node, err)
		return false, err
	}
	return true, nil
}
Code Example #16
File: skyring.go  Project: skyrings/skyring
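// node_Reinitialize restarts initialization for every node still found in the NODE_STATE_INITIALIZING state.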
func node_Reinitialize() {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())
	var nodes models.Nodes
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := collection.Find(bson.M{"state": models.NODE_STATE_INITIALIZING}).All(&nodes); err != nil {
		logger.Get().Debug("%s-%v", ctxt, err.Error())
		return
	}
	for _, node := range nodes {
		go start_Reinitialize(node.Hostname, ctxt)

	}
}
Code Example #17
File: storage.go  Project: skyrings/bigfin
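// createPool creates a replicated or erasure-coded pool on the cluster through the ceph backend,
// persists the resulting storage entity and returns its id along with a success flag.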
func createPool(ctxt string, clusterId uuid.UUID, request models.AddStorageRequest, t *task.Task) (*uuid.UUID, bool) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	t.UpdateStatus("Getting cluster details")
	// Get cluster details
	var cluster models.Cluster
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	if err := coll.Find(bson.M{"clusterid": clusterId}).One(&cluster); err != nil {
		utils.FailTask(fmt.Sprintf("Error getting the cluster details for :%v", clusterId), fmt.Errorf("%s - %v", ctxt, err), t)
		return nil, false
	}

	t.UpdateStatus("Getting a mon from cluster")
	monnode, err := GetCalamariMonNode(clusterId, ctxt)
	if err != nil {
		utils.FailTask(fmt.Sprintf("Error getting mon node details for cluster: %v", clusterId), fmt.Errorf("%s - %v", ctxt, err), t)
		return nil, false
	}

	t.UpdateStatus("Creating pool")
	// Get quota related details if quota enabled
	// If quota enabled, looks for quota config values
	var quotaMaxObjects int
	var quotaMaxBytes uint64
	if request.QuotaEnabled {
		var err error
		if request.QuotaParams["quota_max_objects"] != "" {
			if quotaMaxObjects, err = strconv.Atoi(request.QuotaParams["quota_max_objects"]); err != nil {
				utils.FailTask(fmt.Sprintf("Error parsing quota config value quota_max_objects for pool %s on cluster: %v", request.Name, clusterId), fmt.Errorf("%s - %v", ctxt, err), t)
				return nil, false
			}
		}
		if request.QuotaParams["quota_max_bytes"] != "" {
			if quotaMaxBytes, err = strconv.ParseUint(request.QuotaParams["quota_max_bytes"], 10, 64); err != nil {
				utils.FailTask(fmt.Sprintf("Error parsing quota config value quota_max_bytes for pool %s on cluster: %v", request.Name, clusterId), fmt.Errorf("%s - %v", ctxt, err), t)
				return nil, false
			}
		}
	}

	// Invoke backend api to create pool
	var pgNum uint
	if request.Options["pgnum"] == "" {
		utils.FailTask("", fmt.Errorf("%s - Pg num not provided", ctxt), t)
		return nil, false
	} else {
		val, _ := strconv.ParseUint(request.Options["pgnum"], 10, 32)
		pgNum = uint(val)
	}
	if request.Type == models.STORAGE_TYPE_ERASURE_CODED {
		ok, err := validECProfile(ctxt, monnode.Hostname, cluster, request.Options["ecprofile"])
		if err != nil {
			utils.FailTask("", fmt.Errorf("%s - Error checking validity of ec profile value. error: %v", ctxt, err), t)
			return nil, false
		}
		if !ok {
			utils.FailTask(
				"",
				fmt.Errorf(
					"%s-Invalid EC profile value: %s passed for pool: %s creation on cluster: %s. error: %v",
					ctxt,
					request.Options["ecprofile"],
					request.Name,
					cluster.Name,
					err),
				t)
			return nil, false
		}
	}
	rulesetmapval, ok := cluster.Options["rulesetmap"]
	if !ok {

		logger.Get().Error("Error getting the ruleset for cluster: %s", cluster.Name)
		utils.FailTask("", fmt.Errorf("%s - Error getting the ruleset for cluster: %s", ctxt, cluster.Name), t)
		return nil, false

	}
	rulesetmap := rulesetmapval.(map[string]interface{})
	rulesetval, ok := rulesetmap[request.Profile]
	if !ok {
		logger.Get().Error("Error getting the ruleset for cluster: %s", cluster.Name)
		return nil, false
	}
	ruleset := rulesetval.(map[string]interface{})

	if request.Type == models.STORAGE_TYPE_ERASURE_CODED {
		// cmd := fmt.Sprintf("ceph --cluster %s osd pool create %s %d %d erasure %s", cluster.Name, request.Name, uint(pgNum), uint(pgNum), request.Options["ecprofile"])
		// ok, _, err = cephapi_backend.ExecCmd(monnode.Hostname, clusterId, cmd, ctxt)
		// time.Sleep(10 * time.Second)
		ok, err = cephapi_backend.CreateECPool(
			request.Name,
			monnode.Hostname,
			cluster.Name,
			uint(pgNum),
			request.Replicas,
			quotaMaxObjects,
			quotaMaxBytes,
			request.Options["ecprofile"],
			ruleset,
			request.Profile,
			ctxt)
	} else {
		ok, err = cephapi_backend.CreatePool(
			request.Name,
			monnode.Hostname,
			cluster.Name,
			uint(pgNum),
			request.Replicas,
			quotaMaxObjects,
			quotaMaxBytes,
			ruleset["rulesetid"].(int),
			ctxt)
	}
	if err == cephapi.ErrTimedOut || err == nil {
		pools, err := cephapi_backend.GetPools(monnode.Hostname, clusterId, ctxt)
		if err != nil {
			utils.FailTask("Error getting created pools", fmt.Errorf("%s - %v", ctxt, err), t)
			return nil, false
		}
		storage_id, err := uuid.New()
		if err != nil {
			utils.FailTask("Error creating id for pool", fmt.Errorf("%s - %v", ctxt, err), t)
			return nil, false
		}
		for _, pool := range pools {
			if request.Name == pool.Name {
				t.UpdateStatus("Perisisting the storage entity")
				var storage models.Storage
				storage.StorageId = *storage_id
				storage.Name = request.Name
				storage.Type = request.Type
				storage.Tags = request.Tags
				storage.ClusterId = clusterId
				storage.Size = request.Size
				storage.Status = models.STORAGE_STATUS_OK
				storage.Replicas = request.Replicas
				storage.Profile = request.Profile
				storage.SnapshotsEnabled = request.SnapshotsEnabled
				// TODO: Populate the schedule ids once schedule created
				// storage.SnapshotScheduleIds = <created schedule ids>
				storage.QuotaEnabled = request.QuotaEnabled
				storage.QuotaParams = request.QuotaParams
				options := make(map[string]string)
				options["id"] = strconv.Itoa(pool.Id)
				options["pg_num"] = strconv.Itoa(pool.PgNum)
				options["pgp_num"] = strconv.Itoa(pool.PgpNum)
				options["full"] = strconv.FormatBool(pool.Full)
				options["hashpspool"] = strconv.FormatBool(pool.HashPsPool)
				options["min_size"] = strconv.FormatUint(pool.MinSize, 10)
				options["crash_replay_interval"] = strconv.Itoa(pool.CrashReplayInterval)
				options["crush_ruleset"] = strconv.Itoa(pool.CrushRuleSet)
				if request.Type == models.STORAGE_TYPE_ERASURE_CODED {
					options["ecprofile"] = request.Options["ecprofile"]
				}
				storage.Options = options

				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
				if _, err := coll.Upsert(bson.M{"name": storage.Name, "clusterid": storage.ClusterId}, bson.M{"$set": storage}); err != nil {
					utils.FailTask(fmt.Sprintf("Error persisting pool %s for cluster: %s", request.Name, cluster.Name), fmt.Errorf("%s - %v", ctxt, err), t)
					return nil, false
				}
				break
			}
		}
		return storage_id, true
	} else {
		utils.FailTask(fmt.Sprintf("Create pool %s failed on cluster: %s", request.Name, cluster.Name), fmt.Errorf("%s - %v", ctxt, err), t)
		return nil, false
	}
}
Code Example #18
File: salt_node_manager.go  Project: skyrings/skyring
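// GetStorageNodeInstance gathers a node's inventory (network, disks, CPUs, OS, memory) through the
// salt backend and returns a populated Node, or false if any lookup fails or mandatory details are missing.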
func GetStorageNodeInstance(hostname string, sProfiles []models.StorageProfile, ctxt string) (*models.Node, bool) {
	var storage_node models.Node
	storage_node.Hostname = hostname
	storage_node.Enabled = true
	storage_node.NodeId, _ = salt_backend.GetNodeID(hostname, ctxt)
	networkInfo, err := salt_backend.GetNodeNetwork(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting network details for node: %s. error: %v", ctxt, hostname, err))
		return nil, false
	}
	storage_node.NetworkInfo = networkInfo
	addrs, err := net.LookupHost(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error looking up node IP for: %s. error: %v", ctxt, hostname, err))
		return nil, false
	}
	storage_node.ManagementIP4 = addrs[0]
	ok, err := salt_backend.NodeUp(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting status of node: %s. error: %v", ctxt, hostname, err))
		return nil, false
	}
	if ok {
		storage_node.Status = models.NODE_STATUS_OK
	} else {
		storage_node.Status = models.NODE_STATUS_ERROR
	}
	disks, err := salt_backend.GetNodeDisk(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting disk details for node: %s. error: %v", ctxt, hostname, err))
		return nil, false
	}
	for _, disk := range disks {
		dId, err := uuid.New()
		if err != nil {
			logger.Get().Error(fmt.Sprintf("%s-Unable to generate uuid for disk : %s. error: %v", ctxt, disk.DevName, err))
			return nil, false
		}
		disk.DiskId = *dId
		applyStorageProfile(&disk, sProfiles)
		storage_node.StorageDisks = append(storage_node.StorageDisks, disk)
	}

	cpus, err := salt_backend.GetNodeCpu(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting cpu details for node: %s. error: %v", ctxt, hostname, err))
		return nil, false
	}
	for _, cpu := range cpus {
		storage_node.CPUs = append(storage_node.CPUs, cpu)
	}

	osInfo, err := salt_backend.GetNodeOs(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting os details for node: %s", ctxt, hostname))
		return nil, false
	}
	storage_node.OS = osInfo

	memoryInfo, err := salt_backend.GetNodeMemory(hostname, ctxt)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error getting memory details for node: %s", ctxt, hostname))
		return nil, false
	}
	storage_node.Memory = memoryInfo

	if !storage_node.NodeId.IsZero() && len(storage_node.NetworkInfo.Subnet) != 0 && len(storage_node.StorageDisks) != 0 {
		return &storage_node, true
	} else {
		return nil, false
	}
}
Code Example #19
File: sync.go  Project: skyrings/skyring
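// SyncClusterDetails iterates over all active clusters and, holding a cluster lock, syncs their
// status, nodes, SLUs, storage entities and block devices, restoring the cluster state to active when done.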
func (a *App) SyncClusterDetails(params map[string]interface{}) {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the Request Id for context. error: %v", err)
		// return here, otherwise reqId is nil and reqId.String() below would panic
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())

	// Get the list of cluster
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	var clusters models.Clusters
	if err := coll.Find(nil).All(&clusters); err != nil {
		logger.Get().Error("%s-Error getting the clusters list. Unable to sync details. error: %v", ctxt, err)
		return
	}
	for _, cluster := range clusters {
		logger.Get().Info(fmt.Sprintf("Started syncing cluster: %s", cluster.Name))
		if cluster.State != models.CLUSTER_STATE_ACTIVE {
			logger.Get().Info("%s-Cluster %s is not in active state. Skipping sync.", ctxt, cluster.Name)
			continue
		}

		// Change the state of the cluster as syncing
		logger.Get().Debug("Setting the state of cluster: %s as syncing", cluster.Name)
		if err := coll.Update(
			bson.M{"clusterid": cluster.ClusterId},
			bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_SYNCING}}); err != nil {
			logger.Get().Error("%s-Error marking the cluster %s as syncing. error: %s", ctxt, cluster.Name, err)
			continue
		}

		// Lock the cluster
		appLock, err := LockCluster(ctxt, cluster, "SyncClusterDetails")
		if err != nil {
			logger.Get().Error("Failed to acquire lock for cluster: %s. error: %v", cluster.Name, err)
			continue
		}
		defer a.GetLockManager().ReleaseLock(ctxt, *appLock)

		provider := a.GetProviderFromClusterId(ctxt, cluster.ClusterId)
		if provider == nil {
			logger.Get().Error("%s-Error getting provider for the cluster: %s", ctxt, cluster.Name)
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			continue
		}

		// Sync the cluster status
		logger.Get().Debug("Syncing status of cluster: %s", cluster.Name)
		if ok, err := sync_cluster_status(ctxt, cluster, provider); err != nil || !ok {
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			logger.Get().Error("%s-Error updating status for cluster: %s", ctxt, cluster.Name)
			continue
		}

		// Sync the cluster nodes
		logger.Get().Debug("Syncing nodes of cluster: %s", cluster.Name)
		if ok, err := sync_cluster_nodes(ctxt, cluster, provider); err != nil || !ok {
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			logger.Get().Error("%s-Error syncing storage nodes for cluster: %s", ctxt, cluster.Name)
			continue
		}

		// Sync the cluster status
		logger.Get().Debug("Syncing SLUs of cluster: %s", cluster.Name)
		if ok, err := syncSlus(ctxt, cluster, provider); err != nil || !ok {
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			logger.Get().Error("%s-Error syncing slus: %s", ctxt, cluster.Name)
			continue
		}

		// Sync the storage entities of the cluster
		logger.Get().Debug("Syncing storages of cluster: %s", cluster.Name)
		if ok, err := sync_cluster_storage_entities(ctxt, cluster, provider); err != nil || !ok {
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			logger.Get().Error("%s-Error syncing storage entities for cluster: %s. error: %v", ctxt, cluster.Name, err)
			continue
		}
		// Sync block devices
		logger.Get().Debug("Syncing block devices of cluster: %s", cluster.Name)
		if ok, err := sync_block_devices(ctxt, cluster, provider); err != nil || !ok {
			// Re-set the cluster state to active
			if err := coll.Update(
				bson.M{"clusterid": cluster.ClusterId},
				bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
				logger.Get().Debug("%s-Failed to set state set to active for cluster: %v", ctxt, cluster.ClusterId)
			}
			logger.Get().Error("%s-Error syncing block devices for cluster: %s. error: %v", ctxt, cluster.Name, err)
			continue
		}

		logger.Get().Debug("Setting the cluster: %s back as active", cluster.Name)
		if err := coll.Update(
			bson.M{"clusterid": cluster.ClusterId},
			bson.M{"$set": bson.M{"state": models.CLUSTER_STATE_ACTIVE}}); err != nil {
			logger.Get().Error("%s-Error setting the back cluster state. error: %v", ctxt, err)
		}
	}
}
Code Example #20
File: listener.go  Project: skyrings/skyring
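// RouteEvent enriches an incoming node event with node and cluster details from the DB, dispatches it
// to a matching tag handler (persisting and broadcasting it), and falls back to provider event routing
// when no handler matches.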
func RouteEvent(event models.NodeEvent) {
	reqId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Error Creating the RequestId. error: %v", err)
		return
	}
	ctxt := fmt.Sprintf("%v:%v", models.ENGINE_NAME, reqId.String())

	var e models.AppEvent
	e.Timestamp = event.Timestamp
	e.Tags = event.Tags
	e.Message = event.Message
	eventId, err := uuid.New()
	if err != nil {
		logger.Get().Error("%s-Uuid generation for the event failed for node: %s. error: %v", ctxt, event.Node, err)
		return
	}

	e.EventId = *eventId
	// querying DB to get node ID and Cluster ID for the event
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var node models.Node
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := coll.Find(bson.M{"hostname": event.Node}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.Node, err)
		return
	}

	// Push the event to DB only if the node is managed
	if node.Hostname == "" || !node.Enabled {
		return
	}
	e.ClusterId = node.ClusterId
	e.NodeId = node.NodeId
	e.NodeName = node.Hostname

	// Invoking the event handler
	for tag, handler := range handlermap {
		if match, err := filepath.Match(tag, event.Tag); err == nil {
			if match {
				if e, err = handler.(func(models.AppEvent, string) (models.AppEvent, error))(e, ctxt); err != nil {
					logger.Get().Error("%s-Event Handling Failed for event for node: %s. error: %v", ctxt, node.Hostname, err)
					return
				}
				if e.Name == "" {
					return
				}
				if err := common_event.AuditLog(ctxt, e, skyring.GetDbProvider()); err != nil {
					logger.Get().Error("%s-Could not persist the event to DB for node: %s. error: %v", ctxt, node.Hostname, err)
					return
				} else {
					// For upcoming any new event , broadcasting to all connected clients
					eventObj, err := json.Marshal(e)
					if err != nil {
						logger.Get().Error("%s-Error marshalling the event data for node: %s. error: %v", ctxt, node.Hostname, err)
					}
					GetBroadcaster().chBroadcast <- string(eventObj)
					return
				}
			}
		} else {
			logger.Get().Error("%s-Error while maping handler for event for node: %s. error: %v", ctxt, node.Hostname, err)
			return
		}
	}
	e.Name = event.Tag
	app := skyring.GetApp()
	if err := app.RouteProviderEvents(ctxt, e); err != nil {
		logger.Get().Error("%s-Event:%s could not be handled for node: %s. error: %v", ctxt, event.Tag, node.Hostname, err)
	}
	return
}