Example No. 1
func (s *CephProvider) GetServiceCount(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	request := make(map[string]interface{})
	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Unable to parse the Get Service Count request. error: %v", ctxt, err))
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unable to parse the request. error: %v", err))
		return err
	}
	Hostname := request["hostname"].(string)
	TotalSluCount := request["totalslu"].(float64)
	NodeRoles := request["noderoles"]
	ServiceDetails := make(map[string]interface{})
	ServiceCount, err := salt_backend.GetServiceCount(Hostname, ctxt)
	if err != nil {
		logger.Get().Error(
			"%s-Error fetching service count for node: %s. error: %v",
			ctxt,
			Hostname,
			err)
		*resp = utils.WriteResponse(http.StatusInternalServerError,
			fmt.Sprintf("Error fetching service count for node %s. error %v", Hostname, err))
		return err

	}
	for _, role := range NodeRoles.([]interface{}) {
		switch role.(string) {
		case strings.ToUpper(bigfin_models.NODE_SERVICE_OSD):
			SluUp := ServiceCount[bigfin_models.SLU_SERVICE_COUNT]
			SluDown := int(TotalSluCount) - ServiceCount[bigfin_models.SLU_SERVICE_COUNT]
			ServiceDetails["slu"] = map[string]int{"up": SluUp, "down": SluDown}
		case strings.ToUpper(bigfin_models.NODE_SERVICE_MON):
			ServiceDetails["mon"] = ServiceCount[bigfin_models.MON_SERVICE_COUNT]
		}
	}
	var bytes []byte
	bytes, err = json.Marshal(ServiceDetails)
	if err != nil {
		logger.Get().Error("%s-Unable to marshal the service count details :%s", ctxt, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError,
			fmt.Sprintf("Unable to marshal the service count details for node :%s . error %v", Hostname, err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", bytes)
	return nil
}
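A minimal sketch of how the handler above could be exercised directly (in practice these methods are called over the provider RPC transport). The payload field names mirror the type assertions in GetServiceCount; the host name, the role strings, and the assumption that this snippet lives in the same package as CephProvider are all hypothetical.

	// Hypothetical request for GetServiceCount: "hostname" must be a string,
	// "totalslu" a number (JSON numbers decode to float64) and "noderoles" an array.
	payload, _ := json.Marshal(map[string]interface{}{
		"hostname":  "ceph-node1.example.com",
		"totalslu":  4,
		"noderoles": []string{"OSD", "MON"},
	})
	req := models.RpcRequest{
		RpcRequestContext: "example-ctxt",
		RpcRequestData:    payload,
	}
	var resp models.RpcResponse
	if err := (&CephProvider{}).GetServiceCount(req, &resp); err != nil {
		// resp carries the HTTP-style status and message written by the handler
	}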
Example No. 2
func (s *CephProvider) StopTask(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	task_id_str := req.RpcRequestVars["task-id"]
	logger.Get().Debug(fmt.Sprintf("%s-Stopping sub-task: %s", ctxt, task_id_str))
	task_id, err := uuid.Parse(task_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the task id: %s. error: %v", ctxt, task_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the task id: %s", task_id_str))
		return err
	}

	// Stop the task
	if ok, err := task.GetTaskManager().Stop(*task_id); !ok || err != nil {
		logger.Get().Error("%s-Failed to stop the task: %v. error: %v", ctxt, task_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Failed to stop task")
		return err
	} else {
		*resp = utils.WriteResponse(http.StatusOK, "Done")
	}
	return nil
}
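StopTask only needs the task id route variable, so a caller just populates RpcRequestVars. A minimal sketch, again assuming the surrounding provider package and using a made-up task UUID:

	// The "task-id" key matches the route variable read by StopTask above.
	req := models.RpcRequest{
		RpcRequestContext: "example-ctxt",
		RpcRequestVars: map[string]string{
			"task-id": "9b1f63aa-3c2e-4d55-8f0a-12d34e56f789", // hypothetical task id
		},
	}
	var resp models.RpcResponse
	if err := (&CephProvider{}).StopTask(req, &resp); err != nil {
		// a bad id yields a 400 response, a failed stop a 500, both written into resp
	}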
Example No. 3
func (s *CephProvider) MonitorCluster(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext

	cluster_id_str, ok := req.RpcRequestVars["cluster-id"]
	var monnode *models.Node
	if !ok {
		logger.Get().Error("%s-Incorrect cluster id: %s", ctxt, cluster_id_str)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Incorrect cluster id: %s", cluster_id_str))
		return fmt.Errorf("Incorrect cluster id: %s", cluster_id_str)
	}
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. Error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s.Error: %v", cluster_id_str, err))
		return fmt.Errorf("Error parsing the cluster id: %s.Error: %v", cluster_id_str, err)
	}
	cluster, err := getCluster(*cluster_id)
	if err != nil {
		logger.Get().Error("%s-Unable to get cluster with id %v.Err %v", ctxt, cluster_id, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unable to get cluster with id %v.Err %v", cluster_id, err.Error()))
		return fmt.Errorf("Unable to get cluster with id %v.Err %v", cluster_id, err.Error())
	}
	monnode, err = GetCalamariMonNode(*cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Unable to pick a random mon from cluster %v.Error: %v", ctxt, cluster.Name, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unable to pick a random mon from cluster %v.Error: %v", cluster.Name, err.Error()))
		return fmt.Errorf("Unable to pick a random mon from cluster %v.Error: %v", cluster.Name, err.Error())
	}
	monName := (*monnode).Hostname
	err = initMonitoringRoutines(ctxt, cluster, monName, MonitoringRoutines)
	if err != nil {
		logger.Get().Error("%s-Error: %v", ctxt, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error: %v", err.Error()))
		return fmt.Errorf("Error: %v", err.Error())
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", []byte{})
	return nil
}
Example No. 4
func (s *CephProvider) UpdateStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the cluster id: %s. error: %v",
			ctxt,
			cluster_id_str,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the cluster id: %s",
				cluster_id_str))
		return err
	}
	storage_id_str := req.RpcRequestVars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the storage id: %s. error: %v",
			ctxt,
			storage_id_str,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the storage id: %s",
				storage_id_str))
		return err
	}
	var request models.AddStorageRequest
	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(
			"%s - Unbale to parse the request. error: %v",
			ctxt,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Unbale to parse the request. error: %v",
				err))
		return err
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storage models.Storage
	if err := coll.Find(bson.M{"storageid": *storage_id}).One(&storage); err != nil {
		logger.Get().Error(
			"%s - Error getting detals of storage: %v on cluster: %v. error: %v",
			ctxt,
			*storage_id,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error getting the details of storage: %v",
				*storage_id))
		return err
	}
	id, err := strconv.Atoi(storage.Options["id"])
	if err != nil {
		logger.Get().Error(
			"%s - Error getting id of the pool: %v of cluster: %v. error: %v",
			ctxt,
			*storage_id,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error getting id of the pool: %v",
				*storage_id))
		return err
	}
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started ceph provider pool updation: %v", t.ID)
				if request.Name != "" && (request.Replicas != 0 || len(request.Options) != 0) {
					utils.FailTask(
						fmt.Sprintf(
							"Invalid mix of fields to update for storage: %v of cluster: %v. "+
								"Name change cannot be mixed with other changes.",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-Invalid mix of fields to update", ctxt),
						t)
					return
				}
				for key := range request.Options {
					if ok := skyring_util.StringInSlice(key, validConfigs); !ok {
						utils.FailTask(
							fmt.Sprintf(
								"Invalid configuration: %s mentioned for storage: %v of cluster: %v",
								key,
								*storage_id,
								*cluster_id),
							fmt.Errorf("%s-%v", ctxt, err),
							t)
						return
					}
				}
				t.UpdateStatus("Getting a radom mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask(
						fmt.Sprintf(
							"Error getting mon node from cluster: %v",
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					return
				}
				var updatedFields = make(map[string]interface{})
				if request.Name != "" {
					updatedFields["name"] = request.Name
				}
				if request.Replicas != 0 {
					updatedFields["size"] = request.Replicas
				}
				if request.QuotaEnabled {
					for key, value := range request.QuotaParams {
						reqVal, _ := strconv.ParseUint(value, 10, 64)
						updatedFields[key] = uint64(reqVal)
					}
				} else {
					if request.QuotaParams["quota_max_objects"] == "0" && request.QuotaParams["quota_max_bytes"] == "0" {
						updatedFields["quota_max_objects"] = 0
						updatedFields["quota_max_bytes"] = 0
					}
				}
				for key, value := range request.Options {
					reqVal, _ := strconv.ParseUint(value, 10, 32)
					updatedFields[key] = uint(reqVal)
				}
				t.UpdateStatus("Updating pool details")
				ok, err := cephapi_backend.UpdatePool(
					monnode.Hostname,
					*cluster_id, id,
					updatedFields,
					ctxt)
				if err != nil || !ok {
					utils.FailTask(
						fmt.Sprintf(
							"Error setting the configurations for storage: %v on cluster: %v",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					return
				}
				var filter bson.M = make(map[string]interface{})
				var updates bson.M = make(map[string]interface{})
				filter["storageid"] = *storage_id
				filter["clusterid"] = *cluster_id
				if request.Name != "" {
					updates["name"] = request.Name
				}
				if request.Replicas != 0 {
					updates["replicas"] = request.Replicas
				}
				if request.QuotaEnabled {
					updates["quotaenabled"] = true
					params := make(map[string]string)
					for key, value := range request.QuotaParams {
						params[key] = string(value)
					}
					updates["quotaparams"] = params
				} else {
					if request.QuotaParams["quota_max_objects"] == "0" && request.QuotaParams["quota_max_bytes"] == "0" {
						updates["quotaenabled"] = false
						updates["quotaparams"] = map[string]string{}
					}
				}

				if value, ok := request.Options["pg_num"]; ok {
					updates["options.pgp_num"] = value
				}
				t.UpdateStatus("Persisting pool updates in DB")
				if err := coll.Update(filter, bson.M{"$set": updates}); err != nil {
					utils.FailTask(
						fmt.Sprintf(
							"Error updating storage entity: %v of cluster: %v",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
				}

				cluster, err := getCluster(*cluster_id)
				if err != nil {
					logger.Get().Error("%s - Failed to get details of cluster: %s. error: %v", ctxt, *cluster_id, err)
				} else {
					initMonitoringRoutines(ctxt, cluster, (*monnode).Hostname, []interface{}{FetchOSDStats})
					UpdatePgNumToSummaries(cluster, ctxt)
				}

				if _, ok := updates["name"]; ok {
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
					if _, err := coll.UpdateAll(filter, bson.M{"$set": bson.M{"storagename": updates["name"]}}); err != nil && err != mgo.ErrNotFound {
						utils.FailTask(
							fmt.Sprintf(
								"Storage name has changed for storage:%v. Error while updating this info for RBDs in cluster:%v",
								*storage_id,
								*cluster_id),
							fmt.Errorf("%s-%v", ctxt, err),
							t)
					}
				}
				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-UpdateStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error(
			"%s-Task creation failed for update storage on cluster: %v. error: %v",
			ctxt,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			"Task creation failed for storage update")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(
			taskId,
			fmt.Sprintf(
				"Task Created for update storage on cluster: %v",
				*cluster_id),
			[]byte{})
	}
	return nil
}
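Note the validation at the top of the async task: a pool rename cannot be combined with replica or option changes. A minimal sketch of two payloads that respect that rule, built through models.AddStorageRequest so the JSON field names do not have to be guessed; the pool name and pg_num value are hypothetical:

	// Rename only: no replicas, no options, per the handler's validation.
	renameBody, _ := json.Marshal(models.AddStorageRequest{Name: "rbd-pool-renamed"})

	// Tune pg_num instead; when "pg_num" is present the handler also persists
	// the value as options.pgp_num in the DB update above.
	tuneBody, _ := json.Marshal(models.AddStorageRequest{
		Options: map[string]string{"pg_num": "128"},
	})
	_, _ = renameBody, tuneBody // either would be set as RpcRequestData on the request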
Example No. 5
func (s *CephProvider) CreateStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	var request models.AddStorageRequest
	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error("%s - Unbale to parse the request. error: %v", ctxt, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unbale to parse the request. error: %v", err))
		return err
	}

	// Create the storage pool
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}

	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				var cluster models.Cluster

				t.UpdateStatus("Started ceph provider storage creation: %v", t.ID)

				t.UpdateStatus("Getting cluster details")
				// Get cluster details
				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
				if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
					utils.FailTask(fmt.Sprintf("Error getting the cluster details for :%v", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting a mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask(fmt.Sprintf("Error getting mon node details for cluster: %v", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				poolId, ok := createPool(ctxt, *cluster_id, request, t)
				if !ok {
					return
				}
				if request.Type != models.STORAGE_TYPE_ERASURE_CODED && len(request.BlockDevices) > 0 {
					createBlockDevices(ctxt, monnode.Hostname, cluster, *poolId, request, t)
				}

				t.UpdateStatus("Syncing SLUs")
				if err := SyncOsdStatus(*cluster_id, ctxt); err != nil {
					utils.FailTask("Error syncing SLUs", err, t)
					return
				}

				initMonitoringRoutines(ctxt, cluster, (*monnode).Hostname, []interface{}{FetchObjectCount})
				_, cStats, err := updateClusterStats(ctxt, cluster, (*monnode).Hostname)
				if err == nil {
					updateStatsToPools(ctxt, cStats, cluster.ClusterId)
				}
				skyring_util.UpdateStorageCountToSummaries(ctxt, cluster)
				UpdateObjectCountToSummaries(ctxt, cluster)
				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-CreateStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Task creation failed for create storage %s on cluster: %v. error: %v", ctxt, request.Name, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Task creation failed for storage creation")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(taskId, fmt.Sprintf("Task Created for create storage %s on cluster: %v", request.Name, *cluster_id), []byte{})
	}
	return nil
}
Example No. 6
func (s *CephProvider) RemoveStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}
	storage_id_str := req.RpcRequestVars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return err
	}

	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started ceph provider pool deletion: %v", t.ID)
				// Get the storage details
				var storage models.Storage
				var cluster models.Cluster

				t.UpdateStatus("Getting details of cluster")
				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
				if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
					utils.FailTask("Error getting details of cluster", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting details of storage")
				coll1 := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
				if err := coll1.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}).One(&storage); err != nil {
					utils.FailTask("Error getting details of storage", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting a mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask("Error getting a mon node for cluster", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				poolId, err := strconv.Atoi(storage.Options["id"])
				if err != nil {
					utils.FailTask("Error getting id of storage", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Deleting storage")
				ok, err := cephapi_backend.RemovePool(monnode.Hostname, *cluster_id, cluster.Name, storage.Name, poolId, ctxt)
				if err != nil || !ok {
					utils.FailTask(fmt.Sprintf("Deletion of storage %v failed on cluster: %s", *storage_id, *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				} else {
					t.UpdateStatus("Removing the block devices (if any) for storage entoty")
					coll2 := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
					if _, err := coll2.RemoveAll(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}); err != nil {
						utils.FailTask(fmt.Sprintf("Error removing block devices for storage %v from DB for cluster: %d", *storage_id, *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
						return
					}
					t.UpdateStatus("Removing the storage entity from DB")
					if err := coll1.Remove(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}); err != nil {
						utils.FailTask(fmt.Sprintf("Error removing storage entity from DB for cluster: %d", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
						return
					}
				}

				t.UpdateStatus("Syncing SLUs")
				if err := SyncOsdStatus(*cluster_id, ctxt); err != nil {
					utils.FailTask("Error syncing SLUs", err, t)
					return
				}

				skyring_util.UpdateStorageCountToSummaries(ctxt, cluster)
				UpdateObjectCountToSummaries(ctxt, cluster)

				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-DeleteStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Task creation failed for delete storage on cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Task creation failed for storage deletion")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(taskId, fmt.Sprintf("Task Created for delete storage on cluster: %v", *cluster_id), []byte{})
	}
	return nil
}
Example No. 7
func (s *CephProvider) GetStorages(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}
	monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Error getting a mon node in cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting a mon node in cluster. error: %v", err))
		return err
	}

	// Get cluster details
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var cluster models.Cluster
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
		logger.Get().Error("%s-Error getting details for cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting cluster details. error: %v", err))
		return err
	}

	// Get the pools for the cluster
	pools, err := cephapi_backend.GetPools(monnode.Hostname, *cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Error getting storages for cluster: %s. error: %v", ctxt, cluster.Name, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting storages. error: %v", err))
		return err
	}
	var storages []models.AddStorageRequest
	for _, pool := range pools {
		storage := models.AddStorageRequest{
			Name:     pool.Name,
			Replicas: pool.Size,
		}
		if pool.QuotaMaxObjects != 0 && pool.QuotaMaxBytes != 0 {
			storage.QuotaEnabled = true
			quotaParams := make(map[string]string)
			quotaParams["quota_max_objects"] = strconv.Itoa(pool.QuotaMaxObjects)
			quotaParams["quota_max_bytes"] = strconv.FormatUint(pool.QuotaMaxBytes, 10)
			storage.QuotaParams = quotaParams
		}
		options := make(map[string]string)
		options["id"] = strconv.Itoa(pool.Id)
		options["pg_num"] = strconv.Itoa(pool.PgNum)
		options["pgp_num"] = strconv.Itoa(pool.PgpNum)
		options["full"] = strconv.FormatBool(pool.Full)
		options["hashpspool"] = strconv.FormatBool(pool.HashPsPool)
		options["min_size"] = strconv.FormatUint(pool.MinSize, 10)
		options["crash_replay_interval"] = strconv.Itoa(pool.CrashReplayInterval)
		options["crush_ruleset"] = strconv.Itoa(pool.CrushRuleSet)
		// Get EC profile details of pool
		ok, out, err := cephapi_backend.ExecCmd(
			monnode.Hostname,
			*cluster_id,
			fmt.Sprintf("ceph --cluster %s osd pool get %s erasure_code_profile --format=json", cluster.Name, pool.Name),
			ctxt)
		if err != nil || !ok {
			storage.Type = models.STORAGE_TYPE_REPLICATED
			logger.Get().Warning("%s-Error getting EC profile details of pool: %s of cluster: %s", ctxt, pool.Name, cluster.Name)
		} else {
			var ecprofileDet bigfinmodels.ECProfileDet
			if err := json.Unmarshal([]byte(out), &ecprofileDet); err != nil {
				logger.Get().Warning("%s-Error parsing EC profile details of pool: %s of cluster: %s", ctxt, pool.Name, cluster.Name)
			} else {
				storage.Type = models.STORAGE_TYPE_ERASURE_CODED
				options["ecprofile"] = ecprofileDet.ECProfile
			}
		}
		storage.Options = options
		storages = append(storages, storage)
	}
	result, err := json.Marshal(storages)
	if err != nil {
		logger.Get().Error("%s-Error forming the output for storage list for cluster: %s. error: %v", ctxt, cluster.Name, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error forming the output. error: %v", err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", result)
	return nil
}
Example No. 8
func (s *CephProvider) GetClusterNodesForImport(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	bootstrapNode := req.RpcRequestVars["bootstrapnode"]
	var clusterForImport models.ClusterForImport

	out, err := cephapi_backend.GetCluster(bootstrapNode, ctxt)
	if err != nil {
		logger.Get().Error(
			"%s-Error getting cluster details. error: %v",
			ctxt,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			"Error getting cluster details")
		return err
	}
	clusterForImport.ClusterName = out.Name
	clusterForImport.ClusterId = out.Id
	clusterForImport.Compatible = true

	nodes, err := cephapi_backend.GetClusterNodes(bootstrapNode, out.Id, ctxt)
	if err != nil {
		logger.Get().Error(
			"%s-Error getting nodes participating in the cluster: %v",
			ctxt,
			out.Id)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			"Error getting nodes participating in the cluster")
		return err
	}
	var clusterNodes []models.NodeForImport
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var fetchedNode models.Node
	for _, node := range nodes {
		clusterNode := models.NodeForImport{
			Name: node.FQDN,
		}
		var nodeType []string
		for _, service := range node.Services {
			switch service.Type {
			case bigfin_models.NODE_SERVICE_MON:
				if ok := skyring_util.StringInSlice(bigfin_models.NODE_SERVICE_MON, nodeType); !ok {
					nodeType = append(nodeType, bigfin_models.NODE_SERVICE_MON)
				}
			case bigfin_models.NODE_SERVICE_OSD:
				if ok := skyring_util.StringInSlice(bigfin_models.NODE_SERVICE_OSD, nodeType); !ok {
					nodeType = append(nodeType, bigfin_models.NODE_SERVICE_OSD)
				}
			}
		}
		clusterNode.Type = nodeType
		if strings.HasPrefix(bootstrapNode, node.FQDN) {
			if node.CephVersion != "" && node.CephVersion != "null" {
				nodeVerStr := fmt.Sprintf(
					"%s.%s",
					strings.Split(node.CephVersion, ".")[0],
					strings.Split(node.CephVersion, ".")[1])
				nodeCephVersion, _ := strconv.ParseFloat(nodeVerStr, 64)
				clusterForImport.Compatible = (nodeCephVersion >= bigfin_conf.ProviderConfig.Provider.CompatVersion)
				clusterForImport.Version = nodeVerStr
			}
		}
		if err := coll.Find(
			bson.M{"hostname": node.FQDN}).One(&fetchedNode); err != nil {
			clusterNode.Found = false
		} else {
			clusterNode.Found = true
			clusterNode.Name = fetchedNode.Hostname
		}
		clusterNodes = append(clusterNodes, clusterNode)
	}

	clusterForImport.Nodes = clusterNodes
	result, err := json.Marshal(clusterForImport)
	if err != nil {
		logger.Get().Error(
			"%s-Error forming the output for import cluster nodes. error: %v",
			ctxt,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error forming the output. error: %v",
				err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", result)
	return nil
}
Example No. 9
func (s *CephProvider) ImportCluster(req models.RpcRequest, resp *models.RpcResponse) error {
	var request models.ImportClusterRequest
	ctxt := req.RpcRequestContext

	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(
			fmt.Sprintf("%s-Unbale to parse the import cluster request. error: %v",
				ctxt,
				err))
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unbale to parse the request. error: %v", err))
		return err
	}

	asyncTask := func(t *task.Task) {
		// Get cluster details and populate
		t.UpdateStatus("Updating cluster generic dtails (name/id)")
		cluster_uuid, clusterName, err := PopulateClusterDetails(request.BootstrapNode, ctxt)
		if err != nil {
			utils.FailTask(
				"Failed to fetch and populate cluster details",
				err,
				t)
			return
		}
		setClusterState(*cluster_uuid, models.CLUSTER_STATE_CREATING, ctxt)

		// Get the cluster network details and populate
		t.UpdateStatus("Updating cluster network details")
		if err := PopulateClusterNetworkDetails(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			utils.FailTask(
				fmt.Sprintf(
					"Error fetching and populating network details for cluster: %s",
					clusterName),
				err,
				t)
			setClusterState(*cluster_uuid, models.CLUSTER_STATE_FAILED, ctxt)
			return
		}
		// Get and populate cluster status details
		t.UpdateStatus("Updating cluster status")
		if err := PopulateClusterStatus(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			utils.FailTask(
				fmt.Sprintf(
					"Failed to fetch and populate status details for cluster: %s",
					clusterName),
				err,
				t)
			setClusterState(*cluster_uuid, models.CLUSTER_STATE_FAILED, ctxt)
			return
		}
		// Get and update nodes of the cluster
		t.UpdateStatus("Updating cluster nodes")
		failedNodes, err := PopulateClusterNodes(request.BootstrapNode, *cluster_uuid, request.Nodes, ctxt)
		if err != nil {
			utils.FailTask(
				fmt.Sprintf(
					"Failed populating node details for cluster: %s",
					clusterName),
				err,
				t)
			setClusterState(*cluster_uuid, models.CLUSTER_STATE_FAILED, ctxt)
			return
		}
		if len(failedNodes) > 0 {
			t.UpdateStatus("Failed to updated details of nodes: %v", failedNodes)
		}
		// Get and update storage pools
		t.UpdateStatus("Updating cluster storages")
		if err := PopulateStoragePools(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			t.UpdateStatus("Failed populating storage pools details")
			logger.Get().Warning(
				"%s-Failed populating storage pools details for cluster: %s. error: %v",
				ctxt,
				clusterName,
				err)
		}
		// Get and update OSDs
		t.UpdateStatus("Updating cluster SLUs")
		if err := PopulateClusterOSDs(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			t.UpdateStatus("Failed populating OSD details")
			logger.Get().Warning(
				"%s-Failed populating OSD details for cluster: %s. error: %v",
				ctxt,
				clusterName,
				err)
		}
		// Check and disable auto expand if colocated journals
		checkAndUpdateAutoExpand(t, *cluster_uuid, ctxt)

		// Get and update block devices
		t.UpdateStatus("Updating cluster block devices")
		if err := PopulateBlockDevices(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			t.UpdateStatus("Failed populating block devices details")
			logger.Get().Warning(
				"%s-Failed populating block devices details for cluster: %s. error: %v",
				ctxt,
				clusterName,
				err)
		}
		setClusterState(*cluster_uuid, models.CLUSTER_STATE_ACTIVE, ctxt)

		// Create default EC profiles
		t.UpdateStatus("Creating default EC profiles")
		if ok, err := CreateDefaultECProfiles(
			ctxt,
			request.BootstrapNode,
			*cluster_uuid); !ok || err != nil {
			logger.Get().Error(
				"%s-Error creating default EC profiles for cluster: %s. error: %v",
				ctxt,
				clusterName,
				err)
			t.UpdateStatus("Could not create default EC profile")
		}

		//Populate the CRUSH Details
		t.UpdateStatus("Setting up the CRUSH Map")
		if err := poplateCrushDetails(request.BootstrapNode, *cluster_uuid, ctxt); err != nil {
			t.UpdateStatus("Failed to update Crush map")
		}

		// Update the notification details
		t.UpdateStatus("Creating notification subscription configuartions")
		var bigfin_notifications []models.NotificationSubscription
		for _, notification := range models.Notifications {
			bigfin_notifications = append(bigfin_notifications, notification)
		}
		for _, notification := range bigfin_models.Notifications {
			bigfin_notifications = append(bigfin_notifications, notification)
		}
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		notifSubsColl := sessionCopy.
			DB(conf.SystemConfig.DBConfig.Database).
			C(models.COLL_NAME_CLUSTER_NOTIFICATION_SUBSCRIPTIONS)
		if err := notifSubsColl.Insert(
			models.ClusterNotificationSubscription{
				ClusterId:     *cluster_uuid,
				Notifications: bigfin_notifications}); err != nil {
			logger.Get().Error(
				"%s-Error persisting the default notification subscriptions on cluster %s. error %v",
				ctxt,
				clusterName,
				err)
			t.UpdateStatus("Could not create notification subscription configuartions")
		}

		t.UpdateStatus("Success")
		t.Done(models.TASK_STATUS_SUCCESS)
		return
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		fmt.Sprintf("%s-Import Cluster", bigfin_conf.ProviderName),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error(
			"%s - Task creation failed for import cluster. error: %v",
			ctxt,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			"Task creation failed for import cluster")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(
			taskId,
			"Task Created for import cluster",
			[]byte{})
	}
	return nil
}
Example No. 10
func (s *CephProvider) GetDiskHierarchy(req models.RpcRequest, resp *models.RpcResponse) error {
	var request models.DiskHierarchyRequest
	ctxt := req.RpcRequestContext

	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Unbale to parse the disk hierarchy request. error: %v", ctxt, err))
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unbale to parse the request. error: %v", err))
		return err
	}

	if request.JournalSize == "" {
		request.JournalSize = fmt.Sprintf("%dMB", JOURNALSIZE)
	}
	nodes, err := util.GetNodes(request.ClusterNodes)
	if err != nil {
		logger.Get().Error(
			"%s-Error getting nodes list from DB for cluster %s. error: %v",
			ctxt,
			request.ClusterName,
			err)
		return err
	}
	hierarchy := make(map[string]map[string]string)
	var storageSize float64
	for _, requestNode := range request.ClusterNodes {
		disksMap := make(map[string]string)
		uuid, err := uuid.Parse(requestNode.NodeId)
		if err != nil {
			logger.Get().Error(
				"%s-Error parsing node id: %s for cluster: %s. error: %v",
				ctxt,
				requestNode.NodeId,
				request.ClusterName,
				err)
			continue
		}

		devices := make(map[string]models.Disk)
		storageNode := nodes[*uuid]
		// Form a map of storage node disks
		var nodeDisksMap map[string]models.Disk = make(map[string]models.Disk)
		for _, storageDisk := range storageNode.StorageDisks {
			nodeDisksMap[storageDisk.Name] = storageDisk
		}

		for diskName, storageDisk := range nodeDisksMap {
			for idx := 0; idx < len(requestNode.Devices); idx++ {
				if diskName == requestNode.Devices[idx].Name {
					devices[requestNode.Devices[idx].Name] = storageDisk
				}
			}
		}

		// Utility function returns value in MB so multiply by 1024 to make it bytes
		jSize := utils.SizeFromStr(request.JournalSize) * float64(1024)
		diskWithJournalMapped := getDiskWithJournalMapped(devices, jSize)
		for disk, journal := range diskWithJournalMapped {
			disksMap[disk] = journal.JournalDisk
			for _, storageDisk := range storageNode.StorageDisks {
				if storageDisk.Name == disk {
					storageSize += storageDisk.Size
				}
			}
		}
		hierarchy[requestNode.NodeId] = disksMap
	}

	retVal := models.DiskHierarchyDetails{
		ClusterName: request.ClusterName,
		Hierarchy:   hierarchy,
		StorageSize: storageSize,
	}
	result, err := json.Marshal(retVal)
	if err != nil {
		logger.Get().Error(
			"%s-Error forming the output for get disk hierarchy of cluster: %s. error: %v",
			ctxt,
			request.ClusterName,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error forming the output. error: %v",
				err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", result)
	return nil
}
Example No. 11
func (s *CephProvider) GetClusterSummary(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext

	result := make(map[string]interface{})
	httpStatusCode := http.StatusOK

	cluster_id_str, ok := req.RpcRequestVars["cluster-id"]
	if !ok {
		logger.Get().Error("%s - Incorrect cluster id: %s", ctxt, cluster_id_str)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Incorrect cluster id: %s", cluster_id_str))
		return fmt.Errorf("Incorrect cluster id: %s", cluster_id_str)
	}
	clusterId, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. Error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s.Error: %v", cluster_id_str, err))
		return fmt.Errorf("Error parsing the cluster id: %s.Error: %v", cluster_id_str, err)
	}

	mon_down_count := 0
	monCriticalAlertsCount := 0
	mons, monErr := GetMons(bson.M{"clusterid": *clusterId})
	var err_str string
	if monErr != nil {
		err_str = fmt.Sprintf("Unable to fetch monitor nodes.Error %v", monErr.Error())
		logger.Get().Error("%s - Unable to fetch monitor nodes.Error %v", ctxt, monErr.Error())
	} else {
		for _, mon := range mons {
			monCriticalAlertsCount = monCriticalAlertsCount + mon.AlmCritCount
			if mon.Status == models.NODE_STATUS_ERROR {
				mon_down_count = mon_down_count + 1
			}
		}
		result[models.Monitor] = map[string]int{skyring_monitoring.TOTAL: len(mons), models.STATUS_DOWN: mon_down_count, "criticalAlerts": monCriticalAlertsCount}
	}

	cluster, clusterFetchErr := getCluster(*clusterId)
	if clusterFetchErr != nil {
		logger.Get().Error("%s - Unable to fetch cluster with id %v. Err %v", ctxt, *clusterId, clusterFetchErr)
		return fmt.Errorf("%s - Unable to fetch cluster with id %v. Err %v", ctxt, *clusterId, clusterFetchErr)
	}

	/*
		Fetch pg count
	*/
	if pgCount, err := ComputeClusterPGNum(cluster, ctxt); err == nil {
		result["pgnum"] = pgCount
	} else {
		err_str = err_str + fmt.Sprintf("%s", err.Error())
	}

	result[bigfin_models.OBJECTS] = ComputeClusterObjectCount(cluster)

	if err_str != "" {
		if len(result) != 0 {
			httpStatusCode = http.StatusPartialContent
		} else {
			httpStatusCode = http.StatusInternalServerError
		}
		err = fmt.Errorf("%s - %v", ctxt, err_str)
	}
	bytes, marshalErr := json.Marshal(result)
	if marshalErr != nil {
		logger.Get().Error("%s - Failed to marshal %v.Error %v", ctxt, result, marshalErr)
		*resp = utils.WriteResponseWithData(http.StatusInternalServerError, err_str, []byte{})
		return fmt.Errorf("%s - Failed to marshal %v.Error %v", ctxt, result, marshalErr)
	}
	*resp = utils.WriteResponseWithData(httpStatusCode, err_str, bytes)
	return nil
}