Example #1
func (a *App) GET_BlockDevice(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	storage_id_str := vars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return
	}
	blockdevice_id_str := vars["blockdevice-id"]
	blockdevice_id, err := uuid.Parse(blockdevice_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the block device id: %s. error: %v", ctxt, blockdevice_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the block device id: %s", blockdevice_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
	var blkDevice models.BlockDevice
	if err := collection.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id, "id": *blockdevice_id}).One(&blkDevice); err != nil {
		if err == mgo.ErrNotFound {
			HttpResponse(w, http.StatusNotFound, err.Error())
		} else {
			HttpResponse(w, http.StatusInternalServerError, err.Error())
		}
		logger.Get().Error("%s - Error getting the block device %v of storage %v on cluster: %v. error: %v", ctxt, *blockdevice_id, *storage_id, *cluster_id, err)
		return
	}
	if blkDevice.Name == "" {
		HttpResponse(w, http.StatusBadRequest, "Block device not found")
		logger.Get().Error("%s - Block device with id: %v not found for storage %v on cluster: %v. error: %v", ctxt, *blockdevice_id, *storage_id, *cluster_id, err)
		return
	} else {
		json.NewEncoder(w).Encode(blkDevice)
	}
}
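
The three parse-and-respond blocks above differ only in the route variable name. A minimal consolidation sketch, assuming the same HttpResponse and logger helpers from this codebase; the helper name and signature are hypothetical:

// parseUUIDVar is a hypothetical helper for the repeated pattern above:
// parse a mux route variable as a UUID, or write a 400 response and
// return nil so the caller can simply bail out.
func parseUUIDVar(w http.ResponseWriter, ctxt string, vars map[string]string, name string) *uuid.UUID {
	idStr := vars[name]
	id, err := uuid.Parse(idStr)
	if err != nil {
		logger.Get().Error("%s - Error parsing the %s: %s. error: %v", ctxt, name, idStr, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the %s: %s", name, idStr))
		return nil
	}
	return id
}

With it, each handler would open with calls like cluster_id := parseUUIDVar(w, ctxt, vars, "cluster-id") followed by a nil check.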
Example #2
func (a *App) GET_Storage(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	storage_id_str := vars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storage models.Storage
	if err := collection.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}).One(&storage); err != nil {
		if err == mgo.ErrNotFound {
			HttpResponse(w, http.StatusNotFound, err.Error())
		} else {
			HttpResponse(w, http.StatusInternalServerError, err.Error())
		}
		logger.Get().Error("%s-Error getting the storage: %v on cluster: %v. error: %v", ctxt, *storage_id, *cluster_id, err)
		return
	}
	if storage.Name == "" {
		HttpResponse(w, http.StatusBadRequest, "Storage not found")
		logger.Get().Error("%s-Storage with id: %v not found for cluster: %v. error: %v", ctxt, *storage_id, *cluster_id, err)
		return
	}
	if storage.SluIds, err = GetSluIds(storage.Profile, storage.ClusterId, ctxt); err != nil {
		HttpResponse(w, http.StatusInternalServerError, err.Error())
		logger.Get().Error("%s-Error getting SLUs with given storage profile: %s. error: %v", ctxt, storage.Profile, err)
		return
	}
	json.NewEncoder(w).Encode(storage)
}
Example #3
func (a *App) GET_ClusterBlockDevices(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
	var blkDevices []models.BlockDevice
	if err := collection.Find(bson.M{"clusterid": *cluster_id}).All(&blkDevices); err != nil {
		if err == mgo.ErrNotFound {
			HttpResponse(w, http.StatusNotFound, err.Error())
		} else {
			HttpResponse(w, http.StatusInternalServerError, err.Error())
		}
		logger.Get().Error("%s - Error getting the block devices list for cluster: %v. error: %v", ctxt, *cluster_id, err)
		return
	}
	if len(blkDevices) == 0 {
		json.NewEncoder(w).Encode([]models.BlockDevice{})
	} else {
		json.NewEncoder(w).Encode(blkDevices)
	}
}
Example #4
func GetEventById(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	event_id_str := vars["event-id"]
	event_id, err := uuid.Parse(event_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing event id: %s", ctxt, event_id_str)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing event id: %s", event_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_APP_EVENTS)
	var event models.AppEvent
	if err := collection.Find(bson.M{"eventid": *event_id}).One(&event); err == mgo.ErrNotFound {
		HttpResponse(w, http.StatusNotFound, "Event not found")
		logger.Get().Error("%s-Event: %v not found. error: %v", ctxt, *event_id, err)
		return
	} else if err != nil {
		logger.Get().Error("%s-Error getting the event detail for %v. error: %v", ctxt, *event_id, err)
		HttpResponse(w, http.StatusBadRequest, "Event finding the record")
		return
	} else {
		json.NewEncoder(w).Encode(event)
	}
}
Example #5
func (a *App) getSubTasks(rw http.ResponseWriter, req *http.Request) {
	ctxt, err := GetContext(req)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}
	vars := mux.Vars(req)
	taskId, err := uuid.Parse(vars["taskid"])
	if err != nil {
		logger.Get().Error("%s-Unable to Parse the Id: %s. error: %v", ctxt, vars["taskId"], err)
		rw.WriteHeader(http.StatusBadRequest)
		bytes, _ := json.Marshal(APIError{Error: err.Error()})
		rw.Write(bytes)
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	var tasks []models.AppTask
	if err := coll.Find(bson.M{"parentid": *taskId}).All(&tasks); err != nil {
		logger.Get().Error("%s-Unable to get tasks. error: %v", ctxt, err)
		if err == mgo.ErrNotFound {
			HttpResponse(rw, http.StatusNotFound, err.Error())
		} else {
			HttpResponse(rw, http.StatusInternalServerError, err.Error())
		}
		return
	}
	if len(tasks) == 0 {
		json.NewEncoder(rw).Encode([]models.AppTask{})
	} else {
		json.NewEncoder(rw).Encode(tasks)
	}
}
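
The len(tasks) == 0 branch exists because encoding/json renders a nil slice as null rather than []; the explicit empty slice guarantees clients always receive a JSON array. A self-contained illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilTasks []int
	emptyTasks := []int{}
	a, _ := json.Marshal(nilTasks)   // null
	b, _ := json.Marshal(emptyTasks) // []
	fmt.Println(string(a), string(b))
}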
Example #6
func (a *App) GET_BlockDevices(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error getting the context. error: %v", err)
	}

	filter := make(bson.M)
	params := r.URL.Query()
	cluster_id_str := params.Get("clusterid")
	if cluster_id_str != "" {
		filter["clusterid"], err = uuid.Parse(cluster_id_str)
		if err != nil {
			logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
			HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
			return
		}
	}
	storage_id_str := params.Get("storageid")
	if storage_id_str != "" {
		filter["storageid"], err = uuid.Parse(storage_id_str)
		if err != nil {
			logger.Get().Error("%s - Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
			HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
			return
		}
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
	var blkDevices []models.BlockDevice
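	// Note: a find error is not reported here; it falls through to the
	// empty-result branch below, unlike the handlers above which respond
	// with an error status.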
	if err := coll.Find(filter).All(&blkDevices); err != nil || len(blkDevices) == 0 {
		json.NewEncoder(w).Encode([]models.BlockDevice{})
	} else {
		json.NewEncoder(w).Encode(blkDevices)
	}
}
Example #7
func (s Salt) GetNodeID(node string, ctxt string) (id uuid.UUID, err error) {
	mutex.Lock()
	defer mutex.Unlock()
	pyobj, loc_err := pyFuncs["GetNodeID"].Call(node, ctxt)
	if loc_err == nil {
		var s string
		loc_err = gopy.Convert(python.PyDict_GetItemString(pyobj, node), &s)
		if loc_err == nil {
			var i *uuid.UUID
			// Assign rather than redeclare loc_err, so a parse failure
			// is propagated instead of being masked by a shadowed variable.
			if i, loc_err = uuid.Parse(s); loc_err == nil {
				return *i, nil
			}
		}
	}
	return uuid.UUID{}, loc_err
}
Example #8
// ListMultipartUploads - lists all the pending multipart uploads on a
// bucket. Additionally takes 'prefix', 'keyMarker', 'uploadIDMarker' and a
// delimiter, which allows us to list uploads matching a particular
// prefix, or lexically starting from 'keyMarker', or delimiting the
// output to get a directory-like listing.
//
// Implements the S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	result := ListMultipartsInfo{}

	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListMultipartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
	}
	if !xl.isBucketExist(bucket) {
		return ListMultipartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
	}
	if !IsValidObjectPrefix(prefix) {
		return ListMultipartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashSeparator {
		return ListMultipartsInfo{}, traceError(UnsupportedDelimiter{
			Delimiter: delimiter,
		})
	}
	// Verify if marker has prefix.
	if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
		return ListMultipartsInfo{}, traceError(InvalidMarkerPrefixCombination{
			Marker: keyMarker,
			Prefix: prefix,
		})
	}
	if uploadIDMarker != "" {
		if strings.HasSuffix(keyMarker, slashSeparator) {
			return result, traceError(InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,
				KeyMarker:      keyMarker,
			})
		}
		id, err := uuid.Parse(uploadIDMarker)
		if err != nil {
			return result, traceError(err)
		}
		if id.IsZero() {
			return result, traceError(MalformedUploadID{
				UploadID: uploadIDMarker,
			})
		}
	}
	return xl.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}
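
A hedged usage sketch for draining the listing page by page; the result field names (IsTruncated, NextKeyMarker, NextUploadIDMarker, Uploads with Object/UploadID) are assumptions based on the S3 response shape, not confirmed from this codebase:

// listAllUploads drains ListMultipartUploads for a prefix, following
// the continuation markers until the listing is no longer truncated.
func listAllUploads(xl xlObjects, bucket, prefix string) error {
	keyMarker, uploadIDMarker := "", ""
	for {
		result, err := xl.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, slashSeparator, 1000)
		if err != nil {
			return err
		}
		for _, upload := range result.Uploads {
			fmt.Println(upload.Object, upload.UploadID)
		}
		if !result.IsTruncated {
			return nil
		}
		keyMarker, uploadIDMarker = result.NextKeyMarker, result.NextUploadIDMarker
	}
}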
Example #9
func GetNodesByIdStr(clusterNodes []models.ClusterNode) (map[string]models.Node, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var nodes = make(map[string]models.Node)
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	for _, clusterNode := range clusterNodes {
		nodeId, err := uuid.Parse(clusterNode.NodeId)
		if err != nil {
			return nodes, fmt.Errorf("Error parsing node id: %v", clusterNode.NodeId)
		}
		var node models.Node
		if err := coll.Find(bson.M{"nodeid": *nodeId}).One(&node); err != nil {
			return nodes, err
		}
		nodes[clusterNode.NodeId] = node
	}
	return nodes, nil
}
Example #10
// ListMultipartUploads - lists all the pending multipart uploads on a
// bucket. Additionally takes 'prefix', 'keyMarker', 'uploadIDMarker' and a
// delimiter, which allows us to list uploads matching a particular
// prefix, or lexically starting from 'keyMarker', or delimiting the
// output to get a directory-like listing.
//
// Implements the S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	// Validate input arguments.
	if !IsValidBucketName(bucket) {
		return ListMultipartsInfo{}, BucketNameInvalid{Bucket: bucket}
	}
	if !fs.isBucketExist(bucket) {
		return ListMultipartsInfo{}, BucketNotFound{Bucket: bucket}
	}
	if !IsValidObjectPrefix(prefix) {
		return ListMultipartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashSeparator {
		return ListMultipartsInfo{}, UnsupportedDelimiter{
			Delimiter: delimiter,
		}
	}
	// Verify if marker has prefix.
	if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
		return ListMultipartsInfo{}, InvalidMarkerPrefixCombination{
			Marker: keyMarker,
			Prefix: prefix,
		}
	}
	if uploadIDMarker != "" {
		if strings.HasSuffix(keyMarker, slashSeparator) {
			return ListMultipartsInfo{}, InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,
				KeyMarker:      keyMarker,
			}
		}
		id, err := uuid.Parse(uploadIDMarker)
		if err != nil {
			return ListMultipartsInfo{}, err
		}
		if id.IsZero() {
			return ListMultipartsInfo{}, MalformedUploadID{
				UploadID: uploadIDMarker,
			}
		}
	}
	return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}
Example #11
func lockNode(ctxt string, nodeId uuid.UUID, hostname string, operation string) (*lock.AppLock, error) {
	//lock the node
	locks := make(map[uuid.UUID]string)
	if nodeId.IsZero() {
		//Generate temporary UUID from hostname for the node
		//for locking as the UUID is not available at this point
		id, err := uuid.Parse(util.Md5FromString(hostname))
		if err != nil {
			return nil, fmt.Errorf("Unable to create the UUID for locking for host: %s. error: %v", hostname, err)
		}
		nodeId = *id
	}
	locks[nodeId] = fmt.Sprintf("%s : %s", operation, hostname)
	appLock := lock.NewAppLock(locks)
	if err := GetApp().GetLockManager().AcquireLock(ctxt, *appLock); err != nil {
		return nil, err
	}
	return appLock, nil
}
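
The temporary id works because util.Md5FromString presumably returns a 32-character hex digest, which this uuid package accepts as a dash-less UUID string; the same hostname therefore always contends on the same lock key. A standard-library sketch of that digest step (md5Hex is a hypothetical stand-in for util.Md5FromString):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// md5Hex mirrors what util.Md5FromString is assumed to do: a stable
// 32-character hex digest usable as a temporary UUID string.
func md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(md5Hex("node1.example.com")) // same input, same pseudo-UUID
}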
Example #12
func getClusterNodesFromRequest(clusterNodes []models.ClusterNode) (models.Nodes, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	var nodes models.Nodes
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	for _, clusterNode := range clusterNodes {
		nodeId, err := uuid.Parse(clusterNode.NodeId)
		if err != nil {
			return nodes, err
		}
		var node models.Node
		if err := coll.Find(bson.M{"nodeid": *nodeId}).One(&node); err != nil {
			return nodes, err
		}
		nodes = append(nodes, node)
	}
	return nodes, nil
}
Example #13
func (s *CephProvider) StopTask(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	task_id_str := req.RpcRequestVars["task-id"]
	logger.Get().Debug(fmt.Sprintf("%s-Stopping sub-task: %s", ctxt, task_id_str))
	task_id, err := uuid.Parse(task_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the task id: %s. error: %v", ctxt, task_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the task id: %s", task_id_str))
		return err
	}

	// Stop the task
	if ok, err := task.GetTaskManager().Stop(*task_id); !ok || err != nil {
		logger.Get().Error("%s-Failed to stop the task: %v. error: %v", ctxt, *task_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Failed to stop task")
		return err
	}
	*resp = utils.WriteResponse(http.StatusOK, "Done")
	return nil
}
Example #14
func (s *CephProvider) MonitorCluster(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext

	cluster_id_str, ok := req.RpcRequestVars["cluster-id"]
	var monnode *models.Node
	if !ok {
		logger.Get().Error("%s-Incorrect cluster id: %s", ctxt, cluster_id_str)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Incorrect cluster id: %s", cluster_id_str))
		return fmt.Errorf("Incorrect cluster id: %s", cluster_id_str)
	}
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. Error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s. Error: %v", cluster_id_str, err))
		return fmt.Errorf("Error parsing the cluster id: %s. Error: %v", cluster_id_str, err)
	}
	cluster, err := getCluster(*cluster_id)
	if err != nil {
		logger.Get().Error("%s-Unable to get cluster with id %v.Err %v", ctxt, cluster_id, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unable to get cluster with id %v.Err %v", cluster_id, err.Error()))
		return fmt.Errorf("Unable to get cluster with id %v.Err %v", cluster_id, err.Error())
	}
	monnode, err = GetCalamariMonNode(*cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Unable to pick a random mon from cluster %v.Error: %v", ctxt, cluster.Name, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unable to pick a random mon from cluster %v.Error: %v", cluster.Name, err.Error()))
		return fmt.Errorf("Unable to pick a random mon from cluster %v.Error: %v", cluster.Name, err.Error())
	}
	monName := (*monnode).Hostname
	err = initMonitoringRoutines(ctxt, cluster, monName, MonitoringRoutines)
	if err != nil {
		logger.Get().Error("%s-Error: %v", ctxt, err.Error())
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error: %v", err.Error()))
		return fmt.Errorf("Error: %v", err.Error())
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", []byte{})
	return nil
}
Example #15
func PatchEvent(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}
	var m map[string]interface{}
	vars := mux.Vars(r)
	event_id_str := vars["event-id"]
	event_id, err := uuid.Parse(event_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing event id: %s", ctxt, event_id_str)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing event id: %s", event_id_str))
		return
	}

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Get().Error("%s-Error parsing http request body:%s", ctxt, err)
		HandleHttpError(w, err)
		return
	}
	if err = json.Unmarshal(body, &m); err != nil {
		logger.Get().Error("%s-Unable to Unmarshall the data:%s", ctxt, err)
		HandleHttpError(w, err)
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_APP_EVENTS)
	var event models.AppEvent
	if err := collection.Find(bson.M{"eventid": *event_id}).One(&event); err == mgo.ErrNotFound {
		HttpResponse(w, http.StatusBadRequest, "Event not found")
		logger.Get().Error("%s-Event: %v not found. error: %v", ctxt, *event_id, err)
		return
	} else if err != nil {
		logger.Get().Error("%s-Error getting the event detail for %v. error: %v", ctxt, *event_id, err)
		HttpResponse(w, http.StatusBadRequest, "Event finding the record")
		return
	}

	if event.Severity == models.ALARM_STATUS_CLEARED {
		logger.Get().Error("%s-Cannot ack an event with severity: %s", ctxt, event.Severity.String())
		HttpResponse(w, http.StatusBadRequest, "Event with this severity cannot be acked. ")
		return
	}

	if event.Acked {
		logger.Get().Error("%s-Cannot ack this event as its already acked", ctxt)
		HttpResponse(w, http.StatusBadRequest, "Event Cannot be acked as its already acked.")
		return
	}

	if val, ok := m["acked"]; ok {
		event.Acked = val.(bool)
	} else {
		logger.Get().Error("%s-Insufficient details for patching event: %v", ctxt, *event_id)
		HandleHttpError(w, errors.New("insufficient detail for patching event"))
		return
	}
	if val, ok := m["ackcomment"]; ok {
		event.UserAckComment = val.(string)
	}
	event.AckedByUser = strings.Split(ctxt, ":")[0]
	event.UserAckedTime = time.Now()

	err = collection.Update(bson.M{"eventid": *event_id}, bson.M{"$set": event})
	if err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Error updating record in DB for event: %v. error: %v", ctxt, event_id_str, err))
		HttpResponse(w, http.StatusInternalServerError, err.Error())
	}

	clearedSeverity := event.Severity
	event.Severity = models.ALARM_STATUS_CLEARED
	common_event.UpdateAlarmCount(event, clearedSeverity, ctxt)
}
Example #16
func (s *CephProvider) UpdateStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the cluster id: %s. error: %v",
			ctxt,
			cluster_id_str,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the cluster id: %s",
				cluster_id_str))
		return err
	}
	storage_id_str := req.RpcRequestVars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the storage id: %s. error: %v",
			ctxt,
			storage_id_str,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the storage id: %s",
				storage_id_str))
		return err
	}
	var request models.AddStorageRequest
	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(
			"%s - Unbale to parse the request. error: %v",
			ctxt,
			err)
		*resp = utils.WriteResponse(
			http.StatusBadRequest,
			fmt.Sprintf(
				"Unbale to parse the request. error: %v",
				err))
		return err
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storage models.Storage
	if err := coll.Find(bson.M{"storageid": *storage_id}).One(&storage); err != nil {
		logger.Get().Error(
			"%s - Error getting detals of storage: %v on cluster: %v. error: %v",
			ctxt,
			*storage_id,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error getting the details of storage: %v",
				*storage_id))
		return err
	}
	id, err := strconv.Atoi(storage.Options["id"])
	if err != nil {
		logger.Get().Error(
			"%s - Error getting id of the pool: %v of cluster: %v. error: %v",
			ctxt,
			*storage_id,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error getting id of the pool: %v",
				*storage_id))
		return err
	}
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started ceph provider pool updation: %v", t.ID)
				if request.Name != "" && (request.Replicas != 0 || len(request.Options) != 0) {
					utils.FailTask(
						fmt.Sprintf(
							"Invalid mix of fields to update for storage: %v of cluster: %v. "+
								"Name change cannot be mixed with other changes.",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-Invalid mix of fields to update", ctxt),
						t)
					return
				}
				for key := range request.Options {
					if ok := skyring_util.StringInSlice(key, validConfigs); !ok {
						utils.FailTask(
							fmt.Sprintf(
								"Invalid configuration: %s mentioned for storage: %v of cluster: %v",
								key,
								*storage_id,
								*cluster_id),
							fmt.Errorf("%s-%v", ctxt, err),
							t)
						return
					}
				}
				t.UpdateStatus("Getting a radom mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask(
						fmt.Sprintf(
							"Error getting mon node from cluster: %v",
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					return
				}
				var updatedFields = make(map[string]interface{})
				if request.Name != "" {
					updatedFields["name"] = request.Name
				}
				if request.Replicas != 0 {
					updatedFields["size"] = request.Replicas
				}
				if request.QuotaEnabled {
					for key, value := range request.QuotaParams {
						reqVal, _ := strconv.ParseUint(value, 10, 64)
						updatedFields[key] = uint64(reqVal)
					}
				} else {
					if request.QuotaParams["quota_max_objects"] == "0" && request.QuotaParams["quota_max_bytes"] == "0" {
						updatedFields["quota_max_objects"] = 0
						updatedFields["quota_max_bytes"] = 0
					}
				}
				for key, value := range request.Options {
					reqVal, _ := strconv.ParseUint(value, 10, 32)
					updatedFields[key] = uint(reqVal)
				}
				t.UpdateStatus("Updating pool details")
				ok, err := cephapi_backend.UpdatePool(
					monnode.Hostname,
					*cluster_id, id,
					updatedFields,
					ctxt)
				if err != nil || !ok {
					utils.FailTask(
						fmt.Sprintf(
							"Error setting the configurations for storage: %v on cluster: %v",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					return
				}
				filter := make(bson.M)
				updates := make(bson.M)
				filter["storageid"] = *storage_id
				filter["clusterid"] = *cluster_id
				if request.Name != "" {
					updates["name"] = request.Name
				}
				if request.Replicas != 0 {
					updates["replicas"] = request.Replicas
				}
				if request.QuotaEnabled {
					updates["quotaenabled"] = true
					params := make(map[string]string)
					for key, value := range request.QuotaParams {
						params[key] = value
					}
					updates["quotaparams"] = params
				} else {
					if request.QuotaParams["quota_max_objects"] == "0" && request.QuotaParams["quota_max_bytes"] == "0" {
						updates["quotaenabled"] = false
						updates["quotaparams"] = map[string]string{}
					}
				}

				if value, ok := request.Options["pg_num"]; ok {
					updates["options.pgp_num"] = value
				}
				t.UpdateStatus("Persisting pool updates in DB")
				if err := coll.Update(filter, bson.M{"$set": updates}); err != nil {
					utils.FailTask(
						fmt.Sprintf(
							"Error updating storage entity: %v of cluster: %v",
							*storage_id,
							*cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
				}

				cluster, err := getCluster(*cluster_id)
				if err != nil {
					logger.Get().Error("%s - Failed to get details of cluster: %s. error: %v", ctxt, *cluster_id, err)
				} else {
					initMonitoringRoutines(ctxt, cluster, (*monnode).Hostname, []interface{}{FetchOSDStats})
					UpdatePgNumToSummaries(cluster, ctxt)
				}

				if _, ok := updates["name"]; ok {
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
					if _, err := coll.UpdateAll(filter, bson.M{"$set": bson.M{"storagename": updates["name"]}}); err != nil && err != mgo.ErrNotFound {
						utils.FailTask(
							fmt.Sprintf(
								"Storage name has changed for storage:%v. Error while updating this info for RBDs in cluster:%v",
								*storage_id,
								*cluster_id),
							fmt.Errorf("%s-%v", ctxt, err),
							t)
					}
				}
				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-UpdateStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error(
			"%s-Task creation failed for update storage on cluster: %v. error: %v",
			ctxt,
			*cluster_id,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			"Task creation failed for storage update")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(
			taskId,
			fmt.Sprintf(
				"Task Created for update storage on cluster: %v",
				*cluster_id),
			[]byte{})
	}
	return nil
}
Example #17
func (c CephApi) CreateECPool(
	name string,
	mon string,
	clusterName string,
	pgnum uint,
	replicas int,
	quotaMaxObjects int,
	quotaMaxBytes uint64,
	ecProfile string,
	ruleset map[string]interface{},
	sProfile string,
	ctxt string) (bool, error) {
	// Get the cluster id (cluster_id here is the package-level lookup helper)
	cluster_id_str, err := cluster_id(clusterName)
	if err != nil {
		return false, fmt.Errorf("Could not get id for cluster: %s. error: %v", clusterName, err)
	}

	cid, err := uuid.Parse(cluster_id_str)
	if err != nil {
		return false, fmt.Errorf("Error parsing the cluster id: %s. error: %v", cluster_id_str, err)
	}

	//Create the crush rule
	cRule := backend.CrushRuleRequest{Name: name, Type: "erasure", MinSize: 3, MaxSize: ec_pool_sizes[ecProfile]}
	var steps []map[string]interface{}

	leafTries := make(map[string]interface{})
	leafTries["num"] = 5
	leafTries["op"] = "set_chooseleaf_tries"
	steps = append(steps, leafTries)

	tries := make(map[string]interface{})
	tries["num"] = 100
	tries["op"] = "set_choose_tries"
	steps = append(steps, tries)

	step_take := make(map[string]interface{})
	step_take["item_name"] = sProfile
	step_take["item"] = ruleset["crushnodeid"].(int)
	step_take["op"] = "take"
	steps = append(steps, step_take)

	leaf := make(map[string]interface{})
	leaf["num"] = 0
	leaf["type"] = "osd"
	leaf["op"] = "chooseleaf_indep"
	steps = append(steps, leaf)

	emit := make(map[string]interface{})
	emit["op"] = "emit"
	steps = append(steps, emit)

	cRule.Steps = steps

	cRuleId, err := c.CreateCrushRule(mon, *cid, cRule, ctxt)
	if err != nil {
		return false, errors.New(fmt.Sprintf("Failed to create pool: %s for cluster: %s. error: %v", name, clusterName, err))
	}

	// Replace cluster id in route pattern
	createPoolRoute := CEPH_API_ROUTES["CreatePool"]
	createPoolRoute.Pattern = strings.Replace(createPoolRoute.Pattern, "{cluster-fsid}", cluster_id_str, 1)

	pool := map[string]interface{}{
		"name":                 name,
		"quota_max_objects":    quotaMaxObjects,
		"quota_max_bytes":      quotaMaxBytes,
		"pg_num":               int(pgnum),
		"pgp_num":              int(pgnum),
		"type":                 "erasure",
		"erasure_code_profile": ecProfile,
		"crush_ruleset":        cRuleId,
	}

	buf, err := json.Marshal(pool)
	if err != nil {
		return false, errors.New(fmt.Sprintf("Error forming request body. error: %v", err))
	}
	body := bytes.NewBuffer(buf)
	resp, err := route_request(createPoolRoute, mon, body)
	defer closeRespBody(resp)
	if err != nil || (resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted) {
		return false, fmt.Errorf("Failed to create pool: %s for cluster: %s. error: %v", name, clusterName, err)
	}
	return syncRequestStatus(mon, resp)
}
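
For reference, the five steps assembled above marshal into the JSON shape sent in the crush rule request; a small runnable sketch (the take item values are illustrative, not from the original):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same shape as the steps slice built in CreateECPool above.
	steps := []map[string]interface{}{
		{"op": "set_chooseleaf_tries", "num": 5},
		{"op": "set_choose_tries", "num": 100},
		{"op": "take", "item": 5, "item_name": "sas"},
		{"op": "chooseleaf_indep", "num": 0, "type": "osd"},
		{"op": "emit"},
	}
	out, _ := json.MarshalIndent(steps, "", "  ")
	fmt.Println(string(out))
}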
Example #18
func (s *CephProvider) RemoveStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}
	storage_id_str := req.RpcRequestVars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return err
	}

	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started ceph provider pool deletion: %v", t.ID)
				// Get the storage details
				var storage models.Storage
				var cluster models.Cluster

				t.UpdateStatus("Getting details of cluster")
				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
				if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
					utils.FailTask("Error getting details of cluster", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting details of storage")
				coll1 := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
				if err := coll1.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}).One(&storage); err != nil {
					utils.FailTask("Error getting details of storage", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting a mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask("Error getting a mon node for cluster", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				poolId, err := strconv.Atoi(storage.Options["id"])
				if err != nil {
					utils.FailTask("Error getting id of storage", fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Deleting storage")
				ok, err := cephapi_backend.RemovePool(monnode.Hostname, *cluster_id, cluster.Name, storage.Name, poolId, ctxt)
				if err != nil || !ok {
					utils.FailTask(fmt.Sprintf("Deletion of storage %v failed on cluster: %s", *storage_id, *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				} else {
					t.UpdateStatus("Removing the block devices (if any) for storage entoty")
					coll2 := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
					if _, err := coll2.RemoveAll(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}); err != nil {
						utils.FailTask(fmt.Sprintf("Error removing block devices for storage %v from DB for cluster: %d", *storage_id, *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
						return
					}
					t.UpdateStatus("Removing the storage entity from DB")
					if err := coll1.Remove(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}); err != nil {
						utils.FailTask(fmt.Sprintf("Error removing storage entity from DB for cluster: %d", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
						return
					}
				}

				t.UpdateStatus("Syncing SLUs")
				if err := SyncOsdStatus(*cluster_id, ctxt); err != nil {
					utils.FailTask("Error syncing SLUs", err, t)
					return
				}

				skyring_util.UpdateStorageCountToSummaries(ctxt, cluster)
				UpdateObjectCountToSummaries(ctxt, cluster)

				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-DeleteStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Task creation failed for delete storage on cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Task creation failed for storage deletion")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(taskId, fmt.Sprintf("Task Created for delete storage on cluster: %v", *cluster_id), []byte{})
	}
	return nil
}
Example #19
func (s *CephProvider) CreateStorage(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	var request models.AddStorageRequest
	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error("%s - Unbale to parse the request. error: %v", ctxt, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unbale to parse the request. error: %v", err))
		return err
	}

	// Create the storage pool
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}

	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				var cluster models.Cluster

				t.UpdateStatus("Started ceph provider storage creation: %v", t.ID)

				t.UpdateStatus("Getting cluster details")
				// Get cluster details
				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
				if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
					utils.FailTask(fmt.Sprintf("Error getting the cluster details for :%v", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				t.UpdateStatus("Getting a mon from cluster")
				monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
				if err != nil {
					utils.FailTask(fmt.Sprintf("Error getting mon node details for cluster: %v", *cluster_id), fmt.Errorf("%s - %v", ctxt, err), t)
					return
				}

				poolId, ok := createPool(ctxt, *cluster_id, request, t)
				if !ok {
					return
				}
				if request.Type != models.STORAGE_TYPE_ERASURE_CODED && len(request.BlockDevices) > 0 {
					createBlockDevices(ctxt, monnode.Hostname, cluster, *poolId, request, t)
				}

				t.UpdateStatus("Syncing SLUs")
				if err := SyncOsdStatus(*cluster_id, ctxt); err != nil {
					utils.FailTask("Error syncing SLUs", err, t)
					return
				}

				initMonitoringRoutines(ctxt, cluster, (*monnode).Hostname, []interface{}{FetchObjectCount})
				_, cStats, err := updateClusterStats(ctxt, cluster, (*monnode).Hostname)
				if err == nil {
					updateStatsToPools(ctxt, cStats, cluster.ClusterId)
				}
				skyring_util.UpdateStorageCountToSummaries(ctxt, cluster)
				UpdateObjectCountToSummaries(ctxt, cluster)
				t.UpdateStatus("Success")
				t.Done(models.TASK_STATUS_SUCCESS)
				return
			}
		}
	}
	if taskId, err := bigfin_task.GetTaskManager().Run(
		bigfin_conf.ProviderName,
		"CEPH-CreateStorage",
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Task creation failed for create storage %s on cluster: %v. error: %v", ctxt, request.Name, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, "Task creation failed for storage creation")
		return err
	} else {
		*resp = utils.WriteAsyncResponse(taskId, fmt.Sprintf("Task Created for create storage %s on cluster: %v", request.Name, *cluster_id), []byte{})
	}
	return nil
}
Example #20
func (a *App) PATCH_ResizeBlockDevice(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device for cluster: %s", cluster_id_str),
			fmt.Sprintf("Failed to resize block device for cluster: %s Error: %v", cluster_id_str, err),
			nil,
			nil,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	clusterName, err := GetClusterNameById(cluster_id)
	if err != nil {
		clusterName = cluster_id_str
	}

	blockdevice_id_str := vars["blockdevice-id"]
	blockdevice_id, err := uuid.Parse(blockdevice_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the block device id: %s. error: %v", ctxt, blockdevice_id_str, err)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
				blockdevice_id_str, clusterName),
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
				blockdevice_id_str, clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the block device id: %s", blockdevice_id_str))
		return
	}
	blkDevice, _ := block_device_exists("id", *blockdevice_id)
	if blkDevice == nil {
		logger.Get().Error("%s - Block device: %v does not exist", ctxt, *blockdevice_id)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
				blockdevice_id_str, clusterName),
			fmt.Sprintf(
				"Failed to resize block device: %s for cluster: %s Error: %v",
				blockdevice_id_str,
				clusterName,
				fmt.Errorf("Block device does not exist")),
			blockdevice_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Block device: %v not found", *blockdevice_id))
		return
	}

	var request struct {
		Size string `json:"size"`
	}
	// Unmarshal the request body
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error("%s - Error parsing the request. error: %v", ctxt, err)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s", blkDevice.Name,
				clusterName),
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
				blkDevice.Name, clusterName, err),
			blockdevice_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to parse the request: %v", err))
		return
	}
	if err := json.Unmarshal(body, &request); err != nil {
		logger.Get().Error("%s - Unable to unmarshal request. error: %v", ctxt, err)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s", blkDevice.Name, clusterName),
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v", blkDevice.Name, clusterName, err),
			blockdevice_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to unmarshal request: %v", err))
		return
	}

	// Validate storage target size info
	if ok, err := valid_size(request.Size); !ok || err != nil {
		logger.Get().Error("%s - Invalid size: %v", ctxt, request.Size)
		if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
			fmt.Sprintf("Failed to resize block device: %s for cluster: %s", blkDevice.Name, clusterName),
			fmt.Sprintf(
				"Failed to resize block device: %s for cluster: %s Error: %v",
				blkDevice.Name,
				clusterName,
				fmt.Errorf("Invalid size passed")),
			blockdevice_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Invalid size: %s passed for: %v", request.Size, *blockdevice_id))
		return
	}

	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	// Get the specific provider and invoke the method
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started the task for block device resize: %v", t.ID)
				provider := a.GetProviderFromClusterId(ctxt, *cluster_id)
				if provider == nil {
					util.FailTask("", errors.New(fmt.Sprintf("%s - Error getting provider for cluster: %v", ctxt, *cluster_id)), t)
					if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
						fmt.Sprintf("Failed to resize block device: %s for cluster: %s", blkDevice.Name, clusterName),
						fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
							blkDevice.Name,
							clusterName,
							fmt.Errorf("Error getting provider for this cluster")),
						blockdevice_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
					}
					return
				}
				err = provider.Client.Call(fmt.Sprintf("%s.%s",
					provider.Name, block_device_functions["resize"]),
					models.RpcRequest{RpcRequestVars: vars, RpcRequestData: body, RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("%s - Error resizing block device: %v on cluster: %v", ctxt, *blockdevice_id, *cluster_id), err, t)
					if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
						fmt.Sprintf("Failed to resize block device: %s for cluster: %s", blkDevice.Name, clusterName),
						fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
							blkDevice.Name, clusterName, err),
						blockdevice_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
					}
					return
				} else {
					// Update the master task id
					providerTaskId, err = uuid.Parse(result.Data.RequestId)
					if err != nil {
						util.FailTask(fmt.Sprintf("%s - Error parsing provider task id while resizing block device: %v for cluster: %v", ctxt, *blockdevice_id, *cluster_id), err, t)
						if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
							fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
								blkDevice.Name, clusterName),
							fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
								blkDevice.Name, clusterName, err),
							blockdevice_id,
							cluster_id,
							models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
						}
						return
					}
					t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
					if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
						util.FailTask(fmt.Sprintf("%s - Error adding sub task while resizing block device: %v on cluster: %v", ctxt, *blockdevice_id, *cluster_id), err, t)
						if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
							fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
								blkDevice.Name, clusterName),
							fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
								blkDevice.Name, clusterName, err),
							blockdevice_id,
							cluster_id,
							models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
						}
						return
					}
					// Check for provider task to complete and update the parent task
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
					var providerTask models.AppTask
					for {
						time.Sleep(2 * time.Second)
						if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
							util.FailTask(fmt.Sprintf("%s - Error getting sub task status while resizing block device: %v on cluster: %v", ctxt, *blockdevice_id, *cluster_id), err, t)
							if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
								fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
									blkDevice.Name, clusterName),
								fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
									blkDevice.Name, clusterName, err),
								blockdevice_id,
								cluster_id,
								models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
								&(t.ID),
								false,
								ctxt); err != nil {
								logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
							}
							return
						}
						if providerTask.Completed {
							if providerTask.Status == models.TASK_STATUS_SUCCESS {
								if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
									fmt.Sprintf("Resized block device: %s for cluster: %s",
										blkDevice.Name, clusterName),
									fmt.Sprintf("Resized block device: %s for cluster: %s",
										blkDevice.Name, clusterName),
									blockdevice_id,
									cluster_id,
									models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
									&(t.ID),
									false,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
								}
								t.UpdateStatus("Success")
								t.Done(models.TASK_STATUS_SUCCESS)
							} else if providerTask.Status == models.TASK_STATUS_FAILURE {
								if err := logAuditEvent(EventTypes["BLOCK_DEVICVE_RESIZE"],
									fmt.Sprintf("Failed to resize block device: %s for cluster: %s",
										blkDevice.Name, clusterName),
									fmt.Sprintf("Failed to resize block device: %s for cluster: %s Error: %v",
										blkDevice.Name,
										clusterName,
										fmt.Errorf("Task for block device recize failed")),
									blockdevice_id,
									cluster_id,
									models.NOTIFICATION_ENTITY_BLOCK_DEVICE,
									&(t.ID),
									false,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log resize block device for cluster event. Error: %v", ctxt, err)
								}
								t.UpdateStatus("Failed")
								t.Done(models.TASK_STATUS_FAILURE)
							}
							break
						}
					}
				}
				return
			}
		}
	}
	if taskId, err := a.GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Resize Block Device: %v", *blockdevice_id),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s - Unable to create task for resize block device:%v on cluster: %v. error: %v", ctxt, *blockdevice_id, *cluster_id, err)
		HttpResponse(w, http.StatusInternalServerError, "Task creation failed for resize block device")
		return
	} else {
		logger.Get().Debug("%s - Task Created: %v for resizing block device %v on cluster: %v", ctxt, taskId, *blockdevice_id, *cluster_id)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}
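
The 2-second polling loop above runs unbounded if the provider task never completes. A bounded variant sketch; the 30-minute cap is an assumption, not a value from the original code:

// pollSubTask polls the tasks collection like the loop above, but gives
// up after a deadline instead of spinning forever.
func pollSubTask(coll *mgo.Collection, providerTaskId uuid.UUID) (models.AppTask, error) {
	var providerTask models.AppTask
	deadline := time.Now().Add(30 * time.Minute)
	for time.Now().Before(deadline) {
		time.Sleep(2 * time.Second)
		if err := coll.Find(bson.M{"id": providerTaskId}).One(&providerTask); err != nil {
			return providerTask, err
		}
		if providerTask.Completed {
			return providerTask, nil
		}
	}
	return providerTask, fmt.Errorf("timed out waiting for provider task: %v", providerTaskId)
}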
Example #21
func (s *CephProvider) GetStorages(req models.RpcRequest, resp *models.RpcResponse) error {
	ctxt := req.RpcRequestContext
	cluster_id_str := req.RpcRequestVars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return err
	}
	monnode, err := GetCalamariMonNode(*cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Error getting a mon node in cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting a mon node in cluster. error: %v", err))
		return err
	}

	// Get cluster details
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var cluster models.Cluster
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	if err := coll.Find(bson.M{"clusterid": *cluster_id}).One(&cluster); err != nil {
		logger.Get().Error("%s-Error getting details for cluster: %v. error: %v", ctxt, *cluster_id, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting cluster details. error: %v", err))
		return err
	}

	// Get the pools for the cluster
	pools, err := cephapi_backend.GetPools(monnode.Hostname, *cluster_id, ctxt)
	if err != nil {
		logger.Get().Error("%s-Error getting storages for cluster: %s. error: %v", ctxt, cluster.Name, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting storages. error: %v", err))
		return err
	}
	var storages []models.AddStorageRequest
	for _, pool := range pools {
		storage := models.AddStorageRequest{
			Name:     pool.Name,
			Replicas: pool.Size,
		}
		if pool.QuotaMaxObjects != 0 && pool.QuotaMaxBytes != 0 {
			storage.QuotaEnabled = true
			quotaParams := make(map[string]string)
			quotaParams["quota_max_objects"] = strconv.Itoa(pool.QuotaMaxObjects)
			quotaParams["quota_max_bytes"] = strconv.FormatUint(pool.QuotaMaxBytes, 10)
			storage.QuotaParams = quotaParams
		}
		options := make(map[string]string)
		options["id"] = strconv.Itoa(pool.Id)
		options["pg_num"] = strconv.Itoa(pool.PgNum)
		options["pgp_num"] = strconv.Itoa(pool.PgpNum)
		options["full"] = strconv.FormatBool(pool.Full)
		options["hashpspool"] = strconv.FormatBool(pool.HashPsPool)
		options["min_size"] = strconv.FormatUint(pool.MinSize, 10)
		options["crash_replay_interval"] = strconv.Itoa(pool.CrashReplayInterval)
		options["crush_ruleset"] = strconv.Itoa(pool.CrushRuleSet)
		// Get EC profile details of pool
		ok, out, err := cephapi_backend.ExecCmd(
			monnode.Hostname,
			*cluster_id,
			fmt.Sprintf("ceph --cluster %s osd pool get %s erasure_code_profile --format=json", cluster.Name, pool.Name),
			ctxt)
		if err != nil || !ok {
			storage.Type = models.STORAGE_TYPE_REPLICATED
			logger.Get().Warning("%s-Error getting EC profile details of pool: %s of cluster: %s", ctxt, pool.Name, cluster.Name)
		} else {
			var ecprofileDet bigfinmodels.ECProfileDet
			if err := json.Unmarshal([]byte(out), &ecprofileDet); err != nil {
				logger.Get().Warning("%s-Error parsing EC profile details of pool: %s of cluster: %s", ctxt, pool.Name, cluster.Name)
			} else {
				storage.Type = models.STORAGE_TYPE_ERASURE_CODED
				options["ecprofile"] = ecprofileDet.ECProfile
			}
		}
		storage.Options = options
		storages = append(storages, storage)
	}
	result, err := json.Marshal(storages)
	if err != nil {
		logger.Get().Error("%s-Error forming the output for storage list for cluster: %s. error: %v", ctxt, cluster.Name, err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error forming the output. error: %v", err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", result)
	return nil
}
Example #22
func (a *App) POST_Storages(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage for cluster: %v", cluster_id_str),
			fmt.Sprintf("Failed to create storage for cluster: %s. Error: %v", cluster_id_str, err),
			nil,
			nil,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	clusterName, err := GetClusterNameById(cluster_id)
	if err != nil {
		clusterName = cluster_id_str
	}

	ok, err := ClusterUnmanaged(*cluster_id)
	if err != nil {
		logger.Get().Error("%s-Error checking managed state of cluster: %v. error: %v", ctxt, *cluster_id, err)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage for cluster: %s", clusterName),
			fmt.Sprintf("Failed to create storage for cluster: %s. Error: %v", clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Error checking managed state of cluster: %v", *cluster_id))
		return
	}
	if ok {
		logger.Get().Error("%s-Cluster: %v is in un-managed state", ctxt, *cluster_id)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage for cluster: %v", clusterName),
			fmt.Sprintf(
				"Failed to create storage for cluster: %s. Error: %v",
				clusterName,
				fmt.Errorf("Cluster is un-managed")),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Cluster: %v is in un-managed state", *cluster_id))
		return
	}

	var request models.AddStorageRequest
	// Unmarshal the request body
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error("%s-Error parsing the request. error: %v", ctxt, err)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage for cluster: %v", clusterName),
			fmt.Sprintf("Failed to create storage for cluster: %s. Error: %v", clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to parse the request: %v", err))
		return
	}
	if err := json.Unmarshal(body, &request); err != nil {
		logger.Get().Error("%s-Unable to unmarshal request. error: %v", ctxt, err)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage for cluster: %v", clusterName),
			fmt.Sprintf("Failed to create storage for cluster: %s. Error: %v", clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to unmarshal request: %v", err))
		return
	}

	// Check if storage entity already added
	// No need to check for error as storage would be nil in case of error and the same is checked
	if storage, _ := storage_exists("name", request.Name); storage != nil {
		logger.Get().Error("%s-Storage entity: %s already added", ctxt, request.Name)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
			fmt.Sprintf(
				"Failed to create storage: %s for cluster: %s. Error: %v",
				request.Name,
				clusterName,
				fmt.Errorf("Storage exists already")),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Storage entity: %s already added", request.Name))
		return
	}

	// Validate storage type
	if ok := valid_storage_type(request.Type); !ok {
		logger.Get().Error("Invalid storage type: %s", request.Type)
		if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
			fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
			fmt.Sprintf(
				"Failed to create storage:%s for cluster: %s. Error: %v",
				request.Name,
				clusterName,
				fmt.Errorf("Invalid storage type passed")),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Invalid storage type: %s", request.Type))
		return
	}

	// Validate storage target size info
	if request.Size != "" {
		if ok, err := valid_size(request.Size); !ok || err != nil {
			logger.Get().Error(
				"%s-Invalid storage size: %v",
				ctxt,
				request.Size)
			if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
				fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
				fmt.Sprintf(
					"Failed to create storage:%s for cluster: %s. Error: %v",
					request.Name,
					clusterName,
					fmt.Errorf("Invalid storage size passed")),
				nil,
				cluster_id,
				models.NOTIFICATION_ENTITY_STORAGE,
				nil,
				false,
				ctxt); err != nil {
				logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
			}
			HttpResponse(
				w,
				http.StatusBadRequest,
				fmt.Sprintf(
					"Invalid storage size: %s passed for: %s",
					request.Size,
					request.Name))
			return
		}
	}

	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	// Get the specific provider and invoke the method
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started the task for pool creation: %v", t.ID)

				provider := a.GetProviderFromClusterId(ctxt, *cluster_id)
				if provider == nil {
					util.FailTask("", errors.New(fmt.Sprintf("%s-Error getting provider for cluster: %v", ctxt, *cluster_id)), t)
					if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
						fmt.Sprintf("Failed to create storage:%s for cluster: %v",
							request.Name, clusterName),
						fmt.Sprintf(
							"Failed to create storage:%s for cluster: %s. Error: %v",
							request.Name,
							cluster_id_str,
							fmt.Errorf("Error getting storage provider")),
						nil,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
					}
					return
				}
				err = provider.Client.Call(fmt.Sprintf("%s.%s",
					provider.Name, storage_post_functions["create"]),
					models.RpcRequest{RpcRequestVars: vars, RpcRequestData: body, RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("Error creating storage: %s on cluster: %v", request.Name, *cluster_id), fmt.Errorf("%s-%v", ctxt, err), t)
					if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
						fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
						fmt.Sprintf("Failed to create storage:%s for cluster: %s. Error: %v", request.Name,
							clusterName, fmt.Errorf("Error creating storage")),
						nil,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
					}
					return
				} else {
					// Update the master task id
					providerTaskId, err = uuid.Parse(result.Data.RequestId)
					if err != nil {
						util.FailTask(fmt.Sprintf("Error parsing provider task id while creating storage: %s for cluster: %v", request.Name, *cluster_id), fmt.Errorf("%s-%v", ctxt, err), t)
						if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
							fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
							fmt.Sprintf("Failed to create storage:%s for cluster: %s. Error: %v", request.Name,
								clusterName, err),
							nil,
							cluster_id,
							models.NOTIFICATION_ENTITY_STORAGE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
						}
						return
					}
					t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
					if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
						util.FailTask(fmt.Sprintf("Error adding sub task while creating storage: %s on cluster: %v", request.Name, *cluster_id), fmt.Errorf("%s-%v", ctxt, err), t)
						if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
							fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
							fmt.Sprintf("Failed to create storage:%s for cluster: %s. Error: %v", request.Name, clusterName,
								fmt.Errorf("Error adding subtask")),
							nil,
							cluster_id,
							models.NOTIFICATION_ENTITY_STORAGE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
						}
						return
					}

					// Check for provider task to complete and update the parent task.
					// Poll for up to 5 minutes (150 polls at 2 second intervals) so the
					// timeout branch below is actually reachable
					done := false
					for count := 0; count < 150; count++ {
						time.Sleep(2 * time.Second)
						coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
						var providerTask models.AppTask
						if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
							util.FailTask(fmt.Sprintf("Error getting sub task status while creating storage: %s on cluster: %v", request.Name, *cluster_id), fmt.Errorf("%s-%v", ctxt, err), t)
							if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
								fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name, clusterName),
								fmt.Sprintf("Failed to create storage:%s for cluster: %s. Error: %v", request.Name,
									clusterName, err),
								nil,
								cluster_id,
								models.NOTIFICATION_ENTITY_STORAGE,
								&(t.ID),
								false,
								ctxt); err != nil {
								logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
							}
							return
						}
						if providerTask.Completed {
							if providerTask.Status == models.TASK_STATUS_SUCCESS {
								t.UpdateStatus("Success")
								if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
									fmt.Sprintf("Created storage:%s for cluster: %v", request.Name, clusterName),
									fmt.Sprintf("Created storage:%s for cluster: %s", request.Name, clusterName),
									nil,
									cluster_id,
									models.NOTIFICATION_ENTITY_STORAGE,
									&(t.ID),
									false,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
								}
								t.Done(models.TASK_STATUS_SUCCESS)
							} else if providerTask.Status == models.TASK_STATUS_FAILURE {
								t.UpdateStatus("Failed")
								if err := logAuditEvent(EventTypes["STORAGE_CREATED"],
									fmt.Sprintf("Failed to create storage:%s for cluster: %v", request.Name,
										clusterName),
									fmt.Sprintf(
										"Failed to create storage:%s for cluster: %s. Error: %v",
										request.Name,
										clusterName,
										fmt.Errorf("Provider task failed")),
									nil,
									cluster_id,
									models.NOTIFICATION_ENTITY_STORAGE,
									&(t.ID),
									false,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log create storage event. Error: %v", ctxt, err)
								}
								t.Done(models.TASK_STATUS_FAILURE)
							}
							done = true
							break
						}
					}
					if !done {
						util.FailTask(
							"Sub task timed out",
							fmt.Errorf("%s-Could not get sub task status after 5 minutes", ctxt),
							t)
					}
					return
				}
			}
		}
	}
	if taskId, err := a.GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Create Storage: %s", request.Name),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Unable to create task for create storage:%s on cluster: %v. error: %v", ctxt, request.Name, *cluster_id, err)
		HttpResponse(w, http.StatusInternalServerError, "Task creation failed for create storage")
		return
	} else {
		logger.Get().Debug("%s-Task Created: %v for creating storage: %s on cluster: %v", ctxt, taskId, request.Name, *cluster_id)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}
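
For orientation, here is a minimal client-side sketch of invoking a create-storage endpoint like the one above. The field names mirror the models.AddStorageRequest usage in this file, but the JSON tags, URL, and route shape are placeholder assumptions, not the project's actual definitions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// addStorageRequest mirrors the fields the handler above reads from
// models.AddStorageRequest (Name, Type, Size, Replicas, QuotaEnabled,
// QuotaParams, Options); the JSON tags here are assumptions.
type addStorageRequest struct {
	Name         string            `json:"name"`
	Type         string            `json:"type"`
	Size         string            `json:"size,omitempty"`
	Replicas     int               `json:"replicas"`
	QuotaEnabled bool              `json:"quotaenabled"`
	QuotaParams  map[string]string `json:"quotaparams,omitempty"`
	Options      map[string]string `json:"options,omitempty"`
}

func main() {
	payload := addStorageRequest{
		Name:     "pool1",
		Type:     "replicated",
		Replicas: 3,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	// Placeholder URL and cluster id; the real route registration lives
	// elsewhere in the app.
	url := "http://localhost:8080/clusters/00000000-0000-0000-0000-000000000000/storages"
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// On success the handler replies 202 Accepted with an AsyncResponse
	// carrying the task id to poll.
	fmt.Println("status:", resp.Status)
}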
Example No. 23
func (a *App) GET_Storages(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s-Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}

	params := r.URL.Query()
	storage_type := params.Get("type")
	storage_status := params.Get("status")
	alarmStatus := r.URL.Query()["alarmstatus"]

	var filter bson.M = make(map[string]interface{})
	if len(alarmStatus) != 0 {
		var arr []interface{}
		for _, as := range alarmStatus {
			if as == "" {
				continue
			}
			if s, ok := Event_severity[as]; !ok {
				logger.Get().Error("%s-Un-supported query param: %v", ctxt, alarmStatus)
				HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Un-supported query param: %s", alarmStatus))
				return
			} else {
				arr = append(arr, bson.M{"almstatus": s})
			}
		}
		if len(arr) != 0 {
			filter["$or"] = arr
		}
	}

	filter["clusterid"] = *cluster_id
	if storage_type != "" {
		filter["type"] = storage_type
	}
	if storage_status != "" {
		switch storage_status {
		case "ok":
			filter["status"] = models.STORAGE_STATUS_OK
		case "warning":
			filter["status"] = models.STORAGE_STATUS_WARN
		case "error":
			filter["status"] = models.STORAGE_STATUS_ERROR
		default:
			HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Invalid status %s for storage", storage_status))
			return
		}
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storages models.Storages
	if err := collection.Find(filter).All(&storages); err != nil {
		HttpResponse(w, http.StatusInternalServerError, err.Error())
		logger.Get().Error("%s-Error getting the storage list for cluster: %v. error: %v", ctxt, *cluster_id, err)
		return
	}
	if len(storages) == 0 {
		json.NewEncoder(w).Encode(models.Storages{})
		return
	}
	for i := range storages {
		if storages[i].SluIds, err = GetSluIds(storages[i].Profile, storages[i].ClusterId, ctxt); err != nil {
			HttpResponse(w, http.StatusInternalServerError, err.Error())
			logger.Get().Error("%s-Error getting SLUs with given storage profile: %s. error: %v", ctxt, storages[i].Profile, err)
			return
		}
	}

	json.NewEncoder(w).Encode(storages)

}
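
The query parameters accepted above (type, status, and a repeatable alarmstatus) can be composed as in this sketch; the base URL, cluster id, and parameter values are placeholder assumptions, while the parameter names come straight from the handler.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder host and cluster id; parameter names match the handler.
	base := "http://localhost:8080/clusters/00000000-0000-0000-0000-000000000000/storages"
	params := url.Values{}
	params.Set("status", "warning")       // one of: ok, warning, error
	params.Add("alarmstatus", "critical") // repeatable; looked up in Event_severity
	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}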
Example No. 24
func (a *App) ImportCluster(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}
	var request models.ImportClusterRequest
	vars := mux.Vars(r)

	// Unmarshal the request body
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error(
			"%s-Error parsing the request. error: %v",
			ctxt,
			err)
		HttpResponse(
			w,
			http.StatusBadRequest,
			fmt.Sprintf("Unable to parse the request: %v", err))
		return
	}
	if err := json.Unmarshal(body, &request); err != nil {
		logger.Get().Error(
			"%s-Unable to unmarshal request. error: %v",
			ctxt,
			err)
		HttpResponse(
			w,
			http.StatusBadRequest,
			fmt.Sprintf("Unable to unmarshal request. error: %v", err))
		return
	}

	if request.BootstrapNode == "" ||
		request.ClusterType == "" ||
		len(request.Nodes) == 0 {
		logger.Get().Error(
			"%s-Required details not provided. error: %v",
			ctxt,
			err)
		HttpResponse(w, http.StatusBadRequest, "Required details not provided")
		return
	}

	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started the task for import cluster: %v", t.ID)
				t.UpdateStatus("Checking/accepting participating nodes")
				var acceptFailedNodes []string
				for _, node := range request.Nodes {
					if ok, err := checkAndAcceptNode(
						node,
						ctxt); err != nil || !ok {
						acceptFailedNodes = append(acceptFailedNodes, node)
					}
				}
				if len(acceptFailedNodes) > 0 {
					util.FailTask(
						"",
						fmt.Errorf(
							"%s-Failed to accept nodes: %v",
							ctxt,
							acceptFailedNodes),
						t)
					return
				}

				provider := a.getProviderFromClusterType(request.ClusterType)
				if provider == nil {
					util.FailTask(
						fmt.Sprintf(
							"Error getting provider for cluster type: %s",
							request.ClusterType),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					return
				}
				err = provider.Client.Call(
					fmt.Sprintf("%s.%s", provider.Name, "ImportCluster"),
					models.RpcRequest{
						RpcRequestVars:    vars,
						RpcRequestData:    body,
						RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("%s-Error importing cluster", ctxt), err, t)
					return
				}
				// Update the master task id
				providerTaskId, err = uuid.Parse(result.Data.RequestId)
				if err != nil {
					util.FailTask(
						fmt.Sprintf(
							"%s-Error parsing provider task id while importing cluster",
							ctxt),
						err,
						t)
					return
				}
				t.UpdateStatus("Started provider task: %v", *providerTaskId)
				if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
					util.FailTask(
						fmt.Sprintf(
							"%s-Error adding sub task while importing cluster",
							ctxt),
						err,
						t)
					return
				}

				// Check for provider task to complete
				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
				var providerTask models.AppTask
				for {
					time.Sleep(2 * time.Second)
					if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
						util.FailTask(
							fmt.Sprintf(
								"%s-Error getting sub task status while importing cluster",
								ctxt),
							err,
							t)
						return
					}
					if providerTask.Completed {
						if providerTask.Status == models.TASK_STATUS_SUCCESS {
							// Setup monitoring for the imported cluster
							t.UpdateStatus("Enabling/Scheduling monitoring for the cluster")
							if err := setupMonitoring(request.BootstrapNode); err != nil {
								logger.Get().Warning(
									"%s-Failed to setup monitoring for the cluster. error: %v",
									ctxt,
									err)
							}
							go ComputeSystemSummary(make(map[string]interface{}))
							t.UpdateStatus("Success")
							t.Done(models.TASK_STATUS_SUCCESS)
						} else { // provider task reported failure
							t.UpdateStatus("Failed")
							t.Done(models.TASK_STATUS_FAILURE)
							logger.Get().Error(
								"%s- Failed to import cluster",
								ctxt)
						}
						break
					}
				}
				return
			}
		}
	}
	if taskId, err := a.GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Import Cluster"),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error(
			"%s-Unable to create task for import cluster. error: %v",
			ctxt,
			err)
		HttpResponse(
			w,
			http.StatusInternalServerError,
			"Task creation failed for import cluster")
		return
	} else {
		logger.Get().Debug(
			"%s-Task Created: %v for import cluster",
			ctxt,
			taskId)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}
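
The handler validates only that BootstrapNode, ClusterType, and a non-empty Nodes list are present. A minimal request body could therefore be built as below; the hostnames, cluster type value, and JSON tags are illustrative assumptions.

package main

import (
	"encoding/json"
	"fmt"
)

// importClusterRequest mirrors the three fields the handler above
// checks before starting the import task; JSON tags are assumptions.
type importClusterRequest struct {
	BootstrapNode string   `json:"bootstrapnode"`
	ClusterType   string   `json:"clustertype"`
	Nodes         []string `json:"nodes"`
}

func main() {
	req := importClusterRequest{
		BootstrapNode: "mon1.example.com",
		ClusterType:   "ceph",
		Nodes:         []string{"mon1.example.com", "osd1.example.com"},
	}
	body, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(body))
}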
Example No. 25
func drive_add_handler(event models.AppEvent, ctxt string) (models.AppEvent, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var node models.Node
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}
	if node.State != models.NODE_STATE_ACTIVE {
		return event, nil
	}
	existing_disks := node.StorageDisks
	sProfiles, err := skyring.GetDbProvider().StorageProfileInterface().StorageProfiles(ctxt, nil, models.QueryOps{})
	if err != nil {
		logger.Get().Error("%s-Unable to get the storage profiles. err:%v", ctxt, err)
		return event, err
	}

	// sync the nodes to get new disks
	if ok, err := skyring.GetCoreNodeManager().SyncStorageDisks(node.Hostname, sProfiles, ctxt); err != nil || !ok {
		logger.Get().Error("%s-Failed to sync disk for host: %s Error: %v", ctxt, node.Hostname, err)
		return event, err
	}

	// get the list of disks after syncing.
	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}
	var cluster_nodes []models.ClusterNode
	var cluster_node models.ClusterNode
	cluster_node.NodeId = event.NodeId.String()
	cluster_node.NodeType = []string{models.NODE_TYPE_OSD}
	var exists bool
	for _, disk := range node.StorageDisks {
		exists = false
		for _, existing_disk := range existing_disks {
			if disk.DevName == existing_disk.DevName {
				exists = true
				break
			}
		}
		if !exists && !disk.Used && disk.Type == "disk" {
			cluster_node.Devices = append(cluster_node.Devices, models.ClusterNodeDevice{Name: disk.DevName, FSType: "xfs"})
			event.EntityId = disk.DiskId
			event.Tags["DevName"] = disk.DevName
			event.Tags["size"] = strconv.FormatFloat(disk.Size, 'E', -1, 64)
			event.Tags["Type"] = disk.Type
		}
	}

	// adding the details for event

	event.Name = skyring.EventTypes["DRIVE_ADD"]
	event.NotificationEntity = models.NOTIFICATION_ENTITY_HOST
	event.Message = fmt.Sprintf("New Storage Drive: %s added to Host:%s", event.Tags["DevName"], node.Hostname)
	event.Severity = models.ALARM_STATUS_CLEARED
	event.Notify = false

	c := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	var cluster models.Cluster
	if err := c.Find(bson.M{"clusterid": event.ClusterId}).One(&cluster); err != nil {
		logger.Get().Error("%s-Cluster information read from DB failed for Cluster: %s. error: %v", ctxt, event.ClusterId, err)
		return event, nil
	}

	// check if cluster is in managed/un-managed state
	ok, err := skyring.ClusterUnmanaged(event.ClusterId)
	if err != nil {
		logger.Get().Warning("%s-Error checking managed state of cluster: %v. error: %v", ctxt, event.ClusterId, err)
		return event, err
	}
	if ok {
		logger.Get().Error("%s-Cluster: %v is in un-managed state", ctxt, event.ClusterId)
		return event, err
	}

	vars := map[string]string{"cluster-id": event.ClusterId.String()}
	cluster_nodes = append(cluster_nodes, cluster_node)
	body, err := json.Marshal(cluster_nodes)
	if err != nil {
		logger.Get().Error("%s-Error forming request body. error: %v", ctxt, err)
		return event, nil
	}

	// lock the node for expanding

	var nodes models.Nodes
	if err := coll.Find(bson.M{"clusterid": event.ClusterId}).All(&nodes); err != nil {
		logger.Get().Error("%s-Node information read from DB failed . error: %v", ctxt, err)
		return event, nil
	}

	// check if autoExpand is enabled or not
	if !cluster.AutoExpand {
		return event, nil
	}

	// Expand cluster
	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started task for cluster expansion: %v", t.ID)
				appLock, err := skyring.LockNodes(ctxt, nodes, "Expand_Cluster")
				if err != nil {
					util.FailTask("Failed to acquire lock", fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				defer skyring.GetApp().GetLockManager().ReleaseLock(ctxt, *appLock)

				provider := skyring.GetApp().GetProviderFromClusterId(ctxt, node.ClusterId)
				if provider == nil {
					util.FailTask("", errors.New(fmt.Sprintf("%s-Error etting provider for cluster: %v", ctxt, event.ClusterId)), t)
					return
				}
				err = provider.Client.Call(fmt.Sprintf("%s.%s",
					provider.Name, "ExpandCluster"),
					models.RpcRequest{RpcRequestVars: vars, RpcRequestData: body, RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("Error expanding cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				// Update the master task id
				providerTaskId, err = uuid.Parse(result.Data.RequestId)
				if err != nil {
					util.FailTask(fmt.Sprintf("Error parsing provider task id while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
				if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
					util.FailTask(fmt.Sprintf("Error adding sub task while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}

				// Check for provider task to complete and update the disk info
				for {
					time.Sleep(2 * time.Second)
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
					var providerTask models.AppTask
					if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
						util.FailTask(fmt.Sprintf("Error getting sub task status while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
						return
					}
					if providerTask.Completed {
						if providerTask.Status == models.TASK_STATUS_SUCCESS {
							t.UpdateStatus("Starting disk sync")
							if ok, err := skyring.GetCoreNodeManager().SyncStorageDisks(node.Hostname, sProfiles, ctxt); err != nil || !ok {
								logger.Get().Error("%s-Failed to sync disk for host: %s Error: %v", ctxt, node.Hostname, err)
								return
							}
							go skyring.ComputeSystemSummary(make(map[string]interface{}))
							t.UpdateStatus("Success")
							t.Done(models.TASK_STATUS_SUCCESS)

						} else {
							logger.Get().Error("%s-Failed to expand the cluster %s", ctxt, event.ClusterId)
							t.UpdateStatus("Failed")
							t.Done(models.TASK_STATUS_FAILURE)
						}
						break
					}
				}
				return

			}
		}
	}

	if taskId, err := skyring.GetApp().GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Expand Cluster: %s", event.ClusterId.String()),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Unable to create task to expand cluster: %v. error: %v", ctxt, event.ClusterId, err)
		return event, nil
	} else {
		logger.Get().Debug("%s-Task Created: %v to expand cluster: %v", ctxt, taskId, event.ClusterId)
		return event, nil
	}

}
Example No. 26
func (a *App) DEL_Storage(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the cluster id: %s. error: %v", ctxt, cluster_id_str, err)
		if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
			fmt.Sprintf("Failed to delete storage for cluster: %v", cluster_id_str),
			fmt.Sprintf("Failed to delete storage for cluster: %s. Error: %v", cluster_id_str, err),
			nil,
			nil,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	clusterName, err := GetClusterNameById(cluster_id)
	if err != nil {
		clusterName = cluster_id_str
	}
	storage_id_str := vars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("%s - Error parsing the storage id: %s. error: %v", ctxt, storage_id_str, err)
		if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
			fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storage_id_str, clusterName),
			fmt.Sprintf("Failed to delete storage:%v for cluster: %s. Error: %v", storage_id_str, clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return
	}
	storageName, err := getStorageNameById(*storage_id)
	if storageName == "" || err != nil {
		storageName = storage_id_str
	}

	// Check if block devices are backed by this storage
	// If so dont allow deletion and ask to delete block devices first
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_BLOCK_DEVICES)
	var blkDevices []models.BlockDevice
	err = coll.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}).All(&blkDevices)
	if err != nil {
		logger.Get().Error("%s-Error checking block devices backed by storage: %v", ctxt, *storage_id)
		if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
			fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
			fmt.Sprintf("Failed to delete storage:%v for cluster: %s. Error: %v", storageName, clusterName, err),
			storage_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
		}
		HttpResponse(w, http.StatusInternalServerError, fmt.Sprintf("Error checking block devices backed by storage: %v", *storage_id))
		return
	}
	if len(blkDevices) > 0 {
		logger.Get().Warning("%s-There are block devices backed by storage: %v. First block devices should be deleted.", ctxt, *storage_id)
		if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
			fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
			fmt.Sprintf(
				"Failed to delete storage:%v for cluster: %s. Error: %v",
				storageName,
				clusterName,
				fmt.Errorf("Storage has block devices associated")),
			storage_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusMethodNotAllowed,
			"There are block devices backed by this storage. Make sure all connected clients are disconnected from the block devices, then delete the block devices before deleting the storage")
		return
	}
	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	// Get the specific provider and invoke the method
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started the task for storage deletion: %v", t.ID)
				provider := a.GetProviderFromClusterId(ctxt, *cluster_id)
				if provider == nil {
					util.FailTask(fmt.Sprintf("%s - ", ctxt), errors.New(fmt.Sprintf("%s-Error getting provider for cluster: %v", ctxt, *cluster_id)), t)
					if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
						fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
						fmt.Sprintf(
							"Failed to delete storage:%v for cluster: %s. Error: %v",
							storageName,
							clusterName,
							fmt.Errorf("Unbale to get storage provider")),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
					}
					return
				}
				err = provider.Client.Call(fmt.Sprintf("%s.%s",
					provider.Name, storage_post_functions["delete"]),
					models.RpcRequest{RpcRequestVars: vars, RpcRequestData: []byte{}, RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("%s - Error deleting storage: %v", ctxt, *storage_id), err, t)
					if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
						fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
						fmt.Sprintf(
							"Failed to delete storage:%v for cluster: %s. Error: %v",
							storageName,
							clusterName,
							fmt.Errorf("Task for deletion failed")),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
					}
					return
				} else {
					// Update the master task id
					providerTaskId, err = uuid.Parse(result.Data.RequestId)
					if err != nil {
						util.FailTask(fmt.Sprintf("%s - Error parsing provider task id while deleting storage: %v", ctxt, *storage_id), err, t)
						if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
							fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
							fmt.Sprintf(
								"Failed to delete storage:%v for cluster: %s. Error: %v",
								storageName,
								clusterName,
								fmt.Errorf("Task for deletion failed")),
							storage_id,
							cluster_id,
							models.NOTIFICATION_ENTITY_STORAGE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
						}
						return
					}
					t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
					if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
						util.FailTask(fmt.Sprintf("%s - Error adding sub task while deleting storage: %v", ctxt, *storage_id), err, t)
						if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
							fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
							fmt.Sprintf(
								"Failed to delete storage:%v for cluster: %s. Error: %v",
								storageName,
								clusterName,
								fmt.Errorf("Error while adding subtask")),
							storage_id,
							cluster_id,
							models.NOTIFICATION_ENTITY_STORAGE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
						}
						return
					}

					// Check for provider task to complete and update the disk info
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
					var providerTask models.AppTask
					for {
						time.Sleep(2 * time.Second)
						if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
							util.FailTask(fmt.Sprintf("%s - Error getting sub task status while deleting storage: %v", ctxt, *storage_id), err, t)
							if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
								fmt.Sprintf("Failed to delete storage:%v for cluster: %v", storageName, clusterName),
								fmt.Sprintf(
									"Failed to delete storage:%v for cluster: %s. Error: %v",
									storageName,
									clusterName,
									fmt.Errorf("Error getting subtask status")),
								storage_id,
								cluster_id,
								models.NOTIFICATION_ENTITY_STORAGE,
								&(t.ID),
								false,
								ctxt); err != nil {
								logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
							}
							return
						}
						if providerTask.Completed {
							if providerTask.Status == models.TASK_STATUS_SUCCESS {
								t.UpdateStatus("Success")
								if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
									fmt.Sprintf("Deleted storage:%v for cluster: %v", storageName, clusterName),
									fmt.Sprintf("Deleted storage:%v for cluster: %v", storageName, clusterName),
									storage_id,
									cluster_id,
									models.NOTIFICATION_ENTITY_STORAGE,
									&(t.ID),
									true,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
								}
								t.Done(models.TASK_STATUS_SUCCESS)
							} else {
								t.UpdateStatus("Failed")
								if err := logAuditEvent(EventTypes["STORAGE_DELETED"],
									fmt.Sprintf("Failed to delete storage:%v for cluster: %v",
										storageName, clusterName),
									fmt.Sprintf(
										"Failed to delete storage:%v for cluster: %s. Error: %v",
										storageName,
										clusterName,
										fmt.Errorf("Provider task failed")),
									storage_id,
									cluster_id,
									models.NOTIFICATION_ENTITY_STORAGE,
									&(t.ID),
									false,
									ctxt); err != nil {
									logger.Get().Error("%s- Unable to log delete storage event. Error: %v", ctxt, err)
								}
								t.Done(models.TASK_STATUS_FAILURE)
							}
							break
						}
					}
				}
				return
			}
		}
	}
	if taskId, err := a.GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Delete Storage: %s", cluster_id_str),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s - Unable to create task to delete storage: %v. error: %v", ctxt, *cluster_id, err)
		HttpResponse(w, http.StatusInternalServerError, "Task creation failed for delete storage")
		return
	} else {
		logger.Get().Debug("%s-Task Created: %v to delete storage: %v", ctxt, taskId, *cluster_id)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}
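
Deletion follows the same async pattern: the client issues a DELETE against the storage resource and then polls the returned task. The URL below is a placeholder assumption; the cluster-id and storage-id path variables match the ones read by the handler.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder URL; "cluster-id" and "storage-id" are the mux path
	// variables consumed by DEL_Storage.
	url := "http://localhost:8080/clusters/00000000-0000-0000-0000-000000000000/storages/11111111-1111-1111-1111-111111111111"
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		fmt.Println("building request failed:", err)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// 202 Accepted with an AsyncResponse task id on success; 405 if the
	// storage still has block devices attached.
	fmt.Println("status:", resp.Status)
}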
Example No. 27
func (a *App) PATCH_Storage(w http.ResponseWriter, r *http.Request) {
	ctxt, err := GetContext(r)
	if err != nil {
		logger.Get().Error(
			"Error Getting the context. error: %v",
			err)
	}

	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the cluster id: %s. error: %v",
			ctxt,
			cluster_id_str,
			err)
		if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
			fmt.Sprintf("Failed to update storage for cluster: %v", cluster_id_str),
			fmt.Sprintf("Failed to update storage for cluster: %s. Error: %v", cluster_id_str, err),
			nil,
			nil,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the cluster id: %s",
				cluster_id_str))
		return
	}
	clusterName, err := GetClusterNameById(cluster_id)
	if err != nil {
		clusterName = cluster_id_str
	}
	storage_id_str := vars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error(
			"%s - Error parsing the storage id: %s. error: %v",
			ctxt,
			storage_id_str,
			err)
		if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
			fmt.Sprintf("Failed to update storage:%s for cluster: %v", storage_id_str, clusterName),
			fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storage_id_str, clusterName, err),
			nil,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusBadRequest,
			fmt.Sprintf(
				"Error parsing the storage id: %s",
				storage_id_str))
		return
	}

	storageName, err := getStorageNameById(*storage_id)
	if storageName == "" || err != nil {
		storageName = storage_id_str
	}

	ok, err := ClusterUnmanaged(*cluster_id)
	if err != nil {
		logger.Get().Error(
			"%s-Error checking managed state of cluster: %v. error: %v",
			ctxt,
			*cluster_id,
			err)
		if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
			fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
			fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storageName, clusterName, err),
			storage_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusMethodNotAllowed,
			fmt.Sprintf(
				"Error checking managed state of cluster: %v",
				*cluster_id))
		return
	}
	if ok {
		logger.Get().Error(
			"%s-Cluster: %v is in un-managed state",
			ctxt,
			*cluster_id)
		if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
			fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
			fmt.Sprintf(
				"Failed to update storage:%s for cluster: %s. Error: %v",
				storageName,
				clusterName,
				fmt.Errorf("Cluster is un-managed")),
			storage_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusMethodNotAllowed,
			fmt.Sprintf(
				"Cluster: %v is in un-managed state",
				*cluster_id))
		return
	}

	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error(
			"%s-Error parsing the request. error: %v",
			ctxt,
			err)
		if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
			fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
			fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storageName, clusterName, err),
			storage_id,
			cluster_id,
			models.NOTIFICATION_ENTITY_STORAGE,
			nil,
			false,
			ctxt); err != nil {
			logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
		}
		HttpResponse(
			w,
			http.StatusBadRequest,
			fmt.Sprintf(
				"Unable to parse the request: %v",
				err))
		return
	}

	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started the task for storage update: %v", t.ID)
				provider := a.GetProviderFromClusterId(ctxt, *cluster_id)
				if provider == nil {
					util.FailTask(
						fmt.Sprintf("Error getting the provider for cluster: %v", *cluster_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
						fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
						fmt.Sprintf(
							"Failed to update storage:%s for cluster: %s. Error: %v",
							storageName,
							clusterName,
							fmt.Errorf("Failed to get storage provider")),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
					}
					return
				}
				err = provider.Client.Call(
					fmt.Sprintf(
						"%s.%s",
						provider.Name,
						storage_post_functions["update"]),
					models.RpcRequest{
						RpcRequestVars:    vars,
						RpcRequestData:    body,
						RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(
						fmt.Sprintf(
							"Error updating storage: %v",
							*storage_id),
						fmt.Errorf("%s-%v", ctxt, err),
						t)
					if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
						fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
						fmt.Sprintf(
							"Failed to update storage:%s for cluster: %s. Error: %v",
							storageName,
							clusterName,
							fmt.Errorf("Error executing provider task")),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
					}
					return
				}
				// Update the master task id
				providerTaskId, err = uuid.Parse(result.Data.RequestId)
				if err != nil {
					util.FailTask(
						fmt.Sprintf(
							"Error parsing provider task id while updating storage: %v",
							*storage_id),
						err,
						t)
					if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
						fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
						fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storageName, clusterName, err),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
					}
					return
				}
				t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
				if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
					util.FailTask(
						fmt.Sprintf(
							"Error adding sub task while updating storage: %v",
							*storage_id),
						err,
						t)
					if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
						fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
						fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storageName, clusterName,
							fmt.Errorf("Error adding sub task")),
						storage_id,
						cluster_id,
						models.NOTIFICATION_ENTITY_STORAGE,
						&(t.ID),
						false,
						ctxt); err != nil {
						logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
					}
					return
				}

				coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
				var providerTask models.AppTask
				for {
					time.Sleep(2 * time.Second)
					if err := coll.Find(
						bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
						util.FailTask(
							fmt.Sprintf(
								"Error getting sub task status while updating storage: %v",
								*storage_id),
							err,
							t)
						if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
							fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
							fmt.Sprintf("Failed to update storage:%s for cluster: %s. Error: %v", storageName, clusterName,
								err),
							storage_id,
							cluster_id,
							models.NOTIFICATION_ENTITY_STORAGE,
							&(t.ID),
							false,
							ctxt); err != nil {
							logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
						}
						return
					}
					if providerTask.Completed {
						if providerTask.Status == models.TASK_STATUS_SUCCESS {
							t.UpdateStatus("Success")
							if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
								fmt.Sprintf("Updated storage:%s for cluster: %v", storageName, clusterName),
								fmt.Sprintf("Updated storage:%s for cluster: %s", storageName, clusterName),
								storage_id,
								cluster_id,
								models.NOTIFICATION_ENTITY_STORAGE,
								&(t.ID),
								false,
								ctxt); err != nil {
								logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
							}
							t.Done(models.TASK_STATUS_SUCCESS)
						} else {
							t.UpdateStatus("Failed")
							if err := logAuditEvent(EventTypes["STORAGE_UPDATED"],
								fmt.Sprintf("Failed to update storage:%s for cluster: %v", storageName, clusterName),
								fmt.Sprintf(
									"Failed to update storage:%s for cluster: %s. Error: %v",
									storageName,
									clusterName,
									fmt.Errorf("Provider task failed")),
								storage_id,
								cluster_id,
								models.NOTIFICATION_ENTITY_STORAGE,
								&(t.ID),
								false,
								ctxt); err != nil {
								logger.Get().Error("%s- Unable to log update storage event. Error: %v", ctxt, err)
							}
							t.Done(models.TASK_STATUS_FAILURE)
						}
						break
					}
				}
				return
			}
		}
	}
	if taskId, err := a.GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Update Storage: %s", cluster_id_str),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s - Unable to create task to update storage: %v. error: %v", ctxt, *cluster_id, err)
		HttpResponse(w, http.StatusInternalServerError, "Task creation failed for update storage")
		return
	} else {
		logger.Get().Debug("%s-Task Created: %v to update storage: %v", ctxt, taskId, *cluster_id)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}
Example No. 28
func (a *App) getTasks(rw http.ResponseWriter, req *http.Request) {
	ctxt, err := GetContext(req)
	if err != nil {
		logger.Get().Error("Error Getting the context. error: %v", err)
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	var filter bson.M = make(map[string]interface{})

	rootTask := req.URL.Query().Get("level")
	taskStatusArray := req.URL.Query()["state"]
	searchMessage := req.URL.Query().Get("searchmessage")

	if len(rootTask) != 0 {
		if strings.ToLower(rootTask) == "root" {
			filter["parentid"], err = uuid.Parse(rootTaskId)
			if err != nil {
				logger.Get().Error("%s-Unable to Parse the Id: %s. error: %v", ctxt, rootTaskId, err)
				HandleHttpError(rw, err)
				return
			}
		} else {
			logger.Get().Error("%s-Un-supported query param: %v", ctxt, rootTask)
			HttpResponse(rw, http.StatusBadRequest, fmt.Sprintf("Un-supported query param: %s", rootTask))
			return
		}
	}

	taskStatusMap := make(map[string]string)
	for _, taskStatus := range taskStatusArray {
		taskStatusMap[strings.ToLower(taskStatus)] = taskStatus
	}

	if len(taskStatusMap) != 0 {
		_, inprogress := taskStatusMap["inprogress"]
		_, completed := taskStatusMap["completed"]
		_, failed := taskStatusMap["failed"]

		var arr []interface{}

		if inprogress {
			arr = append(arr, bson.M{"status": models.TASK_STATUS_NONE})
		}
		if completed {
			arr = append(arr, bson.M{"status": models.TASK_STATUS_SUCCESS})
		}
		if failed {
			arr = append(arr, bson.M{"status": models.TASK_STATUS_FAILURE})
			arr = append(arr, bson.M{"status": models.TASK_STATUS_TIMED_OUT})
		}
		filter["$or"] = arr
	}
	if len(searchMessage) != 0 {
		filter["name"] = bson.M{"$regex": searchMessage, "$options": "$i"}
	}
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	var tasks []models.AppTask
	pageNo, pageNoErr := strconv.Atoi(req.URL.Query().Get("pageNo"))
	pageSize, pageSizeErr := strconv.Atoi(req.URL.Query().Get("pageSize"))
	// time stamp format of RFC3339 = "2006-01-02T15:04:05Z07:00"
	fromDateTime, fromDateTimeErr := time.Parse(time.RFC3339, req.URL.Query().Get("fromdatetime"))
	toDateTime, toDateTimeErr := time.Parse(time.RFC3339, req.URL.Query().Get("todatetime"))
	if fromDateTimeErr == nil && toDateTimeErr == nil {
		filter["lastupdated"] = bson.M{
			"$gt": fromDateTime.UTC(),
			"$lt": toDateTime.UTC(),
		}
	} else if fromDateTimeErr != nil && toDateTimeErr == nil {
		filter["lastupdated"] = bson.M{
			"$lt": toDateTime.UTC(),
		}
	} else if fromDateTimeErr == nil && toDateTimeErr != nil {
		filter["lastupdated"] = bson.M{
			"$gt": fromDateTime.UTC(),
		}
	}
	if err := coll.Find(filter).Sort("completed", "-lastupdated").All(&tasks); err != nil {
		logger.Get().Error("%s-Unable to get tasks. error: %v", ctxt, err)
		HttpResponse(rw, http.StatusInternalServerError, err.Error())
		return
	}

	if len(tasks) == 0 {
		json.NewEncoder(rw).Encode([]models.AppTask{})
	} else {
		startIndex := 0
		endIndex := models.TASKS_PER_PAGE
		if pageNoErr == nil && pageSizeErr == nil {
			startIndex, endIndex = Paginate(pageNo, pageSize, models.TASKS_PER_PAGE)
		}
		// endIndex is used inclusively in the slice below, so clamp it to the
		// last valid index (>= rather than > avoids slicing past the end)
		if endIndex >= len(tasks) {
			endIndex = len(tasks) - 1
		}

		json.NewEncoder(rw).Encode(
			struct {
				Totalcount int              `json:"totalcount"`
				Startindex int              `json:"startindex"`
				Endindex   int              `json:"endindex"`
				Tasks      []models.AppTask `json:"tasks"`
			}{len(tasks), startIndex, endIndex, tasks[startIndex : endIndex+1]})

	}
}
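
The filters supported by getTasks can be combined freely; this sketch only builds the query string (the /tasks path itself is an assumption), using the parameter names and formats the handler parses.

package main

import (
	"fmt"
	"net/url"
	"time"
)

func main() {
	params := url.Values{}
	params.Set("level", "root")            // restrict to root-level tasks
	params.Add("state", "inprogress")      // repeatable: inprogress, completed, failed
	params.Set("searchmessage", "storage") // case-insensitive match on task name
	params.Set("pageNo", "1")
	params.Set("pageSize", "10")
	// Both timestamps must be RFC3339; either bound may be omitted.
	params.Set("fromdatetime", time.Now().Add(-24*time.Hour).UTC().Format(time.RFC3339))
	params.Set("todatetime", time.Now().UTC().Format(time.RFC3339))
	fmt.Println("/tasks?" + params.Encode())
}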
Example No. 29
func FetchOSDStats(ctxt string, cluster models.Cluster, monName string) (map[string]map[string]string, error) {
	var osdEvents []models.Event
	metrics, statistics, err := updateOSDStats(ctxt, cluster, monName)
	if err != nil {
		logger.Get().Error("%s - %v", ctxt, err)
		return metrics, err
	}

	for _, osd := range statistics.OSDs {
		event, isRaiseEvent, err := skyring_utils.AnalyseThresholdBreach(ctxt, skyring_monitoring.SLU_UTILIZATION, osd.Name, float64(osd.UsagePercent), cluster)
		if err != nil {
			logger.Get().Error("%s - Failed to analyse threshold breach for osd utilization of %v.Error %v", ctxt, osd.Name, err)
			continue
		}

		if isRaiseEvent {
			osdEvents = append(osdEvents, event)
		}
	}

	/*
		Correlation of OSD threshold crossing and storage profile threshold crossing

		Facts:
			0. Storage profile utilization is calculated cluster-wise
			1. OSDs are grouped into storage profiles.
			2. Storage profile capacity is the sum of the capacities of the OSDs associated with that storage profile.
			3. The default threshold values are:
				Storage Profile Utilization : Warning  -> 65
				                              Critical -> 85
				OSD Utilization             : Warning  -> 85
				                              Critical -> 95
			4. From 1, 2 and 3, a storage profile utilization crossing a threshold may or may not coincide with threshold crossings of its associated OSDs

		Logic:
			1. Fetch all OSDs in the current cluster
			2. Group the OSDs into a map keyed by storage profile
			3. Loop over the storage profile threshold events and for each storage profile event:
				3.1. Get the list of OSD events for the current storage profile.
				3.2. Add the list of OSD events obtained in 3.1 (empty or non-empty) to the ImpactingEvents field of the storage profile event
				3.3. Loop over the OSD events in 3.2 and set the notify flag to false so they are not separately notified to the end user
				3.4. Raise the threshold crossing event for the storage profile
			4. Iterate over the entries in the map from 2 and raise the OSD threshold crossing events.
			   For OSDs already captured in a storage profile event's ImpactingEvents, the notification flag is turned off so the eventing module doesn't notify them separately
			   but still maintains the detail.
	*/
	slus, sluFetchErr := getOsds(cluster.ClusterId)
	if sluFetchErr != nil {
		return nil, sluFetchErr
	}

	spToOsdEvent := make(map[string][]models.Event)
	for _, osdEvent := range osdEvents {
		// Parse the osd id once per event; skip the event on failure so the
		// nil pointer is never dereferenced below
		osdIdFromEvent, osdIdFromEventError := uuid.Parse(osdEvent.Tags[models.ENTITY_ID])
		if osdIdFromEventError != nil {
			logger.Get().Error("%s - Failed to parse osd id %s from cluster %v. Error: %v", ctxt, osdEvent.Tags[models.ENTITY_ID], cluster.ClusterId, osdIdFromEventError)
			continue
		}
		for _, slu := range slus {
			if uuid.Equal(slu.SluId, *osdIdFromEvent) && slu.Name == osdEvent.Tags[models.ENTITY_NAME] {
				spToOsdEvent[slu.StorageProfile] = append(spToOsdEvent[slu.StorageProfile], osdEvent)
			}
		}
	}

	storageProfileStats, storageProfileStatsErr, storageProfileEvents := FetchStorageProfileUtilizations(ctxt, statistics, cluster)
	if storageProfileStatsErr != nil {
		for _, event := range osdEvents {
			if err, _ := HandleEvent(event, ctxt); err != nil {
				logger.Get().Error("%s - Threshold: %v.Error %v", ctxt, event, err)
			}
		}
		return metrics, fmt.Errorf("Failed to fetch storage profile utilizations for cluster %v.Error %v", cluster.Name, storageProfileStatsErr)
	}

	for _, spEvent := range storageProfileEvents {
		osdEvents := spToOsdEvent[spEvent.Tags[models.ENTITY_NAME]]
		impactingEvents := make(map[string][]models.Event)
		for _, osdEvent := range osdEvents {
			// Validate the osd id tag; the event is recorded either way
			if _, err := uuid.Parse(osdEvent.Tags[models.ENTITY_ID]); err != nil {
				logger.Get().Error("%s - Failed to parse osd id %s from cluster %v. Error: %v", ctxt, osdEvent.Tags[models.ENTITY_ID], cluster.ClusterId, err)
			}
			impactingEvents[models.COLL_NAME_STORAGE_LOGICAL_UNITS] = append(impactingEvents[models.COLL_NAME_STORAGE_LOGICAL_UNITS], osdEvent)
			osdEvent.Tags[models.NOTIFY] = strconv.FormatBool(false)
		}
		spEvent.ImpactingEvents = impactingEvents
		if err, _ := HandleEvent(spEvent, ctxt); err != nil {
			logger.Get().Error("%s - Threshold: %v.Error %v", ctxt, spEvent, err)
		}
	}

	for _, osdEvents := range spToOsdEvent {
		for _, osdEvent := range osdEvents {
			if err, _ := HandleEvent(osdEvent, ctxt); err != nil {
				logger.Get().Error("%s - Threshold: %v.Error %v", ctxt, osdEvent, err)
			}
		}
	}

	for key, timeStampValueMap := range storageProfileStats {
		metrics[key] = timeStampValueMap
	}

	return metrics, nil
}
Example No. 30
func (s *CephProvider) GetDiskHierarchy(req models.RpcRequest, resp *models.RpcResponse) error {
	var request models.DiskHierarchyRequest
	ctxt := req.RpcRequestContext

	if err := json.Unmarshal(req.RpcRequestData, &request); err != nil {
		logger.Get().Error(fmt.Sprintf("%s-Unbale to parse the disk hierarchy request. error: %v", ctxt, err))
		*resp = utils.WriteResponse(http.StatusBadRequest, fmt.Sprintf("Unbale to parse the request. error: %v", err))
		return err
	}

	if request.JournalSize == "" {
		request.JournalSize = fmt.Sprintf("%dMB", JOURNALSIZE)
	}
	nodes, err := util.GetNodes(request.ClusterNodes)
	if err != nil {
		logger.Get().Error(
			"%s-Error getting nodes list from DB for cluster %s. error: %v",
			ctxt,
			request.ClusterName,
			err)
		*resp = utils.WriteResponse(http.StatusInternalServerError, fmt.Sprintf("Error getting nodes list from DB. error: %v", err))
		return err
	}
	hierarchy := make(map[string]map[string]string)
	var storageSize float64
	for _, requestNode := range request.ClusterNodes {
		disksMap := make(map[string]string)
		nodeUuid, err := uuid.Parse(requestNode.NodeId)
		if err != nil {
			logger.Get().Error(
				"%s-Error parsing node id: %s for cluster: %s. error: %v",
				ctxt,
				requestNode.NodeId,
				request.ClusterName,
				err)
			continue
		}

		devices := make(map[string]models.Disk)
		storageNode := nodes[*nodeUuid]
		// Form a map of storage node disks
		var nodeDisksMap map[string]models.Disk = make(map[string]models.Disk)
		for _, storageDisk := range storageNode.StorageDisks {
			nodeDisksMap[storageDisk.Name] = storageDisk
		}

		for diskName, storageDisk := range nodeDisksMap {
			for idx := 0; idx < len(requestNode.Devices); idx++ {
				if diskName == requestNode.Devices[idx].Name {
					devices[requestNode.Devices[idx].Name] = storageDisk
				}
			}
		}

		// Utility function returns the value in MB, so multiply by 1024 to make it bytes
		jSize := utils.SizeFromStr(request.JournalSize) * float64(1024)
		diskWithJournalMapped := getDiskWithJournalMapped(devices, jSize)
		for disk, journal := range diskWithJournalMapped {
			disksMap[disk] = journal.JournalDisk
			for _, storageDisk := range storageNode.StorageDisks {
				if storageDisk.Name == disk {
					storageSize += storageDisk.Size
				}
			}
		}
		hierarchy[requestNode.NodeId] = disksMap
	}

	retVal := models.DiskHierarchyDetails{
		ClusterName: request.ClusterName,
		Hierarchy:   hierarchy,
		StorageSize: storageSize,
	}
	result, err := json.Marshal(retVal)
	if err != nil {
		logger.Get().Error(
			"%s-Error forming the output for get disk hierarchy of cluster: %s. error: %v",
			ctxt,
			request.ClusterName,
			err)
		*resp = utils.WriteResponse(
			http.StatusInternalServerError,
			fmt.Sprintf(
				"Error forming the output. error: %v",
				err))
		return err
	}
	*resp = utils.WriteResponseWithData(http.StatusOK, "", result)
	return nil
}
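
Finally, a sketch of the request that GetDiskHierarchy unmarshals. The field names (ClusterName, ClusterNodes, JournalSize, and the per-node NodeId/Devices) are taken from the code above; the JSON tags, device names, and UUID are assumptions.

package main

import (
	"encoding/json"
	"fmt"
)

// Minimal mirrors of the request types consumed above; the JSON tags
// are assumptions.
type clusterNodeDevice struct {
	Name   string `json:"name"`
	FSType string `json:"fstype"`
}

type clusterNode struct {
	NodeId  string              `json:"nodeid"`
	Devices []clusterNodeDevice `json:"devices"`
}

type diskHierarchyRequest struct {
	ClusterName  string        `json:"clustername"`
	ClusterNodes []clusterNode `json:"clusternodes"`
	JournalSize  string        `json:"journalsize"` // falls back to the JOURNALSIZE default when empty
}

func main() {
	req := diskHierarchyRequest{
		ClusterName: "cluster1",
		ClusterNodes: []clusterNode{
			{
				NodeId:  "00000000-0000-0000-0000-000000000000", // placeholder node UUID
				Devices: []clusterNodeDevice{{Name: "/dev/sdb", FSType: "xfs"}},
			},
		},
		// JournalSize left empty so the provider applies its default.
	}
	body, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(body))
}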