func (a *App) GET_Storage(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("Error parsing the cluster id: %s. error: %v", cluster_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}
	storage_id_str := vars["storage-id"]
	storage_id, err := uuid.Parse(storage_id_str)
	if err != nil {
		logger.Get().Error("Error parsing the storage id: %s. error: %v", storage_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the storage id: %s", storage_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storage models.Storage
	if err := collection.Find(bson.M{"clusterid": *cluster_id, "storageid": *storage_id}).One(&storage); err != nil {
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		logger.Get().Error("Error getting the storage: %v on cluster: %v. error: %v", *storage_id, *cluster_id, err)
		return
	}
	if storage.Name == "" {
		util.HttpResponse(w, http.StatusBadRequest, "Storage not found")
		logger.Get().Error("Storage with id: %v not found for cluster: %v", *storage_id, *cluster_id)
		return
	} else {
		json.NewEncoder(w).Encode(storage)
	}
}
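
// PATCH_Disk updates the details of a disk (currently the storage profile)
// on the node identified by the node-id and disk-id route variables.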
func (a *App) PATCH_Disk(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	node_id_str := vars["node-id"]
	node_id, err := uuid.Parse(node_id_str)
	if err != nil {
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s. Error: %v", node_id_str, err))
		return
	}
	disk_id_str := vars["disk-id"]
	disk_id, err := uuid.Parse(disk_id_str)
	if err != nil {
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing disk id: %s. Error: %v", disk_id_str, err))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var node models.Node
	if err := collection.Find(bson.M{"nodeid": *node_id}).One(&node); err != nil {
		logger.Get().Error("Error getting the node detail for node: %s. error: %v", node_id_str, err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		return
	}

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		logger.Get().Error("Error parsing http request body: %s", err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	var m map[string]interface{}
	if err = json.Unmarshal(body, &m); err != nil {
		logger.Get().Error("Unable to unmarshal the data: %s", err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		return
	}

	var disks []backend.Disk
	if val, ok := m["storageprofile"]; ok {
		// Update the storage profile of the matching disk
		for _, disk := range node.StorageDisks {
			if disk.DiskId == *disk_id {
				disk.StorageProfile = val.(string)
			}
			disks = append(disks, disk)
		}
		node.StorageDisks = disks
	}

	// Save the updated node document
	if err := collection.Update(bson.M{"nodeid": *node_id}, bson.M{"$set": node}); err != nil {
		logger.Get().Error("Error updating record in DB for node: %s. error: %v", node_id_str, err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
	}
}
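
// DELETE_Nodes removes the set of nodes whose ids are passed in the request body.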
func (a *App) DELETE_Nodes(w http.ResponseWriter, r *http.Request) {
	var nodeIds []struct {
		NodeId string `json:"nodeid"`
	}

	// Unmarshal the request body
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error("Error parsing the request. error: %v", err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to parse the request. error: %v", err))
		return
	}
	if err := json.Unmarshal(body, &nodeIds); err != nil {
		logger.Get().Error("Unable to unmarshal request. error: %v", err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to unmarshal request. error: %v", err))
		return
	}

	for _, item := range nodeIds {
		node_id, err := uuid.Parse(item.NodeId)
		if err != nil {
			logger.Get().Error("Error parsing node id: %s. error: %v", item.NodeId, err)
			util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s", item.NodeId))
			return
		}
		if ok, err := removeNode(w, *node_id); err != nil || !ok {
			logger.Get().Error("Error removing the node: %v. error: %v", *node_id, err)
			util.HttpResponse(w, http.StatusInternalServerError, fmt.Sprintf("Error removing the node: %v. error: %v", *node_id, err))
			return
		}
	}
}
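
// GET_Node returns the details of the node identified by the node-id route variable.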
func (a *App) GET_Node(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	node_id_str := vars["node-id"]
	node_id, err := uuid.Parse(node_id_str)
	if err != nil {
		logger.Get().Error("Error parsing node id: %s", node_id_str)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s", node_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var node models.Node
	if err := collection.Find(bson.M{"nodeid": *node_id}).One(&node); err != nil {
		logger.Get().Error("Error getting the node detail for %v. error: %v", *node_id, err)
	}
	if node.Hostname == "" {
		util.HttpResponse(w, http.StatusBadRequest, "Node not found")
		logger.Get().Error("Node: %v not found. error: %v", *node_id, err)
		return
	} else {
		json.NewEncoder(w).Encode(node)
	}
}
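
// GET_Storages lists the storage entities of the cluster identified by the
// cluster-id route variable.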
func (a *App) GET_Storages(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("Error parsing the cluster id: %s. error: %v", cluster_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)
	var storages models.Storages
	if err := collection.Find(bson.M{"clusterid": *cluster_id}).All(&storages); err != nil {
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		logger.Get().Error("Error getting the storage list for cluster: %v. error: %v", *cluster_id, err)
		return
	}
	if len(storages) == 0 {
		json.NewEncoder(w).Encode(models.Storages{})
	} else {
		json.NewEncoder(w).Encode(storages)
	}
}
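
// getTask returns the task identified by the taskid route variable.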
func (a *App) getTask(rw http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	taskId, err := uuid.Parse(vars["taskid"])
	if err != nil {
		logger.Get().Error("Unable to parse the task id: %s. error: %v", vars["taskid"], err)
		util.HandleHttpError(rw, err)
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	var task models.AppTask
	if err := coll.Find(bson.M{"id": *taskId}).One(&task); err != nil {
		logger.Get().Error("Unable to get task. error: %v", err)
		if err == mgo.ErrNotFound {
			util.HttpResponse(rw, http.StatusNotFound, err.Error())
		} else {
			util.HttpResponse(rw, http.StatusInternalServerError, err.Error())
		}
		return
	}
	json.NewEncoder(rw).Encode(task)
}
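
// GET_Disk returns the details of a single disk of the node identified by the
// node-id and disk-id route variables.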
func (a *App) GET_Disk(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	node_id_str := vars["node-id"]
	node_id, err := uuid.Parse(node_id_str)
	if err != nil {
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s. Error: %v", node_id_str, err))
		return
	}
	disk_id_str := vars["disk-id"]
	disk_id, err := uuid.Parse(disk_id_str)
	if err != nil {
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing disk id: %s. Error: %v", disk_id_str, err))
		return
	}

	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	collection := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var node models.Node
	if err := collection.Find(bson.M{"nodeid": *node_id}).One(&node); err != nil {
		logger.Get().Error("Error getting the node detail for node: %s. error: %v", node_id_str, err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
		return
	}

	var mdisk backend.Disk
	for _, disk := range node.StorageDisks {
		if disk.DiskId == *disk_id {
			mdisk = disk
			break
		}
	}
	if err := json.NewEncoder(w).Encode(mdisk); err != nil {
		logger.Get().Error("Error encoding data: %v", err)
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
	}
}
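
// GetNodeID fetches the UUID of the given node by calling the corresponding
// salt python function and parsing the returned value.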
func (s Salt) GetNodeID(node string) (id uuid.UUID, err error) {
	mutex.Lock()
	defer mutex.Unlock()
	pyobj, loc_err := pyFuncs["GetNodeID"].Call(node)
	if loc_err == nil {
		var idStr string
		loc_err = gopy.Convert(python.PyDict_GetItemString(pyobj, node), &idStr)
		if loc_err == nil {
			// Assign rather than re-declare loc_err so a parse failure is
			// propagated to the caller instead of being silently dropped
			var parsedId *uuid.UUID
			if parsedId, loc_err = uuid.Parse(idStr); loc_err == nil {
				return *parsedId, nil
			}
		}
	}
	return uuid.UUID{}, loc_err
}
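
// getTasks lists tasks, optionally restricted to root level tasks via the
// "level" query parameter and filtered by completion state via "state".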
func (a *App) getTasks(rw http.ResponseWriter, req *http.Request) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	var filter bson.M = make(map[string]interface{})
	rootTask := req.URL.Query().Get("level")
	taskStatus := req.URL.Query().Get("state")
	if len(rootTask) != 0 {
		if strings.ToLower(rootTask) == "root" {
			parentId, err := uuid.Parse(rootTaskId)
			if err != nil {
				logger.Get().Error("Unable to parse the id: %s. error: %v", rootTaskId, err)
				util.HandleHttpError(rw, err)
				return
			}
			filter["parentid"] = *parentId
		} else {
			logger.Get().Error("Un-supported query param: %v", rootTask)
			util.HttpResponse(rw, http.StatusInternalServerError, fmt.Sprintf("Un-supported query param: %s", rootTask))
			return
		}
	}
	if len(taskStatus) != 0 {
		if strings.ToLower(taskStatus) == "inprogress" {
			filter["completed"] = false
		} else if strings.ToLower(taskStatus) == "completed" {
			filter["completed"] = true
		} else {
			logger.Get().Error("Un-supported query param: %v", taskStatus)
			util.HttpResponse(rw, http.StatusInternalServerError, fmt.Sprintf("Un-supported query param: %s", taskStatus))
			return
		}
	}

	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
	var tasks []models.AppTask
	if err := coll.Find(filter).All(&tasks); err != nil {
		logger.Get().Error("Unable to get tasks. error: %v", err)
		util.HttpResponse(rw, http.StatusInternalServerError, err.Error())
		return
	}
	if len(tasks) == 0 {
		json.NewEncoder(rw).Encode([]models.AppTask{})
	} else {
		json.NewEncoder(rw).Encode(tasks)
	}
}
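
// DELETE_Node removes the node identified by the node-id route variable.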
func (a *App) DELETE_Node(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	node_id_str := vars["node-id"]
	node_id, err := uuid.Parse(node_id_str)
	if err != nil {
		logger.Get().Error("Error parsing node id: %s. error: %v", node_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s", node_id_str))
		return
	}
	if ok, err := removeNode(w, *node_id); err != nil || !ok {
		logger.Get().Error("Error removing the node: %v. error: %v", *node_id, err)
		util.HttpResponse(w, http.StatusInternalServerError, fmt.Sprintf("Error removing the node: %v", err))
		return
	}
}
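
// getClusterNodesFromRequest looks up the node details in the DB for the
// nodes referenced in a cluster request.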
func getClusterNodesFromRequest(clusterNodes []models.ClusterNode) (models.Nodes, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()

	var nodes models.Nodes
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	for _, clusterNode := range clusterNodes {
		nodeId, err := uuid.Parse(clusterNode.NodeId)
		if err != nil {
			return nodes, err
		}
		var node models.Node
		if err := coll.Find(bson.M{"nodeid": *nodeId}).One(&node); err != nil {
			return nodes, err
		}
		nodes = append(nodes, node)
	}
	return nodes, nil
}
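
// lockNode acquires an application lock on the given node. If the node does
// not yet have a UUID, a temporary one is derived from the hostname hash.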
func lockNode(nodeId uuid.UUID, hostname string, operation string) (*lock.AppLock, error) {
	// Lock the node
	locks := make(map[uuid.UUID]string)
	if nodeId.IsZero() {
		// Generate a temporary UUID from the hostname for locking,
		// as the node UUID is not available at this point
		id, err := uuid.Parse(util.Md5FromString(hostname))
		if err != nil {
			logger.Get().Error(fmt.Sprintf("Unable to create the UUID for locking for host: %s:", hostname), err)
			return nil, err
		}
		nodeId = *id
	}
	locks[nodeId] = fmt.Sprintf("%s : %s", operation, hostname)
	appLock := lock.NewAppLock(locks)
	if err := GetApp().GetLockManager().AcquireLock(*appLock); err != nil {
		return nil, err
	}
	return appLock, nil
}
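
// GET_Utilization returns monitoring utilization data for the node identified
// by the node-id route variable, optionally scoped by the "resource" and
// "duration" query parameters.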
func (a *App) GET_Utilization(w http.ResponseWriter, r *http.Request) {
	var start_time string
	var end_time string
	var interval string

	vars := mux.Vars(r)
	node_id_str := vars["node-id"]
	node_id, err := uuid.Parse(node_id_str)
	if err != nil {
		logger.Get().Error("Error parsing node id: %s. error: %v", node_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing node id: %s", node_id_str))
		return
	}

	params := r.URL.Query()
	resource_name := params.Get("resource")
	duration := params.Get("duration")

	storage_node := GetNode(*node_id)
	if storage_node.Hostname == "" {
		util.HttpResponse(w, http.StatusBadRequest, "Node not found")
		logger.Get().Error("Node: %v not found", *node_id)
		return
	}

	// A duration of the form "start,end" gives an explicit time range,
	// otherwise it is treated as an interval
	if duration != "" {
		if strings.Contains(duration, ",") {
			splt := strings.Split(duration, ",")
			start_time = splt[0]
			end_time = splt[1]
		} else {
			interval = duration
		}
	}

	res, err := GetMonitoringManager().QueryDB(map[string]interface{}{
		"nodename":   storage_node.Hostname,
		"resource":   resource_name,
		"start_time": start_time,
		"end_time":   end_time,
		"interval":   interval,
	})
	if err == nil {
		json.NewEncoder(w).Encode(res)
	} else {
		util.HttpResponse(w, http.StatusInternalServerError, err.Error())
	}
}
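
// POST_Storages creates a new storage entity on the cluster identified by the
// cluster-id route variable. The creation runs as an asynchronous task that
// delegates to the cluster's provider and tracks the provider sub-task until
// it completes.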
func (a *App) POST_Storages(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	cluster_id_str := vars["cluster-id"]
	cluster_id, err := uuid.Parse(cluster_id_str)
	if err != nil {
		logger.Get().Error("Error parsing the cluster id: %s. error: %v", cluster_id_str, err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Error parsing the cluster id: %s", cluster_id_str))
		return
	}

	ok, err := ClusterDisabled(*cluster_id)
	if err != nil {
		logger.Get().Error("Error checking enabled state of cluster: %v. error: %v", *cluster_id, err)
		util.HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Error checking enabled state of cluster: %v", *cluster_id))
		return
	}
	if ok {
		logger.Get().Error("Cluster: %v is in disabled state", *cluster_id)
		util.HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Cluster: %v is in disabled state", *cluster_id))
		return
	}

	var request models.AddStorageRequest
	// Unmarshal the request body
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, models.REQUEST_SIZE_LIMIT))
	if err != nil {
		logger.Get().Error("Error parsing the request. error: %v", err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to parse the request: %v", err))
		return
	}
	if err := json.Unmarshal(body, &request); err != nil {
		logger.Get().Error("Unable to unmarshal request. error: %v", err)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Unable to unmarshal request: %v", err))
		return
	}

	// Check if storage entity already added
	// No need to check for error as storage would be nil in case of error and the same is checked
	if storage, _ := storage_exists("name", request.Name); storage != nil {
		logger.Get().Error("Storage entity: %s already added", request.Name)
		util.HttpResponse(w, http.StatusMethodNotAllowed, fmt.Sprintf("Storage entity: %s already added", request.Name))
		return
	}

	// Validate storage target size info
	if ok, err := valid_storage_size(request.Size); !ok || err != nil {
		logger.Get().Error("Invalid storage size: %v", request.Size)
		util.HttpResponse(w, http.StatusBadRequest, fmt.Sprintf("Invalid storage size: %s passed for: %s", request.Size, request.Name))
		return
	}

	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	// Get the specific provider and invoke the method
	asyncTask := func(t *task.Task) {
		t.UpdateStatus("Started the task for pool creation: %v", t.ID)
		nodes, err := getClusterNodesById(cluster_id)
		if err != nil {
			util.FailTask("Failed to get nodes", err, t)
			return
		}
		appLock, err := lockNodes(nodes, "Manage_Cluster")
		if err != nil {
			util.FailTask("Failed to acquire lock", err, t)
			return
		}
		defer a.GetLockManager().ReleaseLock(*appLock)

		provider := a.getProviderFromClusterId(*cluster_id)
		if provider == nil {
			util.FailTask("", fmt.Errorf("Error getting provider for cluster: %v", *cluster_id), t)
			return
		}
		err = provider.Client.Call(
			fmt.Sprintf("%s.%s", provider.Name, storage_post_functions["create"]),
			models.RpcRequest{RpcRequestVars: vars, RpcRequestData: body},
			&result)
		if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
			util.FailTask(fmt.Sprintf("Error creating storage: %s on cluster: %v", request.Name, *cluster_id), err, t)
			return
		}

		// Update the master task id
		providerTaskId, err = uuid.Parse(result.Data.RequestId)
		if err != nil {
			util.FailTask(fmt.Sprintf("Error parsing provider task id while creating storage: %s for cluster: %v", request.Name, *cluster_id), err, t)
			return
		}
		t.UpdateStatus("Adding sub task")
		if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
			util.FailTask(fmt.Sprintf("Error adding sub task while creating storage: %s on cluster: %v", request.Name, *cluster_id), err, t)
			return
		}

		// Check for provider task to complete and update the parent task.
		// The session is copied once outside the loop so it is not leaked on every poll.
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
		for {
			time.Sleep(2 * time.Second)
			var providerTask models.AppTask
			if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
				util.FailTask(fmt.Sprintf("Error getting sub task status while creating storage: %s on cluster: %v", request.Name, *cluster_id), err, t)
				return
			}
			if providerTask.Completed {
				if providerTask.Status == models.TASK_STATUS_SUCCESS {
					t.UpdateStatus("Success")
					t.Done(models.TASK_STATUS_SUCCESS)
				} else if providerTask.Status == models.TASK_STATUS_FAILURE {
					t.UpdateStatus("Failed")
					t.Done(models.TASK_STATUS_FAILURE)
				}
				break
			}
		}
	}

	if taskId, err := a.GetTaskManager().Run(fmt.Sprintf("Create Storage: %s", request.Name), asyncTask, nil, nil, nil); err != nil {
		logger.Get().Error("Unable to create task for create storage: %s on cluster: %v. error: %v", request.Name, *cluster_id, err)
		util.HttpResponse(w, http.StatusInternalServerError, "Task creation failed for create storage")
		return
	} else {
		logger.Get().Debug("Task Created: %v for creating storage: %s on cluster: %v", taskId, request.Name, *cluster_id)
		bytes, _ := json.Marshal(models.AsyncResponse{TaskId: taskId})
		w.WriteHeader(http.StatusAccepted)
		w.Write(bytes)
	}
}