Example #1
func initializeStorageNode(node string, t *task.Task) error {
	sProfiles, err := skyring.GetDbProvider().StorageProfileInterface().StorageProfiles(nil, models.QueryOps{})
	if err != nil {
		logger.Get().Error("Unable to get the storage profiles. May not be able to apply storage profiles for node: %v err:%v", node, err)
	}
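	// Non-fatal: registration proceeds even if the storage profiles
	// could not be fetched.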
	if storage_node, ok := saltnodemanager.GetStorageNodeInstance(node, sProfiles); ok {
		if err := updateStorageNodeToDB(*storage_node); err != nil {
			logger.Get().Error("Unable to add details of node: %s to DB. error: %v", node, err)
			t.UpdateStatus("Unable to add details of node: %s to DB. error: %v", node, err)
			updateNodeState(node, models.NODE_STATE_FAILED)
			return err
		}
		if nodeErrorMap, configureError := skyring.GetCoreNodeManager().SetUpMonitoring(node, curr_hostname); configureError != nil {
			if len(nodeErrorMap) != 0 {
				logger.Get().Error("Unable to setup collectd on %s because of %v", node, nodeErrorMap)
				t.UpdateStatus("Unable to setup collectd on %s because of %v", node, nodeErrorMap)
			} else {
				logger.Get().Error("Config error during monitoring setup for node: %s Error: %v", node, configureError)
				t.UpdateStatus("Config error during monitoring setup for node: %s Error: %v", node, configureError)
			}
			updateNodeState(node, models.NODE_STATE_FAILED)
			return configureError
		}
		return nil
	} else {
		logger.Get().Critical("Error getting the details for node: %s", node)
		t.UpdateStatus("Error getting the details for node: %s", node)
		updateNodeState(node, models.NODE_STATE_FAILED)
		return fmt.Errorf("Error getting the details for node: %s", node)
	}
}
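The failure path in this example repeats the same three steps on every error branch: log, push the message to the task status, mark the node failed. Below is a minimal sketch of collapsing that triple into one helper; the Task type, its UpdateStatus behavior, and the state constant are simplified stand-ins for skyring's own, not its actual API.

package main

import "fmt"

// Task is a pared-down stand-in for skyring's task.Task.
type Task struct{ ID string }

func (t *Task) UpdateStatus(format string, args ...interface{}) {
	fmt.Printf("task %s: "+format+"\n", append([]interface{}{t.ID}, args...)...)
}

const NODE_STATE_FAILED = "failed"

func updateNodeState(node, state string) {
	fmt.Printf("node %s -> %s\n", node, state)
}

// failNode bundles the log/status/state triple used on every error
// branch of initializeStorageNode.
func failNode(t *Task, node, format string, args ...interface{}) error {
	err := fmt.Errorf(format, args...)
	t.UpdateStatus("%v", err)
	updateNodeState(node, NODE_STATE_FAILED)
	return err
}

func main() {
	t := &Task{ID: "42"}
	fmt.Println(failNode(t, "node-1", "unable to add details of node: %s to DB", "node-1"))
}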
Example #2
File: handler.go Project: skyrings/skyring
func drive_remove_handler(event models.AppEvent, ctxt string) (models.AppEvent, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var node models.Node
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}

	sProfiles, err := skyring.GetDbProvider().StorageProfileInterface().StorageProfiles(ctxt, nil, models.QueryOps{})
	if err != nil {
		logger.Get().Error("%s-Unable to get the storage profiles. err:%v", ctxt, err)
		return event, err
	}

	previous_disks := node.StorageDisks
	// sync the nodes to get new disks
	if ok, err := skyring.GetCoreNodeManager().SyncStorageDisks(node.Hostname, sProfiles, ctxt); err != nil || !ok {
		logger.Get().Error("%s-Failed to sync disk for host: %s Error: %v", ctxt, node.Hostname, err)
		return event, err
	}

	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}

	var exists bool
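	// A disk present before the sync but missing afterwards has been
	// removed; record its details on the event.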
	for _, disk := range previous_disks {
		exists = false
		for _, new_disk := range node.StorageDisks {
			if disk.DevName == new_disk.DevName {
				exists = true
				break
			}
		}
		if !exists && !disk.Used {
			event.EntityId = disk.DiskId
			event.Tags["DevName"] = disk.DevName
			event.Tags["DriveSize"] = strconv.FormatFloat(disk.Size, 'E', -1, 64)
			event.Tags["Type"] = disk.Type

		}
	}

	// Fill in the remaining details for the event.
	event.Name = skyring.EventTypes["DRIVE_REMOVE"]
	event.NotificationEntity = models.NOTIFICATION_ENTITY_HOST
	event.Message = fmt.Sprintf("Storage Drive: %s removed from Host:%s", event.Tags["DevName"], node.Hostname)
	event.Severity = models.ALARM_STATUS_CLEARED
	event.Notify = false
	return event, nil
}
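The removed-drive detection above diffs the two disk lists by device name with a nested loop. Here is a self-contained sketch of the same check using a set; Disk is a trimmed-down stand-in for models.Disk.

package main

import "fmt"

// Disk is a trimmed-down stand-in for models.Disk.
type Disk struct {
	DevName string
	Used    bool
}

// removedDisks returns the entries of previous whose DevName no longer
// appears in current, skipping disks that were in use.
func removedDisks(previous, current []Disk) []Disk {
	present := make(map[string]bool, len(current))
	for _, d := range current {
		present[d.DevName] = true
	}
	var removed []Disk
	for _, d := range previous {
		if !present[d.DevName] && !d.Used {
			removed = append(removed, d)
		}
	}
	return removed
}

func main() {
	prev := []Disk{{DevName: "/dev/sdb"}, {DevName: "/dev/sdc", Used: true}, {DevName: "/dev/sdd"}}
	curr := []Disk{{DevName: "/dev/sdb"}}
	fmt.Println(removedDisks(prev, curr)) // only /dev/sdd qualifies
}

For a handful of disks the nested loop is fine; the map mainly keeps the membership test linear and explicit.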
Example #3
File: handler.go Project: skyrings/skyring
func handle_UnManagedNode(hostname string, ctxt string) error {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	var node models.Node
	err := coll.Find(bson.M{"hostname": hostname}).One(&node)
	if err == mgo.ErrNotFound {
		node.Hostname = hostname
		node.State = models.NODE_STATE_UNACCEPTED
		if node.Fingerprint, err = skyring.GetCoreNodeManager().GetFingerPrint(node.Hostname, ctxt); err != nil {
			logger.Get().Error(fmt.Sprintf("%s-Faild to retrive fingerprint from : %s", ctxt, node.Hostname))
			return err
		}
		if err := coll.Insert(node); err != nil {
			logger.Get().Error(fmt.Sprintf("%s-Error adding Unmanaged node : %s. error: %v", ctxt, node.Hostname, err))
			return err
		}
		return nil
	}
	return errors.New(fmt.Sprintf("Node with hostname: %v already exists", hostname))
}
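The find-then-insert sequence above can still race if two events for the same hostname arrive at once: both goroutines may see ErrNotFound and both insert. A sketch of the same guard done atomically with an upsert and $setOnInsert; the MongoDB address and the database/collection names here are assumptions for illustration, not skyring's configuration.

package main

import (
	"log"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	session, err := mgo.Dial("localhost") // assumed local MongoDB
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Database and collection names are illustrative, not skyring's.
	coll := session.DB("skyring").C("storage_nodes")
	hostname := "node-1.example.com"

	// Insert the unaccepted node only if no document with this
	// hostname exists yet; an existing document is left untouched.
	info, err := coll.Upsert(
		bson.M{"hostname": hostname},
		bson.M{"$setOnInsert": bson.M{
			"hostname": hostname,
			"state":    "unaccepted",
		}},
	)
	if err != nil {
		log.Fatal(err)
	}
	if info.UpsertedId == nil {
		log.Printf("node %s already exists", hostname)
	}
}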
Example #4
File: handler.go Project: skyrings/skyring
func drive_add_handler(event models.AppEvent, ctxt string) (models.AppEvent, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var node models.Node
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}
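	// Ignore drive-add events from nodes that are not yet active.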
	if node.State != models.NODE_STATE_ACTIVE {
		return event, nil
	}
	existing_disks := node.StorageDisks
	sProfiles, err := skyring.GetDbProvider().StorageProfileInterface().StorageProfiles(ctxt, nil, models.QueryOps{})
	if err != nil {
		logger.Get().Error("%s-Unable to get the storage profiles. err:%v", ctxt, err)
		return event, err
	}

	// sync the nodes to get new disks
	if ok, err := skyring.GetCoreNodeManager().SyncStorageDisks(node.Hostname, sProfiles, ctxt); err != nil || !ok {
		logger.Get().Error("%s-Failed to sync disk for host: %s Error: %v", ctxt, node.Hostname, err)
		return event, err
	}

	// get the list of disks after syncing.
	if err := coll.Find(bson.M{"nodeid": event.NodeId}).One(&node); err != nil {
		logger.Get().Error("%s-Node information read from DB failed for node: %s. error: %v", ctxt, event.NodeId, err)
		return event, err
	}
	var cluster_nodes []models.ClusterNode
	var cluster_node models.ClusterNode
	cluster_node.NodeId = event.NodeId.String()
	cluster_node.NodeType = []string{models.NODE_TYPE_OSD}
	var exists bool
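	// A disk present after the sync but absent from the pre-sync list is
	// newly added; unused plain disks are queued as OSD devices for
	// cluster expansion and stamped onto the event.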
	for _, disk := range node.StorageDisks {
		exists = false
		for _, existing_disk := range existing_disks {
			if disk.DevName == existing_disk.DevName {
				exists = true
				break
			}
		}
		if !exists && !disk.Used && disk.Type == "disk" {
			cluster_node.Devices = append(cluster_node.Devices, models.ClusterNodeDevice{Name: disk.DevName, FSType: "xfs"})
			event.EntityId = disk.DiskId
			event.Tags["DevName"] = disk.DevName
			event.Tags["size"] = strconv.FormatFloat(disk.Size, 'E', -1, 64)
			event.Tags["Type"] = disk.Type
		}
	}

	// Fill in the remaining details for the event.
	event.Name = skyring.EventTypes["DRIVE_ADD"]
	event.NotificationEntity = models.NOTIFICATION_ENTITY_HOST
	event.Message = fmt.Sprintf("New Storage Drive: %s added to Host:%s", event.Tags["DevName"], node.Hostname)
	event.Severity = models.ALARM_STATUS_CLEARED
	event.Notify = false

	c := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_CLUSTERS)
	var cluster models.Cluster
	if err := c.Find(bson.M{"clusterid": event.ClusterId}).One(&cluster); err != nil {
		logger.Get().Error("%s-Cluster information read from DB failed for Cluster: %s. error: %v", ctxt, event.ClusterId, err)
		return event, nil
	}

	// check if cluster is in managed/un-managed state
	ok, err := skyring.ClusterUnmanaged(event.ClusterId)
	if err != nil {
		logger.Get().Warning("%s-Error checking managed state of cluster: %v. error: %v", ctxt, event.ClusterId, err)
		return event, err
	}
	if ok {
		logger.Get().Error("%s-Cluster: %v is in un-managed state", ctxt, event.ClusterId)
		return event, nil
	}

	vars := map[string]string{"cluster-id": event.ClusterId.String()}
	cluster_nodes = append(cluster_nodes, cluster_node)
	body, err := json.Marshal(cluster_nodes)
	if err != nil {
		logger.Get().Error("%s-Error forming request body. error: %v", ctxt, err)
		return event, nil
	}

	// lock the node for expanding

	var nodes models.Nodes
	if err := coll.Find(bson.M{"clusterid": event.ClusterId}).All(&nodes); err != nil {
		logger.Get().Error("%s-Node information read from DB failed . error: %v", ctxt, err)
		return event, nil
	}

	// check if autoExpand is enabled or not
	if !cluster.AutoExpand {
		return event, nil
	}

	// Expand cluster
	var result models.RpcResponse
	var providerTaskId *uuid.UUID
	asyncTask := func(t *task.Task) {
		sessionCopy := db.GetDatastore().Copy()
		defer sessionCopy.Close()
		for {
			select {
			case <-t.StopCh:
				return
			default:
				t.UpdateStatus("Started task for cluster expansion: %v", t.ID)
				appLock, err := skyring.LockNodes(ctxt, nodes, "Expand_Cluster")
				if err != nil {
					util.FailTask("Failed to acquire lock", fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				defer skyring.GetApp().GetLockManager().ReleaseLock(ctxt, *appLock)

				provider := skyring.GetApp().GetProviderFromClusterId(ctxt, node.ClusterId)
				if provider == nil {
					util.FailTask("", errors.New(fmt.Sprintf("%s-Error etting provider for cluster: %v", ctxt, event.ClusterId)), t)
					return
				}
				err = provider.Client.Call(fmt.Sprintf("%s.%s",
					provider.Name, "ExpandCluster"),
					models.RpcRequest{RpcRequestVars: vars, RpcRequestData: body, RpcRequestContext: ctxt},
					&result)
				if err != nil || (result.Status.StatusCode != http.StatusOK && result.Status.StatusCode != http.StatusAccepted) {
					util.FailTask(fmt.Sprintf("Error expanding cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				// Update the master task id
				providerTaskId, err = uuid.Parse(result.Data.RequestId)
				if err != nil {
					util.FailTask(fmt.Sprintf("Error parsing provider task id while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}
				t.UpdateStatus(fmt.Sprintf("Started provider task: %v", *providerTaskId))
				if ok, err := t.AddSubTask(*providerTaskId); !ok || err != nil {
					util.FailTask(fmt.Sprintf("Error adding sub task while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
					return
				}

				// Check for provider task to complete and update the disk info
				for {
					time.Sleep(2 * time.Second)
					coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_TASKS)
					var providerTask models.AppTask
					if err := coll.Find(bson.M{"id": *providerTaskId}).One(&providerTask); err != nil {
						util.FailTask(fmt.Sprintf("Error getting sub task status while expand cluster: %v", event.ClusterId), fmt.Errorf("%s-%v", ctxt, err), t)
						return
					}
					if providerTask.Completed {
						if providerTask.Status == models.TASK_STATUS_SUCCESS {
							t.UpdateStatus("Starting disk sync")
							if ok, err := skyring.GetCoreNodeManager().SyncStorageDisks(node.Hostname, sProfiles, ctxt); err != nil || !ok {
								logger.Get().Error("%s-Failed to sync disk for host: %s Error: %v", ctxt, node.Hostname, err)
								return
							}
							go skyring.ComputeSystemSummary(make(map[string]interface{}))
							t.UpdateStatus("Success")
							t.Done(models.TASK_STATUS_SUCCESS)
						} else {
							logger.Get().Error("%s-Failed to expand the cluster %s", ctxt, event.ClusterId)
							t.UpdateStatus("Failed")
							t.Done(models.TASK_STATUS_FAILURE)
						}
						break
					}
				}
				return
			}
		}
	}

	if taskId, err := skyring.GetApp().GetTaskManager().Run(
		models.ENGINE_NAME,
		fmt.Sprintf("Expand Cluster: %s", event.ClusterId.String()),
		asyncTask,
		nil,
		nil,
		nil); err != nil {
		logger.Get().Error("%s-Unable to create task to expand cluster: %v. error: %v", ctxt, event.ClusterId, err)
		return event, nil
	} else {
		logger.Get().Debug("%s-Task Created: %v to expand cluster: %v", ctxt, taskId, event.ClusterId)
		return event, nil
	}
}
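The inner polling loop above sleeps two seconds per round with no upper bound, so a provider sub-task that never completes leaves the expansion task spinning forever. A bounded sketch of the same wait; fetch is a hypothetical stand-in for the task-collection lookup, and AppTask is a pared-down stand-in for models.AppTask.

package main

import (
	"errors"
	"fmt"
	"time"
)

// AppTask is a pared-down stand-in for models.AppTask.
type AppTask struct {
	Completed bool
	Status    string
}

// waitForTask polls fetch every interval until the task reports
// completion or the deadline passes.
func waitForTask(fetch func() (AppTask, error), interval, timeout time.Duration) (AppTask, error) {
	deadline := time.Now().Add(timeout)
	for {
		task, err := fetch()
		if err != nil {
			return AppTask{}, err
		}
		if task.Completed {
			return task, nil
		}
		if time.Now().After(deadline) {
			return AppTask{}, errors.New("timed out waiting for provider task")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	task, err := waitForTask(func() (AppTask, error) {
		// Simulated provider task that completes after ~5 seconds.
		return AppTask{Completed: time.Since(start) > 5*time.Second, Status: "success"}, nil
	}, 2*time.Second, 30*time.Second)
	fmt.Println(task, err)
}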