func (a SaltNodeManager) SyncStorageDisks(node string, sProfiles []models.StorageProfile) (bool, error) {
	disks, err := salt_backend.GetNodeDisk(node)
	if err != nil {
		return false, err
	}
	// Iterate by index: a range loop yields a copy of each element, so
	// assigning through the loop variable would never update the slice
	// that gets written to the DB below.
	for i := range disks {
		dId, err := uuid.New()
		if err != nil {
			logger.Get().Error(fmt.Sprintf("Unable to generate uuid for disk: %s. error: %v", disks[i].DevName, err))
			return false, err
		}
		disks[i].DiskId = *dId
		applyStorageProfile(&disks[i], sProfiles)
	}
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if len(disks) != 0 {
		if err := coll.Update(bson.M{"hostname": node}, bson.M{"$set": bson.M{"storagedisks": disks}}); err != nil {
			logger.Get().Error("Error updating the disk details for node: %s. error: %v", node, err)
			return false, err
		}
	}
	return true, nil
}
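Note that the loop above indexes into the slice: a Go range loop yields a copy of each element, so assigning through the loop variable would leave the stored disks untouched. A minimal, self-contained illustration of the pitfall:

package main

import "fmt"

type disk struct{ id int }

func main() {
	disks := []disk{{}, {}}
	for _, d := range disks {
		d.id = 42 // mutates a copy; the slice is unchanged
	}
	fmt.Println(disks) // [{0} {0}]
	for i := range disks {
		disks[i].id = 42 // mutates the element in place
	}
	fmt.Println(disks) // [{42} {42}]
}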
Example #2
func NewScheduler() (Scheduler, error) {
	id, err := uuid.New()
	if err != nil {
		return Scheduler{}, err
	}
	scheduler := Scheduler{Channel: make(chan string), Id: *id}
	schedules[*id] = scheduler
	return scheduler, nil
}
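A sketch of how a caller might use it (hypothetical usage; only the Scheduler fields visible above are assumed):

scheduler, err := NewScheduler()
if err != nil {
	return err
}
// The scheduler is now registered in the package-level schedules map
// under scheduler.Id; other goroutines can signal it over its channel.
go func() { scheduler.Channel <- "tick" }()
fmt.Println(<-scheduler.Channel) // prints: tick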
Example #3
func (manager *Manager) Run(name string, f func(t *Task), startedFunc func(t *Task), completedFunc func(t *Task), statusFunc func(t *Task, s *models.Status)) (uuid.UUID, error) {
	id, err := uuid.New()
	if err != nil {
		return uuid.UUID{}, err
	}
	task := Task{
		Mutex:            &sync.Mutex{},
		ID:               *id,
		Name:             name,
		DoneCh:           make(chan bool, 1),
		StatusList:       []models.Status{},
		StopCh:           make(chan bool, 1),
		Func:             f,
		StartedCbkFunc:   startedFunc,
		CompletedCbkFunc: completedFunc,
		StatusCbkFunc:    statusFunc,
	}
	task.Run()
	return *id, nil
}
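DoneCh and StopCh are buffered with capacity one, which lets a single completion or stop signal be sent without blocking even before any receiver is waiting. A standalone demonstration of that channel property:

package main

import "fmt"

func main() {
	done := make(chan bool, 1) // capacity 1: one send never blocks
	done <- true               // signal completion with no receiver yet
	fmt.Println(<-done)        // true
}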
Example #4
func RouteEvent(event models.NodeEvent) {
	var e models.Event
	e.Timestamp = event.Timestamp
	e.Tag = event.Tag
	e.Tags = event.Tags
	e.Message = event.Message
	e.Severity = event.Severity
	eventId, err := uuid.New()
	if err != nil {
		logger.Get().Error("Uuid generation for the event failed for node: %s. error: %v", event.Node, err)
		return
	}

	e.EventId = *eventId

	// querying DB to get node ID and Cluster ID for the event
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	var node models.Node
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE_NODES)
	if err := coll.Find(bson.M{"hostname": event.Node}).One(&node); err != nil {
		logger.Get().Error("Node information read from DB failed for node: %s. error: %v", event.Node, err)
		return
	}

	// Push the event to DB only if the node is managed
	if node.Hostname == "" || !node.Enabled {
		return
	}
	e.ClusterId = node.ClusterId
	e.NodeId = node.NodeId

	// Invoking the event handler
	for tag, handler := range handlermap {
		if match, err := filepath.Match(tag, e.Tag); err == nil {
			if match {
				if err := handler.(func(models.Event) error)(e); err != nil {
					logger.Get().Error("Event Handling Failed for event for node: %s. error: %v", node.Hostname, err)
					return
				}
				if err := Persist_event(e); err != nil {
					logger.Get().Error("Could not persist the event to DB for node: %s. error: %v", node.Hostname, err)
					return
				}
				// Broadcast the new event to all connected clients
				eventObj, err := json.Marshal(e)
				if err != nil {
					logger.Get().Error("Error marshalling the event data for node: %s. error: %v", node.Hostname, err)
					return
				}
				GetBroadcaster().chBroadcast <- string(eventObj)
				return
			}
		} else {
			logger.Get().Error("Error while maping handler for event for node: %s. error: %v", node.Hostname, err)
			return
		}
	}

	// Handle Provider specific events
	app := skyring.GetApp()
	if err := app.RouteProviderEvents(e); err != nil {
		logger.Get().Error("Event:%s could not be handled for node: %s. error: %v", e.Tag, node.Hostname, err)
	}

}
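Handler tags in handlermap are shell-style patterns matched against the event tag with filepath.Match. A self-contained example of the matching semantics (the tag strings are illustrative, not taken from the project):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.Match("skyring/node/*", "skyring/node/up")) // true <nil>
	fmt.Println(filepath.Match("skyring/node/*", "skyring/dbus/up")) // false <nil>
	fmt.Println(filepath.Match("[bad", "anything"))                  // false with ErrBadPattern
}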
Example #5
func GetStorageNodeInstance(hostname string, sProfiles []models.StorageProfile) (*models.Node, bool) {
	var storage_node models.Node
	storage_node.Hostname = hostname
	storage_node.Enabled = true
	// Any error here leaves NodeId zero-valued, which the final check below rejects
	storage_node.NodeId, _ = salt_backend.GetNodeID(hostname)
	networkInfo, err := salt_backend.GetNodeNetwork(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting network details for node: %s. error: %v", hostname, err))
		return nil, false
	}
	storage_node.NetworkInfo.Subnet = networkInfo.Subnet
	storage_node.NetworkInfo.Ipv4 = networkInfo.IPv4
	storage_node.NetworkInfo.Ipv6 = networkInfo.IPv6
	addrs, err := net.LookupHost(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error looking up node IP for: %s. error: %v", hostname, err))
		return nil, false
	}
	storage_node.ManagementIP4 = addrs[0]
	ok, err := salt_backend.NodeUp(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting status of node: %s. error: %v", hostname, err))
		return nil, false
	}
	if ok {
		storage_node.Status = models.STATUS_UP
	} else {
		storage_node.Status = models.STATUS_DOWN
	}
	disks, err := salt_backend.GetNodeDisk(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting disk details for node: %s. error: %v", hostname, err))
		return nil, false
	}
	for _, disk := range disks {
		dId, err := uuid.New()
		if err != nil {
			logger.Get().Error(fmt.Sprintf("Unable to generate uuid for disk : %s. error: %v", disk.DevName, err))
			return nil, false
		}
		disk.DiskId = *dId
		applyStorageProfile(&disk, sProfiles)
		storage_node.StorageDisks = append(storage_node.StorageDisks, disk)
	}

	cpus, err := salt_backend.GetNodeCpu(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting cpu details for node: %s. error: %v", hostname, err))
		return nil, false
	}
	storage_node.CPUs = append(storage_node.CPUs, cpus...)

	osInfo, err := salt_backend.GetNodeOs(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting os details for node: %s", hostname))
		return nil, false
	}
	storage_node.OS.Name = osInfo.Name
	storage_node.OS.OSVersion = osInfo.OSVersion
	storage_node.OS.KernelVersion = osInfo.KernelVersion
	storage_node.OS.SELinuxMode = osInfo.SELinuxMode

	memoryInfo, err := salt_backend.GetNodeMemory(hostname)
	if err != nil {
		logger.Get().Error(fmt.Sprintf("Error getting memory details for node: %s", hostname))
		return nil, false
	}
	storage_node.Memory.TotalSize = memoryInfo.TotalSize
	storage_node.Memory.SwapTotal = memoryInfo.SwapTotal
	storage_node.Memory.Active = memoryInfo.Active
	storage_node.Memory.Type = memoryInfo.Type

	if storage_node.NodeId.IsZero() || len(storage_node.NetworkInfo.Subnet) == 0 || len(storage_node.StorageDisks) == 0 {
		return nil, false
	}
	return &storage_node, true
}
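ManagementIP4 above is simply the first address returned by net.LookupHost, which may yield several addresses (including IPv6 ones) in resolver order. A small standalone check:

package main

import (
	"fmt"
	"net"
)

func main() {
	addrs, err := net.LookupHost("localhost")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	// All resolved addresses; the code above simply uses addrs[0].
	fmt.Println(addrs)
}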
Example #6
func sync_cluster_storage_entities(cluster models.Cluster, provider *Provider) (bool, error) {
	sessionCopy := db.GetDatastore().Copy()
	defer sessionCopy.Close()
	coll := sessionCopy.DB(conf.SystemConfig.DBConfig.Database).C(models.COLL_NAME_STORAGE)

	// Get the list of storage entities from DB
	var fetchedStorages models.Storages
	if err := coll.Find(bson.M{"clusterid": cluster.ClusterId}).All(&fetchedStorages); err != nil {
		logger.Get().Error("Error getting the storage entities for cluster: %v from DB. error: %v", cluster.ClusterId, err)
		return false, err
	}

	// Get the list of storages from cluster
	var result models.RpcResponse
	vars := make(map[string]string)
	vars["cluster-id"] = cluster.ClusterId.String()
	err := provider.Client.Call(provider.Name+".GetStorages", models.RpcRequest{RpcRequestVars: vars, RpcRequestData: []byte{}}, &result)
	if err != nil || result.Status.StatusCode != http.StatusOK {
		logger.Get().Error("Error getting storage details for cluster: %s. error: %v", cluster.Name, err)
		return false, fmt.Errorf("error getting storage details for cluster: %s. error: %v", cluster.Name, err)
	} else {
	} else {
		var storages []models.AddStorageRequest
		if err := json.Unmarshal(result.Data.Result, &storages); err != nil {
			logger.Get().Error("Error parsing result from provider for storages list of cluster: %s. error: %v", cluster.Name, err)
			return false, err
		}
		// Insert/update storages
		for _, storage := range storages {
			// Check if the pool already exists, if so update else insert
			if !storage_in_list(fetchedStorages, storage.Name) {
				// Not found, insert
				entity := models.Storage{
					ClusterId:    cluster.ClusterId,
					Name:         storage.Name,
					Type:         storage.Type,
					Replicas:     storage.Replicas,
					QuotaEnabled: storage.QuotaEnabled,
					QuotaParams:  storage.QuotaParams,
					Options:      storage.Options,
				}
				// Use a distinct name so the uuid package is not shadowed
				sid, err := uuid.New()
				if err != nil {
					logger.Get().Error("Error creating id for the new storage entity: %s. error: %v", storage.Name, err)
					return false, err
				}
				entity.StorageId = *sid
				if err := coll.Insert(entity); err != nil {
					logger.Get().Error("Error adding storage:%s to DB. error: %v", storage.Name, err)
					return false, err
				}
				logger.Get().Info("Added the new storage entity: %s", storage.Name)
			} else {
				// Update
				if err := coll.Update(
					bson.M{"name": storage.Name},
					bson.M{"$set": bson.M{
						"options":       storage.Options,
						"quota_enabled": storage.QuotaEnabled,
						"quota_params":  storage.QuotaParams,
					}}); err != nil {
					logger.Get().Error("Error updating the storage entity: %s. error: %v", storage.Name, err)
					return false, err
				}
				logger.Get().Info("Updated details of storage entity: %s", storage.Name)
			}
		}
		// Delete storages that are no longer present on the cluster
		for _, fetchedStorage := range fetchedStorages {
			found := false
			for _, storage := range storages {
				if storage.Name == fetchedStorage.Name {
					found = true
					break
				}
			}
			if !found {
				if err := coll.Remove(bson.M{"storageid": fetchedStorage.StorageId}); err != nil {
					logger.Get().Error("Error removing the storage: %s. error: %v", fetchedStorage.Name, err)
				}
			}
		}
	}

	return true, nil
}
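The storage_in_list helper used above is not shown in this example; given how it is called, a plausible implementation would be (hypothetical sketch):

// storage_in_list reports whether a storage with the given name
// exists in the list fetched from the DB (hypothetical; the real
// implementation may differ).
func storage_in_list(storages models.Storages, name string) bool {
	for _, storage := range storages {
		if storage.Name == name {
			return true
		}
	}
	return false
}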