Example #1
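// readDatabase reads the cluster database from the KVDB instance; a missing
// or empty key yields an empty, uninitialized database.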
func readDatabase() (Database, error) {
	kvdb := kvdb.Instance()

	db := Database{
		Status:      api.Status_STATUS_INIT,
		NodeEntries: make(map[string]NodeEntry),
	}

	kv, err := kvdb.Get(ClusterDBKey)
	if err != nil && !strings.Contains(err.Error(), "Key not found") {
		dlog.Warnln("Warning, could not read cluster database")
		return db, err
	}

	if kv == nil || bytes.Equal(kv.Value, []byte("{}")) {
		dlog.Infoln("Cluster is uninitialized...")
		return db, nil
	}
	if err := json.Unmarshal(kv.Value, &db); err != nil {
		dlog.Warnln("Fatal, Could not parse cluster database ", kv)
		return db, err
	}

	return db, nil
}
Example #2
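// getLatestNodeConfig re-reads the cluster database and returns the NodeEntry
// for nodeId, or nil if the database cannot be read or the node is unknown.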
func (c *ClusterManager) getLatestNodeConfig(nodeId string) *NodeEntry {
	db, err := readDatabase()
	if err != nil {
		dlog.Warnln("Failed to read the database for updating config")
		return nil
	}

	ne, exists := db.NodeEntries[nodeId]
	if !exists {
		dlog.Warnln("Could not find info for node with id ", nodeId)
		return nil
	}

	return &ne
}
Example #3
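// startServer registers the given routes on a new mux router and serves them
// over a UNIX socket, and optionally on a TCP port as well.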
func startServer(name string, sockBase string, port int, routes []*Route) error {
	var (
		listener net.Listener
		err      error
	)
	router := mux.NewRouter()
	router.NotFoundHandler = http.HandlerFunc(notFound)

	for _, v := range routes {
		router.Methods(v.verb).Path(v.path).HandlerFunc(v.fn)
	}
	socket := path.Join(sockBase, name+".sock")
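	// Remove any stale socket file and make sure its parent directory exists.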
	os.Remove(socket)
	os.MkdirAll(path.Dir(socket), 0755)

	dlog.Printf("Starting REST service on %+v", socket)
	listener, err = net.Listen("unix", socket)
	if err != nil {
		dlog.Warnln("Cannot listen on UNIX socket: ", err)
		return err
	}
	go http.Serve(listener, router)
	if port != 0 {
		go http.ListenAndServe(fmt.Sprintf(":%v", port), router)
	}
	return nil
}
Example #4
// SetSize sets the maximum number of nodes in a cluster.
func (c *ClusterManager) SetSize(size int) error {
	kvdb := kvdb.Instance()
	kvlock, err := kvdb.Lock(clusterLockKey, 20)
	if err != nil {
		dlog.Warnln("Unable to obtain cluster lock for updating config", err)
		return nil
	}
	defer kvdb.Unlock(kvlock)

	db, err := readDatabase()
	if err != nil {
		return err
	}

	db.Size = size

	return writeDatabase(&db)
}
Example #5
// watchDB is invoked when the cluster database changes; it re-reads the
// database and refreshes the cluster size and gossip membership.
func (c *ClusterManager) watchDB(key string, opaque interface{},
	kvp *kvdb.KVPair, err error) error {

	db, err := readDatabase()
	if err != nil {
		dlog.Warnln("Failed to read database after update ", err)
		return nil
	}

	// The only value we rely on during an update is the cluster size.
	c.size = db.Size
	for id, n := range db.NodeEntries {
		if id != c.config.NodeId {
			// Check to see if the IP is the same.  If it is, then we have a stale entry.
			c.gossip.UpdateNode(n.MgmtIp+":9002", types.NodeId(id))
		}
	}

	return nil
}
Example #6
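// startHeartBeat publishes this node's state to the gossip store every two
// seconds and warns if updates fall more than ten seconds behind.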
func (c *ClusterManager) startHeartBeat() {
	gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)

	node := c.getCurrentState()
	c.gossip.UpdateSelf(gossipStoreKey, *node)
	c.gossip.Start()

	lastUpdateTs := time.Now()
	for {
		node = c.getCurrentState()

		currTime := time.Now()
		diffTime := currTime.Sub(lastUpdateTs)
		if diffTime > 10*time.Second {
			dlog.Warnln("No gossip update for ", diffTime.Seconds(), "s")
		}
		c.gossip.UpdateSelf(gossipStoreKey, *node)
		lastUpdateTs = currTime

		time.Sleep(2 * time.Second)
	}
}
Example #7
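// Init builds a coprhd volume driver from the url, token, project, varray,
// vpool, and port parameters.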
func Init(params map[string]string) (volume.VolumeDriver, error) {
	host, ok := params["url"]
	if !ok {
		return nil, ErrApiUrlRequired
	}

	token, ok := params["token"]
	if !ok {
		return nil, ErrApiAuthTokenRequired
	}

	// create a coprhd api client instance
	client := coprhd.NewClient(host, token)

	d := &driver{
		DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()),
		client:            client,
	}

	if projectName, ok := params["project"]; ok {
		project, err := client.Project().Name(projectName).Query()
		if err != nil {
			return nil, err
		}
		d.project = project
	} else {
		dlog.Warnln("Default coprhd 'project' not set")
	}

	if varrayName, ok := params["varray"]; ok {
		varray, err := client.VArray().Name(varrayName).Query()
		if err != nil {
			return nil, err
		}
		d.varray = varray
	} else {
		dlog.Warnln("Default coprhd 'varray' not set")
	}

	if vpoolName, ok := params["vpool"]; ok {
		vpool, err := client.VPool().Name(vpoolName).Query()
		if err != nil {
			return nil, err
		}
		d.vpool = vpool
	} else {
		dlog.Warnln("Default coprhd 'vpool' not set")
	}

	port, ok := params["port"]
	if !ok {
		return nil, ErrPortRequired
	}
	initiator, err := client.Initiator().Port(port).Query()
	if err != nil {
		return nil, err
	}
	d.initiator = initiator

	return d, nil
}
Example #8
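// EnableUpdates re-enables listener notifications for gossip status changes.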
func (c *ClusterManager) EnableUpdates() error {
	dlog.Warnln("Enabling gossip updates")
	c.gEnabled = true

	return nil
}
Example #9
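// DisableUpdates stops listener notifications for gossip status changes.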
func (c *ClusterManager) DisableUpdates() error {
	dlog.Warnln("Disabling gossip updates")
	c.gEnabled = false

	return nil
}
Example #10
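// updateClusterStatus periodically reads gossip heartbeats from peer nodes,
// notifies listeners of status changes, and refreshes the node cache.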
func (c *ClusterManager) updateClusterStatus() {
	gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)

	for {
		node := c.getCurrentState()
		c.nodeCache[node.Id] = *node

		// Process heartbeats from other nodes...
		gossipValues := c.gossip.GetStoreKeyValue(gossipStoreKey)

		numNodes := 0
		for id, nodeInfo := range gossipValues {
			numNodes++

			// Check to make sure we are not exceeding the size of the cluster.
			if c.size > 0 && numNodes > c.size {
				dlog.Fatalf("Fatal, number of nodes in the cluster has"+
					"exceeded the cluster size: %d > %d", numNodes, c.size)
				os.Exit(-1)
			}

			// Ignore updates from self node.
			if id == types.NodeId(node.Id) {
				continue
			}

			// Notify node status change if required.
			newNodeInfo := api.Node{}
			newNodeInfo.Id = string(id)
			newNodeInfo.Status = api.Status_STATUS_OK

			switch {
			case nodeInfo.Status == types.NODE_STATUS_DOWN:
				newNodeInfo.Status = api.Status_STATUS_OFFLINE
				lastStatus, ok := c.nodeStatuses[string(id)]
				if ok && lastStatus == newNodeInfo.Status {
					break
				}

				// Check if it is a stale update
				ne := c.getLatestNodeConfig(string(id))
				if ne != nil && nodeInfo.GenNumber != 0 &&
					nodeInfo.GenNumber < ne.GenNumber {
					dlog.Warnln("Detected stale update for node ", id,
						" going down, ignoring it")
					c.gossip.MarkNodeHasOldGen(id)
					break
				}
				c.nodeStatuses[string(id)] = newNodeInfo.Status

				dlog.Warnln("Detected node ", id,
					" to be offline due to inactivity.")

				for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
					err := e.Value.(ClusterListener).Update(&newNodeInfo)
					if err != nil {
						dlog.Warnln("Failed to notify ",
							e.Value.(ClusterListener).String())
					}
				}

			case nodeInfo.Status == types.NODE_STATUS_DOWN_WAITING_FOR_NEW_UPDATE:
				newNodeInfo.Status = api.Status_STATUS_OFFLINE
				lastStatus, ok := c.nodeStatuses[string(id)]
				if ok && lastStatus == newNodeInfo.Status {
					break
				}
				c.nodeStatuses[string(id)] = newNodeInfo.Status

				dlog.Warnln("Detected node ", newNodeInfo.Id,
					" to be offline due to inactivity.")

				for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
					err := e.Value.(ClusterListener).Update(&newNodeInfo)
					if err != nil {
						dlog.Warnln("Failed to notify ",
							e.Value.(ClusterListener).String())
					}
				}

			case nodeInfo.Status == types.NODE_STATUS_UP:
				newNodeInfo.Status = api.Status_STATUS_OK
				lastStatus, ok := c.nodeStatuses[string(id)]
				if ok && lastStatus == newNodeInfo.Status {
					break
				}
				c.nodeStatuses[string(id)] = newNodeInfo.Status

				// A node discovered in the cluster.
				dlog.Warnln("Detected node ", newNodeInfo.Id,
					" to be in the cluster.")

				for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
					err := e.Value.(ClusterListener).Add(&newNodeInfo)
					if err != nil {
						dlog.Warnln("Failed to notify ",
							e.Value.(ClusterListener).String())
					}
				}
			}

			// Update cache.
			if nodeInfo.Value != nil {
				n, ok := nodeInfo.Value.(api.Node)
				if ok {
					n.Status = newNodeInfo.Status
					c.nodeCache[n.Id] = n
				} else {
					c.nodeCache[newNodeInfo.Id] = newNodeInfo
				}
			} else {
				newNodeInfo.Status = api.Status_STATUS_OFFLINE
				c.nodeCache[newNodeInfo.Id] = newNodeInfo
			}
		}

		time.Sleep(2 * time.Second)
	}
}
Example #11
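// start is the daemon entry point: it loads the OSD configuration,
// initializes the KVDB, and starts the cluster, volume, FlexVolume, and
// graph driver services.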
func start(c *cli.Context) {

	if !osdcli.DaemonMode(c) {
		cli.ShowAppHelp(c)
		return
	}

	datastores := []string{mem.Name, etcd.Name, consul.Name}

	// We are in daemon mode.
	file := c.String("file")
	if file == "" {
		dlog.Warnln("OSD configuration file not specified.  Visit openstorage.org for an example.")
		return
	}

	cfg, err := config.Parse(file)
	if err != nil {
		dlog.Errorln(err)
		return
	}
	kvdbURL := c.String("kvdb")
	u, err := url.Parse(kvdbURL)
	if err != nil {
		dlog.Warnf("Failed to parse kvdb URL %v: %v", kvdbURL, err)
		return
	}
	scheme := u.Scheme
	u.Scheme = "http"

	kv, err := kvdb.New(scheme, "openstorage", []string{u.String()}, nil)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v (%v)", scheme, err)
		dlog.Warnf("Supported datastores: %v", datastores)
		return
	}
	err = kvdb.SetInstance(kv)
	if err != nil {
		dlog.Warnf("Failed to initialize KVDB: %v", err)
		return
	}

	// Start the cluster state machine, if enabled.
	clusterInit := false
	if cfg.Osd.ClusterConfig.NodeId != "" && cfg.Osd.ClusterConfig.ClusterId != "" {
		dlog.Infof("OSD enabling cluster mode.")

		if err := cluster.Init(cfg.Osd.ClusterConfig); err != nil {
			dlog.Errorln("Unable to init cluster server: %v", err)
			return
		}
		clusterInit = true

		if err := server.StartClusterAPI(config.ClusterAPIBase); err != nil {
			dlog.Warnf("Unable to start cluster API server: %v", err)
			return
		}
	}

	// Start the volume drivers.
	for d, v := range cfg.Osd.Drivers {
		dlog.Infof("Starting volume driver: %v", d)
		if _, err := volume.New(d, v); err != nil {
			dlog.Warnf("Unable to start volume driver: %v, %v", d, err)
			return
		}

		if err := server.StartPluginAPI(d, config.DriverAPIBase, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start volume plugin: %v", err)
			return
		}
	}

	if err := server.StartFlexVolumeAPI(config.FlexVolumePort); err != nil {
		dlog.Warnf("Unable to start flexvolume API: %v", err)
		return
	}

	// Start the graph drivers.
	for d := range cfg.Osd.GraphDrivers {
		dlog.Infof("Starting graph driver: %v", d)
		if err := server.StartGraphAPI(d, config.PluginAPIBase); err != nil {
			dlog.Warnf("Unable to start graph plugin: %v", err)
			return
		}
	}

	if clusterInit {
		cm, err := cluster.Inst()
		if err != nil {
			dlog.Warnf("Unable to find cluster instance: %v", err)
			return
		}
		if err := cm.Start(); err != nil {
			dlog.Warnf("Unable to start cluster manager: %v", err)
			return
		}
	}

	// Daemon does not exit.
	select {}
}
Example #12
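// testPrint exercises structured (WithField) and plain dlog output.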
func testPrint(t *testing.T) {
	dlog.WithField("key", "value").WithField("int", 1).Infof("number %d", 2)
	dlog.Warnln("warning line")
}