Example #1
func volumeCreateHandler(w http.ResponseWriter, r *http.Request) {

	msg := new(volume.VolCreateRequest)

	httpStatusCode, e := validateVolumeCreateJSONRequest(msg, r)
	if e != nil {
		rest.SendHTTPError(w, httpStatusCode, e.Error())
		return
	}
	vol, e := createVolume(msg)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}
	httpStatusCode, e = validateVolumeCreate(msg, vol)
	if e != nil {
		rest.SendHTTPError(w, httpStatusCode, e.Error())
		return
	}
	httpStatusCode, e = commitVolumeCreate(vol)
	if e != nil {
		rollBackVolumeCreate(vol)
		rest.SendHTTPError(w, httpStatusCode, e.Error())
		return
	}
	rest.SendHTTPResponse(w, http.StatusCreated, vol)
}
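These handlers are plain http.HandlerFunc values, so wiring them into a server only takes a router. A minimal sketch, assuming gorilla/mux (which the handlers already rely on via mux.Vars); the route paths and port here are assumptions for illustration, not the real glusterd2 routes:

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	// Hypothetical paths and port; the handlers are the ones defined in
	// this listing.
	r := mux.NewRouter()
	r.HandleFunc("/v1/volumes", volumeCreateHandler).Methods("POST")
	r.HandleFunc("/v1/volumes/{volname}/start", volumeStartHandler).Methods("POST")
	log.Fatal(http.ListenAndServe(":24007", r))
}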
Example #2
func volumeStartHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	log.Info("In Volume start API")

	vol, e := volume.GetVolume(volname)
	if e != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrVolNotFound.Error())
		return
	}
	if vol.Status == volume.VolStarted {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrVolAlreadyStarted.Error())
		return
	}
	vol.Status = volume.VolStarted

	e = volume.AddOrUpdateVolumeFunc(vol)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}
	log.WithField("volume", vol.Name).Debug("Volume updated into the store")
	rest.SendHTTPResponse(w, http.StatusOK, vol)
}
Example #3
File: getpeers.go  Project: sac/glusterd2
func getPeersHandler(w http.ResponseWriter, r *http.Request) {
	if peers, err := peer.GetPeers(); err != nil {
		rest.SendHTTPError(w, http.StatusNotFound, err.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, peers)
	}
}
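On the client side these endpoints are ordinary REST calls. A hedged sketch of fetching the peer list, assuming a hypothetical /v1/peers path on the default local port:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Endpoint is an assumption; adjust host, port, and path to the deployment.
	resp, err := http.Get("http://localhost:24007/v1/peers")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(body))
}

A 200 response carries the JSON-encoded peer list written by rest.SendHTTPResponse; any other status carries the error string from rest.SendHTTPError.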
Example #4
func volumeStartHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	log.Info("In Volume start API")

	vol, e := volume.GetVolume(volname)
	if e != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrVolNotFound.Error())
		return
	}
	if vol.Status == volume.VolStarted {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrVolAlreadyStarted.Error())
		return
	}

	// A simple one-step transaction to start the brick processes
	txn := transaction.NewTxn()
	defer txn.Cleanup()
	lock, unlock, err := transaction.CreateLockSteps(volname)
	if err != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, err.Error())
		return
	}
	txn.Nodes = vol.Nodes()
	txn.Steps = []*transaction.Step{
		lock,
		&transaction.Step{
			DoFunc:   "vol-start.Commit",
			UndoFunc: "vol-start.Undo",
			Nodes:    txn.Nodes,
		},
		unlock,
	}
	txn.Ctx.Set("volname", volname)

	_, e = txn.Do()
	if e != nil {
		log.WithFields(log.Fields{
			"error":  e.Error(),
			"volume": volname,
		}).Error("failed to start volume")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	vol.Status = volume.VolStarted

	e = volume.AddOrUpdateVolumeFunc(vol)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}
	log.WithField("volume", vol.Name).Debug("Volume updated into the store")
	rest.SendHTTPResponse(w, http.StatusOK, vol)
}
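The DoFunc and UndoFunc fields name step functions rather than passing them directly, so the steps must be registered before the transaction runs. A hedged sketch of what a commit step and its registration could look like; the TxnCtx type, StepFunc signature, and RegisterStepFunc helper are assumptions modeled on how the steps are named here, not a verbatim copy of glusterd2's transaction package:

// Hypothetical step implementation for "vol-start.Commit"; the real
// signature and registration API may differ.
func startBricksOnNode(c transaction.TxnCtx) error {
	var volname string
	// Read the argument the handler stored with txn.Ctx.Set("volname", ...).
	if err := c.Get("volname", &volname); err != nil {
		return err
	}
	// ... start the brick processes for volname on this node ...
	return nil
}

func init() {
	// Register under the name used in the Step struct above.
	transaction.RegisterStepFunc(startBricksOnNode, "vol-start.Commit")
}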
Example #5
func volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	// Ensure that the volume exists.
	vol, err := volume.GetVolume(volname)
	if err != nil {
		rest.SendHTTPError(w, http.StatusNotFound, errors.ErrVolNotFound.Error())
		return
	}

	// A very simple free-form transaction to query each node for brick
	// status. Fetching volume status does not modify state/data on the
	// remote node. So there's no need for locks.
	txn := transaction.NewTxn()
	defer txn.Cleanup()
	txn.Nodes = vol.Nodes()
	txn.Steps = []*transaction.Step{
		&transaction.Step{
			DoFunc: "vol-status.Check",
			Nodes:  txn.Nodes,
		},
	}

	// The remote nodes get the arguments they need from the transaction context.
	txn.Ctx.Set("volname", volname)

	// As all key-value pairs stored in the transaction context end up in
	// the etcd store, the old txn context reference and the one returned
	// by txn.Do() are one and the same. The transaction context is a way
	// for the nodes to store the results of the step runs.
	rtxn, err := txn.Do()
	if err != nil {
		log.WithFields(log.Fields{
			"error":  err.Error(),
			"volume": volname,
		}).Error("volumeStatusHandler: Failed to get volume status.")
		rest.SendHTTPError(w, http.StatusInternalServerError, err.Error())
		return
	}

	// Example of how an aggregate function makes sense of the results of
	// running a step on multiple nodes. The transaction context holds the
	// results from each node, segregated by the node's UUID.
	result, err := aggregateVolumeStatus(rtxn, txn.Nodes)
	if err != nil {
		errMsg := "Failed to aggregate brick status results from multiple nodes."
		log.WithField("error", err.Error()).Error("volumeStatusHandler:" + errMsg)
		rest.SendHTTPError(w, http.StatusInternalServerError, errMsg)
		return
	}

	// Send aggregated result back to the client.
	rest.SendHTTPResponse(w, http.StatusOK, result)
}
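aggregateVolumeStatus itself is not shown in this example. A hedged sketch of what such an aggregation could look like, assuming each node wrote its result into the transaction context under a key derived from its UUID; the key scheme, the BrickStatus type, and the nodes being uuid.UUID values are all assumptions for illustration:

// Hypothetical per-node result type and key scheme.
type BrickStatus struct {
	NodeID string
	Online bool
}

func aggregateVolumeStatus(c transaction.TxnCtx, nodes []uuid.UUID) ([]BrickStatus, error) {
	var all []BrickStatus
	for _, node := range nodes {
		var s BrickStatus
		// Each node is assumed to have stored its status under "status-<uuid>".
		if err := c.Get("status-"+node.String(), &s); err != nil {
			return nil, err
		}
		all = append(all, s)
	}
	return all, nil
}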
Example #6
func volumeListHandler(w http.ResponseWriter, r *http.Request) {

	log.Info("In Volume list API")

	volumes, e := volume.GetVolumesList()

	if e != nil {
		rest.SendHTTPError(w, http.StatusNotFound, e.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, volumes)
	}
}
Example #7
func volumeInfoHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	log.Debug("In Volume info API")

	vol, e := volume.GetVolume(volname)
	if e != nil {
		rest.SendHTTPError(w, http.StatusNotFound, errors.ErrVolNotFound.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, vol)
	}
}
Example #8
func volumeListHandler(w http.ResponseWriter, r *http.Request) {

	log.Info("In Volume list API")

	// Simple read operations, which just read information from the store, need
	// not use the transaction framework
	volumes, e := volume.GetVolumesList()

	if e != nil {
		rest.SendHTTPError(w, http.StatusNotFound, e.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, volumes)
	}
}
Example #9
File: getpeer.go  Project: kshlm/glusterd2
func getPeerHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)

	id := p["peerid"]
	if id == "" {
		rest.SendHTTPError(w, http.StatusBadRequest, "peerid not present in request")
		return
	}

	if peer, err := peer.GetPeerF(id); err != nil {
		rest.SendHTTPError(w, http.StatusNotFound, err.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, peer)
	}
}
Example #10
func volumeInfoHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	log.Debug("In Volume info API")

	// Simple read operations, which just read information from the store, need
	// not use the transaction framework
	vol, e := volume.GetVolume(volname)
	if e != nil {
		rest.SendHTTPError(w, http.StatusNotFound, errors.ErrVolNotFound.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusOK, vol)
	}
}
Example #11
File: deletepeer.go  Project: sac/glusterd2
func deletePeerHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)

	id := p["peerid"]
	if id == "" {
		rest.SendHTTPError(w, http.StatusBadRequest, "peerid not present in the request")
		return
	}

	if !peer.Exists(id) {
		rest.SendHTTPError(w, http.StatusNotFound, "peer not found in cluster")
		return
	}

	if e := peer.DeletePeer(id); e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
	} else {
		rest.SendHTTPResponse(w, http.StatusNoContent, nil)
	}
}
Example #12
func volumeDeleteHandler(w http.ResponseWriter, r *http.Request) {
	p := mux.Vars(r)
	volname := p["volname"]

	log.Info("In Volume delete API")

	if !volume.Exists(volname) {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrVolNotFound.Error())
		return
	}

	e := volume.DeleteVolume(volname)
	if e != nil {
		log.WithFields(log.Fields{"error": e.Error(),
			"volume": volname,
		}).Error("Failed to delete the volume")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}
	rest.SendHTTPResponse(w, http.StatusOK, nil)
}
Example #13
File: addpeer.go  Project: sac/glusterd2
func addPeerHandler(w http.ResponseWriter, r *http.Request) {
	var req peer.PeerAddRequest
	if e := utils.GetJSONFromRequest(r, &req); e != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, e.Error())
		return
	}

	if len(req.Addresses) < 1 {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrNoHostnamesPresent.Error())
		return
	}

	if req.Name == "" {
		req.Name = req.Addresses[0]
	}

	//TODO: Do proper validation before initiating the add process

	//FIXME: In the correct add process, the peer being probed would add its
	//details to the store once it has been validated. The code below is just
	//a temporary stand-in to show how the APIs would work.

	p := &peer.Peer{
		ID:        uuid.NewRandom(),
		Name:      req.Name,
		Addresses: req.Addresses,
	}

	rsp, e := client.ValidateAddPeer(&req)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, *rsp.OpError)
		return
	}
	if e = peer.AddOrUpdatePeer(p); e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	rest.SendHTTPResponse(w, http.StatusOK, nil)

}
Example #14
func volumeCreateHandler(w http.ResponseWriter, r *http.Request) {
	req := new(volume.VolCreateRequest)

	httpStatus, e := unmarshalVolCreateRequest(req, r)
	if e != nil {
		rest.SendHTTPError(w, httpStatus, e.Error())
		return
	}

	if volume.ExistsFunc(req.Name) {
		rest.SendHTTPError(w, http.StatusInternalServerError, gderrors.ErrVolExists.Error())
		return
	}

	nodes, e := nodesForVolCreate(req)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	txn, e := (&transaction.SimpleTxn{
		Nodes:    nodes,
		LockKey:  req.Name,
		Stage:    "vol-create.Stage",
		Commit:   "vol-create.Commit",
		Store:    "vol-create.Store",
		Rollback: "vol-create.Rollback",
		LogFields: &log.Fields{
			"reqid": uuid.NewRandom().String(),
		},
	}).NewTxn()
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}
	defer txn.Cleanup()

	e = txn.Ctx.Set("req", req)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	vol, e := createVolinfo(req)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	e = txn.Ctx.Set("volinfo", vol)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	c, e := txn.Do()
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	e = c.Get("volinfo", &vol)
	if e == nil {
		rest.SendHTTPResponse(w, http.StatusCreated, vol)
		c.Logger().WithField("volname", vol.Name).Info("new volume created")
	} else {
		rest.SendHTTPError(w, http.StatusInternalServerError, "failed to get volinfo")
	}

	return
}
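SimpleTxn packages the usual lock/stage/commit/store/rollback sequence behind the named step functions. Because the transaction context round-trips through the store, steps running on other nodes can read back what the handler set. A hedged sketch of how the "vol-create.Stage" step might consume those keys; the StepFunc signature and the volume.Volinfo type name are assumptions, while the "req" and "volinfo" keys match the handler above:

// Hypothetical stage step; the real glusterd2 step may differ.
func volCreateStage(c transaction.TxnCtx) error {
	var req volume.VolCreateRequest
	if err := c.Get("req", &req); err != nil {
		return err
	}
	var vol volume.Volinfo
	if err := c.Get("volinfo", &vol); err != nil {
		return err
	}
	// ... verify that this node can host its bricks for the new volume ...
	return nil
}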
Example #15
File: addpeer.go  Project: kshlm/glusterd2
func addPeerHandler(w http.ResponseWriter, r *http.Request) {

	// FIXME: This is not txn based, yet. Behaviour when multiple simultaneous
	// add peer requests are sent to the same node is unknown.

	var req PeerAddReq
	if e := utils.GetJSONFromRequest(r, &req); e != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, e.Error())
		return
	}

	if len(req.Addresses) < 1 {
		rest.SendHTTPError(w, http.StatusBadRequest, errors.ErrNoHostnamesPresent.Error())
		return
	}

	// A peer can have multiple addresses. For now, we use only the first
	// address present in the req.Addresses list.

	remotePeerAddress, err := utils.FormRemotePeerAddress(req.Addresses[0])
	if err != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, err.Error())
		return
	}

	// This remote call will return the remote peer's ID (UUID), name,
	// and etcd peer URL.
	remotePeer, e := ValidateAddPeer(remotePeerAddress, &req)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, remotePeer.OpError)
		return
	}

	// TODO: Parse addresses considering ports to figure this out.
	if isPeerInCluster(remotePeer.UUID) {
		rest.SendHTTPError(w, http.StatusInternalServerError, "Peer already in cluster")
		return
	}

	// If the user hasn't specified a peer name, use the name returned by the
	// remote peer, which defaults to its hostname.
	if req.Name == "" {
		req.Name = remotePeer.PeerName
	}

	// Adding a member is a two-step process:
	//	1. Add the new member to the cluster via the members API. This is
	//	   performed on this node, i.e. the one that just accepted the peer
	//	   add request from the user.
	//	2. Start the new member on the target node (the new peer) with the
	//	   new cluster configuration, including a list of the updated
	//	   members (existing members + the new member).

	newMember, e := etcdmgmt.EtcdMemberAdd("http://" + remotePeer.EtcdPeerAddress)
	if e != nil {
		log.WithFields(log.Fields{
			"error":   e,
			"uuid":    remotePeer.UUID,
			"name":    req.Name,
			"address": remotePeer.EtcdPeerAddress,
		}).Error("Failed to add member to etcd cluster.")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	log.WithField("member-id", newMember.ID).Info("Added new member to etcd cluster")

	mlist, e := etcdmgmt.EtcdMemberList()
	if e != nil {
		log.WithField("error", e).Error("Failed to list members in etcd cluster")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	// Member name of the newly added etcd member has not been set at this point.
	conf := []string{}
	for _, memb := range mlist {
		for _, u := range memb.PeerURLs {
			n := memb.Name
			if memb.ID == newMember.ID {
				n = remotePeer.UUID
			}
			conf = append(conf, fmt.Sprintf("%s=%s", n, u))
		}
	}

	var etcdConf EtcdConfigReq
	etcdConf.EtcdName = remotePeer.UUID
	etcdConf.InitialCluster = strings.Join(conf, ",")
	etcdConf.ClusterState = "existing"

	log.WithField("initial-cluster", etcdConf.InitialCluster).Debug("Reconfiguring etcd on remote peer")

	etcdrsp, e := ConfigureRemoteETCD(remotePeerAddress, &etcdConf)
	if e != nil {
		log.WithField("err", e).Error("Failed to configure remote etcd")
		rest.SendHTTPError(w, http.StatusInternalServerError, etcdrsp.OpError)
		return
	}

	// Create a new peer object and add it to the store.
	p := &peer.Peer{
		ID:        uuid.Parse(remotePeer.UUID),
		Name:      req.Name,
		Addresses: req.Addresses,
		MemberID:  newMember.ID,
	}
	if e = peer.AddOrUpdatePeer(p); e != nil {
		log.WithFields(log.Fields{
			"error":     e,
			"peer/node": p.Name,
		}).Error("Failed to add peer into the etcd store")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	body := map[string]uuid.UUID{"id": p.ID}
	rest.SendHTTPResponse(w, http.StatusCreated, body)
}
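For reference, the InitialCluster string assembled above follows etcd's standard initial-cluster format: comma-separated name=peerURL pairs, one per member URL. A small self-contained sketch of what the loop produces, with made-up member names and addresses:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up member names and peer URLs, for illustration only. The new
	// member is named by its glusterd2 UUID, as in the handler above.
	conf := []string{
		"node1=http://10.0.0.1:2380",
		"node2=http://10.0.0.2:2380",
		"8a1e3b2c-0d4f-4b6a-9c7e-5f2d3e4a5b6c=http://10.0.0.3:2380",
	}
	// Prints the comma-joined initial-cluster string.
	fmt.Println(strings.Join(conf, ","))
}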
Example #16
func deletePeerHandler(w http.ResponseWriter, r *http.Request) {

	// FIXME: This is not txn based, yet. Behaviour when multiple simultaneous
	// delete peer requests are sent to the same node is unknown.

	peerReq := mux.Vars(r)

	id := peerReq["peerid"]
	if id == "" {
		rest.SendHTTPError(w, http.StatusBadRequest, "peerid not present in the request")
		return
	}
	// Check whether the member exists
	p, e := peer.GetPeerF(id)
	if e != nil || p == nil {
		rest.SendHTTPError(w, http.StatusNotFound, "peer not found in cluster")
		return
	}

	// Removing self should be disallowed (like in glusterd1)
	if id == gdctx.MyUUID.String() {
		rest.SendHTTPError(w, http.StatusBadRequest, "Removing self is disallowed.")
		return
	}

	remotePeerAddress, err := utils.FormRemotePeerAddress(p.Addresses[0])
	if err != nil {
		rest.SendHTTPError(w, http.StatusBadRequest, err.Error())
		return
	}

	// Validate whether the peer can be deleted
	rsp, e := ValidateDeletePeer(remotePeerAddress, id)
	if e != nil {
		rest.SendHTTPError(w, http.StatusInternalServerError, rsp.OpError)
		return
	}

	// Remove the peer from the store
	if e := peer.DeletePeer(id); e != nil {
		log.WithFields(log.Fields{
			"error": e,
			"peer":  id,
		}).Error("Failed to remove peer from the store")
		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	// Delete member from etcd cluster
	e = etcdmgmt.EtcdMemberRemove(p.MemberID)
	if e != nil {
		log.WithFields(log.Fields{
			"er":   e,
			"peer": id,
		}).Error("Failed to remove member from etcd cluster")

		rest.SendHTTPError(w, http.StatusInternalServerError, e.Error())
		return
	}

	// Remove the etcd data dir on the remote machine and restart etcd there
	// in standalone (single-node cluster) mode.
	var etcdConf EtcdConfigReq
	etcdConf.DeletePeer = true
	etcdrsp, e := ConfigureRemoteETCD(remotePeerAddress, &etcdConf)
	if e != nil {
		log.WithField("err", e).Error("Failed to configure remote etcd.")
		rest.SendHTTPError(w, http.StatusInternalServerError, etcdrsp.OpError)
		return
	}

	rest.SendHTTPResponse(w, http.StatusNoContent, nil)
}
Example #17
File: version.go  Project: sac/glusterd2
func getVersionHandler(w http.ResponseWriter, r *http.Request) {
	var v Response
	v.GlusterdVersion = context.GlusterdVersion
	v.APIVersion = context.APIVersion
	rest.SendHTTPResponse(w, http.StatusOK, v)
}