// StartCluster starts all nodes in a cluster
func StartCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("StartCluster: error cluster ID required")
		rest.Error(w, "cluster ID required", http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//start the docker containers that make up this cluster
	containers, err := admindb.GetAllContainersForCluster(dbConn, cluster.ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	for i := range containers {
		req := &swarmapi.DockerStartRequest{}
		req.ContainerName = containers[i].Name
		logit.Info.Println("will attempt to start container " + req.ContainerName)
		response, err := swarmapi.DockerStart(req)
		if err != nil {
			logit.Error.Println("StartCluster: error when trying to start container: " + err.Error())
			continue
		}
		logit.Info.Println("StartCluster: started " + response.Output)
	}

	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteHeader(http.StatusOK)
	w.WriteJson(&status)
}
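
// startContainers is a hypothetical helper, not part of the original
// handler: a minimal sketch of how the start loop in StartCluster could
// aggregate per-container failures instead of only logging them. It assumes
// the swarmapi.DockerStart call behaves as used above.
func startContainers(containers []types.Container) error {
	failed := 0
	for i := range containers {
		req := &swarmapi.DockerStartRequest{}
		req.ContainerName = containers[i].Name
		response, err := swarmapi.DockerStart(req)
		if err != nil {
			//count the failure but keep starting the rest of the cluster
			failed++
			logit.Error.Println("startContainers: error starting " + req.ContainerName + ": " + err.Error())
			continue
		}
		logit.Info.Println("startContainers: started " + response.Output)
	}
	if failed > 0 {
		return fmt.Errorf("startContainers: %d of %d containers failed to start", failed, len(containers))
	}
	return nil
}
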
// GetAllNodesForCluster returns a list of nodes for a given cluster
func GetAllNodesForCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ClusterID := r.PathParam("ClusterID")
	if ClusterID == "" {
		logit.Error.Println("GetAllNodesForCluster: error ClusterID required")
		rest.Error(w, "ClusterID required", http.StatusBadRequest)
		return
	}

	results, err := admindb.GetAllContainersForCluster(dbConn, ClusterID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	nodes := make([]types.ClusterNode, len(results))
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ClusterName = results[i].ClusterName
	}

	w.WriteJson(&nodes)
}
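
// toClusterNode is a hypothetical helper, not in the original source: a
// minimal sketch that isolates the field-by-field copy performed in
// GetAllNodesForCluster. It assumes only the types.Container and
// types.ClusterNode fields used above.
func toClusterNode(c types.Container) types.ClusterNode {
	return types.ClusterNode{
		ID:          c.ID,
		Name:        c.Name,
		ClusterID:   c.ClusterID,
		Role:        c.Role,
		Image:       c.Image,
		CreateDate:  c.CreateDate,
		ProjectID:   c.ProjectID,
		ProjectName: c.ProjectName,
		ClusterName: c.ClusterName,
	}
}
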
// getMasterValues returns the master node, the pgpool node, and the
// hostnames of the master and standby nodes for a given cluster
func getMasterValues(dbConn *sql.DB, clusterID string, domainname string) (types.Container, types.Container, []string, error) {
	master := types.Container{}
	pgpool := types.Container{}
	//we build a list of node hostnames in this cluster
	//that will be added to the pg_hba.conf of the master
	//for allowing replication
	nodes, err := admindb.GetAllContainersForCluster(dbConn, clusterID)
	if err != nil {
		return master, pgpool, nil, err
	}

	masterFound := false
	pgpoolFound := false
	nodelist := make([]string, 0, len(nodes))

	for i := range nodes {
		switch nodes[i].Role {
		case "master":
			master = nodes[i]
			masterFound = true
			nodelist = append(nodelist, nodes[i].Name+"."+domainname)
		case "pgpool":
			pgpool = nodes[i]
			pgpoolFound = true
		case "standby":
			nodelist = append(nodelist, nodes[i].Name+"."+domainname)
		}
	}

	if !masterFound {
		return master, pgpool, nil, errors.New("no master found in this cluster")
	}
	if !pgpoolFound {
		return master, pgpool, nil, errors.New("no pgpool found in this cluster")
	}

	return master, pgpool, nodelist, nil
}
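
// replicationHBALines is a hypothetical sketch, not in the original source,
// of how the hostnames returned by getMasterValues might be rendered as
// pg_hba.conf replication entries on the master; the postgres user and the
// trust auth method shown here are illustrative assumptions. The resulting
// lines could, for example, be appended to pg_hba.conf before a reload.
func replicationHBALines(nodelist []string) []string {
	lines := make([]string, 0, len(nodelist))
	for _, host := range nodelist {
		//one host-based replication rule per cluster node
		lines = append(lines, "host replication postgres "+host+" trust")
	}
	return lines
}
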
// DeleteCluster deletes an existing cluster definition
func DeleteCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("cluster ID required")
		rest.Error(w, "cluster ID required", http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//delete the docker containers that make up this cluster
	containers, err := admindb.GetAllContainersForCluster(dbConn, cluster.ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	infoResponse, err := swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	pgdatapath, err := admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for i := range containers {
		//it is possible that someone removed a container
		//outside of us, so we let it pass if we cannot remove it
		dremreq := &swarmapi.DockerRemoveRequest{}
		dremreq.ContainerName = containers[i].Name
		_, err = swarmapi.DockerRemove(dremreq)
		if err != nil {
			logit.Error.Println("error when trying to remove container: " + err.Error())
		}

		//send all the servers a delete volume command
		ddreq := &cpmserverapi.DiskDeleteRequest{}
		ddreq.Path = pgdatapath.Value + "/" + containers[i].Name
		for _, each := range servers {
			_, err = cpmserverapi.DiskDeleteClient(each.Name, ddreq)
			if err != nil {
				logit.Error.Println("error when trying to remove disk volume: " + err.Error())
			}
		}
	}

	//delete the cluster entry
	err = admindb.DeleteCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//delete the container entries
	for i := range containers {
		err = admindb.DeleteContainer(dbConn, containers[i].ID)
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteHeader(http.StatusOK)
	w.WriteJson(&status)
}
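
// deleteVolumes is a hypothetical helper, not part of the original handler:
// a minimal sketch of the per-server disk cleanup fan-out in DeleteCluster.
// It assumes the cpmserverapi.DiskDeleteClient call used above and, like the
// handler, treats failures as non-fatal best-effort cleanup.
func deleteVolumes(servers []types.Server, dataPath string, containerName string) {
	ddreq := &cpmserverapi.DiskDeleteRequest{}
	ddreq.Path = dataPath + "/" + containerName
	for _, each := range servers {
		_, err := cpmserverapi.DiskDeleteClient(each.Name, ddreq)
		if err != nil {
			logit.Error.Println("deleteVolumes: error removing " + ddreq.Path + " on " + each.Name + ": " + err.Error())
		}
	}
}
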
// ScaleUpCluster increases the count of standby containers in a cluster
func ScaleUpCluster(w rest.ResponseWriter, r *rest.Request) {

	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("ScaleUpCluster: error cluster ID required")
		rest.Error(w, "cluster ID required", http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	containers, err := admindb.GetAllContainersForCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//determine the current number of standby nodes
	standbyCnt := 0
	for i := range containers {
		if containers[i].Role == STANDBY {
			standbyCnt++
		}
	}

	//logit.Info.Printf("standbyCnt ends at %d\n", standbyCnt)

	//provision new container
	params := new(swarmapi.DockerRunRequest)
	params.Image = "cpm-node"
	//TODO make the server choice smart
	params.ProjectID = cluster.ProjectID
	params.ContainerName = cluster.Name + "-" + STANDBY + "-" + fmt.Sprintf("%d", standbyCnt)
	params.Standalone = "false"
	var standby = true
	params.Profile = "LG"

	//logit.Info.Printf("here with ProjectID %s\n", cluster.ProjectID)

	_, err = provisionImpl(dbConn, params, standby)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = provisionImplInit(dbConn, params, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//update the new container's ClusterID and Role
	node, err := admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = cluster.ID
	node.Role = STANDBY
	err = admindb.UpdateContainer(dbConn, node)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}

	err = configureCluster(params.Profile, dbConn, cluster, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
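
// nextStandbyName is a hypothetical helper, not in the original source,
// showing the naming scheme ScaleUpCluster applies to a new standby:
// <cluster name>-<STANDBY role>-<current standby count>. Note that it
// mirrors the handler's counting logic, so the same name-collision caveats
// apply if standbys were previously removed.
func nextStandbyName(clusterName string, containers []types.Container) string {
	standbyCnt := 0
	for i := range containers {
		if containers[i].Role == STANDBY {
			standbyCnt++
		}
	}
	return clusterName + "-" + STANDBY + "-" + fmt.Sprintf("%d", standbyCnt)
}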