// provisionImpl creates a single CPM container: it verifies the container
// name is not already in use, looks up the CPU/memory settings for the given
// PROFILE, removes any existing OpenShift pod and service with that name,
// generates and submits the Kubernetes pod and service templates, records the
// new container in the admin database, and registers the default database users.
func provisionImpl(dbConn *sql.DB, params *cpmserverapi.DockerRunRequest, PROFILE string, standby bool) error {
	logit.Info.Println("provisionImpl: starts, PROFILE=" + PROFILE)

	var errorStr string
	//make sure the container name is not already taken
	_, err := admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		if err != sql.ErrNoRows {
			return err
		}
	} else {
		errorStr = "container name" + params.ContainerName + " already used can't provision"
		logit.Error.Println("Provision error" + errorStr)
		return errors.New(errorStr)
	}

	//create the container by constructing a template and calling openshift

	params.CPU, params.MEM, err = getDockerResourceSettings(dbConn, PROFILE)
	if err != nil {
		logit.Error.Println("Provision: problem in getting profiles call" + err.Error())
		return err
	}

	//remove any existing pod and service with this name
	var username = "******"
	var password = "******"
	var objectName = params.ContainerName
	var objectType = "pod"

	err = OpenshiftDelete(username, password, objectName, objectType)
	if err != nil {
		logit.Info.Println("Provision:" + err.Error())
	}

	objectType = "service"
	err = OpenshiftDelete(username, password, objectName, objectType)
	if err != nil {
		logit.Info.Println("Provision:" + err.Error())
	}

	podInfo := template.KubePodParams{
		NAME:                 params.ContainerName,
		ID:                   params.ContainerName,
		PODID:                params.ContainerName,
		CPU:                  params.CPU,
		MEM:                  params.MEM,
		IMAGE:                params.Image,
		VOLUME:               params.PGDataPath,
		PORT:                 "13000",
		BACKUP_NAME:          "",
		BACKUP_SERVERNAME:    "",
		BACKUP_SERVERIP:      "",
		BACKUP_SCHEDULEID:    "",
		BACKUP_PROFILENAME:   "",
		BACKUP_CONTAINERNAME: "",
		BACKUP_PATH:          "",
		BACKUP_HOST:          "",
		BACKUP_PORT:          "",
		BACKUP_USER:          "",
		BACKUP_SERVER_URL:    "",
	}

	//generate the pod template
	var podTemplateData []byte
	podTemplateData, err = template.KubeNodePod(podInfo)
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}
	logit.Info.Println("pod template=" + string(podTemplateData[:]))

	//create the pod
	file, err := ioutil.TempFile("/tmp", "openshift-template")
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}
	defer os.Remove(file.Name())
	err = ioutil.WriteFile(file.Name(), podTemplateData, 0644)
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}

	err = OpenshiftCreate(username, password, file.Name())
	if err != nil {
		logit.Info.Println("Provision:" + err.Error())
	}

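	//look up the postgres port setting; it is used as DBPORT in the admin service template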
	var pgport admindb.Setting
	pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		logit.Error.Println("Provision:PG-PORT setting error " + err.Error())
		return err
	}

	//generate the admin service template
	serviceInfo := template.KubeServiceParams{
		SERVICENAME: params.ContainerName,
		NAME:        params.ContainerName,
		PORT:        "10001",
		DBPORT:      pgport.Value,
	}

	//create the admin service template
	var serviceTemplateData []byte
	serviceTemplateData, err = template.KubeNodeService(serviceInfo)
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}
	logit.Info.Println("service template=" + string(serviceTemplateData[:]))

	file, err = ioutil.TempFile("/tmp", "openshift-template")
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}
	defer os.Remove(file.Name())
	err = ioutil.WriteFile(file.Name(), serviceTemplateData, 0644)
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}

	//create the service
	err = OpenshiftCreate(username, password, file.Name())
	if err != nil {
		logit.Info.Println("Provision:" + err.Error())
	}

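	//record the new container in the admin database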
	dbnode := admindb.Container{}
	dbnode.ID = ""
	dbnode.Name = params.ContainerName
	dbnode.Image = params.Image
	dbnode.ClusterID = "-1"
	dbnode.ProjectID = params.ProjectID
	dbnode.ServerID = params.ServerID

	if params.Standalone == "true" {
		dbnode.Role = "standalone"
	} else {
		dbnode.Role = "unassigned"
	}

	var strid int
	strid, err = admindb.InsertContainer(dbConn, dbnode)
	if err != nil {
		logit.Error.Println("Provision:" + err.Error())
		return err
	}
	dbnode.ID = strconv.Itoa(strid)

	//register default db users on the new node
	err = createDBUsers(dbConn, dbnode)

	return err

}
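// AutoCluster creates an entire cluster from a single REST call. The JSON
// payload (AutoClusterInfo) supplies Name, ClusterType, ProjectID,
// ClusterProfile, and Token. The handler inserts the cluster definition,
// chooses servers using the profile's algorithm, provisions the master, the
// standby containers, and a pgpool container, initializes the master
// database, waits for all members to respond, and then configures the cluster.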
func AutoCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()
	logit.Info.Println("AUTO CLUSTER PROFILE starts")
	logit.Info.Println("AutoCluster: start AutoCluster")
	params := AutoClusterInfo{}
	err = r.DecodeJsonPayload(&params)
	if err != nil {
		logit.Error.Println("AutoCluster: error in decode" + err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	err = secimpl.Authorize(dbConn, params.Token, "perm-cluster")
	if err != nil {
		logit.Error.Println("AutoCluster: authorize error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	if params.Name == "" {
		logit.Error.Println("AutoCluster: error in Name")
		rest.Error(w, "cluster name required", http.StatusBadRequest)
		return
	}
	if params.ClusterType == "" {
		logit.Error.Println("AutoCluster: error in ClusterType")
		rest.Error(w, "ClusterType name required", http.StatusBadRequest)
		return
	}
	if params.ProjectID == "" {
		logit.Error.Println("AutoCluster: error in ProjectID")
		rest.Error(w, "ProjectID name required", http.StatusBadRequest)
		return
	}
	if params.ClusterProfile == "" {
		logit.Error.Println("AutoCluster: error in ClusterProfile")
		rest.Error(w, "ClusterProfile name required", http.StatusBadRequest)
		return
	}

	logit.Info.Println("AutoCluster: Name=" + params.Name + " ClusterType=" + params.ClusterType + " Profile=" + params.ClusterProfile + " ProjectID=" + params.ProjectID)

	//create cluster definition
	dbcluster := admindb.Cluster{"", params.ProjectID, params.Name, params.ClusterType, "uninitialized", "", make(map[string]string)}
	var ival int
	ival, err = admindb.InsertCluster(dbConn, dbcluster)
	if err != nil {
		logit.Error.Println("AutoCluster:" + err.Error())
		rest.Error(w, "Insert Cluster error: "+err.Error(), http.StatusBadRequest)
		return
	}
	clusterID := strconv.Itoa(ival)
	dbcluster.ID = clusterID
	logit.Info.Println("AutoCluster: new cluster ID " + clusterID)

	//lookup profile
	profile, err2 := getClusterProfileInfo(dbConn, params.ClusterProfile)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-" + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	var masterServer admindb.Server
	var chosenServers []admindb.Server
	if profile.Algo == "round-robin" {
		masterServer, chosenServers, err2 = roundRobin(dbConn, profile)
	} else {
		logit.Error.Println("AutoCluster: error-unsupported algorithm request")
		rest.Error(w, "AutoCluster error: unsupported algorithm", http.StatusBadRequest)
		return
	}

	//create master container
	dockermaster := cpmserverapi.DockerRunRequest{}
	dockermaster.Image = CPM_NODE_IMAGE
	dockermaster.ContainerName = params.Name + "-master"
	dockermaster.ServerID = masterServer.ID
	dockermaster.ProjectID = params.ProjectID
	dockermaster.Standalone = "false"

	//provision the master
	err2 = provisionImpl(dbConn, &dockermaster, profile.MasterProfile, false)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provision master " + err2.Error())
		rest.Error(w, "AutoCluster error: "+err2.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("AUTO CLUSTER PROFILE master container created")
	var node admindb.Container
	//update node with cluster iD
	node, err2 = admindb.GetContainerByName(dbConn, dockermaster.ContainerName)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-get node by name " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = clusterID
	node.Role = "master"
	err2 = admindb.UpdateContainer(dbConn, node)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-update standby node " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//create standby containers
	var count int
	count, err2 = strconv.Atoi(profile.Count)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, err2.Error(), http.StatusBadRequest)
		return
	}

	dockerstandby := make([]cpmserverapi.DockerRunRequest, count)
	for i := 0; i < count; i++ {
		logit.Info.Println("working on standby ....")
		//	loop - provision standby
		dockerstandby[i].ServerID = chosenServers[i].ID
		dockerstandby[i].ProjectID = params.ProjectID
		dockerstandby[i].Image = CPM_NODE_IMAGE
		dockerstandby[i].ContainerName = params.Name + "-" + STANDBY + "-" + strconv.Itoa(i)
		dockerstandby[i].Standalone = "false"
		err2 = provisionImpl(dbConn, &dockerstandby[i], profile.StandbyProfile, true)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-provision master " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}

		//update node with cluster iD
		node, err2 = admindb.GetContainerByName(dbConn, dockerstandby[i].ContainerName)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-get node by name " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}

		node.ClusterID = clusterID
		node.Role = STANDBY
		err2 = admindb.UpdateContainer(dbConn, node)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-update standby node " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}
	}
	logit.Info.Println("AUTO CLUSTER PROFILE standbys created")
	//create and provision the pgpool container
	dockerpgpool := cpmserverapi.DockerRunRequest{}
	dockerpgpool.ContainerName = params.Name + "-pgpool"
	dockerpgpool.Image = CPM_PGPOOL_IMAGE
	dockerpgpool.ServerID = chosenServers[count].ID
	dockerpgpool.ProjectID = params.ProjectID
	dockerpgpool.Standalone = "false"

	err2 = provisionImpl(dbConn, &dockerpgpool, profile.StandbyProfile, true)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provision pgpool " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("AUTO CLUSTER PROFILE pgpool created")
	//update node with cluster ID
	node, err2 = admindb.GetContainerByName(dbConn, dockerpgpool.ContainerName)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-get pgpool node by name " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = clusterID
	node.Role = "pgpool"
	err2 = admindb.UpdateContainer(dbConn, node)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-update pgpool node " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//initialize the master database
	err2 = provisionImplInit(dbConn, &dockermaster, profile.MasterProfile, false)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provisionInit master " + err2.Error())
		rest.Error(w, "AutoCluster error: "+err2.Error(), http.StatusBadRequest)
		return
	}

	//make sure every node is ready
	err2 = waitTillAllReady(dockermaster, dockerpgpool, dockerstandby)
	if err2 != nil {
		logit.Error.Println("cluster members not responding in time")
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//configure the cluster members
	logit.Info.Println("AUTO CLUSTER PROFILE configure cluster")
	err2 = configureCluster(dbConn, dbcluster, true)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-configure cluster " + err2.Error())
		rest.Error(w, "AutoCluster error: "+err2.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("AUTO CLUSTER PROFILE done")
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
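// AdminFailover promotes the standby container identified by the ID path
// parameter to master: it runs the failover on that standby, demotes the old
// master to a standalone role, stops postgres on the old master, and then
// reconfigures the remaining cluster members.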
func AdminFailover(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println("AdminFailover: authorize error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("AdminFailover: node ID required error")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}

	//dbNode is the standby node we are going to fail over and
	//make the new master in the cluster
	var dbNode admindb.Container
	dbNode, err = admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println("AdminFailover:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, dbNode.ClusterID)
	if err != nil {
		logit.Error.Println("AdminFailover:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

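	//trigger the failover on the standby container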
	var failoverResp cpmcontainerapi.FailoverResponse
	failoverResp, err = cpmcontainerapi.FailoverClient(dbNode.Name)
	if err != nil {
		logit.Error.Println("AdminFailover: fail-over error " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("AdminFailover: fail-over output " + failoverResp.Output)

	//update the old master to standalone role
	oldMaster := admindb.Container{}
	oldMaster, err = admindb.GetContainerMaster(dbConn, dbNode.ClusterID)
	if err != nil {
		logit.Error.Println("AdminFailover:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	oldMaster.Role = "standalone"
	oldMaster.ClusterID = "-1"
	err = admindb.UpdateContainer(dbConn, oldMaster)
	if err != nil {
		logit.Error.Println("AdminFailover:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//update the failover node to master role
	dbNode.Role = "master"
	err = admindb.UpdateContainer(dbConn, dbNode)
	if err != nil {
		logit.Error.Println("AdminFailover:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//stop pg on the old master
	var stopPGResp cpmcontainerapi.StopPGResponse
	stopPGResp, err = cpmcontainerapi.StopPGClient(oldMaster.Name)
	if err != nil {
		logit.Error.Println("AdminFailover: " + err.Error() + stopPGResp.Output)
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = configureCluster(dbConn, cluster, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)

}
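// EventJoinCluster joins existing containers to a cluster. IDList is an
// underscore-separated list of container IDs to add as standbys (or as the
// single allowed pgpool), ClusterID identifies the target cluster, and
// MasterID names the container to mark as master, or "-1" when only adding
// members to an existing cluster.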
func EventJoinCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println("EventJoinCluster: authorize error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	IDList := r.PathParam("IDList")
	if IDList == "" {
		logit.Error.Println("EventJoinCluster: error IDList required")
		rest.Error(w, "IDList required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: IDList=[" + IDList + "]")
	}

	MasterID := r.PathParam("MasterID")
	if MasterID == "" {
		logit.Error.Println("EventJoinCluster: error MasterID required")
		rest.Error(w, "MasterID required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: MasterID=[" + MasterID + "]")
	}
	ClusterID := r.PathParam("ClusterID")
	if ClusterID == "" {
		logit.Error.Println("EventJoinCluster: error ClusterID required")
		rest.Error(w, "node ClusterID required", http.StatusBadRequest)
		return
	} else {
		logit.Info.Println("EventJoinCluster: ClusterID=[" + ClusterID + "]")
	}

	var idList = strings.Split(IDList, "_")
	pgpoolCount := 0

	origDBNode := admindb.Container{}
	for i := range idList {
		if idList[i] != "" {
			logit.Info.Println("EventJoinCluster: idList[" + strconv.Itoa(i) + "]=" + idList[i])
			origDBNode, err = admindb.GetContainer(dbConn, idList[i])
			if err != nil {
				logit.Error.Println("EventJoinCluster:" + err.Error())
				rest.Error(w, err.Error(), http.StatusBadRequest)
				return
			}

			//update the node to be in the cluster
			origDBNode.ClusterID = ClusterID
			if origDBNode.Image == CPM_NODE_IMAGE {
				origDBNode.Role = STANDBY
			} else {
				origDBNode.Role = "pgpool"
				pgpoolCount++
			}

			if pgpoolCount > 1 {
				logit.Error.Println("EventJoinCluster: more than 1 pgpool is in the cluster")
				rest.Error(w, "only 1 pgpool is allowed in a cluster", http.StatusBadRequest)
				return
			}

			err = admindb.UpdateContainer(dbConn, origDBNode)
			if err != nil {
				logit.Error.Println("EventJoinCluster:" + err.Error())
				rest.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}
	}

	//we use the -1 value to indicate that we are only adding
	//to an existing cluster, the UI doesn't know who the master
	//is at this point
	if MasterID != "-1" {
		//update the master node
		origDBNode, err = admindb.GetContainer(dbConn, MasterID)
		if err != nil {
			logit.Error.Println("EventJoinCluster:" + err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		origDBNode.ClusterID = ClusterID
		origDBNode.Role = "master"
		err = admindb.UpdateContainer(dbConn, origDBNode)
		if err != nil {
			logit.Error.Println("EventJoinCluster:" + err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
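// ScaleUpCluster adds one standby container to the cluster identified by the
// ID path parameter: it counts the existing standbys, provisions and
// initializes a new standby container, records its cluster membership and
// role, and reconfigures the cluster.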
func ScaleUpCluster(w rest.ResponseWriter, r *rest.Request) {

	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetCluster: authorize error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var containers []admindb.Container
	containers, err = admindb.GetAllContainersForCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//determine number of standby nodes currently
	standbyCnt := 0
	for i := range containers {
		if containers[i].Role == STANDBY {
			standbyCnt++
		}
	}

	logit.Info.Printf("standbyCnt ends at %d\n", standbyCnt)

	//provision new container
	params := new(cpmserverapi.DockerRunRequest)
	params.Image = CPM_NODE_IMAGE
	//TODO make the server choice smart
	params.ServerID = containers[0].ServerID
	params.ProjectID = cluster.ProjectID
	params.ContainerName = cluster.Name + "-" + STANDBY + "-" + fmt.Sprintf("%d", standbyCnt)
	params.Standalone = "false"
	var standby = true
	var PROFILE = "LG"

	logit.Info.Printf("here with ProjectID %s\n", cluster.ProjectID)

	err = provisionImpl(dbConn, params, PROFILE, standby)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = provisionImplInit(dbConn, params, PROFILE, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//need to update the new container's ClusterID
	var node admindb.Container
	node, err = admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = cluster.ID
	node.Role = STANDBY
	err = admindb.UpdateContainer(dbConn, node)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "error"+err.Error(), http.StatusBadRequest)
		return
	}

	err = configureCluster(dbConn, cluster, false)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}