//currently we define default DB users (postgres, cpmtest, pgpool)
//for all database containers
func createDBUsers(dbConn *sql.DB, dbnode types.Container) error {
	var err error
	var password types.Setting

	//get the postgres password
	password, err = admindb.GetSetting(dbConn, "POSTGRESPSW")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	//register postgres user
	var user = types.ContainerUser{}
	user.Containername = dbnode.Name
	user.Rolname = "postgres"
	user.Passwd = password.Value
	_, err = admindb.AddContainerUser(dbConn, user)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//cpmtest and pgpool users are created by the node-setup.sql script
	//here, we just register them when we create a new node

	//get the cpmtest password
	password, err = admindb.GetSetting(dbConn, "CPMTESTPSW")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	//register cpmtest user
	user.Containername = dbnode.Name
	user.Rolname = "cpmtest"
	user.Passwd = password.Value
	_, err = admindb.AddContainerUser(dbConn, user)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//get the pgpool password
	password, err = admindb.GetSetting(dbConn, "PGPOOLPSW")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	user.Containername = dbnode.Name
	user.Rolname = "pgpool"
	user.Passwd = password.Value
	//register pgpool user
	_, err = admindb.AddContainerUser(dbConn, user)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	return err
}
//return the CPU and MEM settings for the requested Docker profile size
func getDockerResourceSettings(dbConn *sql.DB, size string) (string, string, error) {
	var CPU, MEM string
	var setting types.Setting
	var err error

	//map the profile size to its setting key prefix, defaulting to the large profile
	var prefix string
	switch size {
	case "SM":
		prefix = "S"
	case "MED":
		prefix = "M"
	default:
		prefix = "L"
	}

	setting, err = admindb.GetSetting(dbConn, prefix+"-DOCKER-PROFILE-CPU")
	if err != nil {
		return CPU, MEM, err
	}
	CPU = setting.Value

	setting, err = admindb.GetSetting(dbConn, prefix+"-DOCKER-PROFILE-MEM")
	if err != nil {
		return CPU, MEM, err
	}
	MEM = setting.Value

	return CPU, MEM, nil
}
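// GetTuningParms validates the profile (SM, MED, or LG) and loads the
// TUNE-<profile>-* settings (MWM, CCT, ECS, WM, WB, CS, SB) into the
// supplied PostgresqlParameters struct.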
func GetTuningParms(dbConn *sql.DB, profile string, info *PostgresqlParameters) error {
	var err error
	logit.Info.Println("GetTuningParms with profile=[" + profile + "]")
	switch profile {
	case "SM", "MED", "LG":
	default:
		return errors.New("profile not valid: " + profile)
	}

	var setting types.Setting
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-MWM")
	if err != nil {
		return err
	}
	info.TUNE_MWM = setting.Value
	logit.Info.Println("GetTuningParms with MWM=" + info.TUNE_MWM)
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-CCT")
	if err != nil {
		return err
	}
	info.TUNE_CCT = setting.Value
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-ECS")
	if err != nil {
		return err
	}
	info.TUNE_ECS = setting.Value
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-WM")
	if err != nil {
		return err
	}
	info.TUNE_WM = setting.Value
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-WB")
	if err != nil {
		return err
	}
	info.TUNE_WB = setting.Value
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-CS")
	if err != nil {
		return err
	}
	info.TUNE_CS = setting.Value
	setting, err = admindb.GetSetting(dbConn, "TUNE-"+profile+"-SB")
	if err != nil {
		return err
	}
	info.TUNE_SB = setting.Value

	return nil
}
// ProvisionRestoreJob creates a docker container to orchestrate a restore job
func ProvisionRestoreJob(dbConn *sql.DB, args *TaskRequest) error {

	logit.Info.Println("task.ProvisionRestoreJob called")
	logit.Info.Println("with scheduleid=" + args.ScheduleID)
	logit.Info.Println("with containername=" + args.ContainerName)
	logit.Info.Println("with profilename=" + args.ProfileName)
	logit.Info.Println("with statusid=" + args.StatusID)

	restorecontainername := args.ContainerName + "-restore-job"

	//remove any existing container with the same name
	inspectReq := &swarmapi.DockerInspectRequest{}
	inspectReq.ContainerName = restorecontainername
	inspectResponse, err := swarmapi.DockerInspect(inspectReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	if inspectResponse.RunningState != "not-found" {
		rreq := &swarmapi.DockerRemoveRequest{}
		rreq.ContainerName = restorecontainername
		_, err = swarmapi.DockerRemove(rreq)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
	}

	//create the new container
	params := &swarmapi.DockerRunRequest{}
	params.Image = "cpm-restore-job"
	params.ContainerName = restorecontainername
	params.Standalone = "false"
	params.Profile = "SM"

	schedule, err := GetSchedule(dbConn, args.ScheduleID)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("schedule serverip is " + schedule.Serverip)

	var taskstatus TaskStatus
	taskstatus, err = GetStatus(dbConn, args.StatusID)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//params.PGDataPath = server.PGDataPath + "/" + restorecontainername + "/" + getFormattedDate()

	//get the docker profile settings; any lookup error aborts the job
	var setting types.Setting
	setting, err = admindb.GetSetting(dbConn, "S-DOCKER-PROFILE-CPU")
	if err != nil {
		return err
	}
	params.CPU = setting.Value
	setting, err = admindb.GetSetting(dbConn, "S-DOCKER-PROFILE-MEM")
	if err != nil {
		return err
	}
	params.MEM = setting.Value
	setting, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		return err
	}
	datapath := setting.Value

	//this gets mounted under /pgdata and allows us to access
	//both the backup files and the restored container's files
	params.PGDataPath = datapath

	params.EnvVars = make(map[string]string)

	params.EnvVars["RestoreServerip"] = schedule.Serverip
	params.EnvVars["RestoreBackupPath"] = taskstatus.Path
	params.EnvVars["RestorePath"] = args.ContainerName
	params.EnvVars["RestoreContainerName"] = args.ContainerName
	params.EnvVars["RestoreScheduleID"] = args.ScheduleID
	params.EnvVars["RestoreProfileName"] = args.ProfileName
	params.EnvVars["RestoreStatusID"] = args.StatusID

	//run the container
	//params.CommandPath = "docker-run-restore.sh"
	var response swarmapi.DockerRunResponse
	//var url = "http://" + server.IPAddress + ":10001"
	response, err = swarmapi.DockerRun(params)
	if err != nil {
		logit.Error.Println(response.ID)
		return err
	}
	logit.Info.Println("docker run output=" + response.ID)

	return nil
}
// GetNode returns the container node definition
func GetNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("error node ID required")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}

	node, err2 := admindb.GetContainer(dbConn, ID)

	if node.ID == "" {
		rest.NotFound(w, r)
		return
	}
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, err2.Error(), http.StatusBadRequest)
		return
	}

	var currentStatus = "UNKNOWN"

	request := &swarmapi.DockerInspectRequest{}
	var inspectInfo swarmapi.DockerInspectResponse
	request.ContainerName = node.Name
	inspectInfo, err = swarmapi.DockerInspect(request)
	if err != nil {
		logit.Error.Println(err.Error())
		currentStatus = CONTAINER_NOT_FOUND
	}

	if currentStatus != CONTAINER_NOT_FOUND {
		var pgport types.Setting
		pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		currentStatus, err = util.FastPing(pgport.Value, node.Name)
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		//logit.Info.Println("pinging db finished")
	}

	clusternode := new(types.ClusterNode)
	clusternode.ID = node.ID
	clusternode.ClusterID = node.ClusterID
	clusternode.Name = node.Name
	clusternode.Role = node.Role
	clusternode.Image = node.Image
	clusternode.CreateDate = node.CreateDate
	clusternode.Status = currentStatus
	clusternode.ProjectID = node.ProjectID
	clusternode.ProjectName = node.ProjectName
	clusternode.ClusterName = node.ClusterName
	clusternode.ServerID = inspectInfo.ServerID
	clusternode.IPAddress = inspectInfo.IPAddress

	w.WriteJson(clusternode)
}
/*
 TODO refactor this to share code with DeleteCluster!!!!!
*/
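// DeleteNode deletes a container node: it removes the admin database entry,
// removes the docker container (tolerating the case where it is already gone),
// and asks every server to delete the node's /pgdata volume.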
func DeleteNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-container")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("DeleteNode: error node ID required")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}

	//go get the node we intend to delete
	var dbNode types.Container
	dbNode, err = admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = admindb.DeleteContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("remove 1")
	//it is possible that someone removed the container outside
	//of CPM, so we tolerate a failure to remove it here

	request := &swarmapi.DockerRemoveRequest{}
	request.ContainerName = dbNode.Name
	_, err = swarmapi.DockerRemove(request)
	if err != nil {
		logit.Error.Println(err.Error())
	}

	logit.Info.Println("remove 2")
	//send the server a deletevolume command
	request2 := &cpmserverapi.DiskDeleteRequest{}
	request2.Path = pgdatapath.Value + "/" + dbNode.Name
	for _, each := range servers {
		_, err = cpmserverapi.DiskDeleteClient(each.Name, request2)
		if err != nil {
			logit.Error.Println(err.Error())
		}
	}
	logit.Info.Println("remove 3")

	//we should not have to delete the DNS entries; the dnsbridge
	//should remove them when we remove the containers via the docker api

	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// AutoCluster creates a new cluster
func AutoCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()
	logit.Info.Println("AUTO CLUSTER PROFILE starts")
	params := AutoClusterInfo{}
	err = r.DecodeJsonPayload(&params)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	err = secimpl.Authorize(dbConn, params.Token, "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	if params.Name == "" {
		logit.Error.Println("AutoCluster: error in Name")
		rest.Error(w, "cluster name required", http.StatusBadRequest)
		return
	}
	if params.ClusterType == "" {
		logit.Error.Println("AutoCluster: error in ClusterType")
		rest.Error(w, "ClusterType required", http.StatusBadRequest)
		return
	}
	if params.ProjectID == "" {
		logit.Error.Println("AutoCluster: error in ProjectID")
		rest.Error(w, "ProjectID required", http.StatusBadRequest)
		return
	}
	if params.ClusterProfile == "" {
		logit.Error.Println("AutoCluster: error in ClusterProfile")
		rest.Error(w, "ClusterProfile required", http.StatusBadRequest)
		return
	}

	logit.Info.Println("AutoCluster: Name=" + params.Name + " ClusterType=" + params.ClusterType + " Profile=" + params.ClusterProfile + " ProjectID=" + params.ProjectID)

	//create cluster definition
	dbcluster := types.Cluster{}
	dbcluster.ID = ""
	dbcluster.ProjectID = params.ProjectID
	dbcluster.Name = util.CleanName(params.Name)
	dbcluster.ClusterType = params.ClusterType
	dbcluster.Status = "uninitialized"
	dbcluster.Containers = make(map[string]string)

	var ival int
	ival, err = admindb.InsertCluster(dbConn, dbcluster)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, "Insert Cluster error:"+err.Error(), http.StatusBadRequest)
		return
	}
	clusterID := strconv.Itoa(ival)
	dbcluster.ID = clusterID
	//logit.Info.Println(clusterID)

	//lookup profile
	profile, err2 := getClusterProfileInfo(dbConn, params.ClusterProfile)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//var masterServer types.Server
	//var chosenServers []types.Server
	if profile.Algo == "round-robin" {
		//masterServer, chosenServers, err2 = roundRobin(dbConn, profile)
	} else {
		logit.Error.Println("AutoCluster: error-unsupported algorithm request")
		rest.Error(w, "AutoCluster error: unsupported algorithm", http.StatusBadRequest)
		return
	}

	//create master container
	dockermaster := swarmapi.DockerRunRequest{}
	dockermaster.Image = "cpm-node"
	dockermaster.ContainerName = params.Name + "-master"
	dockermaster.ProjectID = params.ProjectID
	dockermaster.Standalone = "false"
	dockermaster.Profile = profile.MasterProfile
	if err != nil {
		logit.Error.Println("AutoCluster: error-create master node " + err.Error())
		rest.Error(w, "AutoCluster error"+err.Error(), http.StatusBadRequest)
		return
	}

	//	provision the master
	logit.Info.Println("dockermaster profile is " + dockermaster.Profile)

	_, err2 = provisionImpl(dbConn, &dockermaster, false)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provision master " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("AUTO CLUSTER PROFILE master container created")
	var node types.Container
	//update node with cluster ID
	node, err2 = admindb.GetContainerByName(dbConn, dockermaster.ContainerName)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-get node by name " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = clusterID
	node.Role = "master"
	err2 = admindb.UpdateContainer(dbConn, node)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-update standby node " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	var sleepSetting types.Setting
	sleepSetting, err2 = admindb.GetSetting(dbConn, "SLEEP-PROV")
	if err2 != nil {
		logit.Error.Println("SLEEP-PROV setting error " + err2.Error())
		rest.Error(w, err2.Error(), http.StatusInternalServerError)
		return
	}

	var sleepTime time.Duration
	sleepTime, err2 = time.ParseDuration(sleepSetting.Value)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, err2.Error(), http.StatusInternalServerError)
		return
	}

	//create standby containers
	var count int
	count, err2 = strconv.Atoi(profile.Count)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		rest.Error(w, err2.Error(), http.StatusBadRequest)
		return
	}

	dockerstandby := make([]swarmapi.DockerRunRequest, count)
	for i := 0; i < count; i++ {
		logit.Info.Println("working on standby ....")
		//	loop - provision standby
		dockerstandby[i].ProjectID = params.ProjectID
		dockerstandby[i].Image = "cpm-node"
		dockerstandby[i].ContainerName = params.Name + "-" + STANDBY + "-" + strconv.Itoa(i)
		dockerstandby[i].Standalone = "false"
		dockerstandby[i].Profile = profile.StandbyProfile

		_, err2 = provisionImpl(dbConn, &dockerstandby[i], true)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-provision master " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}

		//update node with cluster ID
		node, err2 = admindb.GetContainerByName(dbConn, dockerstandby[i].ContainerName)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-get node by name " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}

		node.ClusterID = clusterID
		node.Role = STANDBY
		err2 = admindb.UpdateContainer(dbConn, node)
		if err2 != nil {
			logit.Error.Println("AutoCluster: error-update standby node " + err2.Error())
			rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
			return
		}
	}
	logit.Info.Println("AUTO CLUSTER PROFILE standbys created")
	//create pgpool container
	//	provision
	dockerpgpool := swarmapi.DockerRunRequest{}
	dockerpgpool.ContainerName = params.Name + "-pgpool"
	dockerpgpool.Image = "cpm-pgpool"
	dockerpgpool.ProjectID = params.ProjectID
	dockerpgpool.Standalone = "false"
	dockerpgpool.Profile = profile.StandbyProfile

	_, err2 = provisionImpl(dbConn, &dockerpgpool, true)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provision pgpool " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("AUTO CLUSTER PROFILE pgpool created")
	//update node with cluster ID
	node, err2 = admindb.GetContainerByName(dbConn, dockerpgpool.ContainerName)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-get pgpool node by name " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	node.ClusterID = clusterID
	node.Role = "pgpool"
	err2 = admindb.UpdateContainer(dbConn, node)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-update pgpool node " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//init the master DB
	//	provision the master
	dockermaster.Profile = profile.MasterProfile
	err2 = provisionImplInit(dbConn, &dockermaster, false)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-provisionInit master " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//make sure every node is ready
	err2 = waitTillAllReady(dockermaster, dockerpgpool, dockerstandby, sleepTime)
	if err2 != nil {
		logit.Error.Println("cluster members not responding in time")
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	//configure cluster
	//	ConfigureCluster
	logit.Info.Println("AUTO CLUSTER PROFILE configure cluster ")
	err2 = configureCluster(profile.MasterProfile, dbConn, dbcluster, true)
	if err2 != nil {
		logit.Error.Println("AutoCluster: error-configure cluster " + err2.Error())
		rest.Error(w, "AutoCluster error"+err2.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("AUTO CLUSTER PROFILE done")
	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// DeleteCluster deletes an existing cluster definition
func DeleteCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), 400)
		return

	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("cluster ID required")
		rest.Error(w, "cluster ID required", http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//delete docker containers
	containers, err := admindb.GetAllContainersForCluster(dbConn, cluster.ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var infoResponse swarmapi.DockerInfoResponse

	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//server := types.Server{}
	for i := range containers {

		//logit.Info.Println("DeleteCluster: got server IP " + server.IPAddress)

		//it is possible that someone removed the container outside
		//of CPM, so we tolerate a failure to remove it here
		//err = removeContainer(server.IPAddress, containers[i].Name)
		dremreq := &swarmapi.DockerRemoveRequest{}
		dremreq.ContainerName = containers[i].Name
		//logit.Info.Println("will attempt to delete container " + dremreq.ContainerName)
		_, err = swarmapi.DockerRemove(dremreq)
		if err != nil {
			logit.Error.Println("error when trying to remove container" + err.Error())
		}

		//send all the servers a deletevolume command
		ddreq := &cpmserverapi.DiskDeleteRequest{}
		ddreq.Path = pgdatapath.Value + "/" + containers[i].Name
		for _, each := range servers {
			_, err = cpmserverapi.DiskDeleteClient(each.Name, ddreq)
			if err != nil {
				logit.Error.Println("error when trying to remove disk volume" + err.Error())
			}
		}

	}

	//delete the cluster entry, then the container entries
	err = admindb.DeleteCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
	}

	for i := range containers {

		err = admindb.DeleteContainer(dbConn, containers[i].ID)
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteHeader(http.StatusOK)
	w.WriteJson(&status)
}
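// configureCluster configures a newly provisioned cluster: it writes
// postgresql.conf and pg_hba.conf to the master, restarts the master and waits
// for it to respond, takes a base backup and writes recovery.conf,
// postgresql.conf, and pg_hba.conf for each standby, configures and starts
// pgpool, and finally marks the cluster as initialized.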
func configureCluster(profile string, dbConn *sql.DB, cluster types.Cluster, autocluster bool) error {
	logit.Info.Println("configureCluster:GetCluster")

	//get master node for this cluster
	master, err := admindb.GetContainerMaster(dbConn, cluster.ID)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	var pgport types.Setting
	pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	var sleepSetting types.Setting
	sleepSetting, err = admindb.GetSetting(dbConn, "SLEEP-PROV")
	if err != nil {
		logit.Error.Println("configureCluster:" + err.Error())
		return err
	}

	//logit.Info.Println("configureCluster:GetContainerMaster")

	//configure master postgresql.conf file
	var data string
	info := new(template.PostgresqlParameters)
	info.PG_PORT = pgport.Value
	err = template.GetTuningParms(dbConn, profile, info)
	if err != nil {
		logit.Error.Println("configureCluster:" + err.Error())
		return err
	}

	if cluster.ClusterType == "synchronous" {
		info.CLUSTER_TYPE = "*"
		data, err = template.Postgresql("master", info)
	} else {
		//TODO verify these next 2 lines look erroneous
		info.CLUSTER_TYPE = "*"
		data, err = template.Postgresql("master", info)

		//info.CLUSTER_TYPE = ""
		//data, err = template.Postgresql("master", info)
	}
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:master postgresql.conf generated")

	//write master postgresql.conf file remotely
	_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/postgresql.conf", data, master.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:master postgresql.conf copied to remote")

	//get domain name
	var domainname types.Setting
	domainname, err = admindb.GetSetting(dbConn, "DOMAIN-NAME")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//configure master pg_hba.conf file
	rules := make([]template.Rule, 0)
	data, err = template.Hba(dbConn, "master", master.Name, pgport.Value, cluster.ID, domainname.Value, rules)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:master pg_hba.conf generated")

	//write master pg_hba.conf file remotely
	_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/pg_hba.conf", data, master.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:master pg_hba.conf copied remotely")

	//restart postgres after the config file changes
	var stopResp cpmcontainerapi.StopPGResponse
	stopResp, err = cpmcontainerapi.StopPGClient(master.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster: master stoppg output was" + stopResp.Output)

	var startResp cpmcontainerapi.StartPGResponse
	startResp, err = cpmcontainerapi.StartPGClient(master.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster:master startpg output was" + startResp.Output)

	//sleep loop until the master's PG can respond
	var sleepTime time.Duration
	sleepTime, err = time.ParseDuration(sleepSetting.Value)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	var found = false
	var currentStatus string
	var masterhost = master.Name
	for i := 0; i < 20; i++ {
		//currentStatus, err = GetPGStatus2(dbConn, master.Name, masterhost)
		currentStatus, err = util.FastPing(pgport.Value, master.Name)
		if currentStatus == "RUNNING" {
			logit.Info.Println("master is running...continuing")
			found = true
			break
		} else {
			logit.Info.Println("sleeping waiting on master..")
			time.Sleep(sleepTime)
		}
	}
	if !found {
		logit.Info.Println("configureCluster: timed out waiting on master pg to start")
		return errors.New("timeout waiting for master pg to respond")
	}

	standbynodes, err2 := admindb.GetAllStandbyContainers(dbConn, cluster.ID)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return err2
	}
	//configure all standby nodes
	var stopPGResp cpmcontainerapi.StopPGResponse
	for i := range standbynodes {
		if standbynodes[i].Role == STANDBY {

			//stop standby
			if !autocluster {
				stopPGResp, err = cpmcontainerapi.StopPGClient(standbynodes[i].Name)
				if err != nil {
					logit.Error.Println(err.Error())
					logit.Error.Println("configureCluster:stop output was" + stopPGResp.Output)
					return err
				}
				//logit.Info.Println("configureCluster:stop output was" + stopPGResp.Output)
			}

			//
			var username = "******"
			var password = "******"

			//create base backup from master
			var backupresp cpmcontainerapi.BasebackupResponse
			backupresp, err = cpmcontainerapi.BasebackupClient(masterhost+"."+domainname.Value, standbynodes[i].Name, username, password)
			if err != nil {
				logit.Error.Println(err.Error())
				logit.Error.Println("configureCluster:basebackup output was" + backupresp.Output)
				return err
			}
			//logit.Info.Println("configureCluster:basebackup output was" + backupresp.Output)

			data, err = template.Recovery(masterhost, pgport.Value, "postgres")
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("configureCluster:standby recovery.conf generated")

			//write standby recovery.conf file remotely
			_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/recovery.conf", data, standbynodes[i].Name)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("configureCluster:standby recovery.conf copied remotely")

			info := new(template.PostgresqlParameters)
			info.PG_PORT = pgport.Value
			info.CLUSTER_TYPE = ""
			err = template.GetTuningParms(dbConn, profile, info)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}

			data, err = template.Postgresql(STANDBY, info)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}

			//write standby postgresql.conf file remotely
			_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/postgresql.conf", data, standbynodes[i].Name)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("configureCluster:standby postgresql.conf copied remotely")

			//configure standby pg_hba.conf file
			data, err = template.Hba(dbConn, STANDBY, standbynodes[i].Name, pgport.Value, cluster.ID, domainname.Value, rules)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}

			logit.Info.Println("configureCluster:standby pg_hba.conf generated")

			//write standby pg_hba.conf file remotely
			_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/pg_hba.conf", data, standbynodes[i].Name)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("configureCluster:standby pg_hba.conf copied remotely")

			//start standby

			var stResp cpmcontainerapi.StartPGOnStandbyResponse
			stResp, err = cpmcontainerapi.StartPGOnStandbyClient(standbynodes[i].Name)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("configureCluster:standby startpg output was" + stResp.Output)
		}
	}

	logit.Info.Println("configureCluster: sleeping 5 seconds before configuring pgpool...")
	clustersleepTime, _ := time.ParseDuration("5s")
	time.Sleep(clustersleepTime)

	pgpoolNode, err4 := admindb.GetContainerPgpool(dbConn, cluster.ID)
	//logit.Info.Println("configureCluster: lookup pgpool node")
	if err4 != nil {
		logit.Error.Println(err4.Error())
		return err4
	}
	//logit.Info.Println("configureCluster:" + pgpoolNode.Name)

	//the pgpool configuration includes all standby nodes AND the master node
	poolnames := make([]string, len(standbynodes)+1)

	for i := range standbynodes {
		poolnames[i] = standbynodes[i].Name + "." + domainname.Value
	}
	poolnames[len(standbynodes)] = master.Name + "." + domainname.Value

	//generate pgpool.conf HOST_LIST
	data, err = template.Poolconf(poolnames)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:pgpool pgpool.conf generated")

	//write pgpool.conf to remote pool node
	_, err = cpmcontainerapi.RemoteWritefileClient(util.GetBase()+"/bin/"+"pgpool.conf", data, pgpoolNode.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster:pgpool pgpool.conf copied remotely")

	//generate pool_passwd
	data, err = template.Poolpasswd()
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:pgpool pool_passwd generated")

	//write pool_passwd to remote pool node
	_, err = cpmcontainerapi.RemoteWritefileClient(util.GetBase()+"/bin/"+"pool_passwd", data, pgpoolNode.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster:pgpool pool_passwd copied remotely")

	//generate pool_hba.conf
	cars := make([]template.Rule, 0)
	data, err = template.Poolhba(cars)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	logit.Info.Println("configureCluster:pgpool pool_hba generated")

	//write pool_hba.conf to remote pool node
	_, err = cpmcontainerapi.RemoteWritefileClient(util.GetBase()+"/bin/"+"pool_hba.conf", data, pgpoolNode.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster:pgpool pool_hba copied remotely")

	//start pgpool
	var startPoolResp cpmcontainerapi.StartPgpoolResponse
	startPoolResp, err = cpmcontainerapi.StartPgpoolClient(pgpoolNode.Name)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("configureCluster: pgpool startpgpool output was" + startPoolResp.Output)

	//finally, update the cluster to show that it is
	//initialized!
	cluster.Status = "initialized"
	err = admindb.UpdateCluster(dbConn, cluster)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	return nil

}
// ProvisionBackrestRestoreJob creates a docker container to orchestrate a pg_backrest restore job
func ProvisionBackrestRestoreJob(dbConn *sql.DB, args *TaskRequest) error {

	logit.Info.Println("task.ProvisionBackrestRestoreJob called")
	logit.Info.Println("with scheduleid=" + args.ScheduleID)
	logit.Info.Println("with containername=" + args.ContainerName)
	logit.Info.Println("with profilename=" + args.ProfileName)

	params := &swarmapi.DockerRunRequest{}
	params.Image = "cpm-backrest-restore-job"
	restorecontainername := args.ContainerName + "-backrest-restore-job"
	params.ContainerName = restorecontainername
	params.Standalone = "false"
	params.Profile = "SM"

	schedule, err := GetSchedule(dbConn, args.ScheduleID)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//params.PGDataPath = server.PGDataPath + "/" + restorecontainername + "/" + getFormattedDate()

	//get the docker profile settings; any lookup error aborts the job
	var setting types.Setting
	setting, err = admindb.GetSetting(dbConn, "S-DOCKER-PROFILE-CPU")
	if err != nil {
		return err
	}
	params.CPU = setting.Value
	setting, err = admindb.GetSetting(dbConn, "S-DOCKER-PROFILE-MEM")
	if err != nil {
		return err
	}
	params.MEM = setting.Value

	params.EnvVars = make(map[string]string)

	params.EnvVars["RestoreRemotePath"] = schedule.RestoreRemotePath
	params.EnvVars["RestoreRemoteHost"] = schedule.RestoreRemoteHost
	params.EnvVars["RestoreRemoteUser"] = schedule.RestoreRemoteUser
	params.EnvVars["RestoreDbUser"] = schedule.RestoreDbUser
	params.EnvVars["RestoreDbPass"] = schedule.RestoreDbPass
	params.EnvVars["RestoreSet"] = schedule.RestoreSet
	params.EnvVars["RestoreContainerName"] = args.ContainerName
	params.EnvVars["RestoreScheduleID"] = args.ScheduleID
	params.EnvVars["RestoreProfileName"] = args.ProfileName

	setting, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	params.EnvVars["RestorePGPort"] = setting.Value

	//run the container
	//params.CommandPath = "docker-run-restore.sh"
	var response swarmapi.DockerRunResponse
	//var url = "http://" + server.IPAddress + ":10001"
	response, err = swarmapi.DockerRun(params)
	if err != nil {
		logit.Error.Println(response.ID)
		return err
	}
	logit.Info.Println("docker-run-restore.sh output=" + response.ID)

	return nil
}
// Hba creates a pg_hba.conf file from a template and the passed values and returns the new file contents
func Hba(dbConn *sql.DB, mode string, hostname string, port string, clusterid string, domainname string, cars []Rule) (string, error) {

	var hbaInfo HBAParameters

	//hbaInfo.PG_HOST_IP = hostname + "." + domainname
	//hbaInfo.BACKUP_HOST = hostname + "-backup." + domainname
	//hbaInfo.MONITOR_HOST = "cpm-mon." + domainname
	//hbaInfo.ADMIN_HOST = "cpm-admin." + domainname
	hbaInfo.PG_HOST_IP = hostname
	hbaInfo.BACKUP_HOST = hostname + "-backup"
	hbaInfo.MONITOR_HOST = "cpm-mon"
	hbaInfo.ADMIN_HOST = "cpm-admin"
	hbaInfo.RULES_LIST = cars

	bridges, err := admindb.GetSetting(dbConn, "DOCKER-BRIDGES")
	if err != nil {
		logit.Error.Println("Hba:" + err.Error())
		return "", err
	}

	var infoResponse swarmapi.DockerInfoResponse

	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println("Hba:" + err.Error())
		return "", err
	}

	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		parts := strings.Split(infoResponse.Output[i], ":")
		servers[i].IPAddress = parts[0]
	}

	var allservers = ""
	//TODO make this configurable as a setting value
	var allbridges = bridges.Value
	for i := range servers {
		logit.Info.Println("Hba:" + servers[i].IPAddress)
		if allservers == "" {
			allservers = servers[i].IPAddress
			//allbridges = servers[i].DockerBridgeIP
		} else {
			allservers = allservers + ":" + servers[i].IPAddress
			//allbridges = allbridges + ":" + servers[i].DockerBridgeIP
		}
	}
	logit.Info.Println("Hba:processing serverlist=" + allservers)
	hbaInfo.SERVER_IP_LIST = strings.Split(allservers, ":")
	hbaInfo.BRIDGE_IP_LIST = strings.Split(allbridges, ":")

	var path string
	switch mode {
	case "unassigned":
		path = util.GetBase() + "/conf/standalone/pg_hba.conf.template"
	case "standalone", "master", "standby":
		path = util.GetBase() + "/conf/" + mode + "/pg_hba.conf.template"
	default:
		return "", errors.New("invalid mode in processHba of " + mode)
	}

	if mode == "standby" || mode == "master" {
		_, pgpoolNode, standbyList, err := getMasterValues(dbConn, clusterid, domainname)
		if err != nil {
			return "", err
		}

		hbaInfo.PGPOOL_HOST = pgpoolNode.Name + "." + domainname
		hbaInfo.STANDBY_LIST = standbyList
	}

	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}

	tmpl, err := template.New("hba").Parse(string(contents))
	if err != nil {
		return "", err
	}
	buff := bytes.NewBufferString("")

	logInfo(hbaInfo)

	err = tmpl.Execute(buff, hbaInfo)
	if err != nil {
		logit.Error.Println("Hba:" + err.Error())
		return "", err
	}
	logit.Info.Println("Hba:" + buff.String())

	return buff.String(), nil
}
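// provisionImplInit initializes a newly provisioned container. It waits for the
// node's DNS entry to become reachable; for non-standby nodes it then runs
// initdb and writes postgresql.conf and pg_hba.conf (both skipped when a
// restore job is set), starts PostgreSQL, and seeds the database with initial
// objects.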
func provisionImplInit(dbConn *sql.DB, params *swarmapi.DockerRunRequest, standby bool) error {
	//go get the domain name from the settings
	var domainname types.Setting
	var pgport types.Setting
	var sleepSetting types.Setting
	var err error

	domainname, err = admindb.GetSetting(dbConn, "DOMAIN-NAME")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	sleepSetting, err = admindb.GetSetting(dbConn, "SLEEP-PROV")
	if err != nil {
		logit.Error.Println("Provision:SLEEP-PROV setting error " + err.Error())
		return err
	}
	var sleepTime time.Duration
	sleepTime, err = time.ParseDuration(sleepSetting.Value)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	fqdn := params.ContainerName + "." + domainname.Value

	//we are depending on a DNS entry being created shortly after
	//creating the node in Docker, so wait here until we can reach
	//the new node's agent
	logit.Info.Println("PROFILE waiting till DNS ready")
	err = waitTillReady(fqdn, sleepTime)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	logit.Info.Println("checkpt 1")

	if standby {
		logit.Info.Println("standby node being created, will not initdb")
	} else {
		if params.RestoreJob != "" {
			logit.Info.Println("RestoreJob found, not doing initdb...")
		} else {
			//initdb on the new node
			logit.Info.Println("PROFILE running initdb on the node")
			var resp cpmcontainerapi.InitdbResponse

			logit.Info.Println("checkpt 2")
			resp, err = cpmcontainerapi.InitdbClient(fqdn)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("checkpt 3")
			logit.Info.Println("initdb output was" + resp.Output)
			//create postgresql.conf
			var data string
			var mode = "standalone"

			info := new(template.PostgresqlParameters)
			info.PG_PORT = pgport.Value
			info.CLUSTER_TYPE = ""
			err = template.GetTuningParms(dbConn, params.Profile, info)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("after GetTuning call with MWM = " + info.TUNE_MWM)
			data, err = template.Postgresql(mode, info)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("provision chkpt 4")

			//place postgresql.conf on new node
			logit.Info.Println("fqdn is " + fqdn)
			logit.Info.Println("postgresql.conf file is " + data)
			_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/postgresql.conf", data, fqdn)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("provision chkpt 5")
			//create pg_hba.conf
			rules := make([]template.Rule, 0)
			data, err = template.Hba(dbConn, mode, params.ContainerName, pgport.Value, "", domainname.Value, rules)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("provision chkpt 6")
			//place pg_hba.conf on new node
			_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/pg_hba.conf", data, fqdn)
			if err != nil {
				logit.Error.Println(err.Error())
				return err
			}
			logit.Info.Println("PROFILE templates all built and copied to node")
		}

		//start pg on new node
		var startResp cpmcontainerapi.StartPGResponse
		startResp, err = cpmcontainerapi.StartPGClient(fqdn)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
		logit.Info.Println("startpg output was" + startResp.Output)

		//seed database with initial objects
		var seedResp cpmcontainerapi.SeedResponse
		seedResp, err = cpmcontainerapi.SeedClient(fqdn)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
		logit.Info.Println("seed output was" + seedResp.Output)
	}
	logit.Info.Println("PROFILE node provisioning completed")

	return nil
}
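// provisionImpl creates the container itself: it rejects duplicate container
// names, provisions the /pgdata volume on every server (skipped for pgpool
// images), removes any stale container with the same name, runs the image via
// the swarm API, registers the container in the admin database, and, for all
// but proxy containers, registers the default DB users. It returns the new
// container ID.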
func provisionImpl(dbConn *sql.DB, params *swarmapi.DockerRunRequest, standby bool) (string, error) {
	logit.Info.Println("PROFILE: provisionImpl starts 1")

	var errorStr string
	//make sure the container name is not already taken
	_, err := admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		if err != sql.ErrNoRows {
			return "", err
		}
	} else {
		errorStr = "container name " + params.ContainerName + " already used can't provision"
		logit.Error.Println(errorStr)
		return "", errors.New(errorStr)
	}

	//get the pg data path
	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	//for database nodes we need to allocate a disk volume on all CPM
	//servers for the /pgdata container volume to work with; this causes
	//a volume directory named the same as the container to be created
	params.PGDataPath = pgdatapath.Value + "/" + params.ContainerName

	logit.Info.Println("PROFILE provisionImpl 2 about to provision volume " + params.PGDataPath)
	if params.Image != "cpm-pgpool" {
		preq := &cpmserverapi.DiskProvisionRequest{}
		preq.Path = params.PGDataPath
		var response cpmserverapi.DiskProvisionResponse
		for _, each := range servers {
			logit.Info.Println("Provision: provisionvolume on server " + each.Name)
			response, err = cpmserverapi.DiskProvisionClient(each.Name, preq)
			if err != nil {
				logit.Info.Println("Provision: provisionvolume error" + err.Error())
				logit.Error.Println(err.Error())
				return "", err
			}
			logit.Info.Println("Provision: provisionvolume call response=" + response.Status)
		}
	}
	logit.Info.Println("PROFILE provisionImpl 3 provision volume completed")

	//run docker run to create the container

	params.CPU, params.MEM, err = getDockerResourceSettings(dbConn, params.Profile)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}

	//inspect and remove any existing container
	logit.Info.Println("PROFILE provisionImpl inspect 4")
	inspectReq := &swarmapi.DockerInspectRequest{}
	inspectReq.ContainerName = params.ContainerName
	var inspectResponse swarmapi.DockerInspectResponse
	inspectResponse, err = swarmapi.DockerInspect(inspectReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	if inspectResponse.RunningState != "not-found" {
		logit.Info.Println("PROFILE provisionImpl remove existing container 4a")
		rreq := &swarmapi.DockerRemoveRequest{}
		rreq.ContainerName = params.ContainerName
		_, err = swarmapi.DockerRemove(rreq)
		if err != nil {
			logit.Error.Println(err.Error())
			return "", err
		}
	}

	//pass any restore env vars to the new container
	if params.RestoreJob != "" {
		if params.EnvVars == nil {
			//logit.Info.Println("making envvars map")
			params.EnvVars = make(map[string]string)
		}
		params.EnvVars["RestoreJob"] = params.RestoreJob
		params.EnvVars["RestoreRemotePath"] = params.RestoreRemotePath
		params.EnvVars["RestoreRemoteHost"] = params.RestoreRemoteHost
		params.EnvVars["RestoreRemoteUser"] = params.RestoreRemoteUser
		params.EnvVars["RestoreDbUser"] = params.RestoreDbUser
		params.EnvVars["RestoreDbPass"] = params.RestoreDbPass
		params.EnvVars["RestoreSet"] = params.RestoreSet
	}

	//
	runReq := swarmapi.DockerRunRequest{}
	runReq.PGDataPath = params.PGDataPath
	runReq.Profile = params.Profile
	runReq.Image = params.Image
	runReq.ContainerName = params.ContainerName
	runReq.EnvVars = params.EnvVars
	//logit.Info.Println("CPU=" + params.CPU)
	//logit.Info.Println("MEM=" + params.MEM)
	runReq.CPU = "0"
	runReq.MEM = "0"
	var runResp swarmapi.DockerRunResponse
	runResp, err = swarmapi.DockerRun(&runReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	logit.Info.Println("PROFILE provisionImpl created container 5 " + runResp.ID)

	dbnode := types.Container{}
	dbnode.ID = ""
	dbnode.Name = params.ContainerName
	dbnode.Image = params.Image
	dbnode.ClusterID = "-1"
	dbnode.ProjectID = params.ProjectID

	if params.Standalone == "true" {
		dbnode.Role = "standalone"
	} else {
		dbnode.Role = "unassigned"
	}

	var strid int
	strid, err = admindb.InsertContainer(dbConn, dbnode)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	newid := strconv.Itoa(strid)
	dbnode.ID = newid

	if params.Image != "cpm-node-proxy" {
		//register default db users on the new node
		err = createDBUsers(dbConn, dbnode)
	}

	return newid, err

}
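// templateChange regenerates the access-rule based HBA configuration for a
// container: it builds the rule list from the selected ContainerAccessRules
// and writes pool_hba.conf (for pgpool nodes) or pg_hba.conf (for all other
// roles) to the remote container.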
func templateChange(dbConn *sql.DB, containerName string, cars []ContainerAccessRule, containerRole string) error {
	var err error

	logit.Info.Println("templateChange called")
	//create pg_hba.conf
	var mode = containerRole

	domainname, err := admindb.GetSetting(dbConn, "DOMAIN-NAME")
	if err != nil {
		logit.Error.Println("templateChange:DOMAIN-NAME error " + err.Error())
		return err
	}

	rules := make([]template.Rule, 0)
	var ar Rule
	for i := range cars {
		logit.Info.Println("templateChange cars found")
		if cars[i].Selected == "true" {
			logit.Info.Println("templateChange cars found to be true")
			rule := template.Rule{}
			ar, err = GetAccessRule(dbConn, cars[i].AccessRuleID)
			if err != nil {
				logit.Error.Println("templateChange:get access rule error " + err.Error())
				return err
			}
			rule.Type = ar.Type
			rule.Database = ar.Database
			rule.User = ar.User
			rule.Address = ar.Address
			rule.Method = ar.Method
			rules = append(rules, rule)
		}
	}

	logit.Info.Printf("templateChange rules going to template %d\n", len(rules))
	var data string

	fqdn := containerName + "." + domainname.Value

	//place pg_hba.conf on node
	if containerRole == "pgpool" {
		//generate pool_hba.conf
		data, err = template.Poolhba(rules)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}

		logit.Info.Println("configureCluster:pgpool pool_hba generated")

		//write pgpool.conf to remote pool node
		var dest = util.GetBase() + "/bin/pool_hba.conf"
		_, err = cpmcontainerapi.RemoteWritefileClient(dest, data, fqdn)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
		logit.Info.Println("configureCluster:pgpool pool_hba copied remotely")

	} else {
		data, err = template.Hba(dbConn, mode, containerName, "", "", domainname.Value, rules)

		if err != nil {
			logit.Error.Println("templateChange:" + err.Error())
			return err
		}

		_, err = cpmcontainerapi.RemoteWritefileClient("/pgdata/pg_hba.conf", data, fqdn)
		if err != nil {
			logit.Error.Println("templateChange:" + err.Error())
			return err
		}
	}

	return err
}
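// performConfigUpdate applies the current access rules to a container: it
// makes sure the service is running, regenerates the HBA configuration via
// templateChange, and then restarts pgpool or PostgreSQL so the new rules
// take effect.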
func performConfigUpdate(dbConn *sql.DB, ContainerID string) error {
	logit.Info.Println("performConfigUpdate....")

	cars, err := GetAllContainerAccessRule(dbConn, ContainerID)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	container, err := admindb.GetContainer(dbConn, ContainerID)
	if err != nil {
		logit.Error.Println("GetNode: " + err.Error())
		return err
	}

	var currentStatus string
	//currentStatus, err = GetPGStatus2(dbConn, container.Name, container.Name)
	var pgport types.Setting
	pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	currentStatus, err = util.FastPing(pgport.Value, container.Name)
	if err != nil {
		logit.Error.Println("GetNode:" + err.Error())
		return err
	}

	if currentStatus != "RUNNING" {
		logit.Info.Println("performConfigUpdate....starting postgres")
		if container.Role == "pgpool" {
			var spgresp cpmcontainerapi.StartPgpoolResponse
			spgresp, err = cpmcontainerapi.StartPgpoolClient(container.Name)
			logit.Info.Println("AdminStartpg:" + spgresp.Output)
		} else {
			var srep cpmcontainerapi.StartPGResponse
			srep, err = cpmcontainerapi.StartPGClient(container.Name)
			logit.Info.Println("AdminStartpg:" + srep.Output)
		}

		if err != nil {
			logit.Error.Println("AdminStartpg:" + err.Error())
			return err
		}

	}

	//make template changes here
	logit.Info.Println("performConfigUpdate....making template changes")
	err = templateChange(dbConn, container.Name, cars, container.Role)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}

	//restart postgres

	if container.Role == "pgpool" {
		logit.Info.Println("performConfigUpdate....stopping pgpool")
		var stoppoolResp cpmcontainerapi.StopPgpoolResponse
		stoppoolResp, err = cpmcontainerapi.StopPgpoolClient(container.Name)
		logit.Info.Println("AdminStoppg:" + stoppoolResp.Output)
	} else {
		logit.Info.Println("performConfigUpdate....stopping postgres")
		var stoppgResp cpmcontainerapi.StopPGResponse
		stoppgResp, err = cpmcontainerapi.StopPGClient(container.Name)
		logit.Info.Println("AdminStoppg:" + stoppgResp.Output)
	}
	if err != nil {
		logit.Error.Println("AdminStoppg:" + err.Error())
		return err
	}

	if container.Role == "pgpool" {
		logit.Info.Println("performConfigUpdate....starting pgpool")
		var spgresp cpmcontainerapi.StartPgpoolResponse
		spgresp, err = cpmcontainerapi.StartPgpoolClient(container.Name)
		logit.Info.Println("AdminStartpg:" + spgresp.Output)
	} else {
		logit.Info.Println("performConfigUpdate....starting postgres")
		var srep cpmcontainerapi.StartPGResponse
		srep, err = cpmcontainerapi.StartPGClient(container.Name)
		logit.Info.Println("AdminStartpg:" + srep.Output)
	}

	if err != nil {
		logit.Error.Println("AdminStartpg:" + err.Error())
		return err
	}

	return err

}