// main exercises the swarmapi client calls: DockerInfo, DockerInspect, and DockerRun.
func main() {

	fmt.Println("at top of testswarm main")

	var err error

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
	fmt.Printf("DockerInfo returned %d servers\n", len(infoResponse.Output))

	inspectReq := swarmapi.DockerInspectRequest{}
	inspectReq.ContainerName = "cpm"
	var inspectResp swarmapi.DockerInspectResponse
	inspectResp, err = swarmapi.DockerInspect(&inspectReq)
	if err != nil {
		fmt.Println(err.Error())
	} else {
		fmt.Println(inspectResp.IPAddress)
	}

	runReq := swarmapi.DockerRunRequest{}
	runReq.PGDataPath = "/var/cpm/data/pgsql/swarmtest"
	runReq.ContainerType = "cpm-node"
	runReq.ContainerName = "swarmtest"
	runReq.EnvVars = make(map[string]string)
	runReq.EnvVars["one"] = "value of one"
	runReq.EnvVars["two"] = "value of two"
	runReq.CPU = "0"
	runReq.MEM = "0"
	var runResp swarmapi.DockerRunResponse
	runResp, err = swarmapi.DockerRun(&runReq)
	if err != nil {
		fmt.Println(err.Error())
	} else {
		fmt.Println(runResp.ID)
	}
}
// GetAllServers returns the list of servers known to swarm
func GetAllServers(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	//use swarm to get the list of servers

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	//each Output entry is expected to be "host:port", e.g. "server1:8000"
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		parts := strings.Split(infoResponse.Output[i], ":")
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = parts[0]
		servers[i].IPAddress = parts[0]
		servers[i].Port = parts[1]
	}

	w.WriteJson(&servers)
}
/*
 TODO refactor this to share code with DeleteCluster!!!!!
*/
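// DeleteNode deletes a single container node: it removes the admin database
// entry, asks swarm to remove the container, and deletes the node's data
// volume on every server.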
func DeleteNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-container")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("DeleteNode: error node ID required")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}

	//go get the node we intend to delete
	var dbNode types.Container
	dbNode, err = admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = admindb.DeleteContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	logit.Info.Println("remove 1")
	//it is possible that someone can remove a container
	//outside of us, so we let it pass that we can't remove
	//it

	request := &swarmapi.DockerRemoveRequest{}
	request.ContainerName = dbNode.Name
	_, err = swarmapi.DockerRemove(request)
	if err != nil {
		logit.Error.Println(err.Error())
	}

	logit.Info.Println("remove 2")
	//send the server a deletevolume command
	request2 := &cpmserverapi.DiskDeleteRequest{}
	request2.Path = pgdatapath.Value + "/" + dbNode.Name
	for _, each := range servers {
		_, err = cpmserverapi.DiskDeleteClient(each.Name, request2)
		if err != nil {
			logit.Error.Println(err.Error())
		}
	}
	logit.Info.Println("remove 3")

	//the DNS entries do not need to be removed here; the dnsbridge
	//removes them when the containers are removed via the docker api

	w.WriteHeader(http.StatusOK)
	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// DeleteCluster deletes an existing cluster definition along with its containers and data volumes
func DeleteCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer dbConn.Close()

	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-cluster")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("cluster ID required")
		rest.Error(w, "cluster ID required", http.StatusBadRequest)
		return
	}

	cluster, err := admindb.GetCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//delete docker containers
	containers, err := admindb.GetAllContainersForCluster(dbConn, cluster.ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	var infoResponse swarmapi.DockerInfoResponse

	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}

	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//a container may have been removed outside of CPM,
	//so a failure to remove one here is logged but not fatal
	for i := range containers {
		dremreq := &swarmapi.DockerRemoveRequest{}
		dremreq.ContainerName = containers[i].Name
		_, err = swarmapi.DockerRemove(dremreq)
		if err != nil {
			logit.Error.Println("error when trying to remove container " + err.Error())
		}

		//send all the servers a deletevolume command
		ddreq := &cpmserverapi.DiskDeleteRequest{}
		ddreq.Path = pgdatapath.Value + "/" + containers[i].Name
		for _, each := range servers {
			_, err = cpmserverapi.DiskDeleteClient(each.Name, ddreq)
			if err != nil {
				logit.Error.Println("error when trying to remove disk volume " + err.Error())
			}
		}
	}

	//delete the cluster entry
	err = admindb.DeleteCluster(dbConn, ID)
	if err != nil {
		logit.Error.Println(err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	//delete the container entries
	for i := range containers {
		err = admindb.DeleteContainer(dbConn, containers[i].ID)
		if err != nil {
			logit.Error.Println(err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	status := types.SimpleStatus{}
	status.Status = "OK"
	w.WriteHeader(http.StatusOK)
	w.WriteJson(&status)
}
// Hba creates pg_hba.conf contents from a template and the passed values, returning the new file contents
func Hba(dbConn *sql.DB, mode string, hostname string, port string, clusterid string, domainname string, cars []Rule) (string, error) {

	var hbaInfo HBAParameters

	//host entries use container names without the domain suffix
	hbaInfo.PG_HOST_IP = hostname
	hbaInfo.BACKUP_HOST = hostname + "-backup"
	hbaInfo.MONITOR_HOST = "cpm-mon"
	hbaInfo.ADMIN_HOST = "cpm-admin"
	hbaInfo.RULES_LIST = cars

	bridges, err := admindb.GetSetting(dbConn, "DOCKER-BRIDGES")
	if err != nil {
		logit.Error.Println("Hba:" + err.Error())
		return "", err
	}

	var infoResponse swarmapi.DockerInfoResponse

	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println("Hba:" + err.Error())
		return "", err
	}

	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		parts := strings.Split(infoResponse.Output[i], ":")
		servers[i].IPAddress = parts[0]
	}

	var allservers = ""
	//TODO make this configurable as a setting value
	var allbridges = bridges.Value
	for i := range servers {
		logit.Info.Println("Hba:" + servers[i].IPAddress)
		if allservers == "" {
			allservers = servers[i].IPAddress
		} else {
			allservers = allservers + ":" + servers[i].IPAddress
		}
	}
	logit.Info.Println("Hba:processing serverlist=" + allservers)
	hbaInfo.SERVER_IP_LIST = strings.Split(allservers, ":")
	hbaInfo.BRIDGE_IP_LIST = strings.Split(allbridges, ":")

	var path string
	switch mode {
	case "unassigned":
		path = util.GetBase() + "/conf/standalone/pg_hba.conf.template"
	case "standalone", "master", "standby":
		path = util.GetBase() + "/conf/" + mode + "/pg_hba.conf.template"
	default:
		return "", errors.New("invalid mode in processHba of " + mode)
	}

	if mode == "standby" || mode == "master" {
		_, pgpoolNode, standbyList, err := getMasterValues(dbConn, clusterid, domainname)
		if err != nil {
			return "", err
		}

		hbaInfo.PGPOOL_HOST = pgpoolNode.Name + "." + domainname
		hbaInfo.STANDBY_LIST = standbyList
	}

	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}

	tmpl, err := template.New("hba").Parse(string(contents))
	if err != nil {
		return "", err
	}
	buff := bytes.NewBufferString("")

	logInfo(hbaInfo)

	err = tmpl.Execute(buff, hbaInfo)
	if err != nil {
		return "", err
	}
	logit.Info.Println("Hba:" + buff.String())

	return buff.String(), nil
}
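// provisionImpl creates a new container: it verifies the container name is
// unused, provisions a /pgdata volume on each server (skipped for cpm-pgpool
// images), removes any stale container of the same name, runs the container
// through swarm, and records the node in the admin database.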
func provisionImpl(dbConn *sql.DB, params *swarmapi.DockerRunRequest, standby bool) (string, error) {
	logit.Info.Println("PROFILE: provisionImpl starts 1")

	var errorStr string
	//make sure the container name is not already taken
	_, err := admindb.GetContainerByName(dbConn, params.ContainerName)
	if err != nil {
		if err != sql.ErrNoRows {
			return "", err
		}
	} else {
		errorStr = "container name " + params.ContainerName + " already used can't provision"
		logit.Error.Println(errorStr)
		return "", errors.New(errorStr)
	}

	//get the pg data path
	var pgdatapath types.Setting
	pgdatapath, err = admindb.GetSetting(dbConn, "PG-DATA-PATH")
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}

	var infoResponse swarmapi.DockerInfoResponse
	infoResponse, err = swarmapi.DockerInfo()
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	servers := make([]types.Server, len(infoResponse.Output))
	for i := range infoResponse.Output {
		servers[i].ID = infoResponse.Output[i]
		servers[i].Name = infoResponse.Output[i]
		servers[i].IPAddress = infoResponse.Output[i]
	}
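	//note: unlike GetAllServers, the full "host:port" entry is used
	//here for the server Name and IPAddress fields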

	//for database nodes, allocate a disk volume on every CPM server
	//so the /pgdata container volume has backing storage; the created
	//directory is named the same as the container

	params.PGDataPath = pgdatapath.Value + "/" + params.ContainerName

	logit.Info.Println("PROFILE provisionImpl 2 about to provision volume " + params.PGDataPath)
	if params.Image != "cpm-pgpool" {
		preq := &cpmserverapi.DiskProvisionRequest{}
		preq.Path = params.PGDataPath
		var response cpmserverapi.DiskProvisionResponse
		for _, each := range servers {
			logit.Info.Println("Provision: provisionvolume on server " + each.Name)
			response, err = cpmserverapi.DiskProvisionClient(each.Name, preq)
			if err != nil {
				logit.Info.Println("Provision: provisionvolume error" + err.Error())
				logit.Error.Println(err.Error())
				return "", err
			}
			logit.Info.Println("Provision: provisionvolume call response=" + response.Status)
		}
	}
	logit.Info.Println("PROFILE provisionImpl 3 provision volume completed")

	//run docker run to create the container

	params.CPU, params.MEM, err = getDockerResourceSettings(dbConn, params.Profile)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}

	//inspect and remove any existing container
	logit.Info.Println("PROFILE provisionImpl inspect 4")
	inspectReq := &swarmapi.DockerInspectRequest{}
	inspectReq.ContainerName = params.ContainerName
	var inspectResponse swarmapi.DockerInspectResponse
	inspectResponse, err = swarmapi.DockerInspect(inspectReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	if inspectResponse.RunningState != "not-found" {
		logit.Info.Println("PROFILE provisionImpl remove existing container 4a")
		rreq := &swarmapi.DockerRemoveRequest{}
		rreq.ContainerName = params.ContainerName
		_, err = swarmapi.DockerRemove(rreq)
		if err != nil {
			logit.Error.Println(err.Error())
			return "", err
		}
	}

	//pass any restore env vars to the new container
	if params.RestoreJob != "" {
		if params.EnvVars == nil {
			//logit.Info.Println("making envvars map")
			params.EnvVars = make(map[string]string)
		}
		params.EnvVars["RestoreJob"] = params.RestoreJob
		params.EnvVars["RestoreRemotePath"] = params.RestoreRemotePath
		params.EnvVars["RestoreRemoteHost"] = params.RestoreRemoteHost
		params.EnvVars["RestoreRemoteUser"] = params.RestoreRemoteUser
		params.EnvVars["RestoreDbUser"] = params.RestoreDbUser
		params.EnvVars["RestoreDbPass"] = params.RestoreDbPass
		params.EnvVars["RestoreSet"] = params.RestoreSet
	}

	runReq := swarmapi.DockerRunRequest{}
	runReq.PGDataPath = params.PGDataPath
	runReq.Profile = params.Profile
	runReq.Image = params.Image
	runReq.ContainerName = params.ContainerName
	runReq.EnvVars = params.EnvVars
	//logit.Info.Println("CPU=" + params.CPU)
	//logit.Info.Println("MEM=" + params.MEM)
	runReq.CPU = "0"
	runReq.MEM = "0"
	var runResp swarmapi.DockerRunResponse
	runResp, err = swarmapi.DockerRun(&runReq)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	logit.Info.Println("PROFILE provisionImpl created container 5 " + runResp.ID)

	dbnode := types.Container{}
	dbnode.ID = ""
	dbnode.Name = params.ContainerName
	dbnode.Image = params.Image
	dbnode.ClusterID = "-1"
	dbnode.ProjectID = params.ProjectID

	if params.Standalone == "true" {
		dbnode.Role = "standalone"
	} else {
		dbnode.Role = "unassigned"
	}

	var strid int
	strid, err = admindb.InsertContainer(dbConn, dbnode)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	newid := strconv.Itoa(strid)
	dbnode.ID = newid

	if params.Image != "cpm-node-proxy" {
		//register default db users on the new node
		err = createDBUsers(dbConn, dbnode)
	}

	return newid, err
}
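// main runs the metrics collector: it reads the polling intervals from the
// environment, starts goroutines that publish server CPU/memory, health-check,
// and database-size metrics to Prometheus, and serves them on :8080/metrics.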
func main() {

	//each polling interval comes from its own environment variable
	//and defaults to 3 (minutes) when the variable is unset
	HC_POLL_INT = 3
	if tempVal := os.Getenv("HC_POLL_INT"); tempVal != "" {
		var err error
		HC_POLL_INT, err = strconv.ParseInt(tempVal, 10, 64)
		if err != nil {
			logit.Error.Println(err.Error())
			return
		}
	}

	SERVER_POLL_INT = 3
	if tempVal := os.Getenv("SERVER_POLL_INT"); tempVal != "" {
		var err error
		SERVER_POLL_INT, err = strconv.ParseInt(tempVal, 10, 64)
		if err != nil {
			logit.Error.Println(err.Error())
			return
		}
	}

	CONT_POLL_INT = 3
	if tempVal := os.Getenv("CONT_POLL_INT"); tempVal != "" {
		var err error
		CONT_POLL_INT, err = strconv.ParseInt(tempVal, 10, 64)
		if err != nil {
			logit.Error.Println(err.Error())
			return
		}
	}
	logit.Info.Printf("HealthCheck Polling Interval: %d\n", HC_POLL_INT)
	logit.Info.Printf("Server Polling Interval: %d\n", SERVER_POLL_INT)
	logit.Info.Printf("Container Polling Interval: %d\n", CONT_POLL_INT)

	go func() {
		//register a gauge vector
		gauge := prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "cpm_server_cpu",
				Help: "CPU Utilization.",
			}, []string{
				"server",
			})
		prometheus.MustRegister(gauge)
		gaugeMem := prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "cpm_server_mem",
				Help: "Memory Utilization.",
			}, []string{
				"server",
			})
		prometheus.MustRegister(gaugeMem)

		serverinfo, err := swarmapi.DockerInfo()
		if err != nil {
			logit.Error.Println(err.Error())
			return
		}
		logit.Info.Printf("got %d servers\n", len(serverinfo.Output))

		var metric collect.Metric

		for {
			//get metrics for each server
			for _, out := range serverinfo.Output {
				parts := strings.Split(out, ":")
				ipaddress := parts[0]
				metric, err = collect.Collectcpu(ipaddress)
				if err != nil {
					logit.Error.Println(err.Error())
				} else {
					gauge.WithLabelValues(ipaddress).Set(metric.Value)
				}
				metric, err = collect.Collectmem(ipaddress)
				if err != nil {
					logit.Error.Println(err.Error())
				} else {
					gaugeMem.WithLabelValues(ipaddress).Set(metric.Value)
				}
			}

			time.Sleep(time.Duration(SERVER_POLL_INT) * time.Minute)
		}
	}()

	go func() {
		for {
			collect.Collecthc()
			time.Sleep(time.Duration(HC_POLL_INT) * time.Minute)
		}
	}()

	go func() {
		//register a gauge vector
		dbsizegauge := prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "cpm_container_dbsize",
				Help: "Database Size.",
			}, []string{
				"containername",
				"databasename",
			})
		prometheus.MustRegister(dbsizegauge)

		for {
			logit.Info.Println("collecting dbsize")
			collect.CollectDBSize(dbsizegauge)
			time.Sleep(time.Duration(CONT_POLL_INT) * time.Minute)
		}
	}()

	http.Handle("/metrics", prometheus.Handler())
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		logit.Error.Println(err.Error())
	}
}