Example #1
// This is called on the management cluster when it is running the scheduled task
// which reconciles the databases comparing against the service clusters lists.
func (i *Instance) Reconcile() (err error) {
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("instances.Register() p.Connect(%s) ! %s", p.URI, err))
		return err
	}
	defer db.Close()
	err = i.Lock()
	if err != nil {
		log.Error(fmt.Sprintf("instances.Instance#Reconcile(%s) Failed Locking instance %s ! %s", i.Database, i.Database, err))
		return
	}
	ei, err := FindByDatabase(i.Database)
	if err != nil {
		log.Error(fmt.Sprintf("instances.Instance#Reconcile() ! %s", err))
	} else if ei == nil {
		log.Trace(fmt.Sprintf(`instances.Instance#Reconcile() Reconciling database %s for cluster %s`, i.Database, i.ClusterID))
		sq := fmt.Sprintf(`INSERT INTO cfsb.instances (cluster_id,service_id ,plan_id ,instance_id ,organization_id ,space_id,dbname, dbuser, dbpass,effective_at) VALUES ('%s', '%s', '%s', '%s', '%s','%s','%s','%s','%s',CURRENT_TIMESTAMP)`, i.ClusterID, i.ServiceID, i.PlanID, i.InstanceID, i.OrganizationID, i.SpaceID, i.Database, i.User, i.Pass)
		log.Trace(fmt.Sprintf(`instances.Instance#Reconcile(%s) > %s`, i.Database, sq))
		_, err = db.Exec(sq)
		if err != nil {
			log.Error(fmt.Sprintf("instances.Instance#Reconcile(%s) ! %s", i.Database, err))
		}
	}
	err = i.Unlock()
	if err != nil {
		log.Error(fmt.Sprintf(`instances.Instance#Reconcile(%s) Unlocking ! %s`, i.InstanceID, err))
	}
	return
}
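For orientation, a minimal sketch of how the scheduled task described above might drive Instance#Reconcile across a set of instances. The fetchInstances argument is a hypothetical lister over cfsb.instances, not part of the rdpgd API; the sketch assumes it runs in the same package as the example.

// Hypothetical driver for the scheduled reconciliation run (sketch only).
func reconcileAll(fetchInstances func() ([]*Instance, error)) error {
	all, err := fetchInstances() // assumption: lists the instances known to the management cluster
	if err != nil {
		return err
	}
	for _, inst := range all {
		if err := inst.Reconcile(); err != nil {
			// Log and continue so one failing instance does not block the rest.
			log.Error(fmt.Sprintf("reconcileAll() %s ! %s", inst.Database, err))
		}
	}
	return nil
}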
Example #2
/*
 columnMigrations migrates columns testing for conditions,
 eg. handle the migration of pre-existing environments.
*/
func columnMigrations() (err error) {
	p := pg.NewPG(`127.0.0.1`, pgPort, `rdpg`, `rdpg`, pgPass)
	p.Set(`database`, `rdpg`)

	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf(`rdpg.columnMigrations() Could not open connection ! %s`, err))
		return
	}
	defer db.Close()

	sq := `SELECT constraint_name FROM information_schema.table_constraints WHERE table_name='instances' AND constraint_type='UNIQUE';`
	log.Trace(fmt.Sprintf("rdpg.columnMigrations() %s", sq))
	var constraintName string
	if err = db.QueryRow(sq).Scan(&constraintName); err != nil {
		if err == sql.ErrNoRows {
			log.Trace(fmt.Sprintf("The instance table db name is not set UNIQUE constraints"))
			_, err = db.Exec(`ALTER TABLE cfsb.instances ADD CONSTRAINT instances_dbname_key UNIQUE (dbname)`)
			if err != nil {
				log.Error(fmt.Sprintf("rdpg.columnMigrations()%s", err))
				return
			}
		} else {
			log.Error(fmt.Sprintf("rdpg.columnMigrations() ! %s", err))
			return
		}
	}
	return
}
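As an aside, the same existence check can be written with a bound parameter instead of string formatting. A minimal sketch, assuming the handle is the *sqlx.DB returned by p.Connect() above (sqlx embeds *sql.DB, so QueryRow and sql.ErrNoRows behave as in database/sql):

// Sketch: parameterized variant of the UNIQUE-constraint existence check.
func hasUniqueConstraint(db *sqlx.DB, table string) (bool, error) {
	var name string
	err := db.QueryRow(`SELECT constraint_name FROM information_schema.table_constraints WHERE table_name=$1 AND constraint_type='UNIQUE' LIMIT 1`, table).Scan(&name)
	if err == sql.ErrNoRows {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}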
Example #3
func (b *Binding) Find() (err error) {
	log.Trace(fmt.Sprintf(`cfsb.Binding#Find(%s) ... `, b.BindingID))

	if b.BindingID == "" {
		return errors.New("Binding ID is empty, can not Binding#Find()")
	}
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Binding#Find(%s) ! %s", b.BindingID, err))
		return
	}
	defer db.Close()

	sq := fmt.Sprintf(`SELECT id,instance_id FROM cfsb.bindings WHERE binding_id=lower('%s') LIMIT 1`, b.BindingID)
	log.Trace(fmt.Sprintf(`cfsb.Binding#Find(%s) > %s`, b.BindingID, sq))
	err = db.Get(b, sq)
	if err != nil {
		if err == sql.ErrNoRows {
			log.Error(fmt.Sprintf("cfsb.Binding#Find(%s) ! Could not find binding with given Binding ID", b.BindingID))
		} else {
			log.Error(fmt.Sprintf("cfsb.Binding#Find(%s) ! %s", b.BindingID, err))
		}
	} else {
		// TODO: Load creds: b.Creds := Credentials{} ... b.Creds.Find()
	}
	return
}
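A short usage sketch for the lookup above; the binding ID value is illustrative.

// Sketch: looking up a binding by the broker-supplied binding ID.
func exampleFindBinding() {
	b := &Binding{BindingID: "9b14ad43-5b8d-4b2a-8b38-example"}
	if err := b.Find(); err != nil {
		log.Error(fmt.Sprintf("exampleFindBinding() ! %s", err))
		return
	}
	log.Trace(fmt.Sprintf("exampleFindBinding() found instance %s", b.InstanceID))
}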
Example #4
func (b *Binding) Remove() (err error) {
	log.Trace(fmt.Sprintf(`cfsb.Binding#Remove(%s) ... `, b.BindingID))
	err = b.Find()
	if err != nil {
		log.Error(fmt.Sprintf(`cfsb.Binding#Remove(%s) ! %s`, b.BindingID, err))
		return
	}
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Binding#Remove(%s) ! %s", b.BindingID, err))
		return
	}
	defer db.Close()

	// TODO: Scheduled background task that does any cleanup necessary for an
	// unbinding (remove credentials?)
	sq := fmt.Sprintf(`UPDATE cfsb.bindings SET ineffective_at=CURRENT_TIMESTAMP WHERE binding_id=lower('%s')`, b.BindingID)
	log.Trace(fmt.Sprintf(`cfsb.Binding#Remove(%s) SQL > %s`, b.BindingID, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`cfsb.Binding#Remove(%s) ! %s`, b.BindingID, err))
	}

	b.Creds = &Credentials{
		InstanceID: b.InstanceID,
		BindingID:  b.BindingID,
	}

	err = b.Creds.Remove()
	if err != nil {
		log.Error(fmt.Sprintf(`cfsb.Binding#Remove(%s) b.Creds.Remove() ! %s`, b.BindingID, err))
	}
	return
}
Example #5
// Create Credentials in the data store
func (c *Credentials) Create() (err error) {
	log.Trace(fmt.Sprintf(`cfsb.Credentials#Create(%s,%s) ... `, c.InstanceID, c.BindingID))

	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Credentials#Create(%s) ! %s", c.BindingID, err))
		return
	}
	defer db.Close()

	err = c.Find()
	if err != nil { // Find() failed; check whether the credentials simply do not exist yet.
		if err == sql.ErrNoRows { // Does not yet exist, insert the credentials.
			sq := fmt.Sprintf(`INSERT INTO cfsb.credentials (instance_id,binding_id,host,port,dbuser,dbpass,dbname) VALUES (lower('%s'),lower('%s'),'%s','%s','%s','%s','%s');`, c.InstanceID, c.BindingID, c.Host, c.Port, c.UserName, c.Password, c.Database)
			log.Trace(fmt.Sprintf(`cfsb.Credentials#Create() > %s`, sq))
			_, err = db.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf(`cfsb.Credentials#Create()  %s ! %s`, sq, err))
			}
		} else {
			log.Error(fmt.Sprintf(`cfsb.Credentials#Create() c.Find() binding %s ! %s`, c.BindingID, err))
		}
		return
	} else { // Credentials already exists, return.
		log.Trace(fmt.Sprintf(`cfsb.Credentials#Create() Credentials already exist for binding %s, returning`, c.BindingID))
		return
	}
}
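A usage sketch for Create(); the field names mirror the INSERT above, all values are placeholders, and Port is assumed to be a string because the INSERT formats it with %s.

// Sketch: persisting credentials for a binding (values are placeholders).
func exampleCreateCredentials() {
	c := &Credentials{
		InstanceID: "instance-uuid",
		BindingID:  "binding-uuid",
		Host:       "10.244.2.2",
		Port:       "6432",
		UserName:   "u0123456789",
		Password:   "s3cr3t",
		Database:   "d0123456789",
	}
	if err := c.Create(); err != nil {
		log.Error(fmt.Sprintf("exampleCreateCredentials() ! %s", err))
	}
}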
Example #6
// CleanupUnusedDatabases - Identify Databases that should be decommissioned and decommission them.
func (t *Task) CleanupUnusedDatabases() (err error) {
	// eg. Look for databases that should have been decommissioned and insert
	// a CleanupUnusedDatabases task to target each database found.

	log.Trace(fmt.Sprintf("tasks.CleanupUnusedDatabases(%s)...", t.Data))

	//SELECT - Identify the databases which should have been dropped.
	address := `127.0.0.1`
	sq := `SELECT dbname FROM cfsb.instances WHERE effective_at IS NOT NULL AND ineffective_at IS NOT NULL AND dbname IN (SELECT datname FROM pg_database WHERE datname LIKE 'd%');`
	log.Trace(fmt.Sprintf("tasks.CleanupUnusedDatabases() > getList(%s, %s", address, sq))
	listCleanupDatabases, err := getList(address, sq)
	log.Trace(fmt.Sprintf("tasks.CleanupUnusedDatabases() - listCleanupDatabases=%s and err=%s", listCleanupDatabases, err))
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task<%d>#CleanupUnusedDatabases() Failed to load list of databases ! %s", t.ID, err))
		return err
	}

	for _, databaseName := range listCleanupDatabases {
		log.Trace(fmt.Sprintf("tasks.CleanupUnusedDatabases() > Database Name to Cleanup: %s", databaseName))

		err := CleanupDatabase(databaseName, t.ClusterService)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.CleanUpUnusedDatabases() > tasks.LoopThruDBs(): %s", err))
			return err
		}
	}
	return
}
Example #7
// GrantUserPrivileges grants the given privileges to a postgres user on a single target host.
func (p *PG) GrantUserPrivileges(dbuser string, privileges []string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#GrantUserPrivileges(%s) Granting postgres user privileges %+v...`, p.IP, dbuser, privileges))
	p.Set(`database`, `postgres`)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#UserGrantPrivileges(%s) %s ! %s", p.IP, dbuser, p.URI, err))
		return
	}
	defer db.Close()

	for _, priv := range privileges {
		sq := fmt.Sprintf(`ALTER USER %s WITH %s;`, dbuser, priv)
		log.Trace(fmt.Sprintf(`pg.PG<%s>#GrantUserPrivileges(%s) > %s`, p.IP, dbuser, sq))
		result, err := db.Exec(sq)
		if err != nil {
			log.Error(fmt.Sprintf(`pg.PG<%s>#GrantUserPrivileges(%s) ! %s`, p.IP, dbuser, err))
			return err
		}
		// Only consult RowsAffected() once Exec() is known to have succeeded.
		if rows, _ := result.RowsAffected(); rows > 0 {
			log.Trace(fmt.Sprintf(`pg.PG<%s>#GrantUserPrivileges(%s) Privileges applied.`, p.IP, dbuser))
		}
	}
	return nil
}
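A usage sketch; the host and the privilege list are illustrative rather than the broker's canonical set, and pgPort/pgPass are assumed to be in scope as in the other examples.

// Sketch: applying a set of role attributes to a service user on one host.
func exampleGrantPrivileges() {
	p := pg.NewPG(`10.244.2.2`, pgPort, `rdpg`, `rdpg`, pgPass)
	privileges := []string{"NOSUPERUSER", "CREATEDB", "LOGIN"}
	if err := p.GrantUserPrivileges("u0123456789", privileges); err != nil {
		log.Error(fmt.Sprintf("exampleGrantPrivileges() ! %s", err))
	}
}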
Example #8
func (p *PG) DropDatabase(dbname string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#DropDatabase(%s) Dropping postgres database...`, p.IP, dbname))
	p.Set(`database`, `postgres`)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropDatabase(%s) ! %s", p.IP, dbname, err))
		return
	}
	defer db.Close()

	exists, err := p.DatabaseExists(dbname)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropDatabase(%s) ! %s", p.IP, dbname, err))
		return
	}
	if !exists {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropDatabase(%s) Database %s already does not exist.", p.IP, dbname, err))
		return
	}

	// TODO: How do we drop a database in bdr properly?
	sq := fmt.Sprintf(`DROP DATABASE IF EXISTS %s`, dbname)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#DropDatabase(%s) > %s`, p.IP, dbname, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropDatabase(%s) ! %s", p.IP, dbname, err))
		return
	}
	return
}
Example #9
func (i *Instance) Decommission() (err error) {
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("instances.Decommission() p.Connect(%s) ! %s", p.URI, err))
		return
	}
	defer db.Close()

	// TODO: i.SetIneffective()
	sq := fmt.Sprintf(`UPDATE cfsb.instances SET ineffective_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database)
	log.Trace(fmt.Sprintf(`instances.Instance<%s>#Decommission() SQL > %s`, i.InstanceID, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf("Instance#Decommission(%s) setting inefective_at ! %s", i.InstanceID, err))
		return
	}

	// TODO: tasks.Task{ClusterID: ,Node: ,Role: ,Action:, Data: }.Enqueue()
	// Question is how to do this without an import cycle? Some tasks require instances.
	sq = fmt.Sprintf(`INSERT INTO tasks.tasks (cluster_id,role,action,data, cluster_service) VALUES ('%s','all','DecommissionDatabase','%s', '%s')`, i.ClusterID, i.Database, i.ClusterService)
	log.Trace(fmt.Sprintf(`instances.Instance#Decommission(%s) Scheduling Instance Removal > %s`, i.InstanceID, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`instances.Instance#Decommission(%s) ! %s`, i.InstanceID, err))
	}
	return
}
Example #10
func (c *Catalog) Fetch() (err error) {
	log.Trace(`cfsb.Catalog#Fetch()...`)
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Catalog#Fetch() ! %s", err))
		return
	}
	defer db.Close()

	sq := `SELECT service_id,name,description,bindable FROM cfsb.services;`
	log.Trace(fmt.Sprintf(`cfsb.Catalog#Fetch() > %s`, sq))
	err = db.Select(&c.Services, sq)
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Catalog#Fetch() db.Select() ! %s", err.Error()))
		return
	}

	// TODO: Account for plans being associated with a service.
	for i := range c.Services {
		service := &c.Services[i]
		sq := `SELECT plan_id,name,description FROM cfsb.plans;`
		log.Trace(fmt.Sprintf(`cfsb.Catalog#Fetch() > %s`, sq))
		err = db.Select(&service.Plans, sq)
		if err != nil {
			log.Error(fmt.Sprintf("cfsb.Catalog#Fetch() db.Select() ! %s", err.Error()))
			return
		}
		c.Services[i].Tags = []string{"rdpg", "postgresql"}
		// c.Services[i].Dashboard = DashboardClient{}
	}
	return
}
Example #11
func (r *RDPG) bdrBootstrap() (err error) {
	_, err = r.bootstrapLock()
	if err != nil {
		log.Error(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() r.bootstrapLock() ! %s`, ClusterID, err))
		return
	}

	leader := false
	key := fmt.Sprintf(`rdpg/%s/bdr/join/ip`, ClusterID)
	bdrJoinIP, err = r.getValue(key)
	if err != nil {
		log.Error(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() kv.getValue(%s) ! %s ...`, ClusterID, key, err))
		return err
	}
	if len(bdrJoinIP) == 0 || bdrJoinIP == globals.MyIP {
		log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() kv.getValue(%s) BDR Join IP has not been set`, ClusterID, key))
		leader = true
	} else {
		log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() kv.getValue(%s) BDR Join Node IP has been set to %s`, ClusterID, key, bdrJoinIP))
		leader = false
	}

	if leader {
		err = r.bdrLeaderBootstrap()
		if err != nil {
			log.Error(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() r.bdrLeaderBootstrap() ! %s`, ClusterID, err))
		}
	} else {
		err = r.bdrNonLeaderBootstrap()
		if err != nil {
			log.Error(fmt.Sprintf(`rdpg.RDPG<%s>#Bootstrap() r.bdrNonLeaderBootstrap() ! %s`, ClusterID, err))
		}
	}
	return
}
Example #12
/*
Dequeue dequeues a given task from the database's tasks.tasks table.
*/
func (t *Task) Dequeue() (err error) {
	tasks := []Task{}
	sq := fmt.Sprintf(`SELECT id,node,cluster_id,role,action,data,ttl,node_type,cluster_service FROM tasks.tasks WHERE id=%d LIMIT 1`, t.ID)
	log.Trace(fmt.Sprintf(`tasks.Task<%d>#Dequeue() > %s`, t.ID, sq))
	OpenWorkDB()
	err = workDB.Select(&tasks, sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#Dequeue() Selecting Task %+v ! %s`, t.ID, t, err.Error()))
		return
	}
	if len(tasks) == 0 {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#Dequeue() No rows returned for task %+v`, t.ID, t))
		return
	}
	*t = tasks[0] // Copy the selected row into the caller's Task.
	// TODO: Add the information for who has this task locked using IP
	sq = fmt.Sprintf(`UPDATE tasks.tasks SET locked_by='%s', processing_at=CURRENT_TIMESTAMP WHERE id=%d`, myIP, t.ID)
	log.Trace(fmt.Sprintf(`tasks.Task<%d>#Dequeue() > %s`, t.ID, sq))
	_, err = workDB.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#Dequeue() Updating Task processing_at ! %s`, t.ID, err))
		return
	}
	log.Trace(fmt.Sprintf(`tasks.Task<%d>#Dequeue() Task Dequeued > %+v`, t.ID, t))
	return
}
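A usage sketch for Dequeue(): only the task ID needs to be set beforehand; the call then loads the remaining columns from tasks.tasks and marks the row as being processed. The ID value here is illustrative.

// Sketch: dequeueing a known task by ID.
func exampleDequeue() {
	t := &Task{ID: 42}
	if err := t.Dequeue(); err != nil {
		log.Error(fmt.Sprintf("exampleDequeue() ! %s", err))
		return
	}
	log.Trace(fmt.Sprintf("exampleDequeue() dequeued action %s", t.Action))
}
Example #13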
/*EnforceFileRetention - Responsible for removing files which are no longer
needed on the local file system. For example, backup files which have been created
successfully locally and copied to S3 successfully can be deleted to preserve
local disk storage. */
func (t *Task) EnforceFileRetention() (err error) {

	/*
	   If S3 copy is enabled, files cannot be deleted until they have been copied to S3;
	   otherwise keep the most recent backups (say, the last 48 hours' worth) and delete all others.
	*/

	//Select eligible files
	address := `127.0.0.1`
	sq := ``
	if isS3FileCopyEnabled() {
		sq = `SELECT a.params FROM backups.file_history a WHERE a.removed_at IS NULL AND a.action = 'CreateBackup' AND EXISTS (SELECT b.params FROM backups.file_history b WHERE a.cluster_id = b.cluster_id AND a.dbname = b.dbname AND a.node=b.node AND a.file_name = b.file_name AND b.action='CopyToS3' AND b.status='ok') AND NOT EXISTS (SELECT id FROM tasks.tasks WHERE action = 'DeleteFile' AND data = a.params::text)`
	} else {
		sq = `SELECT a.params FROM backups.file_history a WHERE a.removed_at IS NULL AND a.action = 'CreateBackup' AND a.status = 'ok' AND created_at < current_timestamp - '48 hours'::interval  AND NOT EXISTS (SELECT id FROM tasks.tasks WHERE action = 'DeleteFile' AND data = a.params::text)`
	}
	filesToDelete, err := getList(address, sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#EnforceFileRetention() Failed to load list of files ! %s`, t.ID, err))
		return err
	}

	log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Found %d files to delete", len(filesToDelete)))

	for _, fileToDeleteParams := range filesToDelete {
		log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Attempting to add %s", fileToDeleteParams))

		newTask := Task{ClusterID: t.ClusterID, Node: t.Node, Role: t.Role, Action: "DeleteFile", Data: fileToDeleteParams, TTL: t.TTL, NodeType: t.NodeType}
		err = newTask.Enqueue()
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Task<%d>#EnforceFileRetention() newTask.Enqueue() ! %s`, t.ID, err))
		}
	}

	return
}
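The local-only branch above boils down to a simple age test. A minimal sketch of that rule, with the 48-hour window written as a constant for illustration:

// Sketch: the retention rule used when S3 copy is disabled. Anything older
// than the retention window is eligible for deletion.
func eligibleForLocalDeletion(createdAt, now time.Time) bool {
	const retention = 48 * time.Hour
	return now.Sub(createdAt) > retention
}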
Example #14
func getMasterIP(clusterName string) (masterIp string, err error) {

	log.Trace(fmt.Sprintf("gpb#consul.getMasterIP() Calling out to Consul at address %s", mcConsulIP))

	consulConfig := consulapi.DefaultConfig()
	consulConfig.Address = mcConsulIP
	consulClient, err := consulapi.NewClient(consulConfig)
	if err != nil {
		log.Error(fmt.Sprintf(`gpb#consul.getMasterIP() Consul IP: %s ! %s`, mcConsulIP, err))
		return
	}

	masterNode, _, err := consulClient.Catalog().Service(fmt.Sprintf(`%s-master`, clusterName), "", nil)
	if err != nil {
		log.Error(fmt.Sprintf("gpb#consul.getMasterIP() Cluster Name: %s ! %s", clusterName, err))
		return
	}

	if len(masterNode) == 0 {
		masterIp = "0.0.0.0"
		return masterIp, errors.New("Could not find the consul master ip")
	}

	masterIp = masterNode[0].Address
	log.Trace(fmt.Sprintf("gpb#consul.getMasterIP() Found master ip for %s = %s", clusterName, masterIp))
	return masterIp, err

}
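A usage sketch; the cluster name is illustrative and mcConsulIP is assumed to already point at the management-cluster Consul agent.

// Sketch: resolving the write-master IP of a service cluster via Consul.
func exampleGetMasterIP() {
	ip, err := getMasterIP("sc-example-m0-c1")
	if err != nil {
		log.Error(fmt.Sprintf("exampleGetMasterIP() ! %s", err))
		return
	}
	log.Trace(fmt.Sprintf("exampleGetMasterIP() master is %s", ip))
}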
Example #15
func (p *PG) WaitForRegClass(k string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#WaitForRegClass(%s) %+v`, p.IP, k, p.URI))
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#WaitForRegClass(%s) Failed connecting to %s err: %s", p.IP, k, p.URI, err))
		return err
	}
	defer db.Close()

	for { // TODO: Max Attempts.
		names := []string{}
		sq := fmt.Sprintf(`SELECT to_regclass('%s') AS name`, k)
		err := db.Select(&names, sq)
		if err != nil {
			log.Trace(fmt.Sprintf(`pg.PG<%s>#WaitForRegClass(%s) ! %s`, p.IP, k, err))
			time.Sleep(3 * time.Second)
			continue
		}
		log.Trace(fmt.Sprintf(`pg.PG<%s>#WaitForRegClass(%s) names: %+v`, p.IP, k, names))
		if len(names) == 0 || names[0] == "" {
			time.Sleep(3 * time.Second)
			continue
		} else {
			break
		}
	}
	return nil
}
Example #16
func (c *Credentials) Find() (err error) {
	log.Trace(fmt.Sprintf(`cfsb.Credentials#Find(%s) ... `, c.BindingID))

	if c.BindingID == "" {
		return errors.New("Credentials ID is empty, can not Credentials#Find()")
	}
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("cfsb.Credentials#Find(%s) PG#Connect() ! %s", c.BindingID, err))
		return
	}
	defer db.Close()

	sq := fmt.Sprintf(`SELECT id,instance_id,binding_id FROM cfsb.credentials WHERE binding_id=lower('%s') LIMIT 1`, c.BindingID)
	log.Trace(fmt.Sprintf(`cfsb.Credentials#Find(%s) SQL > %s`, c.BindingID, sq))
	err = db.Get(c, sq)
	if err != nil {
		if err == sql.ErrNoRows {
			log.Error(fmt.Sprintf("cfsb.Credentials#Find(%s) ! Could not find binding with given Credentials ID", c.BindingID))
		} else {
			log.Error(fmt.Sprintf("cfsb.Credentials#Find(%s) ! %s", c.BindingID, err))
		}
	}
	return
}
Example #17
func (p *PG) DropUser(dbuser string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#DropUser(%s) Dropping postgres user...`, p.IP, dbuser))
	p.Set(`database`, `postgres`)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropUser(%s) %s ! %s", p.IP, dbuser, p.URI, err))
		return
	}
	defer db.Close()

	exists, err := p.UserExists(dbuser)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropUser(%s) ! %s", p.IP, dbuser, err))
		return
	}
	if !exists {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropUser(%s) User already does not exist, skipping.", p.IP, dbuser))
		return
	}
	// TODO: How do we drop a user in bdr properly?
	sq := fmt.Sprintf(`DROP USER %s`, dbuser)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#DropUser(%s) > %s`, p.IP, dbuser, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#DropUser(%s) ! %s", p.IP, dbuser, err))
		return
	}
	return
}
Example #18
func (p *PG) BDRGroupJoin(nodeName, dbname string, target PG) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#BDRGroupJoin(%s,%s) Joining postgres BDR Group for database...`, p.IP, dbname, nodeName))
	p.Set(`database`, dbname)
	db, err := p.Connect()
	if err != nil {
		return
	}
	defer db.Close()

	sq := fmt.Sprintf(`SELECT bdr.bdr_group_join( local_node_name := '%s', node_external_dsn := 'host=%s port=%s user=%s dbname=%s', join_using_dsn := 'host=%s port=%s user=%s dbname=%s'); `, nodeName, p.IP, p.Port, p.User, p.Database, target.IP, target.Port, target.User, dbname)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#BDRGroupJoin(%s) > %s`, p.IP, dbname, sq))
	_, err = db.Exec(sq)
	if err == nil {
		sq = `SELECT bdr.bdr_node_join_wait_for_ready();`
		log.Trace(fmt.Sprintf(`pg.PG<%s>#BDRGroupJoin(%s) > %s`, p.IP, dbname, sq))
		for {
			_, err = db.Exec(sq)
			if err == nil {
				break
			} else {
				re := regexp.MustCompile(`canceling statement due to conflict with recovery`)
				if re.MatchString(err.Error()) {
					time.Sleep(3 * time.Second)
					continue
				} else {
					log.Error(fmt.Sprintf(`pg.PG<%s>#BDRGroupJoin(%s) ! %s`, p.IP, dbname, err))
					return err
				}
			}
		}
	}
	return
}
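A usage sketch for joining a second node to an existing BDR group. The IPs and node name are illustrative, and it assumes pg.NewPG returns a *PG as the other examples suggest.

// Sketch: node2 joins the BDR group for database rdpg using node1 as the seed.
func exampleBDRGroupJoin() {
	local := pg.NewPG(`10.244.2.3`, pgPort, `rdpg`, `rdpg`, pgPass)
	seed := pg.NewPG(`10.244.2.2`, pgPort, `rdpg`, `rdpg`, pgPass)
	if err := local.BDRGroupJoin("node2", "rdpg", *seed); err != nil {
		log.Error(fmt.Sprintf("exampleBDRGroupJoin() ! %s", err))
	}
}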
Example #19
// Create given extensions on a single target host.
func (p *PG) CreateExtensions(dbname string, exts []string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG<%s>#CreateExtensions(%s) Creating postgres extensions %+v on database...`, p.IP, dbname, exts))
	p.Set(`database`, dbname)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateExtensions(%s) %s ! %s", p.IP, dbname, p.URI, err))
		return
	}

	ddlLockRE := regexp.MustCompile(`cannot acquire DDL lock|Database is locked against DDL operations`)
	// TODO: Only create extension if it doesn't already exist.
	for _, ext := range exts {
		sq := fmt.Sprintf(`CREATE EXTENSION IF NOT EXISTS "%s";`, ext)
		log.Trace(fmt.Sprintf(`pg.PG<%s>#CreateExtensions() > %s`, p.IP, sq))
		for { // Retry loop for acquiring DDL schema lock.
			_, err = db.Exec(sq)
			if err != nil {
				if ddlLockRE.MatchString(err.Error()) {
					log.Trace("pg.PG#CreateExtensions() DDL Lock not available, waiting...")
					time.Sleep(1 * time.Second)
					continue
				}
				db.Close()
				log.Error(fmt.Sprintf("pg.PG<%s>#CreateExtensions() %s ! %s", p.IP, ext, err))
				return
			}
			break
		}
	}
	db.Close()
	return
}
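A usage sketch; the database name and the extension list are illustrative.

// Sketch: enabling a couple of extensions on a freshly created service database.
func exampleCreateExtensions() {
	p := pg.NewPG(`10.244.2.2`, pgPort, `rdpg`, `rdpg`, pgPass)
	if err := p.CreateExtensions("d0123456789", []string{"uuid-ossp", "pgcrypto"}); err != nil {
		log.Error(fmt.Sprintf("exampleCreateExtensions() ! %s", err))
	}
}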
Example #20
/*
ConfigurePGBouncer configures PGBouncer on the current system.
*/
func (s *Service) ConfigurePGBouncer() (err error) {
	log.Trace(fmt.Sprintf("services#Service.ConfigurePGBouncer()..."))

	dir := `/var/vcap/jobs/rdpgd-service`
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		log.Trace(fmt.Sprintf("services#Service.ConfigurePGBouncer() Not a service node since %s doesn't exist, skipping.", dir))
		return nil
	}
	// TODO: Adjust for cluster role...
	// TODO: This only happens on service clusters... simply return for management
	instances, err := instances.Active()
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}

	pgbIni, err := ioutil.ReadFile(`/var/vcap/jobs/rdpgd-service/config/pgbouncer/pgbouncer.ini`)
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}
	pgbUsers, err := ioutil.ReadFile(`/var/vcap/jobs/rdpgd-service/config/pgbouncer/users`)
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}
	pi := []string{string(pgbIni)}
	pu := []string{string(pgbUsers)}
	// Currently these are done in the bosh release:
	//pi = append(pi, fmt.Sprintf(`health = host=127.0.0.1 port=%s dbname=health`, pgPort))
	//pu = append(pu, fmt.Sprintf(`"health" md5("checkhealth")`))
	for index := range instances {
		i := instances[index]
		pi = append(pi, fmt.Sprintf(`%s = host=%s port=%s dbname=%s`, i.Database, "127.0.0.1", pgPort, i.Database))
		pu = append(pu, fmt.Sprintf(`"%s" "%s"`, i.User, i.Pass))
	}
	pi = append(pi, "")
	pu = append(pu, "")

	err = ioutil.WriteFile(`/var/vcap/store/pgbouncer/config/pgbouncer.ini`, []byte(strings.Join(pi, "\n")), 0640)
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}

	err = ioutil.WriteFile(`/var/vcap/store/pgbouncer/config/users`, []byte(strings.Join(pu, "\n")), 0640)
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}

	cmd := exec.Command("/var/vcap/jobs/pgbouncer/bin/control", "reload")
	err = cmd.Run()
	if err != nil {
		log.Error(fmt.Sprintf("services#Service.ConfigurePGBouncer() ! %s", err))
		return err
	}
	return
}
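For reference, the per-instance lines appended above take roughly the following shape; the values are placeholders and the pgbouncer target port comes from pgPort as in the code.

// pgbouncer.ini: one database entry is appended per active instance:
//   d0123456789 = host=127.0.0.1 port=<pgPort> dbname=d0123456789
// users file: one auth entry is appended per active instance:
//   "u0123456789" "s3cr3t"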
Example #21
func (t *Task) DecommissionDatabase(workRole string) (err error) {
	log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s)...`, t.Data))

	i, err := instances.FindByDatabase(t.Data)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.DecommissionDatabase(%s) instances.FindByDatabase() ! %s", i.Database, err))
		return err
	}

	ips, err := i.ClusterIPs()
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) i.ClusterIPs() ! %s`, i.Database, err))
		return err
	}
	if len(ips) == 0 {
		log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! No service cluster nodes found in Consul?!", i.Database))
		return
	}
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) p.Connect(%s) ! %s", t.Data, p.URI, err))
		return err
	}
	defer db.Close()

	switch workRole {
	case "manager":
		path := fmt.Sprintf(`databases/decommission/%s`, t.Data)
		url := fmt.Sprintf("http://%s:%s/%s", ips[0], os.Getenv("RDPGD_ADMIN_PORT"), path)
		req, err := http.NewRequest("DELETE", url, bytes.NewBuffer([]byte("{}")))
		log.Trace(fmt.Sprintf(`tasks.Task#Decommission() > DELETE %s`, url))
		//req.Header.Set("Content-Type", "application/json")
		// TODO: Retrieve from configuration in database.
		req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS"))
		httpClient := &http.Client{}
		_, err = httpClient.Do(req)
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err))
			return err
		}
		// TODO: Is there anything we want to do on successful request?
	case "service":
		for _, ip := range ips {
			newTask := Task{ClusterID: ClusterID, Node: ip, Role: "all", Action: "Reconfigure", Data: "pgbouncer"}
			err = newTask.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! %s`, i.Database, err))
			}
		}
		log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s) TODO: Here is where we finally decommission on the service cluster...`, i.Database))
		return nil
	default:
		log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! Unknown work role: '%s' -> BUG!!!`, i.Database, workRole))
		return nil
	}
	return
}
Example #22
func AddBackupPathConfig(dc *config.DefaultConfig) (err error) {
	log.Trace("Entering AddBackupPathConfig")
	if dc.Key != "BackupsPath" {
		errorMessage := fmt.Sprintf("utils/backup.AddBackupPathConfig ! Key specified: %s != 'BackupsPath'", dc.Key)
		log.Error(errorMessage)
		return errors.New(errorMessage)
	}
	p := pg.NewPG(`127.0.0.1`, globals.PGPort, `rdpg`, `rdpg`, globals.PGPass)
	p.Set(`database`, `rdpg`)

	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf(`utils/backup.AddBackupPathConfig() Could not open connection ! %s`, err))
		return err
	}
	defer db.Close()

	oldConfigs := []config.DefaultConfig{}
	sql := fmt.Sprintf("SELECT key, cluster_id, value FROM rdpg.config WHERE key = 'BackupsPath' AND cluster_id = '%s';", dc.ClusterID)
	err = db.Select(&oldConfigs, sql)
	if err != nil {
		log.Error(fmt.Sprintf(`utils/backup.AddBackupPathConfig() db.Select() ! %s`, err))
		return err
	}
	//If there is no preexisting config, then just insert this...
	if len(oldConfigs) == 0 {
		sq := fmt.Sprintf(`INSERT INTO rdpg.config (key,cluster_id,value) SELECT '%s', '%s', '%s' WHERE NOT EXISTS (SELECT key FROM rdpg.config WHERE key = '%s' AND cluster_id = '%s')`, dc.Key, dc.ClusterID, dc.Value, dc.Key, dc.ClusterID)
		log.Trace(fmt.Sprintf(`config.DefaultConfig.Add(): %s`, sq))
		_, err = db.Exec(sq)
		if err != nil {
			log.Error(fmt.Sprintf(`config.DefaultConfig.Add():%s`, err))
			return err
		}
	} else { //Otherwise, need to check if we need to move the backup files.
		if oldConfigs[0].Value != dc.Value {
			//If the path has changed, move the files.
			sq := fmt.Sprintf(`UPDATE rdpg.config SET value = '%s' WHERE key = '%s' AND cluster_id = '%s';`, dc.Value, dc.Key, dc.ClusterID)
			log.Trace(fmt.Sprintf(`config.DefaultConfig.Add(): %s`, sq))
			_, err = db.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf(`config.DefaultConfig.Add():%s`, err))
				return err
			}
			err = MoveBackupFiles(oldConfigs[0].Value, dc.Value)
			var localError error = nil
			if err != nil {
				log.Error(fmt.Sprintf("utils/backup.AddBackupPathConfig() ! utils/backup.MoveBackupFiles erred: %s", err.Error()))
				//Still want to try remote move. Don't just explode now. Return this error later if necessary.
				localError = err
				err = nil
			}
			if rdpgs3.Configured {
				err = MoveRemoteBackupFiles(oldConfigs[0].Value, dc.Value)
				if err != nil {
					log.Error(fmt.Sprintf("utils/backup.AddBackupPath() ! utils/backup.MoveRemoteBackupFiles erred: %s", err.Error()))
					return err
				}
			}
			return localError //is nil by default. Only returns error if MoveBackupFiles erred.
		}
	}
	return nil
}
Example #23
//Scheduler - Entry point for executing the long running tasks scheduler
func Scheduler() {
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	p.Set(`database`, `rdpg`)

	err := p.WaitForRegClass("tasks.schedules")
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Scheduler() p.WaitForRegClass() ! %s`, err))
	}

	scheduleDB, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Scheduler() p.Connect() Failed connecting to %s ! %s`, p.URI, err))
		proc, _ := os.FindProcess(os.Getpid())
		proc.Signal(syscall.SIGTERM)
	}
	defer scheduleDB.Close()

	for {
		err = SchedulerLock()
		if err != nil {
			time.Sleep(10 * time.Second)
			continue
		}
		schedules := []Schedule{}
		sq := fmt.Sprintf(`SELECT id,cluster_id, role, action, data, ttl, node_type, cluster_service FROM tasks.schedules WHERE enabled = true AND CURRENT_TIMESTAMP >= (last_scheduled_at + frequency::interval) AND role IN ('all','%s')`, globals.ServiceRole)
		log.Trace(fmt.Sprintf(`tasks#Scheduler() Selecting Schedules > %s`, sq))
		err = scheduleDB.Select(&schedules, sq)
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Scheduler() Selecting Schedules ! %s`, err))
			SchedulerUnlock()
			time.Sleep(10 * time.Second)
			continue
		}
		for index := range schedules {
			sq = fmt.Sprintf(`UPDATE tasks.schedules SET last_scheduled_at = CURRENT_TIMESTAMP WHERE id=%d`, schedules[index].ID)
			log.Trace(fmt.Sprintf(`tasks#Scheduler() %+v > %s`, schedules[index], sq))
			_, err = scheduleDB.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Scheduler() Schedule: %+v ! %s`, schedules[index], err))
				continue
			}
			task := NewTask()
			task.ClusterID = schedules[index].ClusterID
			task.ClusterService = schedules[index].ClusterService
			task.Role = schedules[index].Role
			task.Action = schedules[index].Action
			task.Data = schedules[index].Data
			task.TTL = schedules[index].TTL
			task.NodeType = schedules[index].NodeType
			err = task.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Scheduler() Task.Enqueue() %+v ! %s`, task, err))
			}
		}
		SchedulerUnlock()
		time.Sleep(10 * time.Second)
	}
}
Example #24
func ClusterCapacity() (totalClusterCapacity int, err error) {
	totalClusterCapacity = 0
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Error(fmt.Sprintf("instances.cluster#ClusterCapacity() ! %s", err))
		return
	}

	catalog := client.Catalog()
	services, _, err := catalog.Services(nil)
	if err != nil {
		log.Error(fmt.Sprintf("instances.cluster#ClusterCapacity() ! %s", err))
		return
	}
	if len(services) == 0 {
		log.Error(fmt.Sprintf("instances.cluster#ClusterCapacity() ! No services found, no known clusters?!"))
		return
	}
	re := regexp.MustCompile(`^(rdpgsc[0-9]+$)|(sc-([[:alnum:]|-])*m[0-9]+-c[0-9]+$)`)

	kv := client.KV()
	for key := range services {
		if re.MatchString(key) {
			kvp, _, err := kv.Get("rdpg/"+key+"/capacity/instances/allowed", nil)
			if err != nil {
				log.Error(fmt.Sprintf("instances.cluster#ClusterCapacity() : getKeyValue! %s", err))
				return 0, err
			}
			if kvp == nil {
				log.Trace(fmt.Sprintf(`instances.cluster#ClusterCapacity() kv.Get(%s) Key is not set...`, key))
				return 0, err
			}
			s := string(kvp.Value)
			allowed, err := strconv.Atoi(s)

			kvp, _, err = kv.Get("rdpg/"+key+"/capacity/instances/limit", nil)
			if err != nil {
				log.Error(fmt.Sprintf("instances.cluster#ClusterCapacity() : getKeyValue! %s", err))
				return 0, err
			}
			if kvp == nil {
				log.Trace(fmt.Sprintf(`instances.cluster#ClusterCapacity() kv.Get(%s) Key is not set...`, key))
				return 0, err
			}
			s = string(kvp.Value)
			limit, err := strconv.Atoi(s)

			if allowed < limit {
				totalClusterCapacity += allowed
			} else {
				totalClusterCapacity += limit
			}
		}
	}
	return
}
Example #25
// Create a given database owned by user on a single target host.
func (p *PG) CreateDatabase(dbname, dbuser string) (err error) {
	log.Trace(fmt.Sprintf(`pg.PG#CreateDatabase(%s) Creating postgres database...`, dbname))
	p.Set(`database`, `postgres`)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) %s ! %s", p.IP, dbname, dbuser, p.URI, err))
		return
	}
	defer db.Close()

	exists, err := p.UserExists(dbuser)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
		return
	}
	if !exists {
		err = fmt.Errorf(`User does not exist, ensure that postgres user '%s' exists first.`, dbuser)
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
		return
	}

	exists, err = p.DatabaseExists(dbname)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
		return
	}
	if exists {
		log.Trace(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) Database already exists, skipping.", p.IP, dbname, dbuser))
		return
	}

	sq := fmt.Sprintf(`CREATE DATABASE %s WITH OWNER %s TEMPLATE template0 ENCODING 'UTF8'`, dbname, dbuser)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#CreateDatabase(%s,%s) > %s`, p.IP, dbname, dbuser, sq))
	_, err = db.Exec(sq) // Exec, not Query: DDL returns no rows and Query would leak a Rows handle.
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
		return
	}

	sq = fmt.Sprintf(`REVOKE ALL ON DATABASE "%s" FROM public`, dbname)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#CreateDatabase(%s,%s) > %s`, p.IP, dbname, dbuser, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
	}

	sq = fmt.Sprintf(`GRANT ALL PRIVILEGES ON DATABASE %s TO %s`, dbname, dbuser)
	log.Trace(fmt.Sprintf(`pg.PG<%s>#CreateDatabase(%s,%s) > %s`, p.IP, dbname, dbuser, sq))
	_, err = db.Exec(sq)
	if err != nil {
		log.Error(fmt.Sprintf("pg.PG<%s>#CreateDatabase(%s,%s) ! %s", p.IP, dbname, dbuser, err))
		return
	}
	return nil
}
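A usage sketch; CreateDatabase expects the owning role to exist already, as the check above shows, and the names are illustrative.

// Sketch: provisioning a database for an existing service user on one host.
func exampleCreateDatabase() {
	p := pg.NewPG(`10.244.2.2`, pgPort, `rdpg`, `rdpg`, pgPass)
	if err := p.CreateDatabase("d0123456789", "u0123456789"); err != nil {
		log.Error(fmt.Sprintf("exampleCreateDatabase() ! %s", err))
	}
}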
Example #26
// Assign() is called when the master cluster tells the service cluster about
// an assignment.
func (i *Instance) Assign() (err error) {
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("instances.Assign() p.Connect(%s) ! %s", p.URI, err))
		return err
	}
	defer db.Close()

	for {
		err = i.Lock()
		if err != nil {
			log.Error(fmt.Sprintf("instances.Instance#Assign(%s) Failed Locking instance ! %s", i.Database, err))
			continue
		}
		sq := fmt.Sprintf(`UPDATE cfsb.instances SET service_id='%s',plan_id='%s',instance_id='%s',organization_id='%s',space_id='%s' WHERE dbname='%s'`, i.ServiceID, i.PlanID, i.InstanceID, i.OrganizationID, i.SpaceID, i.Database)
		log.Trace(fmt.Sprintf(`instances.Instance#Assign(%s) > %s`, i.Database, sq))
		_, err = db.Exec(sq)
		if err != nil {
			log.Error(fmt.Sprintf("instances.Instance#Assign(%s) ! %s", i.Database, err))
			err = i.Unlock()
			if err != nil {
				log.Error(fmt.Sprintf(`instances.Instance#Assign(%s) Unlocking ! %s`, i.InstanceID, err))
			}
			continue
		} else {
			err = i.Unlock()
			if err != nil {
				log.Error(fmt.Sprintf(`instances.Instance#Assign(%s) Unlocking ! %s`, i.InstanceID, err))
			}
			break
		}
	}

	ips, err := i.ClusterIPs()
	if err != nil {
		log.Error(fmt.Sprintf(`instances.Instance#Assign(%s) i.ClusterIPs() ! %s`, i.InstanceID, err))
		return
	}
	for _, ip := range ips {
		// TODO: tasks.Task{ClusterID: ,Node: ,Role: ,Action:, Data: }.Enqueue()
		// Question is how to do this without an import cycle? Some tasks require instances.
		sq := fmt.Sprintf(`INSERT INTO tasks.tasks (cluster_id,node,role,action,data) VALUES ('%s','%s','service','Reconfigure','pgbouncer')`, ClusterID, ip)
		log.Trace(fmt.Sprintf(`instances.Instance#Assign(%s) Enqueue Reconfigure of pgbouncer > %s`, i.InstanceID, sq))
		_, err = db.Exec(sq)
		if err != nil {
			log.Error(fmt.Sprintf(`instances.Instance#Assign(%s) Unlocking ! %s`, i.InstanceID, err))
		}
	}
	return
}
Example #27
//FindFilesToCopyToS3 - Responsible for copying files, such as database backups
//to S3 storage
func (t *Task) FindFilesToCopyToS3() (err error) {
	if err != nil {
		log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() Could not retrieve S3 Credentials ! %s", err))
		return err
	}

	//If S3 creds/bucket aren't set just exit since they aren't configured
	if !rdpgs3.Configured {
		log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() S3 CONFIGURATION MISSING FOR THIS DEPLOYMENT ! S3 Credentials are not configured, skipping attempt to copy until configured "))
		return
	}

	//Select eligible files
	//Diff with empty string means get me the diff for ALL THE THINGS
	localDiff, _, err := backup.Diff("", true)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#FindFilesToCopyToS3() Failed to load list of files ! %s`, t.ID, err))
		return
	}
	numFilesToCopy := 0
	for _, dbWithBackupsToCopy := range localDiff {
		numFilesToCopy += len(dbWithBackupsToCopy.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Found %d files to copy over %d unique databases", numFilesToCopy, len(localDiff)))

	//Loop and add Tasks CopyFileToS3
	for _, dbWithBackupsToCopy := range localDiff {
		for _, backupToCopy := range dbWithBackupsToCopy.Backups {
			//Gather the info necessary for uploading the file.
			fm := S3FileMetadata{}
			fm.Location = backup.Location(dbWithBackupsToCopy.Database, backupToCopy.Name)
			fm.DBName = dbWithBackupsToCopy.Database
			fm.Node = globals.MyIP
			fm.ClusterID = globals.ClusterID
			//JSONify that info
			fileToCopyParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Error attempting to marshal some JSON ! %+v %s", fm, err))
				return err
			}
			log.Trace(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Attempting to add %s", fileToCopyParams))
			//Insert the task
			newTask := Task{ClusterID: t.ClusterID, Node: t.Node, Role: t.Role, Action: "CopyFileToS3", Data: string(fileToCopyParams), TTL: t.TTL, NodeType: t.NodeType}
			err = newTask.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.FindFilesToCopyToS3() service task schedules ! %s`, err))
			}
		}

	}
	return

}
Example #28
/*
ConfigureHAProxy configures HAProxy on the current system.
*/
func (s *Service) ConfigureHAProxy() (err error) {
	log.Trace(fmt.Sprintf(`services#Service.ConfigureHAProxy()...`))

	dir := `/var/vcap/jobs/rdpgd-service`
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		log.Trace(fmt.Sprintf(`services#Service.ConfigureHAProxy() Not a service node since %s doesn't exist, skipping.`, dir))
		return nil
	}
	header, err := ioutil.ReadFile(`/var/vcap/jobs/rdpgd-service/config/haproxy/haproxy.cfg.header`)
	if err != nil {
		log.Error(fmt.Sprintf(`services.Service#ConfigureHAProxy() ! %s`, err))
		return err
	}

	writeMasterIP, err := s.GetWriteMasterIP()
	if err != nil {
		log.Error(fmt.Sprintf(`services.Service#ConfigureHAProxy() ! %s`, err))
		return err
	}
	if writeMasterIP == "" {
		log.Trace(fmt.Sprintf(`services.Service#ConfigureHAProxy() No Write Master IP.`))
		return
	}

	// TODO: 5432 & 6432 from environmental configuration.
	// TODO: Should this list come from active Consul registered hosts instead?
	footer := fmt.Sprintf(`
frontend pgbdr_write_port
  bind 0.0.0.0:5432
  mode tcp
  default_backend pgbdr_write_master

backend pgbdr_write_master
  mode tcp
  server master %s:6432 check
`, writeMasterIP)

	hc := []string{string(header), footer}
	err = ioutil.WriteFile(`/var/vcap/jobs/haproxy/config/haproxy.cfg`, []byte(strings.Join(hc, "\n")), 0640)
	if err != nil {
		log.Error(fmt.Sprintf(`services#Service.ConfigureHAProxy() ! %s`, err))
		return err
	}

	cmd := exec.Command(`/var/vcap/jobs/haproxy/bin/control`, "reload")
	err = cmd.Run()
	if err != nil {
		log.Error(fmt.Sprintf(`services#Service.ConfigureHAProxy() ! %s`, err))
		return err
	}
	return
}
Example #29
func (r *RDPG) registerConsulServices() (err error) {
	log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#registerConsulServices() Registering Consul Services...`, ClusterID))

	re := regexp.MustCompile(`^(rdpg(sc[0-9]+$))|(sc-([[:alnum:]|-])*m[0-9]+-c[0-9]+$)`)
	if !re.MatchString(ClusterID) {
		log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#registerConsulServices() Not a service cluster, skipping consul service registration.`, ClusterID))
		return
	}

	agent := r.ConsulClient.Agent()

	agent.ServiceRegister(&consulapi.AgentServiceRegistration{
		ID:   fmt.Sprintf("%s-haproxy", ClusterID),
		Name: "haproxy",
		Tags: []string{},
		Port: 5432, // TODO: Get write port from environment configuration.
		Check: &consulapi.AgentServiceCheck{
			HTTP:     fmt.Sprintf(`http://%s:%s@127.0.0.1:%s/health/ha_pb_pg`, rdpgdAdminUser, rdpgdAdminPass, rdpgdAdminPort),
			Interval: "10s",
			TTL:      "0s",
			Timeout:  "5s",
		},
	})

	agent.ServiceRegister(&consulapi.AgentServiceRegistration{
		ID:   fmt.Sprintf("%s-pgbouncer", ClusterID),
		Name: "pgbouncer",
		Tags: []string{},
		Port: 6432, // TODO: Get pgbouncer port from environment configuration.
		Check: &consulapi.AgentServiceCheck{
			HTTP:     fmt.Sprintf(`http://%s:%s@127.0.0.1:%s/health/pb`, rdpgdAdminUser, rdpgdAdminPass, rdpgdAdminPort),
			Interval: "10s",
			TTL:      "0s",
			Timeout:  "5s",
		},
	})

	agent.ServiceRegister(&consulapi.AgentServiceRegistration{
		ID:   fmt.Sprintf("%s-postgres", ClusterID),
		Name: "postgres",
		Tags: []string{},
		Port: 7432, // TODO: Get write port from environment configuration.
		Check: &consulapi.AgentServiceCheck{
			HTTP:     fmt.Sprintf(`http://%s:%s@127.0.0.1:%s/health/pg`, rdpgdAdminUser, rdpgdAdminPass, rdpgdAdminPort),
			Interval: "10s",
			TTL:      "0s",
			Timeout:  "5s",
		},
	})
	return
}
Example #30
func (r *RDPG) waitForBDRNodes() (err error) {
	log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#waitForBDRNodes() waiting for all BDR nodes to be joined...`, ClusterID))
	p := pg.NewPG(`127.0.0.1`, pgPort, `rdpg`, `rdpg`, pgPass)
	var db *sqlx.DB
	for {
		db, err = p.Connect()
		if err != nil {
			re := regexp.MustCompile("canceling statement due to conflict with recovery")
			if re.MatchString(err.Error()) {
				log.Error(fmt.Sprintf("rdpg.RDPG<%s>#waitForBDRNodes() p.Connect() (sleeping 2 seconds and trying again) ! %s", ClusterID, err))
				time.Sleep(2 * time.Second)
				continue // Sleep 2 seconds and try again...
			} else {
				log.Error(fmt.Sprintf("rdpg.RDPG<%s>#waitForBDRNodes() p.Connect() ! %s", ClusterID, err))
				return err
			}
		} else {
			break
		}
	}
	defer db.Close()

	for {
		nodes := []string{}
		sq := `SELECT node_name FROM bdr.bdr_nodes;`
		err = db.Select(&nodes, sq)
		if err != nil {
			if err == sql.ErrNoRows {
				log.Error(fmt.Sprintf("rdpg.RDPG<%s>#waitForBDRNodes() db.Select() %sq ! Sleeping 2 seconds and trying again.", ClusterID, sq))
				time.Sleep(2 * time.Second)
				continue
			}
			log.Error(fmt.Sprintf("rdpg.RDPG<%s>#waitForBDRNodes() db.Select() ! %s", ClusterID, err))
			return err
		}
		log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#waitForBDRNodes() nodes %+v`, ClusterID, nodes))
		switch ClusterID {
		case "rdpgmc":
			if len(nodes) > 2 {
				return nil
			}
		default: // rdpgsc*
			if len(nodes) > 1 {
				return nil
			}
		}
		time.Sleep(2 * time.Second)
	}
}