Example #1
func (t *Task) bdrPrecreateDatabase(client *consulapi.Client) (err error) {
	/*
	   key := fmt.Sprintf(`rdpg/%s/cluster/service`, ClusterID)
	   kvp, _, err := kv.Get(key, nil)
	   if err != nil {
	   	log.Error(fmt.Sprintf(`rdpg.RDPG<%s>#bdrGroupCreate() kv.Get() ! %s`, ClusterID, err))
	   	return
	   }
	   v := ``
	   if kvp != nil {
	   	v = string(kvp.Value)
	   }
	   if len(v) > 0 {
	   	log.Trace(fmt.Sprintf(`rdpg.RDPG<%s>#bdrPrecreateDatabase()`, ClusterID))
	   	return
	   }
	*/
	b := bdr.NewBDR(t.ClusterID, client)
	re := regexp.MustCompile("[^A-Za-z0-9_]")
	u1 := uuid.NewUUID().String()
	u2 := uuid.NewUUID().String()
	identifier := strings.ToLower(string(re.ReplaceAll([]byte(u1), []byte(""))))
	dbpass := strings.ToLower(string(re.ReplaceAll([]byte(u2), []byte(""))))

	i := &instances.Instance{
		ClusterID:      ClusterID,
		Database:       "d" + identifier,
		User:           "******" + identifier,
		Pass:           dbpass,
		ClusterService: t.ClusterService,
	}
	// TODO: Keep the databases under rdpg schema, link to them in the
	// cfsb.instances table so that we separate the concerns of CF and databases.
	err = b.CreateUser(i.User, i.Pass)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabases(%s) CreateUser(%s) ! %s", i.Database, i.User, err))
		return err
	}

	err = b.CreateDatabase(i.Database, i.User)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabases(%s) CreateDatabase(%s,%s) ! %s", i.Database, i.Database, i.User, err))
		return err
	}

	p := pg.NewPG(`127.0.0.1`, pgPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Work() Failed connecting to %s err: %s", p.URI, err))
		return err
	}
	defer db.Close()

	sq := fmt.Sprintf(`INSERT INTO cfsb.instances (cluster_id,dbname, dbuser, dbpass, cluster_service) VALUES ('%s','%s','%s','%s','%s')`, ClusterID, i.Database, i.User, i.Pass, t.ClusterService)
	log.Trace(fmt.Sprintf(`tasks.bdrPrecreateDatabase(%s) > %s`, i.Database, sq))
	_, err = db.Query(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.bdrPrecreateDatabase(%s) ! %s`, i.Database, err))
		return err
	}

	err = b.CreateExtensions(i.Database, []string{`btree_gist`, `bdr`, `pg_stat_statements`, `uuid-ossp`, `hstore`, `pg_trgm`, `pgcrypto`})
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabases(%s) CreateExtensions(%s,%s) ! %s", i.Database, i.Database, i.User, err))
		return err
	}

	//Loop through and add any additional extensions specified in the rdpgd_service properties of the deployment manifest
	if len(globals.UserExtensions) > 1 {
		userExtensions := strings.Split(globals.UserExtensions, " ")
		err = b.CreateExtensions(i.Database, userExtensions)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabases(%s) CreateExtensions(%s,%s) Creating Extra User Extensions ! %s", i.Database, i.Database, i.User, err))
			return err
		}
	}

	err = b.CreateReplicationGroup(i.Database)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabases(%s) CreateReplicationGroup() ! %s", i.Database, err))
		return err
	}

	sq = fmt.Sprintf(`UPDATE cfsb.instances SET effective_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database)
	log.Trace(fmt.Sprintf(`tasks.bdrPrecreateDatabase(%s) > %s`, i.Database, sq))
	_, err = db.Query(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.bdrPrecreateDatabase(%s) ! %s`, i.Database, err))
		return err
	}

	// Tell the management cluster about the newly available database.
	// TODO: This can be in a function.
	catalog := client.Catalog()
	svcs, _, err := catalog.Service("rdpgmc", "", nil)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabase(%s) catalog.Service() ! %s", i.Database, err))
		return err
	}
	if len(svcs) == 0 {
		err = fmt.Errorf("tasks.Task#bdrPrecreateDatabase(%s) ! No services found, no known nodes?!", i.Database)
		log.Error(err.Error())
		return err
	}
	body, err := json.Marshal(i)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#bdrPrecreateDatabase(%s) json.Marchal(i) ! %s", i.Database, err))
		return err
	}
	url := fmt.Sprintf("http://%s:%s/%s", svcs[0].Address, os.Getenv("RDPGD_ADMIN_PORT"), `databases/register`)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(body)))
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task#bdrPrecreateDatabase(%s) http.NewRequest() POST %s ! %s`, i.Database, url, err))
		return err
	}
	log.Trace(fmt.Sprintf(`tasks.Task#bdrPrecreateDatabase(%s) POST %s`, i.Database, url))
	req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS"))
	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task#bdrPrecreateDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err))
		return err
	}
	resp.Body.Close()
	return
}
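
The companion function in Example #2 notes that the database creation lock is expected to be held while precreation runs, and the same applies here. A minimal sketch of a caller that takes that lock through Consul before invoking bdrPrecreateDatabase is shown below; the key layout mirrors the existance lock used in Example #4, and the wrapper name is an assumption, not part of the original package.

func (t *Task) precreateWithLock(client *consulapi.Client) error {
	// Assumed key: same existance lock convention as DecommissionDatabase (Example #4).
	key := fmt.Sprintf("rdpg/%s/database/existance/lock", t.ClusterID)
	lock, err := client.LockKey(key)
	if err != nil {
		return fmt.Errorf("acquiring lock %s: %s", key, err)
	}
	lockCh, err := lock.Lock(nil)
	if err != nil {
		return fmt.Errorf("locking %s: %s", key, err)
	}
	if lockCh == nil {
		return fmt.Errorf("lock %s not acquired by this node", key)
	}
	defer lock.Unlock()

	// Run the precreate step while the cluster-wide lock is held.
	return t.bdrPrecreateDatabase(client)
}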
Example #2
// Precreate database functionality. Note that the database creation lock is
// expected to be held when this is called, since creation must be done in sequence.
func (t *Task) precreateDatabase(workRole string, client *consulapi.Client) (err error) {
	log.Trace(fmt.Sprintf("tasks.Task#precreateDatabase()..."))

	b := bdr.NewBDR(t.ClusterID, client)
	re := regexp.MustCompile("[^A-Za-z0-9_]")
	u1 := uuid.NewUUID().String()
	u2 := uuid.NewUUID().String()
	identifier := strings.ToLower(string(re.ReplaceAll([]byte(u1), []byte(""))))
	dbpass := strings.ToLower(string(re.ReplaceAll([]byte(u2), []byte(""))))

	i := &instances.Instance{
		ClusterID: ClusterID,
		Database:  "d" + identifier,
		User:      "******" + identifier,
		Pass:      dbpass,
	}
	// TODO: Keep the databases under rdpg schema, link to them in the
	// cfsb.instances table so that we separate the concerns of CF and databases.
	err = b.CreateUser(i.User, i.Pass)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#PrecreateDatabases(%s) CreateUser(%s) ! %s", i.Database, i.User, err))
		return err
	}

	err = b.CreateDatabase(i.Database, i.User)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#PrecreateDatabases(%s) CreateDatabase(%s,%s) ! %s", i.Database, i.Database, i.User, err))
		return err
	}

	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Work() Failed connecting to %s err: %s", p.URI, err))
		return err
	}
	defer db.Close()

	sq := fmt.Sprintf(`INSERT INTO cfsb.instances (cluster_id,dbname, dbuser, dbpass) VALUES ('%s','%s','%s','%s')`, ClusterID, i.Database, i.User, i.Pass)
	log.Trace(fmt.Sprintf(`tasks.precreateDatabase(%s) > %s`, i.Database, sq))
	_, err = db.Query(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.precreateDatabase(%s) ! %s`, i.Database, err))
		return err
	}

	err = b.CreateExtensions(i.Database, []string{`btree_gist`, `bdr`})
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#PrecreateDatabases(%s) CreateExtensions(%s,%s) ! %s", i.Database, i.Database, i.User, err))
		return err
	}

	err = b.CreateReplicationGroup(i.Database)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#PrecreateDatabases(%s) CreateReplicationGroup() ! %s", i.Database, err))
		return err
	}

	sq = fmt.Sprintf(`UPDATE cfsb.instances SET effective_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database)
	log.Trace(fmt.Sprintf(`tasks.precreateDatabase(%s) > %s`, i.Database, sq))
	_, err = db.Query(sq)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.precreateDatabase(%s) ! %s`, i.Database, err))
		return err
	}

	// Tell the management cluster about the newly available database.
	// TODO: This can be in a function.
	catalog := client.Catalog()
	svcs, _, err := catalog.Service("rdpgmc", "", nil)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#precreateDatabase(%s) catalog.Service() ! %s", i.Database, err))
		return err
	}
	if len(svcs) == 0 {
		err = fmt.Errorf("tasks.Task#precreateDatabase(%s) ! No services found, no known nodes?!", i.Database)
		log.Error(err.Error())
		return err
	}
	body, err := json.Marshal(i)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#precreateDatabase(%s) json.Marchal(i) ! %s", i.Database, err))
		return err
	}
	url := fmt.Sprintf("http://%s:%s/%s", svcs[0].Address, os.Getenv("RDPGD_ADMIN_PORT"), `databases/register`)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(body)))
	log.Trace(fmt.Sprintf(`tasks.Task#precreateDatabase(%s) POST %s`, i.Database, url))
	// req.Header.Set("Content-Type", "application/json")
	// TODO: Retrieve from configuration in database.
	req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS"))
	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task#precreateDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err))
		return err
	}
	resp.Body.Close()
	return
}
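
Both precreate variants build their INSERT statements with fmt.Sprintf string interpolation. The generated identifiers are UUID-derived, so this is not exploitable here, but a sketch of the same insert using bind parameters follows; it assumes the handle returned by p.Connect() exposes the standard database/sql Exec method (which the db.Select and db.Get calls elsewhere in the package suggest) and that the driver accepts PostgreSQL positional placeholders.

	// Same insert as in precreateDatabase, with the values passed as
	// parameters so the driver handles quoting.
	sq := `INSERT INTO cfsb.instances (cluster_id, dbname, dbuser, dbpass) VALUES ($1, $2, $3, $4)`
	if _, err := db.Exec(sq, ClusterID, i.Database, i.User, i.Pass); err != nil {
		log.Error(fmt.Sprintf(`tasks.precreateDatabase(%s) ! %s`, i.Database, err))
		return err
	}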
Example #3
// TODO: This should be remove database
func (t *Task) RemoveDatabase(workRole string) (err error) {
	// For now we assume data is simply the database name.
	key := fmt.Sprintf("rdpg/%s/work/databases/remove", os.Getenv(`RDPGD_CLUSTER`))
	client, _ := consulapi.NewClient(consulapi.DefaultConfig())
	lock, err := client.LockKey(key)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() Error aquiring lock ! %s", err))
		return
	}
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() Error aquiring lock ! %s", err))
		return
	}
	if leaderCh == nil {
		log.Trace(fmt.Sprintf("tasks.Task#RemoveDatabase() > Not Leader."))
		return
	}
	log.Trace(fmt.Sprintf("tasks.Task#RemoveDatabase() > Leader."))

	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() p.Connect() ! %s", err))
		return
	}
	ids := []string{}
	sq := fmt.Sprintf(`SELECT instance_id from cfsb.instances WHERE ineffective_at IS NOT NULL AND ineffective_at < CURRENT_TIMESTAMP AND decommissioned_at IS NULL`)
	err = db.Select(&ids, sq)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() Querying for Databases to Cleanup ! %s", err))
	}
	db.Close()
	for _, id := range ids {
		db, err := p.Connect()
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() p.Connect() ! %s", err))
			return err
		}

		uri := "postgres://"
		b := bdr.NewBDR(uri, client)

		i, err := instances.FindByInstanceID(id)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() FindByInstanceID(%s) ! %s", id, err))
			db.Close()
			continue
		}

		err = b.DisableDatabase(i.Database)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() DisableDatabase(%s) for %s ! %s", i.Database, i.InstanceID, err))
			db.Close()
			continue
		}

		err = b.BackupDatabase(i.Database)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() BackupDatabase(%s) ! %s", i.Database, err))
			db.Close()
			continue
		}

		// Question, How do we "stop" the replication group before dropping the database?
		err = b.DropDatabase(i.Database)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() DropDatabase(%s) for %s ! %s", i.Database, i.InstanceID, err))
			db.Close()
			continue
		}

		err = b.DropUser(i.User)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() DropUser(%s) for %s ! %s", i.User, i.InstanceID, err))
			db.Close()
			continue
		}

		err = b.DropDatabase(i.Database)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() DropDatabase(%s) for %s ! %s", i.Database, i.InstanceID, err))
			db.Close()
			continue
		}
	}
	db.Close()

	return
}
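
Worth noting in Example #3: the Consul lock taken at the top of RemoveDatabase is never explicitly released, so the underlying session only goes away when it expires or the process exits. If releasing leadership once the cleanup pass finishes is the desired behavior (an assumption, since the original may intend to keep holding it), the usual pattern is a deferred unlock right after leadership is confirmed:

	leaderCh, err := lock.Lock(nil)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#RemoveDatabase() Error acquiring lock ! %s", err))
		return
	}
	if leaderCh == nil {
		log.Trace("tasks.Task#RemoveDatabase() > Not Leader.")
		return
	}
	// Release the lock (and leadership) when the cleanup pass finishes.
	defer lock.Unlock()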
Example #4
//DecommissionDatabase - Remove targeted database specified in Data
func (t *Task) DecommissionDatabase() (err error) {
	log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s)...`, t.Data))

	i, err := instances.FindByDatabase(t.Data)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.DecommissionDatabase(%s) instances.FindByDatabase() ! %s", t.Data, err))
		return err
	}
	//TODO: Check if i == nil; i.e. if database doesn't exist

	ips, err := i.ClusterIPs()
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) i.ClusterIPs() ! %s`, i.Database, err))
		return err
	}
	if len(ips) == 0 {
		err = fmt.Errorf("tasks.Task#DecommissionDatabase(%s) ! No service cluster nodes found in Consul?!", i.Database)
		log.Error(err.Error())
		return err
	}
	p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
	db, err := p.Connect()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) p.Connect(%s) ! %s", t.Data, p.URI, err))
		return err
	}
	defer db.Close()

	switch globals.ServiceRole {
	case "manager":
		path := fmt.Sprintf(`databases/decommission/%s`, t.Data)
		url := fmt.Sprintf("http://%s:%s/%s", ips[0], os.Getenv("RDPGD_ADMIN_PORT"), path)
		req, err := http.NewRequest("DELETE", url, bytes.NewBuffer([]byte("{}")))
		log.Trace(fmt.Sprintf(`tasks.Task#Decommission() > DELETE %s`, url))
		//req.Header.Set("Content-Type", "application/json")
		// TODO: Retrieve from configuration in database.
		req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS"))
		httpClient := &http.Client{}
		_, err = httpClient.Do(req)
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err))
			return err
		}
		// TODO: Is there anything we want to do on successful request?
	case "service":
		// In here we must do everything necessary to physically delete and clean up
		// the database from all service cluster nodes.
		if err = t.BackupDatabase(); err != nil {
			log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) t.BackupDatabase() ! %s`, i.Database, err))
		} else {
			for _, ip := range ips { // Schedule pgbouncer reconfigure on each cluster node.
				newTask := Task{ClusterID: ClusterID, Node: ip, Role: "all", Action: "Reconfigure", Data: "pgbouncer", NodeType: "any"}
				err = newTask.Enqueue()
				if err != nil {
					log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) Reconfigure PGBouncer! %s`, i.Database, err))
				}
			}
			log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s) TODO: Here is where we finally decommission on the service cluster...`, i.Database))

			client, err := consulapi.NewClient(consulapi.DefaultConfig())
			if err != nil {
				log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) consulapi.NewClient() ! %s", i.Database, err))
				return err
			}

			// Lock Database Deletion via Consul Lock
			key := fmt.Sprintf(`rdpg/%s/database/existance/lock`, t.ClusterID)
			lo := &consulapi.LockOptions{
				Key:         key,
				SessionName: fmt.Sprintf(`rdpg/%s/databases/existance`, t.ClusterID),
			}
			log.Trace(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() Attempting to acquire database existance lock %s...`, t.ClusterID, key))
			databaseCreateLock, err := client.LockOpts(lo)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() LockKey() database/existance Lock Key %s ! %s`, t.ClusterID, key, err))
				return err
			}
			databaseCreateLockCh, err := databaseCreateLock.Lock(nil)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() Lock() database/existance lock %s ! %s`, t.ClusterID, key, err))
				return err
			}
			if databaseCreateLockCh == nil {
				err := fmt.Errorf(`tasks.Task<%s>#DecommissionDatabase() database/existance Lock not aquired, halting Decommission!!!`, t.ClusterID)
				log.Error(err.Error())
				return err
			}
			defer databaseCreateLock.Unlock()

			p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass)
			db, err := p.Connect()
			if err != nil {
				log.Error(fmt.Sprintf("instances.Decommission() p.Connect(%s) ! %s", p.URI, err))
				return err
			}
			defer db.Close()

			sq := fmt.Sprintf(`DELETE FROM tasks.tasks WHERE action='BackupDatabase' AND data='%s'`, i.Database)
			log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
			_, err = db.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
			}
			sq = fmt.Sprintf(`UPDATE tasks.schedules SET enabled = false WHERE action='BackupDatabase' AND data='%s'`, i.Database)
			log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
			_, err = db.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
			}

			if t.ClusterService == "pgbdr" {
				b := bdr.NewBDR(ClusterID, client)
				b.DropDatabase(i.Database)

				dbuser := ""
				sq = fmt.Sprintf(`SELECT dbuser FROM cfsb.instances WHERE dbname='%s' LIMIT 1`, i.Database)
				log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
				err = db.Get(&dbuser, sq)
				if err != nil {
					log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
				}
				b.DropUser(dbuser)

				sq = fmt.Sprintf(`UPDATE cfsb.instances SET decommissioned_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database)
				log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
				_, err = db.Exec(sq)
				if err != nil {
					log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
				}

			} else {
				p.DisableDatabase(i.Database)
				p.DropDatabase(i.Database)

				dbuser := ""
				sq = fmt.Sprintf(`SELECT dbuser FROM cfsb.instances WHERE dbname='%s' LIMIT 1`, i.Database)
				log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
				err = db.Get(&dbuser, sq)
				if err != nil {
					log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
				}
				p.DropUser(dbuser)

				sq = fmt.Sprintf(`UPDATE cfsb.instances SET decommissioned_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database)
				log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq))
				_, err = db.Exec(sq)
				if err != nil {
					log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err))
				}

			}

			// Notify management cluster that the instance has been decommissioned
			// Find management cluster API address
			catalog := client.Catalog()
			svcs, _, err := catalog.Service(`rdpgmc`, "", nil)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) consulapi.Client.Catalog() ! %s", i.Database, err))
				return err
			}
			if len(svcs) == 0 {
				err = fmt.Errorf("tasks.Task#DecommissionDatabase(%s) ! No services found, no known nodes?!", i.Database)
				log.Error(err.Error())
				return err
			}
			mgtAPIIPAddress := svcs[0].Address

			// Query the database for the decommissioned_at timestamp set
			timestamp := ""
			sq = fmt.Sprintf(`SELECT decommissioned_at::text FROM cfsb.instances WHERE dbname='%s' LIMIT 1;`, i.Database)
			db.Get(&timestamp, sq)

			type decomm struct {
				Database  string `json:"database"`
				Timestamp string `json:"timestamp"`
			}
			dc := decomm{Database: i.Database, Timestamp: timestamp}
			// Tell the management cluster (via admin api) about the timestamp.
			url := fmt.Sprintf("http://%s:%s/%s", mgtAPIIPAddress, os.Getenv("RDPGD_ADMIN_PORT"), `databases/decommissioned`)
			body, err := json.Marshal(dc)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) json.Marchal(i) ! %s", i.Database, err))
				return err
			}
			req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(body)))
			log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) PUT %s body: %s`, i.Database, url, body))
			req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS"))
			httpClient := &http.Client{}
			resp, err := httpClient.Do(req)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() PUT %s ! %s`, i.Database, url, err))
				return err
			}
			resp.Body.Close()
		}
		return nil
	default:
		log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! Unknown work role: '%s' -> BUG!!!`, i.Database, globals.ServiceRole))
		return nil
	}
	return
}
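
For context on how Example #4 gets triggered: DecommissionDatabase reads the target database name from t.Data, and the package enqueues work through a Task literal and its Enqueue() method, as seen with the pgbouncer Reconfigure tasks above. A hedged sketch of a producer scheduling a decommission follows; the Node, Role, and NodeType values and the sample database name are assumptions for illustration only.

	// Hypothetical producer: queue a decommission for one database, carrying
	// the database name in Data just as DecommissionDatabase expects.
	task := Task{
		ClusterID: ClusterID,
		Node:      "10.0.0.10", // assumed target node IP, as in the Reconfigure example
		Role:      "all",
		Action:    "DecommissionDatabase",
		Data:      "d0123456789abcdef",
		NodeType:  "any",
	}
	if err := task.Enqueue(); err != nil {
		log.Error(fmt.Sprintf("DecommissionDatabase enqueue ! %s", err))
	}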