/* Should contain a form value dbname which equals the database name e.g. curl www.hostname.com/backup/now -X POST -d "dbname=nameofdatabase" The {how} should be either "now" or "enqueue" */ func BackupHandler(w http.ResponseWriter, request *http.Request) { vars := mux.Vars(request) dbname := request.FormValue("dbname") t := tasks.NewTask() t.Action = "BackupDatabase" t.Data = dbname t.Node = globals.MyIP t.Role = globals.ServiceRole t.TTL = 3600 t.ClusterService = globals.ClusterService t.NodeType = "read" if rdpgconsul.IsWriteNode(globals.MyIP) { t.NodeType = "write" } var err error if dbname != "rdpg" { //Using FindByDatabase to determine if the database actually exists to be backed up. inst, err := instances.FindByDatabase(dbname) if err != nil { log.Error(fmt.Sprintf("admin.BackupHandler() instances.FindByDatabase(%s) Error occurred when searching for database.", dbname)) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Error encountered while searching for database")) return } if inst == nil { //...then the database doesn't exist on this cluster. log.Debug(fmt.Sprintf("admin.BackupHandler() Attempt to initiate backup on non-existant database with name: %s", dbname)) w.WriteHeader(http.StatusNotFound) w.Write([]byte("Database not found")) return } } switch vars[`how`] { //Immediately calls Backup() and performs the backup case "now": err = t.BackupDatabase() if err != nil { log.Error(fmt.Sprintf(`api.BackupHandler() Task.BackupDatabase() %+v ! %s`, t, err)) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Error encountered while trying to perform backup")) return } w.Write([]byte("Backup completed.")) case "enqueue": // Queues up a backup to be done with a worker thread gets around to it. // This call returns after the queuing process is done; not after the backup is done. err = t.Enqueue() if err != nil { log.Error(fmt.Sprintf(`api.BackupHandler() Task.Enqueue() %+v ! 
%s`, t, err)) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Error while trying to queue")) return } w.Write([]byte("Backup successfully queued.")) default: w.WriteHeader(http.StatusNotFound) } }
func (t *Task) DecommissionDatabase(workRole string) (err error) { log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s)...`, t.Data)) i, err := instances.FindByDatabase(t.Data) if err != nil { log.Error(fmt.Sprintf("tasks.DecommissionDatabase(%s) instances.FindByDatabase() ! %s", i.Database, err)) return err } ips, err := i.ClusterIPs() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) i.ClusterIPs() ! %s`, i.Database, err)) return err } if len(ips) == 0 { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! No service cluster nodes found in Consul?!", i.Database)) return } p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass) db, err := p.Connect() if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) p.Connect(%s) ! %s", t.Data, p.URI, err)) return err } defer db.Close() switch workRole { case "manager": path := fmt.Sprintf(`databases/decommission/%s`, t.Data) url := fmt.Sprintf("http://%s:%s/%s", ips[0], os.Getenv("RDPGD_ADMIN_PORT"), path) req, err := http.NewRequest("DELETE", url, bytes.NewBuffer([]byte("{}"))) log.Trace(fmt.Sprintf(`tasks.Task#Decommission() > DELETE %s`, url)) //req.Header.Set("Content-Type", "application/json") // TODO: Retrieve from configuration in database. req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS")) httpClient := &http.Client{} _, err = httpClient.Do(req) if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err)) return err } // TODO: Is there anything we want to do on successful request? case "service": for _, ip := range ips { newTask := Task{ClusterID: ClusterID, Node: ip, Role: "all", Action: "Reconfigure", Data: "pgbouncer"} err = newTask.Enqueue() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! 
%s`, i.Database, err)) } } log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s) TODO: Here is where we finally decommission on the service cluster...`, i.Database)) return nil default: log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! Unknown work role: '%s' -> BUG!!!`, i.Database, workRole)) return nil } return }
// CleanupDatabase - Decommission the database sent as a paramenter. func CleanupDatabase(dbname string, clusterService string) (err error) { i, err := instances.FindByDatabase(dbname) if err != nil { log.Error(fmt.Sprintf("tasks.CleanupUnusedDatabases(%s) instances.FindByDatabase() ! %s", i.Database, err)) return err } ips, err := i.ClusterIPs() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) i.ClusterIPs() ! %s`, i.Database, err)) return err } if len(ips) == 0 { return fmt.Errorf("tasks.Task#DecommissionDatabase(%s) ! No service cluster nodes found in Consul", i.Database) } p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass) db, err := p.Connect() if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) p.Connect(%s) ! %s", dbname, p.URI, err)) return err } defer db.Close() if globals.ServiceRole == "service" { // In here we must do everything necessary to physically delete and clean up // the database from all service cluster nodes. for _, ip := range ips { // Schedule pgbouncer reconfigure on each cluster node. newTask := Task{ClusterID: ClusterID, Node: ip, Role: "all", Action: "Reconfigure", Data: "pgbouncer", NodeType: "any"} err = newTask.Enqueue() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#CleanupUnusedDatabases(%s) Reconfigure PGBouncer! %s`, i.Database, err)) } } log.Trace(fmt.Sprintf(`tasks.CleanupUnusedDatabases(%s) - Here is where we finally decommission on the service cluster...`, i.Database)) sq := fmt.Sprintf(`DELETE FROM tasks.tasks WHERE action='BackupDatabase' AND data='%s'`, i.Database) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! %s", i.Database, err)) } sq = fmt.Sprintf(`UPDATE tasks.schedules SET enabled = false WHERE action='BackupDatabase' AND data='%s'`, i.Database) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! 
%s", i.Database, err)) } if clusterService == "pgbdr" { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! Cannot cleanup BDR Servers", i.Database)) } else { p.DisableDatabase(i.Database) p.DropDatabase(i.Database) dbuser := "" sq = fmt.Sprintf(`SELECT dbuser FROM cfsb.instances WHERE dbname='%s' LIMIT 1`, i.Database) err = db.Get(&dbuser, sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! %s", i.Database, err)) } p.DropUser(dbuser) sq = fmt.Sprintf(`UPDATE cfsb.instances SET decommissioned_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! %s", i.Database, err)) } } // Notify management cluster that the instance has been decommissioned // Find management cluster API address client, err := consulapi.NewClient(consulapi.DefaultConfig()) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) consulapi.NewClient() ! %s", i.Database, err)) return err } catalog := client.Catalog() svcs, _, err := catalog.Service(`rdpgmc`, "", nil) if err != nil { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) consulapi.Client.Catalog() ! %s", i.Database, err)) return err } if len(svcs) == 0 { log.Error(fmt.Sprintf("tasks.Task#CleanupUnusedDatabases(%s) ! No services found, no known nodes?!", i.Database)) return err } //mgtAPIIPAddress := svcs[0].Address } return nil }
func (t *Task) ReconcileAvailableDatabases() (err error) { log.Trace(fmt.Sprintf(`tasks.ReconcileAvailableDatabases(%s)...`, t.Data)) client, err := consulapi.NewClient(consulapi.DefaultConfig()) if err != nil { log.Error(fmt.Sprintf("rdpg.newRDPG() consulapi.NewClient()! %s", err)) return } catalog := client.Catalog() svcs, _, err := catalog.Services(nil) if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() catalog.Service() ! %s", err)) return err } clusterInstances := []instances.Instance{} re := regexp.MustCompile(`^(rdpg(sc[0-9]+$))|(sc-([[:alnum:]|-])*m[0-9]+-c[0-9]+$)`) for key, _ := range svcs { if re.MatchString(key) { // Fetch list of available databases for each service cluster svcs, _, err := catalog.Service(key, "", nil) if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() catalog.Service() ! %s", err)) return err } log.Trace(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() svcs: %+v", svcs)) if len(svcs) == 0 { log.Error("tasks.Task#ReconcileAvailableDatabases() ! No services found, no known nodes?!") return err } url := fmt.Sprintf("http://%s:%s/%s", svcs[0].Address, os.Getenv("RDPGD_ADMIN_PORT"), `databases/available`) req, err := http.NewRequest("GET", url, bytes.NewBuffer([]byte("{}"))) log.Trace(fmt.Sprintf(`tasks.Task#ReconcileAvailableDatabases() > POST %s`, url)) //req.Header.Set("Content-Type", "application/json") // TODO: Retrieve from configuration in database. req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS")) httpClient := &http.Client{} resp, err := httpClient.Do(req) if err != nil { log.Error(fmt.Sprintf(`tasks.Task#ReconcileAvailableDatabases() httpClient.Do() %s ! %s`, url, err)) return err } body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() ! 
%s", err)) continue } is := []instances.Instance{} err = json.Unmarshal(body, &is) if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() ! %s json: %s", err, string(body))) continue } for _, i := range is { clusterInstances = append(clusterInstances, i) } } } p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass) db, err := p.Connect() if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() Failed connecting to %s err: %s", p.URI, err)) return err } defer db.Close() for index, _ := range clusterInstances { i, err := instances.FindByDatabase(clusterInstances[index].Database) if err != nil { log.Error(fmt.Sprintf("tasks.Task#ReconcileAvailableDatabases() Failed connecting to %s err: %s", p.URI, err)) return err } if i == nil { i = &clusterInstances[index] log.Trace(fmt.Sprintf(`tasks.Task#ReconcileAvailableDatabases() Reconciling database %s for cluster %s`, i.Database, i.ClusterID)) i.Register() } else { continue } } return }
//DecommissionDatabase - Remove targeted database specified in Data func (t *Task) DecommissionDatabase() (err error) { log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s)...`, t.Data)) i, err := instances.FindByDatabase(t.Data) if err != nil { log.Error(fmt.Sprintf("tasks.DecommissionDatabase(%s) instances.FindByDatabase() ! %s", i.Database, err)) return err } //TODO: Check if i == nil; i.e. if database doesn't exist ips, err := i.ClusterIPs() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) i.ClusterIPs() ! %s`, i.Database, err)) return err } if len(ips) == 0 { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! No service cluster nodes found in Consul?!", i.Database)) return } p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass) db, err := p.Connect() if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) p.Connect(%s) ! %s", t.Data, p.URI, err)) return err } defer db.Close() switch globals.ServiceRole { case "manager": path := fmt.Sprintf(`databases/decommission/%s`, t.Data) url := fmt.Sprintf("http://%s:%s/%s", ips[0], os.Getenv("RDPGD_ADMIN_PORT"), path) req, err := http.NewRequest("DELETE", url, bytes.NewBuffer([]byte("{}"))) log.Trace(fmt.Sprintf(`tasks.Task#Decommission() > DELETE %s`, url)) //req.Header.Set("Content-Type", "application/json") // TODO: Retrieve from configuration in database. req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS")) httpClient := &http.Client{} _, err = httpClient.Do(req) if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() %s ! %s`, i.Database, url, err)) return err } // TODO: Is there anything we want to do on successful request? case "service": // In here we must do everything necessary to physically delete and clean up // the database from all service cluster nodes. if err = t.BackupDatabase(); err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) t.BackupDatabase(%s) ! 
%s`, i.Database, err)) } else { for _, ip := range ips { // Schedule pgbouncer reconfigure on each cluster node. newTask := Task{ClusterID: ClusterID, Node: ip, Role: "all", Action: "Reconfigure", Data: "pgbouncer", NodeType: "any"} err = newTask.Enqueue() if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) Reconfigure PGBouncer! %s`, i.Database, err)) } } log.Trace(fmt.Sprintf(`tasks.DecommissionDatabase(%s) TODO: Here is where we finally decommission on the service cluster...`, i.Database)) client, err := consulapi.NewClient(consulapi.DefaultConfig()) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) consulapi.NewClient() ! %s", i.Database, err)) return err } // Lock Database Deletion via Consul Lock key := fmt.Sprintf(`rdpg/%s/database/existance/lock`, t.ClusterID) lo := &consulapi.LockOptions{ Key: key, SessionName: fmt.Sprintf(`rdpg/%s/databases/existance`, t.ClusterID), } log.Trace(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() Attempting to acquire database existance lock %s...`, t.ClusterID, key)) databaseCreateLock, err := client.LockOpts(lo) if err != nil { log.Error(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() LockKey() database/existance Lock Key %s ! %s`, t.ClusterID, key, err)) return err } databaseCreateLockCh, err := databaseCreateLock.Lock(nil) if err != nil { log.Error(fmt.Sprintf(`tasks.Task<%s>#DecommissionDatabase() Lock() database/existance lock %s ! %s`, t.ClusterID, key, err)) return err } if databaseCreateLockCh == nil { err := fmt.Errorf(`tasks.Task<%s>#DecommissionDatabase() database/existance Lock not aquired, halting Decommission!!!`, t.ClusterID) log.Error(err.Error()) return err } defer databaseCreateLock.Unlock() p := pg.NewPG(`127.0.0.1`, pbPort, `rdpg`, `rdpg`, pgPass) db, err := p.Connect() if err != nil { log.Error(fmt.Sprintf("instances.Decommission() p.Connect(%s) ! 
%s", p.URI, err)) return err } defer db.Close() sq := fmt.Sprintf(`DELETE FROM tasks.tasks WHERE action='BackupDatabase' AND data='%s'`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err)) } sq = fmt.Sprintf(`UPDATE tasks.schedules SET enabled = false WHERE action='BackupDatabase' AND data='%s'`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err)) } if t.ClusterService == "pgbdr" { b := bdr.NewBDR(ClusterID, client) b.DropDatabase(i.Database) dbuser := "" sq = fmt.Sprintf(`SELECT dbuser FROM cfsb.instances WHERE dbname='%s' LIMIT 1`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) err = db.Get(&dbuser, sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err)) } b.DropUser(dbuser) sq = fmt.Sprintf(`UPDATE cfsb.instances SET decommissioned_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err)) } } else { p.DisableDatabase(i.Database) p.DropDatabase(i.Database) dbuser := "" sq = fmt.Sprintf(`SELECT dbuser FROM cfsb.instances WHERE dbname='%s' LIMIT 1`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) err = db.Get(&dbuser, sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! 
%s", i.Database, err)) } p.DropUser(dbuser) sq = fmt.Sprintf(`UPDATE cfsb.instances SET decommissioned_at=CURRENT_TIMESTAMP WHERE dbname='%s'`, i.Database) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) SQL > %s`, i.Database, sq)) _, err = db.Exec(sq) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! %s", i.Database, err)) } } // Notify management cluster that the instance has been decommissioned // Find management cluster API address catalog := client.Catalog() svcs, _, err := catalog.Service(`rdpgmc`, "", nil) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) consulapi.Client.Catalog() ! %s", i.Database, err)) return err } if len(svcs) == 0 { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) ! No services found, no known nodes?!", i.Database)) return err } mgtAPIIPAddress := svcs[0].Address // Query the database for the decommissioned_at timestamp set timestamp := "" sq = fmt.Sprintf(`SELECT decommissioned_at::text FROM cfsb.instances WHERE dbname='%s' LIMIT 1;`, i.Database) db.Get(×tamp, sq) type decomm struct { Database string `json:"database"` Timestamp string `json:"timestamp"` } dc := decomm{Database: i.Database, Timestamp: timestamp} // Tell the management cluster (via admin api) about the timestamp. url := fmt.Sprintf("http://%s:%s/%s", mgtAPIIPAddress, os.Getenv("RDPGD_ADMIN_PORT"), `databases/decommissioned`) body, err := json.Marshal(dc) if err != nil { log.Error(fmt.Sprintf("tasks.Task#DecommissionDatabase(%s) json.Marchal(i) ! %s", i.Database, err)) return err } req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(body))) log.Trace(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) PUT %s body: %s`, i.Database, url, body)) req.SetBasicAuth(os.Getenv("RDPGD_ADMIN_USER"), os.Getenv("RDPGD_ADMIN_PASS")) httpClient := &http.Client{} resp, err := httpClient.Do(req) if err != nil { log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) httpClient.Do() PUT %s ! 
%s`, i.Database, url, err)) return err } resp.Body.Close() } return nil default: log.Error(fmt.Sprintf(`tasks.Task#DecommissionDatabase(%s) ! Unknown work role: '%s' -> BUG!!!`, i.Database, globals.ServiceRole)) return nil } return }
/*
DatabasesHandler serves the /databases/{action} admin API:

	GET    /databases            - list all database instances
	GET    /databases/available  - list available instances
	POST   /databases/register   - create a new instance record
	PUT    /databases/assign     - update (assign) an existing record
	DELETE /databases/decommission/{database} - decommission an instance

Errors are reported as a JSON body of the form
{"status": <code>, "description": "..."} with the matching HTTP status.
*/
func DatabasesHandler(w http.ResponseWriter, request *http.Request) {
	vars := mux.Vars(request)
	log.Trace(fmt.Sprintf("admin.DatabasesHandler() > %s /databases/%s %+v", request.Method, vars["action"], vars))
	switch request.Method {
	case "GET":
		switch vars["action"] {
		case "": // List All Databases
			// NOTE: this local shadows the instances package within the case.
			instances, err := instances.All()
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.All() %s %+v ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
				return
			}
			jsonInstances, err := json.Marshal(instances)
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): json.Marshal(instances) %s %+v ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
			} else {
				// Success: 200 with the JSON-encoded instance list.
				w.Header().Set("Content-Type", "application/json; charset=UTF-8")
				w.WriteHeader(http.StatusOK)
				w.Write(jsonInstances)
			}
		case "available": // Lists Available Databases
			instances, err := instances.Available()
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.Available() %s %+v ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
				return
			}
			jsonInstances, err := json.Marshal(instances)
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): json.Marshal(instances) %s %+v ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
			} else {
				w.Header().Set("Content-Type", "application/json; charset=UTF-8")
				w.WriteHeader(http.StatusOK)
				w.Write(jsonInstances)
			}
		default:
			msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`, http.StatusBadRequest, vars["action"])
			log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars))
			http.Error(w, msg, http.StatusBadRequest)
		}
	case `POST`:
		// Body is a JSON-encoded instances.Instance.
		var i instances.Instance
		decoder := json.NewDecoder(request.Body)
		err := decoder.Decode(&i)
		if err != nil {
			msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
			log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decoder.Decode() %s %s ! %s`, msg, vars, err))
			http.Error(w, msg, http.StatusInternalServerError)
			return
		}
		switch vars[`action`] {
		case `register`: // Creates a new record.
			err = i.Register()
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): Instance#Register() %s %s ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
				return
			} else {
				// Success: 200 with an empty JSON object.
				w.Header().Set(`Content-Type`, `application/json; charset=UTF-8`)
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(`{}`))
				return
			}
		default:
			msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`, http.StatusBadRequest, vars[`action`])
			log.Error(msg)
			http.Error(w, msg, http.StatusBadRequest)
		}
	case `PUT`:
		// Body is a JSON-encoded instances.Instance.
		var i instances.Instance
		decoder := json.NewDecoder(request.Body)
		err := decoder.Decode(&i)
		if err != nil {
			msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
			log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decoder.Decode() %s %s ! %s`, msg, vars, err))
			http.Error(w, msg, http.StatusInternalServerError)
			return
		}
		switch vars[`action`] {
		case `assign`: // updates an existing record.
			err = i.Assign()
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.Assign() %s %s ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
				return
			} else {
				w.Header().Set(`Content-Type`, `application/json; charset=UTF-8`)
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(`{}`))
				return
			}
		default:
			msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`, http.StatusBadRequest, vars[`action`])
			log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars))
			http.Error(w, msg, http.StatusBadRequest)
		}
	case `DELETE`:
		switch vars[`action`] {
		case `decommission`:
			i, err := instances.FindByDatabase(vars[`database`])
			if err != nil {
				msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`, http.StatusInternalServerError, err)
				log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instance.FindByDatabase() %s %s ! %s`, msg, vars, err))
				http.Error(w, msg, http.StatusInternalServerError)
				return
			} else {
				err = i.Decommission()
				if err != nil {
					msg := fmt.Sprintf(`{"status": %d, "description": "There was an error decommissioning the database (%s)"}`, http.StatusInternalServerError, err)
					log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instance#Decommission() %s %s ! %s`, msg, vars, err))
					http.Error(w, msg, http.StatusInternalServerError)
				}
				// On success no body is written; net/http sends an implicit 200.
			}
		default:
			msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`, http.StatusBadRequest, vars[`action`])
			log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars))
			http.Error(w, msg, http.StatusBadRequest)
		}
	default:
		msg := fmt.Sprintf(`{"status": %d, "description": "Method not allowed %s"}`, http.StatusMethodNotAllowed, request.Method)
		log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars))
		http.Error(w, msg, http.StatusMethodNotAllowed)
		return
	}
}
/* POST /databases/register PUT /databases/assign */ func DatabasesHandler(w http.ResponseWriter, request *http.Request) { vars := mux.Vars(request) log.Trace(fmt.Sprintf("admin.DatabasesHandler() > %s /databases/%s %+v", request.Method, vars["action"], vars)) switch request.Method { case "GET": switch vars["action"] { case "": // List All Databases instances, err := instances.All() if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.All() %s %+v ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } jsonInstances, err := json.Marshal(instances) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): json.Marshal(instances) %s %+v ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) } else { w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(http.StatusOK) w.Write(jsonInstances) } case "available": // Lists Available Databases instances, err := instances.Available() if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.Available() %s %+v ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } jsonInstances, err := json.Marshal(instances) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): json.Marshal(instances) %s %+v ! 
%s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) } else { w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(http.StatusOK) w.Write(jsonInstances) } default: msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`+"\n", http.StatusBadRequest, vars["action"]) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars)) http.Error(w, msg, http.StatusBadRequest) } case `POST`: var i instances.Instance decoder := json.NewDecoder(request.Body) err := decoder.Decode(&i) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decoder.Decode() %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } switch vars[`action`] { case `register`: err = i.Register() if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): Instance#Register() %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } else { w.Header().Set(`Content-Type`, `application/json; charset=UTF-8`) w.WriteHeader(http.StatusOK) w.Write([]byte(`{}`)) return } default: msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`+"\n", http.StatusBadRequest, vars[`action`]) log.Error(msg) http.Error(w, msg, http.StatusBadRequest) } case `PUT`: switch vars[`action`] { case `assign`: // updates an existing record. // PUT /databases/assign/database var i instances.Instance decoder := json.NewDecoder(request.Body) err := decoder.Decode(&i) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decoder.Decode() assign %s %s ! 
%s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } err = i.Assign() if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.Assign() %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } else { w.Header().Set(`Content-Type`, `application/json; charset=UTF-8`) w.WriteHeader(http.StatusOK) w.Write([]byte(`{}`)) return } case `decommissioned`: // updates an existing record to show it was deprovisioned. // PUT /databases/decommissioned // This is requested from service cluster to master cluster type decomm struct { Database string `json:"database"` Timestamp string `json:"timestamp"` } dc := decomm{} decoder := json.NewDecoder(request.Body) err := decoder.Decode(&dc) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decoder.Decode() decommissioned %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } if len(dc.Timestamp) < 1 { err = errors.New(`Timestamp query parameter assignment is required!`) msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): decommissioned %s ! %s`, msg, err)) http.Error(w, msg, http.StatusInternalServerError) return } i, err := instances.FindByDatabase(dc.Database) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.FindByDatabase(%s) %s ! 
%s`, dc.Database, msg, err)) http.Error(w, msg, http.StatusInternalServerError) return } err = i.DecommissionedAt(dc.Timestamp) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}"`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instances.DecommissionedAt() %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } else { w.Header().Set(`Content-Type`, `application/json; charset=UTF-8`) w.WriteHeader(http.StatusOK) w.Write([]byte(`{}`)) return } default: msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`+"\n", http.StatusBadRequest, vars[`action`]) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars)) http.Error(w, msg, http.StatusBadRequest) } case `DELETE`: switch vars[`action`] { case `decommission`: i, err := instances.FindByDatabase(vars[`database`]) if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "%s"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instance.FindByDatabase() %s %s ! %s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) return } else { err = i.Decommission() if err != nil { msg := fmt.Sprintf(`{"status": %d, "description": "There was an error decommissioning the database (%s)"}`+"\n", http.StatusInternalServerError, err) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): instance#Decommission() %s %s ! 
%s`, msg, vars, err)) http.Error(w, msg, http.StatusInternalServerError) } } default: msg := fmt.Sprintf(`{"status": %d, "description": "Invalid Action %s"}`+"\n", http.StatusBadRequest, vars[`action`]) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars)) http.Error(w, msg, http.StatusBadRequest) } default: msg := fmt.Sprintf(`{"status": %d, "description": "Method not allowed %s"}`+"\n", http.StatusMethodNotAllowed, request.Method) log.Error(fmt.Sprintf(`admin.DatabasesHandler(): %s %s`, msg, vars)) http.Error(w, msg, http.StatusMethodNotAllowed) return } }