func (tk *TestKeeper) GetKeeperInfo(timeout time.Duration) (*cluster.KeeperInfo, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/info", net.JoinHostPort(tk.listenAddress, tk.port)), nil) if err != nil { return nil, err } var data cluster.KeeperInfo err = httpDo(ctx, req, nil, func(resp *http.Response, err error) error { if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("http error code: %d, error: %s", resp.StatusCode, resp.Status) } if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { return err } return nil }) if err != nil { return nil, err } return &data, nil }
func (e *EtcdManager) SetKeeperDiscoveryInfo(id string, ms *cluster.KeeperDiscoveryInfo) (*etcd.Response, error) { msj, err := json.Marshal(ms) if err != nil { return nil, err } return e.kAPI.Set(context.Background(), filepath.Join(e.etcdPath, keepersDiscoveryInfoDir, id), string(msj), nil) }
// AtomicDelete deletes a value at "key" if the key // has not been modified in the meantime, throws an // error if this is the case func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) { if previous == nil { return false, store.ErrPreviousNotSpecified } delOpts := &etcd.DeleteOptions{} if previous != nil { delOpts.PrevIndex = previous.LastIndex if previous.Value != nil { delOpts.PrevValue = string(previous.Value) } } _, err := s.client.Delete(context.Background(), s.normalize(key), delOpts) if err != nil { if etcdError, ok := err.(etcd.Error); ok { // Key Not Found if etcdError.Code == etcd.ErrorCodeKeyNotFound { return false, store.ErrKeyNotFound } // Compare failed if etcdError.Code == etcd.ErrorCodeTestFailed { return false, store.ErrKeyModified } } return false, err } return true, nil }
func (s *Sentinel) Start() { endCh := make(chan struct{}) endApiCh := make(chan error) router := s.NewRouter() go func() { endApiCh <- http.ListenAndServe(fmt.Sprintf("%s:%s", s.listenAddress, s.port), router) }() ctx, cancel := context.WithCancel(context.Background()) timerCh := time.NewTimer(0).C for true { select { case <-s.stop: log.Debugf("stopping stolon sentinel") cancel() s.end <- true return case <-timerCh: go func() { s.clusterSentinelCheck(ctx) endCh <- struct{}{} }() case <-endCh: timerCh = time.NewTimer(s.clusterConfig.SleepInterval).C case err := <-endApiCh: if err != nil { log.Fatal("ListenAndServe: ", err) } close(s.stop) } } }
func (p *Manager) GetRoleFromDB() (common.Role, error) { db, err := sql.Open("postgres", p.connString) if err != nil { return 0, err } defer db.Close() ctx, cancel := context.WithTimeout(context.Background(), p.requestTimeout) rows, err := Query(ctx, db, "SELECT pg_is_in_recovery from pg_is_in_recovery()") cancel() if err != nil { return 0, err } defer rows.Close() for rows.Next() { var isInRecovery bool if err := rows.Scan(&isInRecovery); err != nil { return 0, err } if isInRecovery { return common.StandbyRole, nil } return common.MasterRole, nil } return 0, fmt.Errorf("cannot get pg role from db: no rows returned") }
func (p *Manager) GetReplicatinSlots() ([]string, error) { db, err := sql.Open("postgres", p.connString) if err != nil { return nil, err } defer db.Close() replSlots := []string{} ctx, cancel := context.WithTimeout(context.Background(), p.requestTimeout) rows, err := Query(ctx, db, "SELECT slot_name from pg_replication_slots") cancel() if err != nil { return nil, err } defer rows.Close() for rows.Next() { var slotName string if err := rows.Scan(&slotName); err != nil { return nil, err } replSlots = append(replSlots, slotName) } return replSlots, nil }
// List child nodes of a given directory func (s *Etcd) List(directory string) ([]*store.KVPair, error) { getOpts := &etcd.GetOptions{ Quorum: true, Recursive: true, Sort: true, } resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts) if err != nil { if keyNotFound(err) { return nil, store.ErrKeyNotFound } return nil, err } kv := []*store.KVPair{} for _, n := range resp.Node.Nodes { kv = append(kv, &store.KVPair{ Key: n.Key, Value: []byte(n.Value), LastIndex: n.ModifiedIndex, }) } return kv, nil }
func (e *EtcdManager) SetLeaderSentinelInfo(si *cluster.SentinelInfo, ttl time.Duration) (*etcd.Response, error) { sij, err := json.Marshal(si) if err != nil { return nil, err } opts := &etcd.SetOptions{TTL: ttl} return e.kAPI.Set(context.Background(), filepath.Join(e.etcdPath, leaderSentinelInfoFile), string(sij), opts) }
func (e *EtcdManager) SetProxyInfo(pi *cluster.ProxyInfo, ttl time.Duration) (*etcd.Response, error) { pij, err := json.Marshal(pi) if err != nil { return nil, err } opts := &etcd.SetOptions{TTL: ttl} return e.kAPI.Set(context.Background(), filepath.Join(e.etcdPath, proxiesInfoDir, pi.ID), string(pij), opts) }
func (e *EtcdManager) SetClusterConfig(cfg *cluster.Config) (*etcd.Response, error) { cfgj, err := json.Marshal(cfg) if err != nil { return nil, err } path := filepath.Join(e.etcdPath, configFile) opts := &etcd.SetOptions{} return e.kAPI.Set(context.Background(), path, string(cfgj), opts) }
// getLeaderEndpoint returns the first advertised client URL of the current
// etcd cluster leader, as reported by the members API.
//
// NOTE(review): leader.ClientURLs[0] panics if the leader advertises no
// client URLs — verify the members API guarantees a non-empty list.
func (c *httpClusterClient) getLeaderEndpoint() (string, error) { mAPI := NewMembersAPI(c) leader, err := mAPI.Leader(context.Background()) if err != nil { return "", err } return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? }
func (te *TestEtcd) GetEtcdNode(timeout time.Duration, path string) (*etcd.Node, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() res, err := te.kAPI.Get(ctx, path, &etcd.GetOptions{Quorum: true}) if err != nil { return nil, err } return res.Node, nil }
// pgStateHandler serves the current PostgresState as JSON over http.
// It reports Initialized=false when the data directory is not initialized;
// otherwise it queries the local instance for its state and, when
// TimelineID > 1, its timeline history, each bounded by the configured
// request timeout. Errors are reported as a bare 500 status.
func (p *PostgresKeeper) pgStateHandler(w http.ResponseWriter, req *http.Request) {
	pgState := &cluster.PostgresState{}

	// Serialize against concurrent cluster-view updates.
	p.cvMutex.Lock()
	defer p.cvMutex.Unlock()

	initialized, err := p.pgm.IsInitialized()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if !initialized {
		pgState.Initialized = false
	} else {
		var err error
		ctx, cancel := context.WithTimeout(context.Background(), p.clusterConfig.RequestTimeout)
		// Replaces the empty pgState with the instance-reported one.
		pgState, err = pg.GetPGState(ctx, p.getOurReplConnString())
		cancel()
		if err != nil {
			log.Errorf("error getting pg state: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		pgState.Initialized = true

		// if timeline <= 1 then no timeline history file exists.
		pgState.TimelinesHistory = cluster.PostgresTimeLinesHistory{}
		if pgState.TimelineID > 1 {
			// Fresh timeout for the second query.
			ctx, cancel = context.WithTimeout(context.Background(), p.clusterConfig.RequestTimeout)
			tlsh, err := pg.GetTimelinesHistory(ctx, pgState.TimelineID, p.getOurReplConnString())
			cancel()
			if err != nil {
				log.Errorf("error getting timeline history: %v", err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
			pgState.TimelinesHistory = tlsh
		}
	}
	if err := json.NewEncoder(w).Encode(&pgState); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// DeleteTree deletes a range of keys under a given directory func (s *Etcd) DeleteTree(directory string) error { delOpts := &etcd.DeleteOptions{ Recursive: true, } _, err := s.client.Delete(context.Background(), s.normalize(directory), delOpts) if keyNotFound(err) { return store.ErrKeyNotFound } return err }
// Delete a value at "key" func (s *Etcd) Delete(key string) error { opts := &etcd.DeleteOptions{ Recursive: false, } _, err := s.client.Delete(context.Background(), s.normalize(key), opts) if keyNotFound(err) { return store.ErrKeyNotFound } return err }
func (p *Manager) DropReplicationSlot(name string) error { db, err := sql.Open("postgres", p.connString) if err != nil { return err } defer db.Close() ctx, cancel := context.WithTimeout(context.Background(), p.requestTimeout) _, err = Exec(ctx, db, fmt.Sprintf("select pg_drop_replication_slot('%s')", name)) cancel() return err }
// Put a value at "key" func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error { setOpts := &etcd.SetOptions{} // Set options if opts != nil { setOpts.Dir = opts.IsDir setOpts.TTL = opts.TTL } _, err := s.client.Set(context.Background(), s.normalize(key), string(value), setOpts) return err }
// CreateReplUser creates the replication user with the configured name and
// password on the local instance, bounded by the request timeout.
//
// NOTE(review): replUser and replPassword are interpolated directly into
// the DDL statement; a password containing a single quote would break the
// query — consider proper literal quoting.
func (p *Manager) CreateReplUser() error { db, err := sql.Open("postgres", p.connString) if err != nil { return err } defer db.Close() ctx, cancel := context.WithTimeout(context.Background(), p.requestTimeout) _, err = Exec(ctx, db, fmt.Sprintf(`CREATE USER "%s" WITH REPLICATION ENCRYPTED PASSWORD '%s';`, p.replUser, p.replPassword)) cancel() return err }
func (p *PostgresKeeper) Start() { endSMCh := make(chan struct{}) endPgStatecheckerCh := make(chan struct{}) endApiCh := make(chan error) err := p.loadCVVersion() if err != nil { p.end <- err return } p.pgm.Stop(true) http.HandleFunc("/info", p.infoHandler) http.HandleFunc("/pgstate", p.pgStateHandler) go func() { endApiCh <- http.ListenAndServe(fmt.Sprintf("%s:%s", p.listenAddress, p.port), nil) }() ctx, cancel := context.WithCancel(context.Background()) smTimerCh := time.NewTimer(0).C updatePGStateTimerCh := time.NewTimer(0).C for true { select { case <-p.stop: log.Debugf("stopping postgres keeper") cancel() p.pgm.Stop(true) p.end <- nil return case <-smTimerCh: go func() { p.postgresKeeperSM(ctx) endSMCh <- struct{}{} }() case <-endSMCh: smTimerCh = time.NewTimer(p.clusterConfig.SleepInterval).C case <-updatePGStateTimerCh: go func() { p.updatePGState(ctx) endPgStatecheckerCh <- struct{}{} }() case <-endPgStatecheckerCh: updatePGStateTimerCh = time.NewTimer(p.clusterConfig.SleepInterval).C case err := <-endApiCh: if err != nil { log.Fatal("ListenAndServe: ", err) } close(p.stop) } } }
func (e *EtcdManager) GetClusterConfig() (*cluster.Config, *etcd.Response, error) { path := filepath.Join(e.etcdPath, configFile) res, err := e.kAPI.Get(context.Background(), path, &etcd.GetOptions{Quorum: true}) if err != nil && !IsEtcdNotFound(err) { log.Errorf("err: %v", err) return nil, nil, err } else if !IsEtcdNotFound(err) { cfg, err := cluster.ParseConfig([]byte(res.Node.Value)) if err != nil { return nil, nil, err } return cfg, res, nil } return cluster.NewDefaultConfig(), res, nil }
func (e *EtcdManager) GetClusterData() (*cluster.ClusterData, *etcd.Response, error) { var cd *cluster.ClusterData path := filepath.Join(e.etcdPath, clusterDataFile) res, err := e.kAPI.Get(context.Background(), path, &etcd.GetOptions{Quorum: true}) if err != nil && !IsEtcdNotFound(err) { return nil, nil, err } else if !IsEtcdNotFound(err) { err = json.Unmarshal([]byte(res.Node.Value), &cd) if err != nil { return nil, nil, err } return cd, res, nil } return nil, nil, nil }
func (e *EtcdManager) GetLeaderSentinelInfo() (*cluster.SentinelInfo, *etcd.Response, error) { var si *cluster.SentinelInfo path := filepath.Join(e.etcdPath, leaderSentinelInfoFile) res, err := e.kAPI.Get(context.Background(), path, &etcd.GetOptions{Quorum: true}) if err != nil && !IsEtcdNotFound(err) { return nil, nil, err } else if !IsEtcdNotFound(err) { err = json.Unmarshal([]byte(res.Node.Value), &si) if err != nil { return nil, nil, err } return si, res, nil } return nil, nil, nil }
// Unlock the "key". Calling unlock while // not holding the lock will throw an error func (l *etcdLock) Unlock() error { if l.stopLock != nil { l.stopLock <- struct{}{} } if l.last != nil { delOpts := &etcd.DeleteOptions{ PrevIndex: l.last.Node.ModifiedIndex, } _, err := l.client.Delete(context.Background(), l.key, delOpts) if err != nil { return err } } return nil }
func (e *EtcdManager) GetProxyView() (*cluster.ProxyView, *etcd.Response, error) { var pv *cluster.ProxyView path := filepath.Join(e.etcdPath, proxyViewFile) res, err := e.kAPI.Get(context.Background(), path, &etcd.GetOptions{Quorum: true}) if err != nil && !IsEtcdNotFound(err) { log.Errorf("err: %v", err) return nil, nil, err } else if !IsEtcdNotFound(err) { err = json.Unmarshal([]byte(res.Node.Value), &pv) if err != nil { return nil, nil, err } return pv, res, nil } return nil, nil, nil }
// WaitLock simply waits for the key to be available for creation func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan bool, free chan<- bool) { opts := &etcd.WatcherOptions{Recursive: false} watcher := l.client.Watcher(key, opts) for { event, err := watcher.Next(context.Background()) if err != nil { errorCh <- err return } if event.Action == "delete" || event.Action == "expire" { free <- true return } } }
// AtomicPut puts a value at "key" if the key has not been // modified in the meantime, throws an error if this is the case func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { var ( meta *etcd.Response err error ) setOpts := &etcd.SetOptions{} if previous != nil { setOpts.PrevExist = etcd.PrevExist setOpts.PrevIndex = previous.LastIndex if previous.Value != nil { setOpts.PrevValue = string(previous.Value) } } else { setOpts.PrevExist = etcd.PrevNoExist } if opts != nil { if opts.TTL > 0 { setOpts.TTL = opts.TTL } } meta, err = s.client.Set(context.Background(), s.normalize(key), string(value), setOpts) if err != nil { if etcdError, ok := err.(etcd.Error); ok { // Compare failed if etcdError.Code == etcd.ErrorCodeTestFailed { return false, nil, store.ErrKeyModified } // Node exists error (when PrevNoExist) if etcdError.Code == etcd.ErrorCodeNodeExist { return false, nil, store.ErrKeyExists } } return false, nil, err } updated := &store.KVPair{ Key: key, Value: value, LastIndex: meta.Node.ModifiedIndex, } return true, updated, nil }
func (e *EtcdManager) SetProxyView(pv *cluster.ProxyView, prevIndex uint64) (*etcd.Response, error) { log.Debugf("prevIndex: %d", prevIndex) // write cluster view pvj, err := json.Marshal(pv) if err != nil { return nil, err } path := filepath.Join(e.etcdPath, proxyViewFile) opts := &etcd.SetOptions{} if prevIndex == 0 { opts.PrevExist = etcd.PrevNoExist } else { opts.PrevExist = etcd.PrevExist opts.PrevIndex = prevIndex } return e.kAPI.Set(context.Background(), path, string(pvj), opts) }
func (e *EtcdManager) GetProxiesInfo() (cluster.ProxiesInfo, error) { psi := cluster.ProxiesInfo{} res, err := e.kAPI.Get(context.Background(), filepath.Join(e.etcdPath, proxiesInfoDir), &etcd.GetOptions{Recursive: true, Quorum: true}) if err != nil && !IsEtcdNotFound(err) { return nil, err } else if !IsEtcdNotFound(err) { for _, node := range res.Node.Nodes { var pi cluster.ProxyInfo err = json.Unmarshal([]byte(node.Value), &pi) if err != nil { return nil, err } psi = append(psi, &pi) } } return psi, nil }
func (e *EtcdManager) GetProxyInfo(id string) (*cluster.ProxyInfo, bool, error) { if id == "" { return nil, false, fmt.Errorf("empty proxy id") } var pi cluster.ProxyInfo res, err := e.kAPI.Get(context.Background(), filepath.Join(e.etcdPath, proxiesInfoDir, id), &etcd.GetOptions{Quorum: true}) if err != nil && !IsEtcdNotFound(err) { return nil, false, err } else if !IsEtcdNotFound(err) { err = json.Unmarshal([]byte(res.Node.Value), &pi) if err != nil { return nil, false, err } return &pi, true, nil } return nil, false, nil }
func (e *EtcdManager) GetKeepersDiscoveryInfo() (cluster.KeepersDiscoveryInfo, error) { keepers := cluster.KeepersDiscoveryInfo{} res, err := e.kAPI.Get(context.Background(), filepath.Join(e.etcdPath, keepersDiscoveryInfoDir), &etcd.GetOptions{Recursive: true, Quorum: true}) if err != nil && !IsEtcdNotFound(err) { return nil, err } else if !IsEtcdNotFound(err) { for _, node := range res.Node.Nodes { var keeper cluster.KeeperDiscoveryInfo err = json.Unmarshal([]byte(node.Value), &keeper) if err != nil { return nil, err } keepers = append(keepers, &keeper) } } return keepers, nil }