Example #1
func (zom *zookeeperOffsetManager) commitOffset(topic string, partition int32, tracker *partitionOffsetTracker, logger zap.Logger) error {
	err := tracker.commit(func(offset int64) error {
		if offset >= 0 {
			return zom.cg.group.CommitOffset(topic, partition, offset+1)
		} else {
			return nil
		}
	})

	if err != nil {
		logger.Warn("ZOOKEEPER: FAILED to commit offset",
			zap.Int64("highestProcessedOffset", tracker.highestProcessedOffset),
			zap.String("topic", topic),
			zap.Int64("partition", int64(partition)),
		)
	} else if zom.config.VerboseLogging {
		logger.Debug("ZOOKEEPER: Committed offset",
			zap.Int64("lastCommittedOffset", tracker.lastCommittedOffset),
			zap.String("topic", topic),
			zap.Int64("partition", int64(partition)),
		)
	}

	return err
}
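The tracker passed into commitOffset is defined elsewhere in the consumer group package. A minimal sketch of what such a partitionOffsetTracker and its commit helper might look like; the struct layout, the mutex, and the commit contract are assumptions inferred from this example, not the library's actual code:

// Hypothetical sketch: commit hands the highest processed offset to the
// committer callback and records it as committed only on success.
type offsetCommitter func(offset int64) error

type partitionOffsetTracker struct {
	l                      sync.Mutex
	highestProcessedOffset int64
	lastCommittedOffset    int64
}

func (pot *partitionOffsetTracker) commit(committer offsetCommitter) error {
	pot.l.Lock()
	defer pot.l.Unlock()

	if pot.highestProcessedOffset > pot.lastCommittedOffset {
		if err := committer(pot.highestProcessedOffset); err != nil {
			return err
		}
		pot.lastCommittedOffset = pot.highestProcessedOffset
	}
	return nil
}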
Example #2
func main() {
	logLevel := zap.LevelFlag("v", zap.InfoLevel, "log level: all, debug, info, warn, error, panic, fatal, none")
	flag.StringVar(&botName, "botname", "satpam_bot", "bot name")
	flag.StringVar(&adminID, "admin", "", "admin id")
	flag.Parse()

	// setup logger
	log.SetLevel(*logLevel)
	bot.SetLogger(log)
	log.Info("STARTED", zap.String("version", VERSION), zap.String("buildtime", BUILDTIME))

	key := os.Getenv("TELEGRAM_KEY")
	if key == "" {
		log.Fatal("TELEGRAM_KEY can not be empty")
	}

	startedAt = time.Now()
	telegram := bot.NewTelegram(key)
	plugin := satpamBot{t: telegram}
	if err := telegram.AddPlugin(&plugin); err != nil {
		log.Fatal("Failed AddPlugin", zap.Error(err))
	}
	plugin.start()
	telegram.Start()
}
Example #3
// WrongURLsToLog - write wrong URLs to the log
func (h *HTMLMetadata) WrongURLsToLog(logger zap.Logger) {
	for url, details := range h.wrongURLs {
		logger.Warn("Error parsing URL",
			zap.String("err_url", url),
			zap.String("details", details),
		)
	}
}
Example #4
func (k KeeperInfoHistories) DeepCopy() KeeperInfoHistories {
	if k == nil {
		return nil
	}
	nk, err := copystructure.Copy(k)
	if err != nil {
		panic(err)
	}
	log.Debug("", zap.String("k", spew.Sdump(k)), zap.String("nk", spew.Sdump(nk)))
	if !reflect.DeepEqual(k, nk) {
		panic("not equal")
	}
	return nk.(KeeperInfoHistories)
}
Example #5
func (s *Server) serveH2c(w http.ResponseWriter, r *http.Request) {
	defer func() {
		if err := recover(); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			Log.Error("CONNECT failed", zap.Object("err", err))
		}
	}()
	remote, err := net.DialTimeout("tcp", r.Host, time.Second*10)
	if err != nil {
		Log.Error("dail failed", zap.Error(err), zap.String("host", r.Host))
		w.WriteHeader(http.StatusNotImplemented)
		return
	}
	defer remote.Close()

	fw := &flushWriter{w}
	fw.FlushHeader(http.StatusOK)
	go io.Copy(remote, r.Body)
	srcRemote := &TryReader{
		c:        remote,
		ignore:   3,
		maxRetry: 2,
		tryDur:   time.Millisecond * 600,
		timeout:  time.Second * 15,
	}
	io.Copy(fw, srcRemote)
}
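The flushWriter and TryReader helpers used above come from the surrounding package. A minimal sketch of a flushWriter-style wrapper, assuming all it needs to do is write through the ResponseWriter and flush after every chunk so the tunnel is not buffered:

// Hypothetical flushWriter: every write is pushed to the client immediately.
type flushWriter struct {
	w http.ResponseWriter
}

func (fw *flushWriter) FlushHeader(status int) {
	fw.w.WriteHeader(status)
	if f, ok := fw.w.(http.Flusher); ok {
		f.Flush()
	}
}

func (fw *flushWriter) Write(p []byte) (int, error) {
	n, err := fw.w.Write(p)
	if f, ok := fw.w.(http.Flusher); ok {
		f.Flush()
	}
	return n, err
}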
Example #6
func sentinel(cmd *cobra.Command, args []string) {
	if cfg.debug {
		log.SetLevel(zap.DebugLevel)
	}
	if cfg.clusterName == "" {
		fmt.Println("cluster name required")
		os.Exit(1)
	}
	if cfg.storeBackend == "" {
		fmt.Println("store backend type required")
		os.Exit(1)
	}

	id := common.UID()
	log.Info("sentinel id", zap.String("id", id))

	stop := make(chan bool, 0)
	end := make(chan bool, 0)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, os.Kill)
	go sigHandler(sigs, stop)

	s, err := NewSentinel(id, &cfg, stop, end)
	if err != nil {
		fmt.Printf("cannot create sentinel: %v\n", err)
		os.Exit(1)
	}
	go s.Start()

	<-end
}
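The sigHandler goroutine started above is not shown in this example. A minimal sketch of one possible handler that turns the first received signal into a stop request; closing the stop channel is an assumption about how Sentinel consumes it:

// Hypothetical signal handler: forward the first signal as a stop request.
func sigHandler(sigs chan os.Signal, stop chan bool) {
	s := <-sigs
	log.Info("got signal, stopping", zap.String("signal", s.String()))
	close(stop)
}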
Example #7
func (p *Manager) SyncFromFollowedPGRewind(followedConnParams ConnParams, password string) error {
	// ioutil.TempFile already creates files with 0600 permissions
	pgpass, err := ioutil.TempFile("", "pgpass")
	if err != nil {
		return err
	}
	defer os.Remove(pgpass.Name())
	defer pgpass.Close()

	host := followedConnParams.Get("host")
	port := followedConnParams.Get("port")
	user := followedConnParams.Get("user")
	pgpass.WriteString(fmt.Sprintf("%s:%s:*:%s:%s\n", host, port, user, password))

	// Disable synchronous commits. pg_rewind needs to create a
	// temporary table on the master but if synchronous replication is
	// enabled and there're no active standbys it will hang.
	followedConnParams.Set("options", "-c synchronous_commit=off")
	followedConnString := followedConnParams.ConnString()

	log.Info("running pg_rewind")
	name := filepath.Join(p.pgBinPath, "pg_rewind")
	cmd := exec.Command(name, "--debug", "-D", p.dataDir, "--source-server="+followedConnString)
	cmd.Env = append(cmd.Env, fmt.Sprintf("PGPASSFILE=%s", pgpass.Name()))
	log.Debug("execing cmd", zap.Object("cmd", cmd))
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error: %v, output: %s", err, string(out))
	}
	log.Debug("cmd out", zap.String("out", string(out)))
	return nil
}
Example #8
func (s *Server) serveH2r(w http.ResponseWriter, r *http.Request) {
	defer func() {
		if err := recover(); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			Log.Error("REVERSE failed", zap.Object("err", err))
		} else {
			w.WriteHeader(http.StatusOK)
		}
	}()

	remote, err := net.DialTimeout("tcp", r.Host, time.Second*10)
	if err != nil {
		Log.Error("dail failed", zap.Error(err), zap.String("host", r.Host))
		w.WriteHeader(http.StatusNotImplemented)
		return
	}
	defer remote.Close()

	go io.Copy(remote, r.Body)
	//	go io.Copy(remote, io.TeeReader(r.Body, os.Stdout))
	resr := io.TeeReader(remote, w)
	//	resr = io.TeeReader(resr, os.Stdout)
	res, err := http.ReadResponse(bufio.NewReader(resr), nil)
	if err != nil {
		return
	}
	if res.Body != nil {
		defer res.Body.Close()
		io.Copy(ioutil.Discard, res.Body)
	}
}
Example #9
func (w *hostWorkers) Init(db *content.DBrw, logger zap.Logger, baseHosts []string, cnt int) error {
	hostMng := &hostsManager{}
	err := hostMng.Init(db, baseHosts)
	if err != nil {
		return err
	}

	hosts := hostMng.GetHosts()
	w.workers = make([]*hostWorker, 0)
	cntPerHost := cnt / len(hosts)
	if cntPerHost < 1 {
		cntPerHost = 1
	}
	for hostName, hostID := range hosts {
		worker := &hostWorker{Request: &request{hostMng: hostMng}}
		worker.Request.Init(logger.With(zap.String("host", hostName)))
		worker.Tasks, err = db.GetNewURLs(hostID, cntPerHost)
		if err != nil {
			return err
		}
		w.workers = append(w.workers, worker)
	}

	return nil
}
Example #10
// TestLogError ...
func TestLogError(t *testing.T) {
	Convey("Check Log standart error", t, func() {
		buf := &bytes.Buffer{}
		logger := zap.NewJSON(zap.DebugLevel, zap.Output(zap.AddSync(buf)))
		logger.StubTime()

		LogError(logger, errors.New("message"))
		So(string(buf.Bytes()), ShouldEqual, `{"msg":"message","level":"error","ts":0,"fields":{}}`+"\n")
	})

	Convey("Check Log ErrorEx", t, func() {
		callerTest = true
		defer func() { callerTest = false }()

		buf := &bytes.Buffer{}
		logger := zap.NewJSON(zap.DebugLevel, zap.Output(zap.AddSync(buf)))
		logger.StubTime()

		LogError(logger, NewEx(zap.DebugLevel, "message",
			zap.String("field1", "field1 data"),
			zap.Int("field2", 5)))

		So(string(buf.Bytes()), ShouldEqual,
			`{"msg":"message","level":"debug","ts":0,"fields":{"caller":"<fake>","field1":"field1 data","field2":5}}`+"\n")
	})

}
Example #11
func readBody(contentEncoding string, body io.Reader) ([]byte, error) {
	var err error
	result := []byte{}
	if contentEncoding == "gzip" {
		reader, err := gzip.NewReader(body)
		if err != nil {
			return result, werrors.NewDetails(ErrReadGZipResponse, err)
		}
		result, err = ioutil.ReadAll(reader)
		if err == nil {
			err = reader.Close()
		} else {
			_ = reader.Close()
		}
		if err != nil {
			return result, werrors.NewDetails(ErrReadGZipResponse, err)
		}
	} else if contentEncoding == "identity" || contentEncoding == "" {
		result, err = ioutil.ReadAll(body)
		if err != nil {
			return result, werrors.NewDetails(ErrReadResponse, err)
		}
	} else {
		return result, werrors.NewFields(ErrUnknownContentEncoding, zap.String("encoding", contentEncoding))
	}

	return result, nil
}
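A hypothetical call site for readBody, assuming the body comes straight from an http.Response; fetchBody and the choice of wrapping the transport error in ErrReadResponse are illustrative, not part of the project:

// Hypothetical helper: fetch a page and decode its body with readBody.
func fetchBody(pageURL string) ([]byte, error) {
	response, err := http.Get(pageURL)
	if err != nil {
		return nil, werrors.NewDetails(ErrReadResponse, err)
	}
	defer func() { _ = response.Body.Close() }()

	return readBody(response.Header.Get("Content-Encoding"), response.Body)
}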
Example #12
func proxy(cmd *cobra.Command, args []string) {
	if cfg.debug {
		log.SetLevel(zap.DebugLevel)
	}
	stdlog, _ := zwrap.Standardize(log, zap.DebugLevel)
	pollon.SetLogger(stdlog)

	if cfg.clusterName == "" {
		fmt.Println("cluster name required")
		os.Exit(1)
	}
	if cfg.storeBackend == "" {
		fmt.Println("store backend type required")
		os.Exit(1)
	}

	uid := common.UID()
	log.Info("proxy uid", zap.String("uid", uid))

	clusterChecker, err := NewClusterChecker(uid, cfg)
	if err != nil {
		fmt.Printf("cannot create cluster checker: %v", err)
		os.Exit(1)
	}
	clusterChecker.Start()
}
Example #13
func (p *Manager) SetupRoles() error {
	ctx, cancel := context.WithTimeout(context.Background(), p.requestTimeout)
	defer cancel()

	if p.suUsername == p.replUsername {
		log.Info("adding replication role to superuser")
		if err := alterRole(ctx, p.localConnParams, []string{"replication"}, p.suUsername, p.suPassword); err != nil {
			return fmt.Errorf("error adding replication role to superuser: %v", err)
		}
		log.Info("replication role added to superuser")
	} else {
		// Configure superuser role password
		if p.suPassword != "" {
			log.Info("setting superuser password")
			if err := setPassword(ctx, p.localConnParams, p.suUsername, p.suPassword); err != nil {
				return fmt.Errorf("error setting superuser password: %v", err)
			}
			log.Info("superuser password set")
		}
		roles := []string{"login", "replication"}
		log.Info("creating replication role")
		if err := createRole(ctx, p.localConnParams, roles, p.replUsername, p.replPassword); err != nil {
			return fmt.Errorf("error creating replication role: %v", err)
		}
		log.Info("replication role created", zap.String("role", p.replUsername))
	}
	return nil
}
Example #14
func (p *PostgresKeeper) getLastPGState() *cluster.PostgresState {
	p.pgStateMutex.Lock()
	pgState := p.lastPGState.DeepCopy()
	p.pgStateMutex.Unlock()
	log.Debug("pgstate dump", zap.String("pgState", spew.Sdump(pgState)))
	return pgState
}
Example #15
func (zom *zookeeperOffsetManager) FinalizePartition(topic string, partition int32, lastOffset int64, timeout time.Duration, replicaId int, logger zap.Logger) error {
	zom.l.RLock()
	tracker := zom.offsets[topic][partition]
	zom.l.RUnlock()

	if lastOffset >= 0 {
		if lastOffset-tracker.highestProcessedOffset > 0 {
			logger.Info("ZOOKEEPER: Finalizing partition. Waiting before processing remaining messages",
				zap.Int("replicaId", replicaId),
				zap.String("topic", topic),
				zap.Int64("partition", int64(partition)),
				zap.Int64("lastProcessedOffset", tracker.highestProcessedOffset),
				zap.Duration("waitingTimeToProcessMoreMessages", timeout/time.Second),
				zap.Int64("numMessagesToProcess", lastOffset-tracker.highestProcessedOffset),
			)
			if !tracker.waitForOffset(lastOffset, timeout) {
				return fmt.Errorf("REP %d - TIMEOUT waiting for offset %d. Last committed offset: %d", replicaId, lastOffset, tracker.lastCommittedOffset)
			}
		}

		if err := zom.commitOffset(topic, partition, tracker, logger); err != nil {
			return fmt.Errorf("REP %d - FAILED to commit offset %d to Zookeeper. Last committed offset: %d", replicaId, tracker.highestProcessedOffset, tracker.lastCommittedOffset)
		}
	}

	zom.l.Lock()
	delete(zom.offsets[topic], partition)
	zom.l.Unlock()

	return nil
}
Example #16
func main() {
	mongoSession, err := mgo.Dial("127.0.0.1")
	if err != nil {
		panic(err)
	}
	defer mongoSession.Close()

	mongoSession.SetMode(mgo.Monotonic, true)

	log := zap.NewJSON(
		zap.DebugLevel,
		zap.Fields(zap.Int("count", 1)),
		zap.Output(NewWriter(mongoSession)),
	)
	url := "http://example.local"
	tryNum := 42
	startTime := time.Now()
	for i := range [logCounts]struct{}{} {
		log.Info("Failed to fetch URL.",
			zap.String("url", url),
			zap.Int("attempt", tryNum),
			zap.Duration("backoff", time.Since(startTime)),
			zap.Int("index", i),
		)
	}
	fmt.Printf("Finished in %v\n", time.Since(startTime))
}
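NewWriter(mongoSession) is assumed by this example but not shown. In this pre-1.0 zap API, zap.Output takes a sink with Write and Sync methods (Example #10 wraps a plain buffer with zap.AddSync for the same reason). A minimal sketch of an mgo-backed sink; the database and collection names are made up for illustration:

// Hypothetical mgo-backed log sink: each Write receives one JSON-encoded entry.
type mongoWriter struct {
	session *mgo.Session
}

func NewWriter(session *mgo.Session) *mongoWriter {
	return &mongoWriter{session: session}
}

func (m *mongoWriter) Write(p []byte) (int, error) {
	coll := m.session.DB("logging").C("entries")
	if err := coll.Insert(bson.M{"entry": string(p)}); err != nil {
		return 0, err
	}
	return len(p), nil
}

// Sync is a no-op; each insert is already flushed to MongoDB.
func (m *mongoWriter) Sync() error { return nil }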
Example #17
// NewHTMLMetadata - create new HTMLMetadata struct
func NewHTMLMetadata(hostMng *hostsManager, urlStr string) (*HTMLMetadata, error) {
	baseURL, err := url.Parse(urlStr)
	if err != nil {
		return nil, werrors.NewFields(ErrParseBaseURL,
			zap.String("details", err.Error()),
			zap.String("parsed_url", urlStr))
	}

	return &HTMLMetadata{
		URLs:         make(map[string]sql.NullInt64),
		wrongURLs:    make(map[string]string),
		title:        "",
		MetaTagIndex: true,
		baseURL:      baseURL,
		hostMng:      hostMng,
	}, nil
}
Example #18
func (m *hostsManager) resolveURL(hostName string) (string, error) {
	hostURL := NormalizeURL(&url.URL{Scheme: "http", Host: hostName})
	response, err := http.Get(hostURL)
	if err == nil {
		err = response.Body.Close()
		if response.StatusCode != 200 {
			return "", werrors.NewFields(ErrResolveBaseURL,
				zap.Int("status_code", response.StatusCode),
				zap.String("url", hostURL))
		}
	}
	if err != nil {
		return "", werrors.NewFields(ErrGetRequest,
			zap.String("details", err.Error()),
			zap.String("url", hostURL))
	}

	return response.Request.URL.String(), nil
}
Example #19
func (m *hostsManager) initByDb(db proxy.DbHost) error {
	hosts, err := db.GetHosts()
	if err != nil {
		return err
	}
	for id, host := range hosts {
		hostName := host.GetName()
		robot, err := robotstxt.FromStatusAndBytes(host.GetRobotsTxt())
		if err != nil {
			return werrors.NewFields(ErrCreateRobotsTxtFromDb,
				zap.String("host", hostName),
				zap.String("details", err.Error()))
		}
		m.hosts[hostName] = id
		m.robotsTxt[id] = robot.FindGroup("Googlebot")
	}

	return nil
}
Example #20
func (m *hostsManager) readRobotTxt(hostName string) (int, []byte, error) {
	var body []byte
	robotsURL := NormalizeURL(&url.URL{Scheme: "http", Host: hostName, Path: "robots.txt"})
	response, err := http.Get(robotsURL)
	if err == nil {
		body, err = ioutil.ReadAll(response.Body)
		closeErr := response.Body.Close()
		if err == nil {
			err = closeErr
		}
	}

	if err != nil {
		return 0, body, werrors.NewFields(ErrGetRequest,
			zap.String("details", err.Error()),
			zap.String("url", robotsURL))
	}

	return response.StatusCode, body, nil
}
Example #21
func (s *Sentinel) setSentinelInfo(ttl time.Duration) error {
	sentinelInfo := &cluster.SentinelInfo{
		UID: s.id,
	}
	log.Debug("sentinelInfod dump", zap.String("sentinelInfo", spew.Sdump(sentinelInfo)))

	if err := s.e.SetSentinelInfo(sentinelInfo, ttl); err != nil {
		return err
	}
	return nil
}
Example #22
func (s *Sentinel) findBestNewMasters(cd *cluster.ClusterData, masterDB *cluster.DB) []*cluster.DB {
	bestNewMasters := s.findBestStandbys(cd, masterDB)
	// Add the previous masters to the best standbys (if valid and in good state)
	goodMasters, _, _ := s.validMastersByStatus(cd)
	log.Debug("goodMasters", zap.String("goodMasters", spew.Sdump(goodMasters)))
	for _, db := range goodMasters {
		if db.UID == masterDB.UID {
			log.Debug("ignoring db since it's the current master", zap.String("db", db.UID), zap.String("keeper", db.Spec.KeeperUID))
			continue
		}
		if db.Status.TimelineID != masterDB.Status.TimelineID {
			log.Debug("ignoring keeper since its pg timeline is different than master timeline", zap.String("db", db.UID), zap.Uint64("dbTimeline", db.Status.TimelineID), zap.Uint64("masterTimeline", masterDB.Status.TimelineID))
			continue
		}
		bestNewMasters = append(bestNewMasters, db)
	}
	// Sort by XLogPos
	sort.Sort(dbSlice(bestNewMasters))
	log.Debug("bestNewMasters", zap.String("bestNewMasters", spew.Sdump(bestNewMasters)))
	return bestNewMasters
}
Example #23
func (conn *Connection) connect(logger zap.Logger) error {
	if conn.conn != nil {
		conn.conn.Close()
	}
	if conn.connAux != nil {
		conn.connAux.Close()
	}

	var cert tls.Certificate
	var err error
	if len(conn.CertificateBase64) == 0 && len(conn.KeyBase64) == 0 {
		// The user did not specify raw block contents, so check the filesystem.
		cert, err = tls.LoadX509KeyPair(conn.CertificateFile, conn.KeyFile)
	} else {
		// The user provided the raw block contents, so use that.
		cert, err = tls.X509KeyPair([]byte(conn.CertificateBase64), []byte(conn.KeyBase64))
	}

	if err != nil {
		logger.Fatal("APNS: Failed to obtain certificate",
			zap.Error(err),
		)
		return err
	}

	conf := &tls.Config{
		Certificates: []tls.Certificate{cert},
		ServerName:   strings.Split(conn.Gateway, ":")[0],
	}

	connAux, err := net.Dial("tcp", conn.Gateway)
	if err != nil {
		logger.Fatal("APNS: Failed while dialing gateway",
			zap.String("gateway", conn.Gateway),
			zap.Error(err),
		)
		return err
	}
	tlsConn := tls.Client(connAux, conf)
	err = tlsConn.Handshake()
	if err != nil {
		logger.Fatal("APNS: Failed while handshaking",
			zap.Error(err),
		)
		_ = tlsConn.Close()
		return err
	}
	conn.conn = tlsConn
	conn.connAux = connAux
	// Start reader goroutine
	go conn.reader(conn.responses, logger)
	return nil
}
Example #24
func checkContentType(header *http.Header) (string, error) {
	contentTypeArr, ok := (*header)["Content-Type"]
	if !ok || len(contentTypeArr) == 0 {
		return "", werrors.New(ErrNotFountContentType)
	}
	contentType := contentTypeArr[0]

	mediatype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return "", werrors.NewFields(ErrParseContentType,
			zap.String("detail", err.Error()),
			zap.String("content_type", contentType))
	}

	if mediatype != "text/html" {
		return "", werrors.NewEx(zap.InfoLevel, InfoUnsupportedMimeFormat,
			zap.String("content_type", contentType))
	}

	return contentType, nil
}
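A hypothetical call site for checkContentType, assuming the header comes from an http.Response obtained elsewhere:

// Hypothetical caller: accept only text/html responses and pass the
// werrors error (with its level and fields) back to whoever logs it.
func ensureHTML(response *http.Response) (string, error) {
	contentType, err := checkContentType(&response.Header)
	if err != nil {
		return "", err
	}
	return contentType, nil
}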
Example #25
func NewSentinel(uid string, cfg *config, stop chan bool, end chan bool) (*Sentinel, error) {
	var initialClusterSpec *cluster.ClusterSpec
	if cfg.initialClusterSpecFile != "" {
		configData, err := ioutil.ReadFile(cfg.initialClusterSpecFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read provided initial cluster config file: %v", err)
		}
		if err := json.Unmarshal(configData, &initialClusterSpec); err != nil {
			return nil, fmt.Errorf("cannot parse provided initial cluster config: %v", err)
		}
		log.Debug("initialClusterSpec dump", zap.String("initialClusterSpec", spew.Sdump(initialClusterSpec)))
		if err := initialClusterSpec.Validate(); err != nil {
			return nil, fmt.Errorf("invalid initial cluster: %v", err)
		}
	}

	storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)

	kvstore, err := store.NewStore(store.Config{
		Backend:       store.Backend(cfg.storeBackend),
		Endpoints:     cfg.storeEndpoints,
		CertFile:      cfg.storeCertFile,
		KeyFile:       cfg.storeKeyFile,
		CAFile:        cfg.storeCAFile,
		SkipTLSVerify: cfg.storeSkipTlsVerify,
	})
	if err != nil {
		return nil, fmt.Errorf("cannot create store: %v", err)
	}
	e := store.NewStoreManager(kvstore, storePath)

	candidate := leadership.NewCandidate(kvstore, filepath.Join(storePath, common.SentinelLeaderKey), uid, store.MinTTL)

	return &Sentinel{
		uid:                uid,
		cfg:                cfg,
		e:                  e,
		candidate:          candidate,
		leader:             false,
		initialClusterSpec: initialClusterSpec,
		stop:               stop,
		end:                end,
		UIDFn:              common.UID,
		// This is just to choose a pseudo random keeper so
		// use math.rand (no need for crypto.rand) without an
		// initial seed.
		RandFn: rand.Intn,

		sleepInterval:  cluster.DefaultSleepInterval,
		requestTimeout: cluster.DefaultRequestTimeout,
	}, nil
}
Example #26
func newError(logLevel zap.Level, msg string, fields ...zap.Field) error {
	result := &ErrorEx{
		msg:    msg,
		Level:  logLevel,
		Fields: make([]zap.Field, len(fields)+1)}

	result.Fields[0] = zap.String("caller", getCaller())
	for i, field := range fields {
		result.Fields[i+1] = field
	}

	return result
}
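newError fills in an ErrorEx value that is defined elsewhere. A minimal sketch of what that type plausibly looks like, inferred from the fields used by this constructor; the Error method is an assumption:

// Hypothetical ErrorEx: an error carrying a log level and structured zap fields.
type ErrorEx struct {
	msg    string
	Level  zap.Level
	Fields []zap.Field
}

func (e *ErrorEx) Error() string {
	return e.msg
}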
Example #27
func (c *ClusterChecker) SetProxyInfo(e *store.StoreManager, uid string, generation int64, ttl time.Duration) error {
	proxyInfo := &cluster.ProxyInfo{
		UID:             c.uid,
		ProxyUID:        uid,
		ProxyGeneration: generation,
	}
	log.Debug("proxyInfo dump", zap.String("proxyInfo", spew.Sdump(proxyInfo)))

	if err := c.e.SetProxyInfo(proxyInfo, ttl); err != nil {
		return err
	}
	return nil
}
Example #28
// Inform sends a message to the right channel in MM
func (c *Controller) Inform(update jira.IssueEvent) <-chan Response {

	c.l.Info("about to inform")
	count := metrics.GetOrRegisterCounter("inform.request.total", c.reg)
	count.Inc(1)
	ch := make(chan Response)
	go func() {
		response := Response{Project: strings.ToLower(update.Project), ID: update.ID}
		count := metrics.GetOrRegisterCounter("inform.request."+response.Project, c.reg)
		count.Inc(1)

		purl := c.hooks[strings.ToLower(update.Project)]
		if purl == "" {
			response.Status = "1002 - not mapped"
			response.StatusCode = 1002
			ch <- response
			return
		}
		response.EndPoint = purl
		c.l.Debug("about to post", zap.String("post url", purl))
		buff, err := c.converter.Convert(update)
		if err != nil {
			response.Error = err.Error()
			response.Status = "1003 - not templated"
			response.StatusCode = 1003
			ch <- response
			return
		}

		s2, _ := json.Marshal(&Request{User: c.name, Icon: c.icon, Text: string(buff.Bytes())})
		req, err := http.NewRequest("POST", purl, bytes.NewBuffer(s2))
		if err != nil {
			response.Error = err.Error()
			ch <- response
			return
		}
		req.Header.Set("Content-Type", "application/json")

		client := &http.Client{}

		resp, err := client.Do(req)

		if err != nil {
			response.Error = err.Error()
			ch <- response
			return
		}
		defer resp.Body.Close()
		response.Error = ""
		response.Status = resp.Status
		response.StatusCode = resp.StatusCode

		ch <- response
		close(ch)
	}()
	return ch
}
Example #29
func (s *Sentinel) findBestStandby(cd *cluster.ClusterData, masterDB *cluster.DB) (*cluster.DB, error) {
	var bestDB *cluster.DB
	for _, db := range cd.DBs {
		if db.UID == masterDB.UID {
			log.Debug("ignoring db since it's the current master", zap.String("db", db.UID), zap.String("keeper", db.Spec.KeeperUID))
			continue
		}
		if db.Status.SystemID != masterDB.Status.SystemID {
			log.Debug("ignoring db since the postgres systemdID is different that the master one", zap.String("db", db.UID), zap.String("keeper", db.Spec.KeeperUID), zap.String("dbSystemdID", db.Status.SystemID), zap.String("masterSystemID", masterDB.Status.SystemID))
			continue

		}
		if !db.Status.Healthy {
			log.Debug("ignoring db since it's not healthy", zap.String("db", db.UID), zap.String("keeper", db.Spec.KeeperUID))
			continue
		}
		if db.Status.CurrentGeneration != db.Generation {
			log.Debug("ignoring keeper since its generation is different that the current one", zap.String("db", db.UID), zap.Int64("currentGeneration", db.Status.CurrentGeneration), zap.Int64("generation", db.Generation))
			continue
		}
		if db.Status.TimelineID != masterDB.Status.TimelineID {
			log.Debug("ignoring keeper since its pg timeline is different than master timeline", zap.String("db", db.UID), zap.Uint64("dbTimeline", db.Status.TimelineID), zap.Uint64("masterTimeline", masterDB.Status.TimelineID))
			continue
		}
		if bestDB == nil {
			bestDB = db
			continue
		}
		if db.Status.XLogPos > bestDB.Status.XLogPos {
			bestDB = db
		}
	}
	if bestDB == nil {
		return nil, fmt.Errorf("no standbys available")
	}
	return bestDB, nil
}
Example #30
File: main.go Project: tixu/mmjira
// GetTarget retrieves the hook assigned to a project; it returns an error in any other case
func (b MMJira) postHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	hookid := strings.ToLower(vars["hookid"])
	b.l.Info("project", zap.String("hook", hookid))
	if b.c.Hooks[hookid] == "" {
		c := metrics.GetOrRegisterCounter("hooks.post.unknown.project", b.reg)
		c.Inc(1)
		http.Error(w, "unknwon project", http.StatusBadRequest)
		return
	}
	b.l.Debug("received a request")
	c := metrics.GetOrRegisterCounter("hooks.received."+hookid, b.reg)
	c.Inc(1)
	if b.c.Debug {
		if err := utils.DumpRequest(r, b.c.DumpDir); err != nil {
			b.l.Info("unable to dump the request in the directory", zap.String("Directory", b.c.DumpDir))
		}
	}
	issue, err := b.m.Create(r.Body)
	if err != nil {
		http.Error(w, fmt.Sprint(err), http.StatusBadRequest)
		return
	}

	// We only know our top-level keys are strings

	b.l.Debug("sending", zap.Object("issue", issue))

	ch := b.m.Inform(issue)
	go b.m.Analyse(ch)
}