func main() {
	group := bcast.NewGroup()
	go group.Broadcasting(0)
	go TimeProvider(group, 100*time.Millisecond)
	go newReciever(group)
	newReciever(group)
}
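
A minimal receiver for this pattern might look like the sketch below (hypothetical; the original newReciever is not shown), assuming the bcast member API used in the later examples, where Join() returns a member whose In channel delivers broadcast values:

// newReciever joins the broadcast group and prints every value it
// receives. Hypothetical sketch, not the original implementation.
func newReciever(group *bcast.Group) {
	member := group.Join()
	defer member.Close()
	for val := range member.In {
		fmt.Println("received:", val)
	}
}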
func main() {
	group := bcast.NewGroup()
	go group.Broadcasting(0)
	go provider.TimeProvider(group, 10*time.Millisecond)
	app := appContext{group}

	gin.SetMode(gin.ReleaseMode)
	router := gin.Default()
	router.LoadHTMLGlob("templates/*")
	router.GET("time/", app.timeStream)
	router.GET("client/", client)
	router.Run(":8080")
}
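
A plausible sketch of the timeStream handler and appContext (both hypothetical; neither is shown in this snippet), again assuming the bcast member API from the examples below:

type appContext struct {
	group *bcast.Group
}

// timeStream relays broadcast values to the client as Server-Sent
// Events. Hypothetical sketch.
func (app appContext) timeStream(c *gin.Context) {
	member := app.group.Join()
	defer member.Close()
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	for val := range member.In {
		fmt.Fprintf(c.Writer, "data: %v\n\n", val)
		c.Writer.Flush()
	}
}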
Example #3
func main() {
	var err error
	var latestheight, latestheightcache int
	var blockscached *[]*btcplex.Block
	usage := `BTCplex webapp/API server.

Usage:
  btcplex-server [--config=<path>]
  btcplex-server -h | --help

Options:
  -h --help         Show this screen.
  -c <path>, --config <path>    Path to config file [default: config.json].
`
	arguments, _ := docopt.Parse(usage, nil, true, "btcplex-server", false)

	confFile := "config.json"
	if arguments["--config"] != nil {
		confFile = arguments["--config"].(string)
	}

	log.Println("Starting btcplex-server")

	conf, err = btcplex.LoadConfig(confFile)
	if err != nil {
		log.Fatalf("Can't load config file: %v\n", err)
	}

	// Used for pub/sub in the webapp and data like latest processed height
	pool, err := btcplex.GetRedis(conf)
	if err != nil {
		log.Fatalf("Can't connect to Redis: %v\n", err)
	}

	// Due to args injection I can't use two *redis.Pool with martini
	rediswrapper := new(RedisWrapper)
	rediswrapper.Pool = pool

	ssdb, err := btcplex.GetSSDB(conf)
	if err != nil {
		log.Fatalf("Can't connect to SSDB: %v\n", err)
	}

	// Set up some pub/sub:

	// Compute the unconfirmed transaction count in a ticker
	utxscnt := 0
	utxscntticker := time.NewTicker(1 * time.Second)
	go func(pool *redis.Pool, utxscnt *int) {
		c := pool.Get()
		defer c.Close()
		for range utxscntticker.C {
			*utxscnt, _ = redis.Int(c.Do("ZCARD", "btcplex:rawmempool"))
		}
	}(pool, &utxscnt)

	// Poll the latest height from the BTCplex db,
	// and track the status / check if BTCplex goes out of sync
	latestheightticker := time.NewTicker(1 * time.Second)
	checkinprogress := false
	bitcoindheight := btcplex.GetBlockCountRPC(conf)
	btcplexsynced := true
	go func(pool *redis.Pool, latestheight *int) {
		c := pool.Get()
		defer c.Close()
		for range latestheightticker.C {
			*latestheight, _ = redis.Int(c.Do("GET", "height:latest"))

			if latestheightcache != *latestheight {
				log.Println("Re-building homepage blocks cache")
				blocks, _ := btcplex.GetLastXBlocks(ssdb, uint(*latestheight), uint(*latestheight-30))
				blockscached = &blocks
				latestheightcache = *latestheight
			}

			bitcoindheight = btcplex.GetBlockCountRPC(conf)
			if uint(latestheightcache) != bitcoindheight && !checkinprogress && btcplexsynced {
				checkinprogress = true
				go func(checkinprogress *bool) {
					if bitcoindheight-uint(latestheightcache) > 20 {
						btcplexsynced = false
						log.Printf("CRITICAL: OUT OF SYNC / btcplex:%v, bitcoind:%v\n", latestheightcache, bitcoindheight)
					} else {
						log.Println("WARNING: BTCplex Out of sync, waiting before another check")
						time.Sleep(synctimeout * time.Second)
						if btcplexsynced && uint(latestheightcache) != bitcoindheight {
							btcplexsynced = false
							log.Printf("CRITICAL: OUT OF SYNC / btcplex:%v, bitcoind:%v\n", latestheightcache, bitcoindheight)
						}
					}
					*checkinprogress = false
				}(&checkinprogress)
			}
			if uint(latestheightcache) == bitcoindheight && !btcplexsynced {
				log.Println("INFO: Sync with bitcoind done")
				btcplexsynced = true
			}
		}
	}(ssdb, &latestheight)

	// PubSub channel akin to bitcoind's blocknotify RPC
	blocknotifygroup := bcast.NewGroup()
	go blocknotifygroup.Broadcasting(0)
	go bcastToRedisPubSub(pool, blocknotifygroup, "btcplex:blocknotify2")

	// PubSub channel for unconfirmed txs / rawmempool
	utxgroup := bcast.NewGroup()
	go utxgroup.Broadcasting(0)
	go bcastToRedisPubSub(pool, utxgroup, "btcplex:utxs")
	// TODO Ticker for utxs count => events_unconfirmed

	newblockgroup := bcast.NewGroup()
	go newblockgroup.Broadcasting(0)
	go bcastToRedisPubSub(pool, newblockgroup, "btcplex:newblock")

	btcplexsyncedgroup := bcast.NewGroup()
	go btcplexsyncedgroup.Broadcasting(0)

	// Go template helpers
	appHelpers := template.FuncMap{
		"cut": func(addr string, length int) string {
			return fmt.Sprintf("%v...", addr[:length])
		},
		"cutmiddle": func(addr string, length int) string {
			return fmt.Sprintf("%v...%v", addr[:length], addr[len(addr)-length:])
		},
		"tokb": func(size uint32) string {
			return fmt.Sprintf("%.3f", float32(size)/1024)
		},
		"computefee": func(tx *btcplex.Tx) string {
			if tx.TotalIn == 0 {
				return "0"
			}
			return fmt.Sprintf("%v", float32(tx.TotalIn-tx.TotalOut)/1e8)
		},
		"generationmsg": func(tx *btcplex.Tx) string {
			reward := btcplex.GetBlockReward(tx.BlockHeight)
			fee := float64(tx.TotalOut-uint64(reward)) / 1e8
			return fmt.Sprintf("%v MAZA + %.8f total fees", float64(reward)/1e8, fee)
		},
		"tobtc": func(val uint64) string {
			return fmt.Sprintf("%.8f", float64(val)/1e8)
		},
		"inttobtc": func(val int64) string {
			return fmt.Sprintf("%.8f", float64(val)/1e8)
		},
		"formatprevout": func(prevout *btcplex.PrevOut) string {
			return fmt.Sprintf("%v:%v", prevout.Hash, prevout.Vout)
		},
		"formattime": func(ts uint32) string {
			return fmt.Sprintf("%v", time.Unix(int64(ts), 0).UTC())
		},
		"formatiso": func(ts uint32) string {
			return fmt.Sprintf("%v", time.Unix(int64(ts), 0).Format(time.RFC3339))
		},
		"sub": func(h, p uint) uint {
			return h - p
		},
		"add": func(h, p uint) uint {
			return h + p
		},
		"iadd": func(h, p int) int {
			return h + p
		},
		"confirmation": func(hash string, height uint) uint {
			bm, _ := btcplex.NewBlockMeta(ssdb, hash)
			if !bm.Main {
				return 0
			}
			return uint(latestheight) - height + 1
		},
		"is_orphaned": func(block *btcplex.Block) bool {
			if block.Height == uint(latestheight) {
				return false
			}
			return !block.Main
		},
	}
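
	// These helpers are registered with the renderer below. Inside the
	// HTML templates they would be invoked roughly like this
	// (hypothetical fragment; field names are for illustration only):
	//
	//   {{cutmiddle .Tx.Hash 8}} ({{tobtc .Tx.TotalOut}} BTC)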

	m := martini.Classic()
	m.Map(rediswrapper)
	m.Map(ssdb)

	tmpldir := "templates"
	if conf.AppTemplatesPath != "" {
		tmpldir = conf.AppTemplatesPath
	}
	m.Use(render.Renderer(render.Options{
		Directory: tmpldir,
		Layout:    "layout",
		Funcs:     []template.FuncMap{appHelpers},
	}))

	// We rate limit the API if enabled in the config
	if conf.AppApiRateLimited {
		m.Use(func(res http.ResponseWriter, req *http.Request, rediswrapper *RedisWrapper, log *log.Logger) {
			remoteIP := strings.Split(req.RemoteAddr, ":")[0]
			// Prefer the first client IP from X-Forwarded-For when behind a proxy
			if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
				remoteIP = strings.TrimSpace(strings.Split(xff, ",")[0])
			}
			log.Printf("R:%v\nip:%+v\n", time.Now(), remoteIP)
			if strings.Contains(req.RequestURI, "/api/") {
				ratelimited, cnt, reset := rateLimited(rediswrapper, remoteIP)
				// Set X-RateLimit-* Header
				res.Header().Set("X-RateLimit-Limit", strconv.Itoa(ratelimitcnt))
				res.Header().Set("X-RateLimit-Remaining", strconv.Itoa(ratelimitcnt-cnt))
				res.Header().Set("X-RateLimit-Reset", strconv.Itoa(reset))
				// Set CORS header
				res.Header().Set("Access-Control-Expose-Headers", " X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset")
				res.Header().Set("Access-Control-Allow-Origin", "*")

				if ratelimited {
					res.WriteHeader(429)
				}
			}
		})
	}

	// Don't want Google to crawl the API
	m.Get("/robots.txt", func() string {
		return "User-agent: *\nDisallow: /api"
	})

	m.Get("/", func(r render.Render, db *redis.Pool) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.Blocks = blockscached
		pm.Title = "Latest Bitcoin blocks"
		pm.Description = "Open source Bitcoin block chain explorer with JSON API"
		pm.Menu = "latest_blocks"
		pm.LastHeight = uint(latestheight)
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "index", &pm)
	})

	m.Get("/blocks/:currentheight", func(params martini.Params, r render.Render, db *redis.Pool) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		currentheight, _ := strconv.ParseUint(params["currentheight"], 10, 0)
		blocks, _ := btcplex.GetLastXBlocks(db, uint(currentheight), uint(currentheight-30))
		pm.Blocks = &blocks
		pm.Title = "Bitcoin blocks"
		pm.Menu = "blocks"
		pm.LastHeight = uint(latestheight)
		pm.CurrentHeight = uint(currentheight)
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "blocks", &pm)
	})

	m.Get("/block/:hash", func(params martini.Params, r render.Render, db *redis.Pool) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		block, _ := btcplex.GetBlockCachedByHash(db, params["hash"])
		block.FetchMeta(db)
		btcplex.By(btcplex.TxIndex).Sort(block.Txs)
		pm.Block = block
		pm.Title = fmt.Sprintf("Bitcoin block #%v", block.Height)
		pm.Description = fmt.Sprintf("Bitcoin block #%v summary and related transactions", block.Height)
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "block", &pm)
	})

	m.Get("/api/block/:hash", func(params martini.Params, r render.Render, db *redis.Pool, req *http.Request) {
		block, _ := btcplex.GetBlockCachedByHash(db, params["hash"])
		block.FetchMeta(db)
		btcplex.By(btcplex.TxIndex).Sort(block.Txs)
		block.Links = initHATEOAS(block.Links, req)
		if block.Parent != "" {
			block.Links = addHATEOAS(block.Links, "previous_block", fmt.Sprintf("%v/api/block/%v", conf.AppUrl, block.Parent))
		}
		if block.Next != "" {
			block.Links = addHATEOAS(block.Links, "next_block", fmt.Sprintf("%v/api/block/%v", conf.AppUrl, block.Next))
		}
		r.JSON(200, block)
	})

	m.Get("/unconfirmed-transactions", func(params martini.Params, r render.Render, db *redis.Pool, rdb *RedisWrapper) {
		//rpool := rdb.Pool
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		pm.Menu = "utxs"
		pm.Title = "Unconfirmed transactions"
		pm.Description = "Transactions waiting to be included in a Bitcoin block, updated in real time."
		//utxs, _ := btcplex.GetUnconfirmedTxs(rpool)
		pm.Txs = &[]*btcplex.Tx{}
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "unconfirmed-transactions", &pm)
	})

	m.Get("/tx/:hash", func(params martini.Params, r render.Render, db *redis.Pool, rdb *RedisWrapper) {
		var tx *btcplex.Tx
		rpool := rdb.Pool
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		isutx, _ := btcplex.IsUnconfirmedTx(rpool, params["hash"])
		if isutx {
			pm.TxUnconfirmed = true
			tx, _ = btcplex.GetUnconfirmedTx(rpool, params["hash"])
		} else {
			tx, _ = btcplex.GetTx(db, params["hash"])
			tx.Build(db)
		}
		pm.Tx = tx
		pm.Title = fmt.Sprintf("Bitcoin transaction %v", tx.Hash)
		pm.Description = fmt.Sprintf("Bitcoin transaction %v summary.", tx.Hash)
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "tx", pm)
	})
	m.Get("/api/tx/:hash", func(params martini.Params, r render.Render, db *redis.Pool, rdb *RedisWrapper, req *http.Request) {
		var tx *btcplex.Tx
		rpool := rdb.Pool
		isutx, _ := btcplex.IsUnconfirmedTx(rpool, params["hash"])
		if isutx {
			tx, _ = btcplex.GetUnconfirmedTx(rpool, params["hash"])
		} else {
			tx, _ = btcplex.GetTx(db, params["hash"])
			tx.Build(db)
		}
		tx.Links = initHATEOAS(tx.Links, req)
		if tx.BlockHash != "" {
			tx.Links = addHATEOAS(tx.Links, "block", fmt.Sprintf("%v/api/block/%v", conf.AppUrl, tx.BlockHash))
		}
		r.JSON(200, tx)
	})

	m.Get("/address/:address", func(params martini.Params, r render.Render, db *redis.Pool, req *http.Request) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		pm.PaginationData = new(PaginationData)
		pm.Title = fmt.Sprintf("Bitcoin address %v", params["address"])
		pm.Description = fmt.Sprintf("Transactions and summary for the Bitcoin address %v.", params["address"])
		// AddressData
		addressdata, _ := btcplex.GetAddress(db, params["address"])
		pm.AddressData = addressdata
		// Pagination
		d := float64(addressdata.TxCnt) / float64(txperpage)
		pm.PaginationData.MaxPage = int(math.Ceil(d))
		currentPage := req.URL.Query().Get("page")
		if currentPage == "" {
			currentPage = "1"
		}
		pm.PaginationData.CurrentPage, _ = strconv.Atoi(currentPage)
		pm.PaginationData.Pages = N(pm.PaginationData.MaxPage)
		pm.PaginationData.Next = 0
		pm.PaginationData.Prev = 0
		if pm.PaginationData.CurrentPage > 1 {
			pm.PaginationData.Prev = pm.PaginationData.CurrentPage - 1
		}
		if pm.PaginationData.CurrentPage < pm.PaginationData.MaxPage {
			pm.PaginationData.Next = pm.PaginationData.CurrentPage + 1
		}
		fmt.Printf("%+v\n", pm.PaginationData)
		// Fetch txs given the pagination
		addressdata.FetchTxs(db, txperpage*(pm.PaginationData.CurrentPage-1), txperpage*pm.PaginationData.CurrentPage)
		r.HTML(200, "address", pm)
	})
	m.Get("/api/address/:address", func(params martini.Params, r render.Render, db *redis.Pool, req *http.Request) {
		addressdata, _ := btcplex.GetAddress(db, params["address"])
		lastPage := int(math.Ceil(float64(addressdata.TxCnt) / float64(txperpage)))
		currentPageStr := req.URL.Query().Get("page")
		if currentPageStr == "" {
			currentPageStr = "1"
		}
		currentPage, _ := strconv.Atoi(currentPageStr)
		// HATEOAS section
		addressdata.Links = initHATEOAS(addressdata.Links, req)
		pageurl := "%v/api/address/%v?page=%v"
		if currentPage < lastPage {
			addressdata.Links = addHATEOAS(addressdata.Links, "last", fmt.Sprintf(pageurl, conf.AppUrl, params["address"], lastPage))
			addressdata.Links = addHATEOAS(addressdata.Links, "next", fmt.Sprintf(pageurl, conf.AppUrl, params["address"], currentPage+1))
		}
		if currentPage > 1 {
			addressdata.Links = addHATEOAS(addressdata.Links, "previous", fmt.Sprintf(pageurl, conf.AppUrl, params["address"], currentPage-1))
		}
		addressdata.FetchTxs(db, txperpage*(currentPage-1), txperpage*currentPage)
		r.JSON(200, addressdata)
	})

	m.Get("/about", func(r render.Render) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		pm.Title = "About"
		pm.Description = "Learn more about BTCPlex, an open source Bitcoin blockchain browser written in Go."
		pm.Menu = "about"
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "about", pm)
	})

	m.Get("/status", func(r render.Render) {
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		pm.LastHeight = uint(latestheight)
		pm.Title = "Status"
		pm.Description = "BTCplex status page."
		pm.Menu = "status"
		pm.Analytics = conf.AppGoogleAnalytics
		btcplexinfo, _ := btcplex.GetInfoRPC(conf)
		pm.BitcoindInfo = btcplexinfo
		r.HTML(200, "status", pm)
	})

	m.Post("/search", binding.Form(searchForm{}), binding.ErrorHandler, func(search searchForm, r render.Render, db *redis.Pool, rdb *RedisWrapper) {
		rpool := rdb.Pool
		pm := new(pageMeta)
		pm.BtcplexSynced = btcplexsynced
		// Check if the query is a block height
		isblockheight, hash := btcplex.IsBlockHeight(db, search.Query)
		// The query may match several types; return after the first redirect
		if isblockheight && hash != "" {
			r.Redirect(fmt.Sprintf("/block/%v", hash))
			return
		}
		// Check if the query is a block hash
		isblockhash, hash := btcplex.IsBlockHash(db, search.Query)
		if isblockhash {
			r.Redirect(fmt.Sprintf("/block/%v", hash))
			return
		}
		// Check for TX
		istxhash, txhash := btcplex.IsTxHash(db, search.Query)
		if istxhash {
			r.Redirect(fmt.Sprintf("/tx/%v", txhash))
			return
		}
		isutx, txhash := btcplex.IsUnconfirmedTx(rpool, search.Query)
		if isutx {
			r.Redirect(fmt.Sprintf("/tx/%v", txhash))
			return
		}
		// Check for Bitcoin address
		isaddress, address := btcplex.IsAddress(search.Query)
		if isaddress {
			r.Redirect(fmt.Sprintf("/address/%v", address))
			return
		}
		pm.Title = "Search"
		pm.Error = "Nothing found"
		pm.Analytics = conf.AppGoogleAnalytics
		r.HTML(200, "search", pm)
	})

	m.Get("/api/getblockcount", func(r render.Render) {
		r.JSON(200, latestheight)
	})

	//	m.Get("/api/latesthash", func(r render.Render) {
	//		r.JSON(200, latesthash)
	//	})

	m.Get("/api/getblockhash/:height", func(r render.Render, params martini.Params, db *redis.Pool) {
		height, _ := strconv.ParseUint(params["height"], 10, 0)
		blockhash, _ := btcplex.GetBlockHash(db, uint(height))
		r.JSON(200, blockhash)
	})

	m.Get("/api/getreceivedbyaddress/:address", func(r render.Render, params martini.Params, db *redis.Pool) {
		res, _ := btcplex.GetReceivedByAddress(db, params["address"])
		r.JSON(200, res)
	})

	m.Get("/api/getsentbyaddress/:address", func(r render.Render, params martini.Params, db *redis.Pool) {
		res, _ := btcplex.GetSentByAddress(db, params["address"])
		r.JSON(200, res)
	})

	m.Get("/api/addressbalance/:address", func(r render.Render, params martini.Params, db *redis.Pool) {
		res, _ := btcplex.AddressBalance(db, params["address"])
		r.JSON(200, res)
	})

	m.Get("/api/checkaddress/:address", func(params martini.Params, r render.Render) {
		valid, _ := btcplex.ValidA58([]byte(params["address"]))
		r.JSON(200, valid)
	})

	m.Get("/api/blocknotify", func(w http.ResponseWriter, r *http.Request) {
		incrementClient()
		defer decrementClient()
		running := true
		notifier := w.(http.CloseNotifier).CloseNotify()
		timer := time.NewTimer(time.Second * 1800)

		f, _ := w.(http.Flusher)
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Connection", "keep-alive")

		bnotifier := blocknotifygroup.Join()
		defer bnotifier.Close()

		var ls interface{}
		for {
			if running {
				select {
				case ls = <-bnotifier.In:
					io.WriteString(w, fmt.Sprintf("data: %v\n\n", ls.(string)))
					f.Flush()
				case <-notifier:
					running = false
					log.Println("CLOSED")
					break // exits only the select; the loop ends via the running flag
				case <-timer.C:
					running = false
					log.Println("TimeOUT")
				}
			} else {
				log.Println("DONE")
				break
			}
		}
	})

	m.Get("/api/utxs/:address", func(w http.ResponseWriter, params martini.Params, r *http.Request, rdb *RedisWrapper) {
		incrementClient()
		defer decrementClient()
		rpool := rdb.Pool
		running := true
		notifier := w.(http.CloseNotifier).CloseNotify()
		timer := time.NewTimer(time.Second * 3600)

		f, _ := w.(http.Flusher)
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Connection", "keep-alive")

		utxs := make(chan string)
		go func(rpool *redis.Pool, utxs chan<- string) {
			conn := rpool.Get()
			defer conn.Close()
			psc := redis.PubSubConn{Conn: conn}
			psc.Subscribe(fmt.Sprintf("addr:%v:txs", params["address"]))
			for {
				switch v := psc.Receive().(type) {
				case redis.Message:
					utxs <- string(v.Data)
				}
			}
		}(rpool, utxs)

		var ls string
		for {
			if running {
				select {
				case ls = <-utxs:
					io.WriteString(w, fmt.Sprintf("data: %v\n\n", ls))
					f.Flush()
				case <-notifier:
					running = false
					log.Println("CLOSED")
					break
				case <-timer.C:
					running = false
					log.Println("TimeOUT")
				}
			} else {
				log.Println("DONE")
				break
			}
		}
	})

	m.Get("/api/utxs", func(w http.ResponseWriter, r *http.Request) {
		incrementClient()
		defer decrementClient()
		running := true
		notifier := w.(http.CloseNotifier).CloseNotify()
		timer := time.NewTimer(time.Second * 3600)

		f, _ := w.(http.Flusher)
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Connection", "keep-alive")

		utx := utxgroup.Join()
		defer utx.Close()

		var ls interface{}
		for {
			if running {
				select {
				case ls = <-utx.In:
					io.WriteString(w, fmt.Sprintf("data: %v\n\n", ls.(string)))
					f.Flush()
				case <-notifier:
					running = false
					log.Println("CLOSED")
					break
				case <-timer.C:
					running = false
					log.Println("TimeOUT")
				}
			} else {
				log.Println("DONE")
				break
			}
		}
	})

	m.Get("/events", func(w http.ResponseWriter, r *http.Request) {
		running := true
		notifier := w.(http.CloseNotifier).CloseNotify()
		timer := time.NewTimer(time.Second * 8400)

		f, _ := w.(http.Flusher)
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Connection", "keep-alive")

		newblockg := newblockgroup.Join()
		defer newblockg.Close()
		var ls interface{}
		for {
			if running {
				select {
				case ls = <-newblockg.In:
					io.WriteString(w, fmt.Sprintf("data: %v\n\n", ls.(string)))
					f.Flush()
				case <-notifier:
					running = false
					log.Println("CLOSED")
					break
				case <-timer.C:
					running = false
					log.Println("TimeOUT")
				}
			} else {
				log.Println("DONE")
				break
			}
		}
	})

	m.Get("/events_unconfirmed", func(w http.ResponseWriter, r *http.Request) {
		running := true
		notifier := w.(http.CloseNotifier).CloseNotify()
		timer := time.NewTimer(time.Second * 3600)

		f, _ := w.(http.Flusher)
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Connection", "keep-alive")

		utx := utxgroup.Join()
		defer utx.Close()

		var ls interface{}
		for {
			if running {
				select {
				case ls = <-utx.In:
					buf := bytes.NewBufferString("")
					utx := new(btcplex.Tx)
					json.Unmarshal([]byte(ls.(string)), utx)
					t := template.New("").Funcs(appHelpers)
					utxtmpl, _ := ioutil.ReadFile(fmt.Sprintf("%v/utx.tmpl", tmpldir))
					t, err := t.Parse(string(utxtmpl))
					if err != nil {
						log.Printf("ERR:%v", err)
					}

					err = t.Execute(buf, utx)
					if err != nil {
						log.Printf("ERR EXEC:%v", err)
					}
					res := map[string]interface{}{}
					// Full unconfirmed cnt from global variables
					res["cnt"] = utxscnt
					// HTML template of the transaction
					res["tmpl"] = buf.String()
					// Last updated time
					res["time"] = time.Now().UTC().Format(time.RFC3339)
					resjson, _ := json.Marshal(res)
					io.WriteString(w, fmt.Sprintf("data: %v\n\n", string(resjson)))
					f.Flush()
				case <-notifier:
					running = false
					log.Println("CLOSED")
					break
				case <-timer.C:
					running = false
					log.Println("TimeOUT")
				}
			} else {
				log.Println("DONE")
				break
			}
		}
	})

	m.Get("/api/info", func(r render.Render) {
		activeclientsmutex.Lock()
		defer activeclientsmutex.Unlock()
		btcplexinfo, _ := btcplex.GetInfoRPC(conf)
		r.JSON(200, map[string]interface{}{"activeclients": activeclients, "info": btcplexinfo})
	})

	log.Printf("Listening on port: %v\n", conf.AppPort)
	http.ListenAndServe(fmt.Sprintf(":%v", conf.AppPort), m)
}
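
Two helpers referenced above are not part of this snippet. First, the rateLimited function from the middleware; a plausible fixed-window sketch inferred from the call site (the key name and the one-hour window are assumptions):

// rateLimited counts requests per IP in a one-hour window via
// INCR/EXPIRE and reports whether the limit is exceeded, the current
// count, and the window reset time. Hypothetical sketch.
func rateLimited(rdb *RedisWrapper, ip string) (bool, int, int) {
	c := rdb.Pool.Get()
	defer c.Close()
	key := fmt.Sprintf("ratelimit:%v", ip)
	cnt, _ := redis.Int(c.Do("INCR", key))
	if cnt == 1 {
		c.Do("EXPIRE", key, 3600)
	}
	ttl, _ := redis.Int(c.Do("TTL", key))
	return cnt > ratelimitcnt, cnt, int(time.Now().Unix()) + ttl
}

Second, the addHATEOAS helper used by the API handlers; a plausible sketch inferred from its call sites (the Link type and its fields are assumptions):

// addHATEOAS appends a rel/href link pair to a resource's links.
// Hypothetical sketch based on the call sites above.
func addHATEOAS(links []btcplex.Link, rel, href string) []btcplex.Link {
	return append(links, btcplex.Link{Rel: rel, Href: href})
}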
Example #4
// Serve is for use by a server executable and makes it start listening on
// localhost at the configured port for Connect()ions from clients, and then
// handles those clients. It returns a *Server that you will typically call
// Block() on to block until your executable receives a SIGINT or SIGTERM,
// or you call Stop(), at which point the queues will be safely closed (you'd
// probably just exit at that point). The possible errors from Serve() will be
// related to not being able to start up at the supplied address; errors
// encountered while dealing with clients are logged but otherwise ignored. If
// it creates a db file or recreates one from backup, it will say what it did in
// the returned msg string. It also spawns your runner clients as needed,
// running them via the configured job scheduler, using the configured shell. It
// determines the command line to execute for your runner client from the
// configured RunnerCmd string you supplied.
func Serve(config ServerConfig) (s *Server, msg string, err error) {
	// port string, webPort string, schedulerName string, shell string, runnerCmd string, dbFile string, dbBkFile string, deployment string
	sock, err := rep.NewSocket()
	if err != nil {
		return
	}

	// we open ourselves up to possible denial-of-service attack if a client
	// sends us tons of data, but at least the client doesn't silently hang
	// forever when it legitimately wants to Add() a ton of jobs
	// unlimited Recv() length
	if err = sock.SetOption(mangos.OptionMaxRecvSize, 0); err != nil {
		return
	}

	// we use raw mode, allowing us to respond to multiple clients in
	// parallel
	if err = sock.SetOption(mangos.OptionRaw, true); err != nil {
		return
	}

	// we'll wait ServerInterruptTime to recv from clients before trying again,
	// allowing us to check if signals have been passed
	if err = sock.SetOption(mangos.OptionRecvDeadline, ServerInterruptTime); err != nil {
		return
	}

	sock.AddTransport(tcp.NewTransport())

	if err = sock.Listen("tcp://0.0.0.0:" + config.Port); err != nil {
		return
	}

	// serving will happen in a goroutine that will stop on SIGINT or SIGTERM,
	// or if something is sent on the quit channel
	sigs := make(chan os.Signal, 2)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
	stop := make(chan bool, 1)
	done := make(chan error, 1)

	// if we end up spawning clients on other machines, they'll need to know
	// our non-loopback ip address so they can connect to us
	ip := CurrentIP()
	if ip == "" {
		err = Error{"", "Serve", "", ErrNoHost}
		return
	}

	// to be friendly we also record the hostname, but it's possible this isn't
	// defined, hence we don't rely on it for anything important
	host, err := os.Hostname()
	if err != nil {
		host = "localhost"
	}

	// we will spawn runner clients via the requested job scheduler
	sch, err := scheduler.New(config.SchedulerName, config.SchedulerConfig)
	if err != nil {
		return
	}

	// we need to persist stuff to disk, and we do so using boltdb
	db, msg, err := initDB(config.DBFile, config.DBFileBackup, config.Deployment)
	if err != nil {
		return
	}

	s = &Server{
		ServerInfo:   &ServerInfo{Addr: ip + ":" + config.Port, Host: host, Port: config.Port, WebPort: config.WebPort, PID: os.Getpid(), Deployment: config.Deployment, Scheduler: config.SchedulerName, Mode: ServerModeNormal},
		sock:         sock,
		ch:           new(codec.BincHandle),
		qs:           make(map[string]*queue.Queue),
		rpl:          &rgToKeys{lookup: make(map[string]map[string]bool)},
		db:           db,
		stop:         stop,
		done:         done,
		up:           true,
		scheduler:    sch,
		sgroupcounts: make(map[string]int),
		sgrouptrigs:  make(map[string]int),
		sgtr:         make(map[string]*scheduler.Requirements),
		rc:           config.RunnerCmd,
		statusCaster: bcast.NewGroup(),
	}

	// if we're restarting from a state where there were incomplete jobs, we
	// need to load those in to the appropriate queues now
	priorJobs, err := db.recoverIncompleteJobs()
	if err != nil {
		return
	}
	if len(priorJobs) > 0 {
		jobsByQueue := make(map[string][]*queue.ItemDef)
		for _, job := range priorJobs {
			jobsByQueue[job.Queue] = append(jobsByQueue[job.Queue], &queue.ItemDef{Key: job.key(), Data: job, Priority: job.Priority, Delay: 0 * time.Second, TTR: ServerItemTTR, Dependencies: job.Dependencies.incompleteJobKeys(s.db)})
		}
		for qname, itemdefs := range jobsByQueue {
			q := s.getOrCreateQueue(qname)
			_, _, err = s.enqueueItems(q, itemdefs)
			if err != nil {
				return
			}
		}
	}

	// set up responding to command-line clients and signals
	go func() {
		// log panics and die
		defer s.logPanic("jobqueue serving", true)

		for {
			select {
			case sig := <-sigs:
				s.shutdown()
				var serr error
				switch sig {
				case os.Interrupt:
					serr = Error{"", "Serve", "", ErrClosedInt}
				case syscall.SIGTERM:
					serr = Error{"", "Serve", "", ErrClosedTerm}
				}
				signal.Stop(sigs)
				done <- serr
				return
			case <-stop:
				s.shutdown()
				signal.Stop(sigs)
				done <- Error{"", "Serve", "", ErrClosedStop}
				return
			default:
				// receive a clientRequest from a client
				m, rerr := sock.RecvMsg()
				if rerr != nil {
					if rerr != mangos.ErrRecvTimeout {
						log.Println(rerr)
					}
					continue
				}

				// parse the request, do the desired work and respond to the client
				go func() {
					// log panics and continue
					defer s.logPanic("jobqueue server client handling", false)

					herr := s.handleRequest(m)
					if ServerLogClientErrors && herr != nil {
						log.Println(herr)
					}
				}()
			}
		}
	}()

	// set up the web interface
	go func() {
		// log panics and die
		defer s.logPanic("jobqueue web server", true)

		mux := http.NewServeMux()
		mux.HandleFunc("/", webInterfaceStatic)
		mux.HandleFunc("/status_ws", webInterfaceStatusWS(s))
		go http.ListenAndServe("0.0.0.0:"+config.WebPort, mux) // *** should use ListenAndServeTLS, which needs certs (http package has cert creation)...
		go s.statusCaster.Broadcasting(0)
	}()

	return
}
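
Based on the doc comment above, typical usage would look something like this sketch (field values are hypothetical placeholders; only ServerConfig fields referenced inside Serve are set):

s, msg, err := Serve(ServerConfig{
	Port:          "11301",
	WebPort:       "11302",
	SchedulerName: "local",
	DBFile:        "/tmp/jobqueue.db",
	DBFileBackup:  "/tmp/jobqueue.db.bk",
	Deployment:    "development",
	RunnerCmd:     "runner",
})
if err != nil {
	log.Fatal(err)
}
log.Println(msg)
s.Block() // blocks until SIGINT/SIGTERM or s.Stop()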
Example #5
func main() {

	mylog = log.New(&logBuffer, "log: ", log.Lshortfile) //log configurations

	if runningInKubernetes { //find IP of POD
		myIP = myutils.GetMyIP("18.16.") //pattern of IPs of PODS in kubernetes
		if myIP[0:6] != "18.16." {
			mylog.Println("main: could not get MY own IP!: ", myIP)
		}
	} else {
		myIP = "127.0.0.1" //IP of localhost
	}

	if !runningInKubernetes { //running in localhost mode?
		if len(os.Args) < 3 { //3 parameters: program(mandatory),port,quorum-size
			fmt.Println("you should specify PORT and QUORUM-SIZE when using in " +
				"localhost mode. Assuming PORt=8000 and QUORUM-SIZE=5")
			//os.Exit(0)
			port = "8000"
			quorumSize := 5
			updateSetOfReplicasInLocalhost(quorumSize)
			if debug {
				mylog.Println("handR: [default] quorum size=", quorumSize)
			}
		} else {
			port = os.Args[1] //change the default port to be the specified one
			quorumSize, err := strconv.Atoi(os.Args[2])
			if err != nil {
				fmt.Println("ERROR: quorum size must be a number: " + os.Args[2])
				return
			}
			updateSetOfReplicasInLocalhost(quorumSize)
			if debug {
				mylog.Println("handR: quorum size=", quorumSize)
			}
			fmt.Println("quorum-size=", quorumSize)
		}

	} else {
		updateSetOfReplicasInKubernetes()
	}

	requestsWaitingForTheLeader = make(map[string]string)
	requestsWaitingForTheLeaderTimes = make(map[string]time.Time)

	health = "starting replicatedcalc" //first health message

	http.HandleFunc("/healthz", handHealth)
	http.HandleFunc("/logs", handLogs)
	http.HandleFunc("/queue", handQueue)
	http.HandleFunc("/election", handElectionQueue)
	http.HandleFunc("/reset", handResetEtcd)
	http.HandleFunc("/", handRequests)
	http.HandleFunc("/consensus", handListConsensus)

	group = bcast.NewGroup() //broadcast group for consensus notifications

	health += "\n IP discovered: " + myIP //add IP info to the health status
	go executor(executionChan)
	go quorumWatcher(requestChan, executionChan)

	go etcdElectionsWatcher()

	//flag to catch requests notifications while loading queue
	loadingQueue = true
	go etcdWatcher(requestChan)    //listen notifications while loading queue
	loadQueueFromEtcd(requestChan) //retrieve queue in etcd
	loadingQueue = false
	go processRequestsArrivedWhileLoading(requestChan)
	go group.Broadcasting(0)
	go watcherMonitor(restartWatcherChan)

	if !runningInKubernetes {
		fmt.Println("NOT running in kubernetes, server started, port=", port)
	}

	mylog.Println("server ready to start: ", myIP) //initial message in the log
	http.ListenAndServe(":"+port, nil)             //start server
}
Example #6
	queue        = make(map[string]string) //internal queue
	inverseQueue = make(map[string]string) //inverse structure, to make lookups faster

	valuesOfConsensusInstances   = make(map[int]int)       // consensus instance, result of the executed request
	consensusInstanceOfRequestID = make(map[string]int)    // requestID, order of request
	requestIDofConsensusInstance = make(map[int]string)    //TEMP, just for printing for now
	opOfConsensusInstance        = make(map[int]string)    //TEMP, just for printing for now
	queueReqIdAndRespondentKeys  = make(map[string]string) //for faster counting of replicas that answered
	tmpRequestListWhileLoading   = make(map[string]string)

	replicas         []string                //set of replicas
	requestChan      = make(chan myRequest)  //notify watched requests
	executionChan    = make(chan myRequest)  //notify requests that achieved consensus
	requestsToAnswer = make(map[string]bool) //requestID of requests that I have to answer

	group = bcast.NewGroup()
	//https://github.com/grafov/bcast

	restartWatcherChan = make(chan string) //channel for restart watcher
	lastObservedKey    = ""                //last observed key by watcher
	watcherRestarts    = 0                 //counter of watcher restarts

	//consensusInMemory []string //consensus just for printing

	availableConsensusInstance = 1
	initialValueOfStateMachine = 0

	lastExecutedInstanceOfConsensus = 0
	thisReplicaIsTheLeader          = false

	leaderMonitorOn = false
// StreamMonitor runs monitors for each stream.
func StreamMonitor() {
	var debugvars = expvar.NewMap("streams")
	var requestedTasks = expvar.NewInt("requested-tasks")
	var queueSizeHLSTasks = expvar.NewInt("hls-tasks-queue")
	var executedHLSTasks = expvar.NewInt("hls-tasks-done")
	var expiredHLSTasks = expvar.NewInt("hls-tasks-expired")
	var queueSizeHDSTasks = expvar.NewInt("hds-tasks-queue")
	var executedHDSTasks = expvar.NewInt("hds-tasks-done")
	var expiredHDSTasks = expvar.NewInt("hds-tasks-expired")
	var queueSizeHTTPTasks = expvar.NewInt("http-tasks-queue")
	var executedHTTPTasks = expvar.NewInt("http-tasks-done")
	var expiredHTTPTasks = expvar.NewInt("http-tasks-expired")
	var queueSizeWVTasks = expvar.NewInt("wv-tasks-queue")
	var executedWVTasks = expvar.NewInt("wv-tasks-done")
	var expiredWVTasks = expvar.NewInt("wv-tasks-expired")
	var hlscount, hdscount, wvcount, httpcount int
	var hlsprobecount int

	debugvars.Set("requested-tasks", requestedTasks)
	debugvars.Set("hls-tasks-queue", queueSizeHLSTasks)
	debugvars.Set("hls-tasks-done", executedHLSTasks)
	debugvars.Set("hls-tasks-expired", expiredHLSTasks)
	debugvars.Set("hds-tasks-queue", queueSizeHDSTasks)
	debugvars.Set("hds-tasks-done", executedHDSTasks)
	debugvars.Set("hds-tasks-expired", expiredHDSTasks)
	debugvars.Set("http-tasks-queue", queueSizeHTTPTasks)
	debugvars.Set("http-tasks-done", executedHTTPTasks)
	debugvars.Set("http-tasks-expired", expiredHTTPTasks)
	debugvars.Set("wv-tasks-queue", queueSizeWVTasks)
	debugvars.Set("wv-tasks-done", executedWVTasks)
	debugvars.Set("wv-tasks-expired", expiredWVTasks)

	ctl := bcast.NewGroup()
	go ctl.Broadcasting(0)
	go Heartbeat(ctl)

	// start the probers and stream monitors
	for gname, gdata := range cfg.GroupParams {
		switch gdata.Type {
		case HLS:
			gtasks := make(chan *Task)
			for i := 0; i < gdata.Probers; i++ {
				go CupertinoProber(ctl, gtasks, debugvars)
				hlsprobecount++
			}
			gchunktasks := make(chan *Task)
			for i := 0; i < gdata.MediaProbers; i++ {
				go MediaProber(ctl, HLS, gchunktasks, debugvars)
			}
			for _, stream := range *cfg.GroupStreams[gname] {
				go StreamBox(ctl, stream, HLS, gtasks, debugvars)
				hlscount++
			}
		case HDS:
			gtasks := make(chan *Task)
			for i := 0; i < gdata.Probers; i++ {
				go SanjoseProber(ctl, gtasks, debugvars)
			}
			gchunktasks := make(chan *Task)
			for i := 0; i < gdata.MediaProbers; i++ {
				go MediaProber(ctl, HDS, gchunktasks, debugvars)
			}
			for _, stream := range *cfg.GroupStreams[gname] {
				go StreamBox(ctl, stream, HDS, gtasks, debugvars)
				hdscount++
			}
		case HTTP:
			gtasks := make(chan *Task)
			for i := 0; i < gdata.Probers; i++ {
				go SimpleProber(ctl, gtasks, debugvars)
			}
			for _, stream := range *cfg.GroupStreams[gname] {
				go StreamBox(ctl, stream, HTTP, gtasks, debugvars)
				httpcount++
			}
		case WV:
			gtasks := make(chan *Task)
			for i := 0; i < gdata.Probers; i++ {
				go WidevineProber(ctl, gtasks, debugvars)
			}
			for _, stream := range *cfg.GroupStreams[gname] {
				go StreamBox(ctl, stream, WV, gtasks, debugvars)
				wvcount++
			}
		}
	}

	if hlsprobecount > 0 {
		fmt.Printf("%d HLS probers started.\n", hlsprobecount)
	} else {
		println("No HLS probers started.")
	}
	// if cfg.TotalProbersHDS > 0 {
	// 	fmt.Printf("%d HDS probers started.\n", cfg.TotalProbersHDS)
	// } else {
	// 	println("No HDS probers started.")
	// }
	// if cfg.TotalProbersHTTP > 0 {
	// 	fmt.Printf("%d HTTP probers started.\n", cfg.TotalProbersHTTP)
	// } else {
	// 	println("No HTTP probers started.")
	// }
	// if cfg.TotalProbersWV > 0 {
	// 	fmt.Printf("%d Widevine VOD probers started.\n", cfg.TotalProbersWV)
	// } else {
	// 	println("No Widevine probers started.")
	// }
	if hlscount > 0 {
		StatsGlobals.TotalHLSMonitoringPoints = hlscount
		fmt.Printf("%d HLS monitors started.\n", hlscount)
	} else {
		println("No HLS monitors started.")
	}
	if hdscount > 0 {
		StatsGlobals.TotalHDSMonitoringPoints = hdscount
		fmt.Printf("%d HDS monitors started.\n", hdscount)
	} else {
		println("No HDS monitors started.")
	}
	if httpcount > 0 {
		StatsGlobals.TotalHTTPMonitoringPoints = httpcount
		fmt.Printf("%d HTTP monitors started.\n", httpcount)
	} else {
		println("No HTTP monitors started.")
	}
	if wvcount > 0 {
		StatsGlobals.TotalWVMonitoringPoints = wvcount
		fmt.Printf("%d Widevine monitors started.\n", wvcount)
	} else {
		println("No Widevine monitors started.")
	}

	StatsGlobals.TotalMonitoringPoints = hlscount + hdscount + httpcount + wvcount
}
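
For reference, the Heartbeat helper started above could be as simple as this sketch (hypothetical; the real implementation is not shown), assuming the group's Send broadcasts a value to every joined member:

// Heartbeat periodically broadcasts a tick on the control group so
// probers and stream monitors can tell the monitor is alive.
// Hypothetical sketch.
func Heartbeat(ctl *bcast.Group) {
	for range time.Tick(time.Second) {
		ctl.Send("heartbeat")
	}
}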