Example #1
func main() {
	n := flag.Int("n", 0, "expected number of items")
	falsePositiveRate := flag.Float64("fpr", 0, "false positive rate")
	flag.Parse()
	filter := bloom.NewWithEstimates(uint(*n), *falsePositiveRate)
	fmt.Printf("m: %d, k: %d\n", filter.Cap(), filter.K())
}
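For reference, Cap() and K() above report the bit-array size m and the number of hash functions k chosen by the estimator. Below is a minimal standalone sketch of the textbook sizing formulas, assuming the library follows the usual estimate m = -n*ln(p)/(ln 2)^2 and k = (m/n)*ln 2; the exact rounding inside bloom.NewWithEstimates may differ.

package main

import (
	"fmt"
	"math"
)

// estimate applies the standard Bloom filter sizing formulas:
// m = -n*ln(p) / (ln 2)^2 bits and k = (m/n)*ln 2 hash functions.
func estimate(n uint, p float64) (m, k uint) {
	m = uint(math.Ceil(-float64(n) * math.Log(p) / (math.Ln2 * math.Ln2)))
	k = uint(math.Ceil(math.Ln2 * float64(m) / float64(n)))
	return
}

func main() {
	m, k := estimate(1000000, 0.01)
	fmt.Printf("m: %d, k: %d\n", m, k) // roughly 9.6 million bits and 7 hash functions
}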
Example #2
File: announce.go Project: JyBP/torrent
// This is kind of the main thing you want to do with DHT. It traverses the
// graph toward nodes that store peers for the infohash, streaming them to the
// caller, and announcing the local node to each node if allowed and
// specified.
func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*Announce, error) {
	s.mu.Lock()
	startAddrs := func() (ret []dHTAddr) {
		for _, n := range s.closestGoodNodes(160, infoHash) {
			ret = append(ret, n.addr)
		}
		return
	}()
	s.mu.Unlock()
	if len(startAddrs) == 0 {
		addrs, err := bootstrapAddrs(s.bootstrapNodes)
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			startAddrs = append(startAddrs, newDHTAddr(addr))
		}
	}
	disc := &Announce{
		Peers:               make(chan PeersValues, 100),
		stop:                make(chan struct{}),
		values:              make(chan PeersValues),
		triedAddrs:          bloom.NewWithEstimates(1000, 0.5),
		server:              s,
		infoHash:            infoHash,
		announcePort:        port,
		announcePortImplied: impliedPort,
	}
	// Ferry results from the internal values channel to the exported Peers channel until discovery is halted.
	go func() {
		defer close(disc.Peers)
		for {
			select {
			case psv := <-disc.values:
				select {
				case disc.Peers <- psv:
				case <-disc.stop:
					return
				}
			case <-disc.stop:
				return
			}
		}
	}()
	for i, addr := range startAddrs {
		if i != 0 {
			time.Sleep(time.Millisecond)
		}
		disc.mu.Lock()
		disc.contact(addr)
		disc.mu.Unlock()
	}
	return disc, nil
}
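The triedAddrs filter above is sized for roughly 1000 addresses at a 50% false-positive rate, which suggests it only needs to be good enough to avoid re-contacting most nodes during a single traversal. A hedged sketch of the test-then-add pattern such a filter supports; the string key and the contact callback are illustrative stand-ins, not the project's dHTAddr type or contact method:

package main

import "github.com/willf/bloom" // assumed import path for the bloom package used in these examples

// contactOnce tests an address against the filter before contacting it and
// records it afterwards, so a traversal does not revisit the same node.
func contactOnce(tried *bloom.BloomFilter, addr string, contact func(string)) {
	if tried.TestString(addr) {
		return // probably contacted already; a false positive merely skips one node
	}
	tried.AddString(addr)
	contact(addr)
}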
Example #3
File: log.go Project: cayleydb/dgraph
// Lock must have been acquired.
func createAndUpdateBloomFilter(cache *Cache) {
	hashes := make([]uint32, 50000)
	hashes = hashes[:0]
	if err := streamEntries(cache, 0, 0, func(hdr Header, record []byte) {
		hashes = append(hashes, hdr.hash)
	}); err != nil {
		glog.WithError(err).Fatal("Unable to create bloom filters.")
	}

	n := 100000
	if len(hashes) > n {
		n = len(hashes)
	}
	cache.bf = bloom.NewWithEstimates(uint(n), 0.0001)
	for _, hash := range hashes {
		cache.bf.Add(toBytes(hash))
	}
}
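The filter above is rebuilt from every hash seen while streaming the log, which makes cheap negative lookups possible afterwards. A sketch of what that read-side check could look like; the 4-byte encoding mirrors what a toBytes(uint32) helper would plausibly do and is an assumption, not the project's actual helper:

package main

import (
	"encoding/binary"

	"github.com/willf/bloom" // assumed import path
)

// hashMightExist probes the filter before any expensive log lookup. A false
// answer is definitive; a true answer may occasionally be a false positive.
func hashMightExist(bf *bloom.BloomFilter, hash uint32) bool {
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], hash)
	return bf.Test(buf[:])
}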
Example #4
// PrepareMux sets up the API
func PrepareMux(flags *env.Flags) *web.Mux {
	// Set up a new logger
	log := logrus.New()

	// Set the formatter depending on the passed flag's value
	if flags.LogFormatterType == "text" {
		log.Formatter = &logrus.TextFormatter{
			ForceColors: flags.ForceColors,
		}
	} else if flags.LogFormatterType == "json" {
		log.Formatter = &logrus.JSONFormatter{}
	}

	// Install Logrus hooks
	if flags.SlackURL != "" {
		var level []logrus.Level

		switch flags.SlackLevels {
		case "debug":
			level = slackrus.LevelThreshold(logrus.DebugLevel)
		case "error":
			level = slackrus.LevelThreshold(logrus.ErrorLevel)
		case "fatal":
			level = slackrus.LevelThreshold(logrus.FatalLevel)
		case "info":
			level = slackrus.LevelThreshold(logrus.InfoLevel)
		case "panic":
			level = slackrus.LevelThreshold(logrus.PanicLevel)
		case "warn":
			level = slackrus.LevelThreshold(logrus.WarnLevel)
		}

		log.Hooks.Add(&slackrus.SlackrusHook{
			HookURL:        flags.SlackURL,
			AcceptedLevels: level,
			Channel:        flags.SlackChannel,
			IconEmoji:      flags.SlackIcon,
			Username:       flags.SlackUsername,
		})
	}

	// Connect to raven
	var rc *raven.Client
	if flags.RavenDSN != "" {
		h, err := os.Hostname()
		if err != nil {
			log.Fatal(err)
		}

		rc, err = raven.NewClient(flags.RavenDSN, map[string]string{
			"hostname": h,
		})
		if err != nil {
			log.Fatal(err)
		}
	}
	env.Raven = rc

	// Pass it to the environment package
	env.Log = log

	// Load the bloom filter
	bf := bloom.NewWithEstimates(flags.BloomCount, 0.001)
	bff, err := os.Open(flags.BloomFilter)
	if err != nil {
		log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to open the bloom filter file")
	}
	defer bff.Close()
	if _, err := bf.ReadFrom(bff); err != nil {
		log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to read from the bloom filter file")
	}
	env.PasswordBF = bf

	// Initialize the cache
	redis, err := cache.NewRedisCache(&cache.RedisCacheOpts{
		Address:  flags.RedisAddress,
		Database: flags.RedisDatabase,
		Password: flags.RedisPassword,
	})
	if err != nil {
		log.WithFields(logrus.Fields{
			"error": err,
		}).Fatal("Unable to connect to the redis server")
	}

	env.Cache = redis

	// Set up the database
	rethinkOpts := gorethink.ConnectOpts{
		Address: flags.RethinkDBAddress,
		AuthKey: flags.RethinkDBKey,
		MaxIdle: 10,
		Timeout: time.Second * 10,
	}
	err = db.Setup(rethinkOpts)
	if err != nil {
		log.WithFields(logrus.Fields{
			"error": err,
		}).Fatal("Unable to set up the database")
	}

	// Initialize the actual connection
	rethinkOpts.Database = flags.RethinkDBDatabase
	rethinkSession, err := gorethink.Connect(rethinkOpts)
	if err != nil {
		log.WithFields(logrus.Fields{
			"error": err,
		}).Fatal("Unable to connect to the database")
	}

	// Put the RethinkDB session into the environment package
	env.Rethink = rethinkSession

	// Initialize factors
	env.Factors = make(map[string]factor.Factor)
	if flags.YubiCloudID != "" {
		yubicloud, err := factor.NewYubiCloud(flags.YubiCloudID, flags.YubiCloudKey)
		if err != nil {
			env.Log.WithFields(logrus.Fields{
				"error": err,
			}).Fatal("Unable to initiate YubiCloud")
		}
		env.Factors[yubicloud.Type()] = yubicloud
	}

	authenticator := factor.NewAuthenticator(6)
	env.Factors[authenticator.Type()] = authenticator

	// Initialize the tables
	env.Tokens = &db.TokensTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"tokens",
		),
		Cache: redis,
	}
	env.Accounts = &db.AccountsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"accounts",
		),
		Tokens: env.Tokens,
	}
	env.Addresses = &db.AddressesTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"addresses",
		),
	}
	env.Keys = &db.KeysTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"keys",
		),
	}
	env.Contacts = &db.ContactsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"contacts",
		),
	}
	env.Reservations = &db.ReservationsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"reservations",
		),
	}
	env.Emails = &db.EmailsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"emails",
		),
	}
	env.Threads = &db.ThreadsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"threads",
		),
	}
	env.Labels = &db.LabelsTable{
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"labels",
		),
		Emails: env.Emails,
		//Cache:  redis,
	}
	env.Files = &db.FilesTable{
		Emails: env.Emails,
		RethinkCRUD: db.NewCRUDTable(
			rethinkSession,
			rethinkOpts.Database,
			"files",
		),
	}

	// Create a producer
	producer, err := nsq.NewProducer(flags.NSQdAddress, nsq.NewConfig())
	if err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to create a new nsq producer")
	}

	/*defer func(producer *nsq.Producer) {
		producer.Stop()
	}(producer)*/

	env.Producer = producer

	// Get the hostname
	hostname, err := os.Hostname()
	if err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to get the hostname")
	}

	// Create a delivery consumer
	deliveryConsumer, err := nsq.NewConsumer("email_delivery", hostname, nsq.NewConfig())
	if err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
			"topic": "email_delivery",
		}).Fatal("Unable to create a new nsq consumer")
	}
	//defer deliveryConsumer.Stop()

	deliveryConsumer.AddConcurrentHandlers(nsq.HandlerFunc(func(m *nsq.Message) error {
		// Raven recoverer
		defer func() {
			rec := recover()
			if rec == nil {
				return
			}

			msg := &raven.Message{
				Message: string(m.Body),
				Params:  []interface{}{"delivery"},
			}

			var packet *raven.Packet
			switch rval := rec.(type) {
			case error:
				packet = raven.NewPacket(rval.Error(), msg, raven.NewException(rval, raven.NewStacktrace(2, 3, nil)))
			default:
				str := fmt.Sprintf("%+v", rval)
				packet = raven.NewPacket(str, msg, raven.NewException(errors.New(str), raven.NewStacktrace(2, 3, nil)))
			}

			rc.Capture(packet, nil)
		}()

		var msg *struct {
			ID    string `json:"id"`
			Owner string `json:"owner"`
		}

		if err := json.Unmarshal(m.Body, &msg); err != nil {
			return err
		}

		// Check if we are handling owner's session
		if _, ok := sessions[msg.Owner]; !ok {
			return nil
		}

		if len(sessions[msg.Owner]) == 0 {
			return nil
		}

		// Resolve the email
		email, err := env.Emails.GetEmail(msg.ID)
		if err != nil {
			env.Log.WithFields(logrus.Fields{
				"error": err.Error(),
				"id":    msg.ID,
			}).Error("Unable to resolve an email from queue")
			return nil
		}

		// Resolve the thread
		thread, err := env.Threads.GetThread(email.Thread)
		if err != nil {
			env.Log.WithFields(logrus.Fields{
				"error":  err.Error(),
				"id":     msg.ID,
				"thread": email.Thread,
			}).Error("Unable to resolve a thread from queue")
			return nil
		}

		// Send notifications to subscribers
		for _, session := range sessions[msg.Owner] {
			result, _ := json.Marshal(map[string]interface{}{
				"type":   "delivery",
				"id":     msg.ID,
				"name":   email.Name,
				"thread": email.Thread,
				"labels": thread.Labels,
			})
			err = session.Send(string(result))
			if err != nil {
				env.Log.WithFields(logrus.Fields{
					"id":    session.ID(),
					"error": err.Error(),
				}).Warn("Error while writing to a WebSocket")
			}
		}

		return nil
	}), 10)

	if err := deliveryConsumer.ConnectToNSQLookupd(flags.LookupdAddress); err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to connect to nsqlookupd")
	}

	// Create a receipt consumer
	receiptConsumer, err := nsq.NewConsumer("email_receipt", hostname, nsq.NewConfig())
	if err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
			"topic": "email_receipt",
		}).Fatal("Unable to create a new nsq consumer")
	}
	//defer receiptConsumer.Stop()

	receiptConsumer.AddConcurrentHandlers(nsq.HandlerFunc(func(m *nsq.Message) error {
		// Raven recoverer
		defer func() {
			rec := recover()
			if rec == nil {
				return
			}

			msg := &raven.Message{
				Message: string(m.Body),
				Params:  []interface{}{"receipt"},
			}

			var packet *raven.Packet
			switch rval := rec.(type) {
			case error:
				packet = raven.NewPacket(rval.Error(), msg, raven.NewException(rval, raven.NewStacktrace(2, 3, nil)))
			default:
				str := fmt.Sprintf("%+v", rval)
				packet = raven.NewPacket(str, msg, raven.NewException(errors.New(str), raven.NewStacktrace(2, 3, nil)))
			}

			rc.Capture(packet, nil)
		}()

		var msg *struct {
			ID    string `json:"id"`
			Owner string `json:"owner"`
		}

		if err := json.Unmarshal(m.Body, &msg); err != nil {
			return err
		}

		// Check if we are handling owner's session
		if _, ok := sessions[msg.Owner]; !ok {
			return nil
		}

		if len(sessions[msg.Owner]) == 0 {
			return nil
		}

		// Resolve the email
		email, err := env.Emails.GetEmail(msg.ID)
		if err != nil {
			env.Log.WithFields(logrus.Fields{
				"error": err.Error(),
				"id":    msg.ID,
			}).Error("Unable to resolve an email from queue")
			return nil
		}

		// Resolve the thread
		thread, err := env.Threads.GetThread(email.Thread)
		if err != nil {
			env.Log.WithFields(logrus.Fields{
				"error":  err.Error(),
				"id":     msg.ID,
				"thread": email.Thread,
			}).Error("Unable to resolve a thread from queue")
			return nil
		}

		// Send notifications to subscribers
		for _, session := range sessions[msg.Owner] {
			result, _ := json.Marshal(map[string]interface{}{
				"type":   "receipt",
				"id":     msg.ID,
				"name":   email.Name,
				"thread": email.Thread,
				"labels": thread.Labels,
			})
			err = session.Send(string(result))
			if err != nil {
				env.Log.WithFields(logrus.Fields{
					"id":    session.ID(),
					"error": err.Error(),
				}).Warn("Error while writing to a WebSocket")
			}
		}

		return nil
	}), 10)

	if err := receiptConsumer.ConnectToNSQLookupd(flags.LookupdAddress); err != nil {
		env.Log.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Unable to connect to nsqlookupd")
	}

	// Create a new goji mux
	mux := web.New()

	// Include the most basic middlewares:
	//  - RequestID assigns a unique ID for each request in order to identify errors.
	//  - Glogrus logs each request
	//  - Recoverer prevents panics from crashing the API
	//  - AutomaticOptions automatically responds to OPTIONS requests
	mux.Use(func(c *web.C, h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// sockjs manages its own response headers, so skip the CORS handling for /ws requests
			if strings.HasPrefix(r.RequestURI, "/ws") {
				h.ServeHTTP(w, r)
				return
			}

			// Allow credentials (cookies, auth headers) on cross-origin requests
			w.Header().Set("Access-Control-Allow-Credentials", "true")

			allowedHeaders := []string{
				"Origin",
				"Content-Type",
				"Authorization",
				"X-Requested-With",
			}

			reqHeaders := strings.Split(r.Header.Get("Access-Control-Request-Headers"), ",")
			allowedHeaders = append(allowedHeaders, reqHeaders...)

			resultHeaders := []string{}
			seenHeaders := map[string]struct{}{}
			for _, val := range allowedHeaders {
				if _, ok := seenHeaders[val]; !ok && val != "" {
					resultHeaders = append(resultHeaders, val)
					seenHeaders[val] = struct{}{}
				}
			}

			w.Header().Set("Access-Control-Allow-Headers", strings.Join(resultHeaders, ","))

			/*
				if c.Env != nil {
					if v, ok := c.Env[web.ValidMethodsKey]; ok {
						if methods, ok := v.([]string); ok {
							methodsString := strings.Join(methods, ",")
							w.Header().Set("Allow", methodsString)
							w.Header().Set("Access-Control-Allow-Methods", methodsString)
						}
					}
				} */

			// Allow every origin and the standard methods
			w.Header().Set("Access-Control-Allow-Methods", "GET,POST,PUT,DELETE")
			w.Header().Set("Access-Control-Allow-Origin", "*")

			if r.Method != "OPTIONS" {
				h.ServeHTTP(w, r)
			}
		})
	})
	mux.Use(middleware.RequestID)
	//mux.Use(glogrus.NewGlogrus(log, "api"))
	mux.Use(recoverer)
	mux.Use(middleware.AutomaticOptions)

	// Set up an auth'd mux
	auth := web.New()
	auth.Use(routes.AuthMiddleware)

	// Index route
	mux.Get("/", routes.Hello)

	// Accounts
	auth.Get("/accounts", routes.AccountsList)
	mux.Post("/accounts", routes.AccountsCreate)
	auth.Get("/accounts/:id", routes.AccountsGet)
	auth.Put("/accounts/:id", routes.AccountsUpdate)
	auth.Delete("/accounts/:id", routes.AccountsDelete)
	auth.Post("/accounts/:id/wipe-data", routes.AccountsWipeData)
	auth.Post("/accounts/:id/start-onboarding", routes.AccountsStartOnboarding)

	// Addresses
	auth.Get("/addresses", routes.AddressesList)

	// Avatars
	mux.Get(regexp.MustCompile(`/avatars/(?P<hash>[\S\s]*?)\.(?P<ext>svg|png)(?:[\S\s]*?)$`), routes.Avatars)
	//mux.Get("/avatars/:hash.:ext", routes.Avatars)

	// Files
	auth.Get("/files", routes.FilesList)
	auth.Post("/files", routes.FilesCreate)
	auth.Get("/files/:id", routes.FilesGet)
	auth.Put("/files/:id", routes.FilesUpdate)
	auth.Delete("/files/:id", routes.FilesDelete)

	// Tokens
	auth.Get("/tokens", routes.TokensGet)
	auth.Get("/tokens/:id", routes.TokensGet)
	mux.Post("/tokens", routes.TokensCreate)
	auth.Delete("/tokens", routes.TokensDelete)
	auth.Delete("/tokens/:id", routes.TokensDelete)

	// Threads
	auth.Get("/threads", routes.ThreadsList)
	auth.Get("/threads/:id", routes.ThreadsGet)
	auth.Put("/threads/:id", routes.ThreadsUpdate)
	auth.Delete("/threads/:id", routes.ThreadsDelete)

	// Emails
	auth.Get("/emails", routes.EmailsList)
	auth.Post("/emails", routes.EmailsCreate)
	auth.Get("/emails/:id", routes.EmailsGet)
	auth.Delete("/emails/:id", routes.EmailsDelete)

	// Labels
	auth.Get("/labels", routes.LabelsList)
	auth.Post("/labels", routes.LabelsCreate)
	auth.Get("/labels/:id", routes.LabelsGet)
	auth.Put("/labels/:id", routes.LabelsUpdate)
	auth.Delete("/labels/:id", routes.LabelsDelete)

	// Contacts
	auth.Get("/contacts", routes.ContactsList)
	auth.Post("/contacts", routes.ContactsCreate)
	auth.Get("/contacts/:id", routes.ContactsGet)
	auth.Put("/contacts/:id", routes.ContactsUpdate)
	auth.Delete("/contacts/:id", routes.ContactsDelete)

	// Keys
	mux.Get("/keys", routes.KeysList)
	auth.Post("/keys", routes.KeysCreate)
	mux.Get("/keys/:id", routes.KeysGet)
	auth.Post("/keys/:id/vote", routes.KeysVote)

	// Headers proxy
	mux.Get("/headers", func(w http.ResponseWriter, r *http.Request) {
		utils.JSONResponse(w, 200, r.Header)
	})

	mux.Handle("/ws/*", sockjs.NewHandler("/ws", sockjs.DefaultOptions, func(session sockjs.Session) {
		var subscribed string

		// A new goroutine seems to be spawned for each new session
		for {
			// Read a message from the input
			msg, err := session.Recv()
			if err != nil {
				if err != sockjs.ErrSessionNotOpen {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
					}).Warn("Error while reading from a WebSocket")
				}
				break
			}

			// Decode the message
			var input struct {
				Type    string            `json:"type"`
				Token   string            `json:"token"`
				ID      string            `json:"id"`
				Method  string            `json:"method"`
				Path    string            `json:"path"`
				Body    string            `json:"body"`
				Headers map[string]string `json:"headers"`
			}
			err = json.Unmarshal([]byte(msg), &input)
			if err != nil {
				// Return an error response
				resp, _ := json.Marshal(map[string]interface{}{
					"type":  "error",
					"error": err,
				})
				err := session.Send(string(resp))
				if err != nil {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
					}).Warn("Error while writing to a WebSocket")
					break
				}
				continue
			}

			// Check message's type
			if input.Type == "subscribe" {
				// Listen to user's events

				// Check if token is empty
				if input.Token == "" {
					// Return an error response
					resp, _ := json.Marshal(map[string]interface{}{
						"type":  "error",
						"error": "Invalid token",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						break
					}
					continue
				}

				// Check the token in database
				token, err := env.Tokens.GetToken(input.Token)
				if err != nil {
					// Return an error response
					resp, _ := json.Marshal(map[string]interface{}{
						"type":  "error",
						"error": "Invalid token",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						break
					}
					continue
				}

				// Do the actual subscription
				subscribed = token.Owner
				sessionsLock.Lock()

				// Sessions map already contains this owner
				if _, ok := sessions[token.Owner]; ok {
					sessions[token.Owner] = append(sessions[token.Owner], session)
				} else {
					// We have to allocate a new slice
					sessions[token.Owner] = []sockjs.Session{session}
				}

				// Unlock the map write
				sessionsLock.Unlock()

				// Return a response
				resp, _ := json.Marshal(map[string]interface{}{
					"type": "subscribed",
				})
				err = session.Send(string(resp))
				if err != nil {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
					}).Warn("Error while writing to a WebSocket")
					break
				}
			} else if input.Type == "unsubscribe" {
				if subscribed == "" {
					resp, _ := json.Marshal(map[string]interface{}{
						"type":  "error",
						"error": "Not subscribed",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						break
					}
				}

				sessionsLock.Lock()

				if _, ok := sessions[subscribed]; !ok {
					// Return a response
					resp, _ := json.Marshal(map[string]interface{}{
						"type": "unsubscribed",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						sessionsLock.Unlock()
						subscribed = ""
						break
					}
					sessionsLock.Unlock()
					subscribed = ""
					continue
				}

				if len(sessions[subscribed]) == 1 {
					delete(sessions, subscribed)

					// Return a response
					resp, _ := json.Marshal(map[string]interface{}{
						"type": "unsubscribed",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						sessionsLock.Unlock()
						subscribed = ""
						break
					}
					sessionsLock.Unlock()
					subscribed = ""
					continue
				}

				// Find the session
				index := -1
				for i, session2 := range sessions[subscribed] {
					if session == session2 {
						index = i
						break
					}
				}

				// We didn't find anything
				if index == -1 {
					// Return a response
					resp, _ := json.Marshal(map[string]interface{}{
						"type": "unsubscribed",
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						sessionsLock.Unlock()
						subscribed = ""
						break
					}
					sessionsLock.Unlock()
					subscribed = ""
					continue
				}

				// Remove the session by swapping in the last element and truncating the slice
				sessions[subscribed][index] = sessions[subscribed][len(sessions[subscribed])-1]
				sessions[subscribed][len(sessions[subscribed])-1] = nil
				sessions[subscribed] = sessions[subscribed][:len(sessions[subscribed])-1]

				// Return a response
				resp, _ := json.Marshal(map[string]interface{}{
					"type": "unsubscribed",
				})
				err := session.Send(string(resp))
				if err != nil {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
					}).Warn("Error while writing to a WebSocket")
					sessionsLock.Unlock()
					subscribed = ""
					break
				}
				sessionsLock.Unlock()
				subscribed = ""
			} else if input.Type == "request" {
				// Perform the request
				w := httptest.NewRecorder()
				r, err := http.NewRequest(strings.ToUpper(input.Method), "http://api.lavaboom.io"+input.Path, strings.NewReader(input.Body))
				if err != nil {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
						"path":  input.Path,
					}).Warn("SockJS request error")

					// Return an error response
					resp, _ := json.Marshal(map[string]interface{}{
						"error": err.Error(),
					})
					err := session.Send(string(resp))
					if err != nil {
						env.Log.WithFields(logrus.Fields{
							"id":    session.ID(),
							"error": err.Error(),
						}).Warn("Error while writing to a WebSocket")
						break
					}
					continue
				}

				r.Body = nopCloser{strings.NewReader(input.Body)}

				r.RequestURI = input.Path

				for key, value := range input.Headers {
					r.Header.Set(key, value)
				}

				mux.ServeHTTP(w, r)

				// Return the final response
				result, _ := json.Marshal(map[string]interface{}{
					"type":    "response",
					"id":      input.ID,
					"status":  w.Code,
					"headers": w.HeaderMap,
					"body":    w.Body.String(),
				})
				err = session.Send(string(result))
				if err != nil {
					env.Log.WithFields(logrus.Fields{
						"id":    session.ID(),
						"error": err.Error(),
					}).Warn("Error while writing to a WebSocket")
					break
				}
			}
		}

		// We have to clear the subscription here too. TODO: make the code shorter
		if subscribed == "" {
			return
		}

		sessionsLock.Lock()

		if _, ok := sessions[subscribed]; !ok {
			sessionsLock.Unlock()
			return
		}

		if len(sessions[subscribed]) == 1 {
			delete(sessions, subscribed)
			sessionsLock.Unlock()
			return
		}

		// Find the session
		index := -1
		for i, session2 := range sessions[subscribed] {
			if session == session2 {
				index = i
				break
			}
		}

		// We didn't find anything
		if index == -1 {
			sessionsLock.Unlock()
			return
		}

		// Remove the session by swapping in the last element and truncating the slice
		sessions[subscribed][index] = sessions[subscribed][len(sessions[subscribed])-1]
		sessions[subscribed][len(sessions[subscribed])-1] = nil
		sessions[subscribed] = sessions[subscribed][:len(sessions[subscribed])-1]

		// Unlock the mutex
		sessionsLock.Unlock()
	}))

	// Merge the muxes
	mux.Handle("/*", auth)

	// Compile the routes
	mux.Compile()

	return mux
}
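The bloom filter loaded into env.PasswordBF near the top of PrepareMux is only read from a file here; the routes that consume it are not shown. A hedged sketch of the kind of check such a filter enables, for example against a list of common or breached passwords (an assumption; the function name and wiring below are illustrative, not part of the project):

package main

import "github.com/willf/bloom" // assumed import path

// passwordIsBlacklisted reports whether a candidate password is probably in the
// loaded filter. A false positive only means an occasional safe password is
// rejected; a negative answer is definitive.
func passwordIsBlacklisted(bf *bloom.BloomFilter, password string) bool {
	return bf.TestString(password)
}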
Example #5
File: alt.go Project: 0x17de/MettBot
func (q *AltPlugin) Register(ic *ircclient.IRCClient) {
	q.ic = ic
	q.bf = bloom.NewWithEstimates(expected_urls, false_positives_rate)
	q.regex = regexp.MustCompile(url_regex)
	q.fillFilter()
}
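Here expected_urls, false_positives_rate, and url_regex are defined elsewhere in the package, and fillFilter pre-populates the filter; none of that is shown in the snippet. Purely as an illustration of what a fillFilter-style loader could look like (the file format and path handling are assumptions, not the project's code):

package main

import (
	"bufio"
	"os"

	"github.com/willf/bloom" // assumed import path
)

// loadURLs reads previously seen URLs, one per line, and adds each to the filter.
func loadURLs(bf *bloom.BloomFilter, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		bf.AddString(sc.Text())
	}
	return sc.Err()
}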
Example #6
// This is kind of the main thing you want to do with DHT. It traverses the
// graph toward nodes that store peers for the infohash, streaming them to the
// caller, and announcing the local node to each node if allowed and
// specified.
func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*Announce, error) {
	s.mu.Lock()
	startAddrs := func() (ret []Addr) {
		for _, n := range s.closestGoodNodes(160, infoHash) {
			ret = append(ret, n.addr)
		}
		return
	}()
	s.mu.Unlock()
	if len(startAddrs) == 0 && !s.config.NoDefaultBootstrap {
		addrs, err := bootstrapAddrs(s.bootstrapNodes)
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			startAddrs = append(startAddrs, NewAddr(addr))
		}
	}
	disc := &Announce{
		Peers:               make(chan PeersValues, 100),
		stop:                make(chan struct{}),
		values:              make(chan PeersValues),
		triedAddrs:          bloom.NewWithEstimates(1000, 0.5),
		server:              s,
		infoHash:            infoHash,
		announcePort:        port,
		announcePortImplied: impliedPort,
	}
	// Ferry results from the internal values channel to the exported Peers channel until discovery is halted.
	go func() {
		defer close(disc.Peers)
		for {
			select {
			case psv := <-disc.values:
				select {
				case disc.Peers <- psv:
				case <-disc.stop:
					return
				}
			case <-disc.stop:
				return
			}
		}
	}()
	go func() {
		disc.mu.Lock()
		defer disc.mu.Unlock()
		for i, addr := range startAddrs {
			if i != 0 {
				disc.mu.Unlock()
				time.Sleep(time.Millisecond)
				disc.mu.Lock()
			}
			disc.contact(addr)
		}
		disc.contactedStartAddrs = true
		// If none of the starting addrs could be contacted, no transaction will
		// ever complete to trigger the pending-response check, so run it here
		// explicitly.
		disc.maybeClose()
	}()
	return disc, nil
}
Example #7
func IautosSpider(start_url string) {
	fmt.Printf("Fetching: %s\n", start_url)
	var is_seller, addr string
	fmt.Printf("spider url is : %s\n", start_url)
	if strings.Contains(start_url, "as2ds9vepcatcpbnscac") {
		is_seller = "个人" // "个人" = private seller
	} else if strings.Contains(start_url, "as1ds9vepcatcpbnscac") {
		is_seller = "商家" // "商家" = dealer
	}
	addr = strings.Split(start_url, "/")[3]
	dict := map[string]string{"jiangsu": "江苏", "anhui": "安徽", "shandong": "山东", "shanghai": "上海", "zhejiang": "浙江", "jiangxi": "江西"}
	province, _ := dict[addr]

	doc, err := goquery.NewDocument(start_url)
	check(err)

	var urls []string
	doc.Find(".carShow ul li h4 a").Each(func(i int, s *goquery.Selection) {
		href, _ := s.Attr("href")
		urls = append(urls, href)
		//url := s.Find("h4 a").Attr("href")
		// title := s.Find("h4 a").Text()
		// register_date := s.Find(".txt .set .year").Text()
		// fmt.Printf("Review %d: %s - %s\n", i, title, register_date)
	})

	db, err := sql.Open("mysql", "panpan:panpan@tcp(192.168.2.231:3306)/spider?charset=utf8")
	check(err)
	defer db.Close()

	// Create the filter once so URLs seen earlier in this crawl are remembered
	// across loop iterations.
	filter := bloom.NewWithEstimates(100000, 0.00001)

	for idx, url := range urls {
		//fmt.Printf("Url %d : %s\n", idx, url)
		if filter.TestString(url) {
			fmt.Printf("%s has already been seen, skipping!\n", url)
			continue
		}
		filter.AddString(url)

		car, err := goquery.NewDocument(url)
		check(err)

		content := car.Find("div[class='cd-content clearfix'] .clearfix .main")
		carinfo := content.Find(".cd-summary")
		title := carinfo.Find("h2 b").Text()
		rel_time := carinfo.Find("h2 span").Text()

		re, _ := regexp.Compile(`(\d{4}|\d{2})-(\d{2})-(\d{2})`)
		release_time := re.Find([]byte(rel_time))
		//fmt.Println(release_time)
		prices := carinfo.Find(".summary-txt .h136 .price").Text()
		re, _ = regexp.Compile(`(\d+).(\d+)`)
		price := re.Find([]byte(prices))
		owner_readme := content.Find(".cd-details .postscript p").Text()
		// execute javascript to show the telephone number
		// tele_div := carinfo.Find(".summary-txt script")
		// vm := otto.New()
		// _, err = vm.Run(tele_div.Text())
		// check(err)

		telephone := carinfo.Find(".summary-txt .call-num").Text()
		name := carinfo.Find(".summary-txt .seller-name span").Text()
		var configs []string
		carinfo.Find(".summary-txt .h136 dl dd").Each(func(i int, s *goquery.Selection) {
			if i != 0 {
				configs = append(configs, s.Text())
			}
		})
		fmt.Printf("Confis Length is : %d\n", len(configs))
		var reg_date, register_date, config, address string
		var mileage, displacement, transmission string
		if len(configs) == 3 {
			reg_date, config, address = configs[0], configs[1], configs[2]
		} else if len(configs) == 4 {
			reg_date, config, address = configs[0], configs[1], configs[3]
		}
		// Convert dates like "2015年6月" (year/month) into "2015-6-01".
		reg_date2 := strings.Replace(reg_date, "年", "-", -1)
		register_date = strings.Replace(reg_date2, "月", "-", -1)
		register_date += "01"
		address = province + address
		for _, val := range strings.Split(config, ",") {
			if strings.Contains(val, "万公里") { // "万公里" = 10,000 km (mileage)
				re, _ = regexp.Compile(`(\d+).(\d+)`)
				mid_var := re.Find([]byte(val))
				if len(mileage) == 0 {
					mileage = string(mid_var)
				}

			} else if strings.Contains(val, "L") {
				re, _ = regexp.Compile(`(\d+).(\d+)`)
				mid_var := re.Find([]byte(val))
				if len(displacement) == 0 {
					displacement = string(mid_var)
				}
			} else {
				if strings.Contains(val, "手动") { // 手动 = manual
					transmission = "手动"
				} else if strings.Contains(val, "自动") { // 自动 = automatic
					transmission = "自动"
				} else if strings.Contains(val, "手自一体") { // 手自一体 = semi-automatic
					transmission = "手自一体"
				}

			}
		}

		fmt.Printf("里程:%s\n", mileage)
		fmt.Printf("排量:%s\n", displacement)
		fmt.Printf("变速箱:%s\n", transmission)
		fmt.Printf("车主姓名:%s\n", name)
		fmt.Printf("商家或个人:%s\n", is_seller)
		fmt.Printf("详细地址:%s\n", address)
		fmt.Printf("Info %d : %s - %s - %s - %s - %s - %s - %s - %s \n %s", idx, url, title, release_time, price, register_date, config, address, telephone, owner_readme)

		// Prepare statement for inserting data
		stmtIns, err := db.Prepare("insert ignore into sell_car_info(title,car_config,name,telephone_num,addrs,release_time,prices,is_seller,info_src,url,owner_readme,mileage,register_date,transmission,displacement) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
		check(err)

		_, err = stmtIns.Exec(title, config, name, telephone, address, release_time, price, is_seller, "iautos", url, owner_readme, mileage, register_date, transmission, displacement)
		check(err)

		// Close right away; deferring inside the loop would keep every prepared statement open until the function returns.
		stmtIns.Close()

	}
}
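A filter like the one above only lives for a single crawl; if deduplication should survive restarts, it can be persisted with the filter's serialization methods, the write-side counterpart of the bf.ReadFrom call in Example #4. A minimal sketch (the file path handling is an assumption, not part of the spider):

package main

import (
	"os"

	"github.com/willf/bloom" // assumed import path
)

// saveFilter writes the filter's bit set to disk so a later run can reload it
// with ReadFrom and still recognize URLs processed earlier.
func saveFilter(filter *bloom.BloomFilter, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = filter.WriteTo(f)
	return err
}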