Code Example #1
File: ricochet.go Project: Yawning/ricochet
// NewEndpoint creates a Ricochet client/server endpoint with the provided
// configuration, including registering the ephemeral HS with Tor.
func NewEndpoint(cfg *EndpointConfig) (e *Endpoint, err error) {
	e = new(Endpoint)
	e.hostname, _ = pkcs1.OnionAddr(&cfg.PrivateKey.PublicKey)
	e.privateKey = cfg.PrivateKey
	e.ctrl = cfg.TorControlPort
	e.isoBase, err = getIsolationAuth()
	if err != nil {
		return nil, err
	}
	e.outgoingQueue = channels.NewInfiniteChannel()
	e.eventQueue = channels.NewInfiniteChannel()
	e.pendingContacts = make(map[string]*ricochetContact)

	e.EventChan = e.eventQueue.Out()

	e.blacklist = make(map[string]bool)
	for _, id := range cfg.BlacklistedContacts {
		if err := e.BlacklistContact(id, true); err != nil {
			return nil, err
		}
	}
	e.contacts = make(map[string]*ricochetContact)
	for _, id := range cfg.KnownContacts {
		if err := e.AddContact(id, nil); err != nil {
			return nil, err
		}
	}
	for id, requestData := range cfg.PendingContacts {
		if err := e.AddContact(id, requestData); err != nil {
			return nil, err
		}
	}

	e.ln, err = e.ctrl.Listener(ricochetPort, e.privateKey)
	if err != nil {
		return nil, err
	}

	logWr := cfg.LogWriter
	if logWr == nil {
		logWr = ioutil.Discard
	}
	e.log = golog.New(logWr, "", golog.LstdFlags)

	e.log.Printf("server: online as '%v'", e.hostname)

	go e.hsAcceptWorker()
	go e.hsConnectWorker()

	return e, nil
}
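A note on the queues above: EventChan is just the receive side of the infinite eventQueue, so callers consume events with a plain channel range while the endpoint's workers enqueue without ever blocking. A minimal, self-contained sketch of that enqueue/drain pattern, assuming a placeholder Event type (ricochet's real event types are not shown here):

package main

import (
	"fmt"

	"github.com/eapache/channels"
)

// Event is a placeholder; ricochet's actual event types are not shown above.
type Event struct{ Msg string }

func main() {
	q := channels.NewInfiniteChannel()
	eventChan := q.Out() // what the Endpoint exposes as EventChan

	// Producer side: In() never blocks, no matter how slow the consumer is.
	go func() {
		for i := 0; i < 3; i++ {
			q.In() <- &Event{Msg: fmt.Sprintf("event %d", i)}
		}
		q.Close() // closes eventChan once the buffer drains
	}()

	// Consumer side: values arrive as interface{} and must be type-asserted.
	for v := range eventChan {
		fmt.Println(v.(*Event).Msg)
	}
}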
Code Example #2
File: fsm_test.go Project: ramrunner/gobgp
func makePeerAndHandler() (*Peer, *FSMHandler) {
	p := &Peer{
		fsm:      NewFSM(&config.Global{}, &config.Neighbor{}, table.NewRoutingPolicy()),
		outgoing: channels.NewInfiniteChannel(),
	}

	h := &FSMHandler{
		fsm:      p.fsm,
		errorCh:  make(chan FsmStateReason, 2),
		incoming: channels.NewInfiniteChannel(),
		outgoing: p.outgoing,
	}

	return p, h
}
Code Example #3
File: generator.go Project: kivi/sitemap-generator
// NewGenerator constructs a new sitemap generator instance.
// Call Start() to begin the generation process.
func NewGenerator(config *config.Config) *Generator {
	return &Generator{
		WorkerQueue: channels.NewInfiniteChannel(),
		waitGroup:   new(sync.WaitGroup),
		config:      config,
	}
}
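Only the constructor is shown, so here is a hedged sketch of how Start() might pair the infinite WorkerQueue with the waitGroup; the worker count, the Workers config field, and the string payload are all assumptions:

// Hypothetical Start(); the real implementation is not shown above.
func (g *Generator) Start() {
	for i := 0; i < g.config.Workers; i++ { // Workers field is an assumption
		g.waitGroup.Add(1)
		go func() {
			defer g.waitGroup.Done()
			for item := range g.WorkerQueue.Out() {
				url := item.(string) // assumed payload type
				_ = url              // fetch the page and record sitemap entries here
			}
		}()
	}
}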
Code Example #4
File: watcher.go Project: ramrunner/gobgp
func newWatcherManager() *watcherManager {
	m := &watcherManager{
		m:  make(map[watcherType]watcher),
		ch: channels.NewInfiniteChannel(),
	}
	m.t.Go(m.loop)
	return m
}
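m.t is a tomb-style supervisor (its Go method runs loop on a managed goroutine), and loop itself is not shown. A plausible sketch of a loop that drains the infinite channel until the manager is killed; the dispatch step and the notify method are assumptions:

// Hypothetical loop body; gobgp's real loop is not shown above.
func (m *watcherManager) loop() error {
	for {
		select {
		case <-m.t.Dying():
			return nil
		case ev, ok := <-m.ch.Out():
			if !ok {
				return nil
			}
			for _, w := range m.m {
				w.notify(ev) // assumed watcher method
			}
		}
	}
}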
Code Example #5
File: textDownloaderMain.go Project: jmptrader/hydra
func main() {
	args := os.Args
	if len(args) != 4 {
		fmt.Printf("usage: %s [URL] [DOWNLOAD PATH] [# MBs]\n", path.Base(args[0]))
		os.Exit(1)
	}

	url := args[1]
	download := args[2]
	size, err := strconv.Atoi(args[3])

	if err != nil {
		fmt.Printf("Error: %s\n", err.Error())
		fmt.Printf("usage: %s [URL] [DOWNLOAD PATH] [# MBs]\n", path.Base(args[0]))
		os.Exit(1)
	}

	done := make(chan struct{})
	at := types.NewAtomicBool(false)
	feeder := channels.NewInfiniteChannel()

	if !dirExists(download) {
		if err := os.MkdirAll(download, os.ModePerm); err != nil {
			fmt.Printf("Failed to create directory (%v): %v.\n", download, err.Error())
			os.Exit(1)
		}
	}

	feeder.In() <- url

	uFeeder := func(sf types.SetupFunction) {
		urlFeeder(sf, feeder.Out(), at)
	}

	producer := func(sf types.SetupFunction) {
		UrlProducer(sf, "UrlFeeder")
	}

	looper := func(sf types.SetupFunction) {
		urlLooper(sf, feeder.In(), at)
	}

	var cancel types.Canceller

	downloader := func(sf types.SetupFunction) {
		textDownloader(sf, download, size, done, func() {
			if !at.Get() {
				feeder.Close()
				cancel()
			}
			at.Set(true)
		})
	}

	cancel = hydra.NewSetupScaffolding()(uFeeder, looper, downloader, producer, UrlParser, MimeDetector, MimeSplitterHtml, MimeSplitterText)

	<-done
}
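The interesting move above is the feedback edge: urlLooper writes discovered URLs back into feeder.In() while urlFeeder reads feeder.Out(), and the unbounded buffer is what keeps that cycle from deadlocking on itself. A generic, self-contained sketch of the same feedback pattern (this is not hydra's API; discover stands in for the whole download/parse pipeline):

package main

import (
	"fmt"

	"github.com/eapache/channels"
)

// crawl drains a work queue that feeds itself: every processed item may
// enqueue more. pending counts enqueued-but-unprocessed items; when it hits
// zero the queue is closed, which ends the range once the buffer drains.
func crawl(seed string, discover func(string) []string) {
	q := channels.NewInfiniteChannel()
	seen := map[string]bool{seed: true}
	pending := 1
	q.In() <- seed
	for v := range q.Out() {
		u := v.(string)
		fmt.Println("visiting", u)
		for _, next := range discover(u) {
			if !seen[next] {
				seen[next] = true
				pending++
				q.In() <- next // never blocks: the buffer is unbounded
			}
		}
		pending--
		if pending == 0 {
			q.Close()
		}
	}
}

func main() {
	links := map[string][]string{"a": {"b", "c"}, "b": {"c"}, "c": nil}
	crawl("a", func(u string) []string { return links[u] })
}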
Code Example #6
File: peer.go Project: osrg/gobgp
func NewPeer(g *config.Global, conf *config.Neighbor, loc *table.TableManager, policy *table.RoutingPolicy) *Peer {
	peer := &Peer{
		outgoing:          channels.NewInfiniteChannel(),
		localRib:          loc,
		policy:            policy,
		fsm:               NewFSM(g, conf, policy),
		prefixLimitWarned: make(map[bgp.RouteFamily]bool),
	}
	if peer.isRouteServerClient() {
		peer.tableId = conf.Config.NeighborAddress
	} else {
		peer.tableId = table.GLOBAL_RIB_NAME
	}
	rfs, _ := config.AfiSafis(conf.AfiSafis).ToRfList()
	peer.adjRibIn = table.NewAdjRib(peer.ID(), rfs)
	return peer
}
Code Example #7
File: fs.go Project: gozes/kbfs-beta
func (f *FS) launchNotificationProcessor(ctx context.Context) {
	f.notificationMutex.Lock()
	defer f.notificationMutex.Unlock()

	// The notifications channel needs to have "infinite" capacity,
	// because otherwise we risk a deadlock between libkbfs and
	// libfuse.  The notification processor sends invalidates to the
	// kernel.  In osxfuse 3.X, the kernel can call back into userland
	// during an invalidate (a GetAttr()) call, which in turn takes
	// locks within libkbfs.  So if libkbfs ever gets blocked while
	// trying to enqueue a notification (while it is holding locks),
	// we could have a deadlock.  Yes, if there are too many
	// outstanding notifications we'll run out of memory and crash,
	// but otherwise we risk deadlock.  Which is worse?
	f.notifications = channels.NewInfiniteChannel()

	// start the notification processor
	go f.processNotifications(ctx)
}
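The comment spells out the trade-off: an unbounded buffer risks memory exhaustion but guarantees libkbfs can enqueue while holding locks without ever blocking. For contrast, a sketch of what the draining side could look like; processNotifications is not shown here, and the delivery step is an assumption:

// Hypothetical drain loop; kbfs's real processNotifications is not shown.
func (f *FS) processNotifications(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case n, ok := <-f.notifications.Out():
			if !ok {
				return
			}
			_ = n // send the invalidate to the kernel here; blocking is safe
			      // because enqueuers are never waiting on this goroutine
		}
	}
}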
Code Example #8
File: fsm.go Project: jojimt/netplugin
func (h *FSMHandler) openconfirm() (bgp.FSMState, FsmStateReason) {
	fsm := h.fsm
	ticker := keepaliveTicker(fsm)
	h.msgCh = channels.NewInfiniteChannel()
	h.conn = fsm.conn

	h.t.Go(h.recvMessage)

	var holdTimer *time.Timer
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		holdTimer = &time.Timer{}
	} else {
		// RFC 4271 P.65
		// sets the HoldTimer according to the negotiated value
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
	}

	for {
		select {
		case <-h.t.Dying():
			h.conn.Close()
			return -1, FSM_DYING
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			conn.Close()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.Config.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
		case <-fsm.gracefulRestartTimer.C:
			if fsm.pConf.GracefulRestart.State.PeerRestarting {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.Config.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				return bgp.BGP_FSM_IDLE, FSM_RESTART_TIMER_EXPIRED
			}
		case <-ticker.C:
			m := bgp.NewBGPKeepAliveMessage()
			b, _ := m.Serialize()
			// TODO: check error
			fsm.conn.Write(b)
			fsm.bgpMessageStateUpdate(m.Header.Type, false)
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*FsmMsg)
			switch e.MsgData.(type) {
			case *bgp.BGPMessage:
				m := e.MsgData.(*bgp.BGPMessage)
				if m.Header.Type == bgp.BGP_MSG_KEEPALIVE {
					return bgp.BGP_FSM_ESTABLISHED, FSM_OPEN_MSG_NEGOTIATED
				}
				// send notification ?
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
			case *bgp.MessageError:
				fsm.sendNotificationFromErrorMsg(e.MsgData.(*bgp.MessageError))
				return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.Config.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.errorCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, err
		case <-holdTimer.C:
			fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			h.t.Kill(nil)
			return bgp.BGP_FSM_IDLE, FSM_HOLD_TIMER_EXPIRED
		case s := <-fsm.adminStateCh:
			err := h.changeAdminState(s)
			if err == nil {
				switch s {
				case ADMIN_STATE_DOWN:
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, FSM_ADMIN_DOWN
				case ADMIN_STATE_UP:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.Config.NeighborAddress,
						"State":      fsm.state.String(),
						"AdminState": s.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
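This handler (and opensent below) creates a fresh msgCh and runs h.recvMessage on the tomb; messages then surface in the state loop's select via h.msgCh.Out(). A hedged sketch of that producing side, with the wire-reading helper and the failure reason as assumptions; gobgp's real recvMessage also covers parsing and message statistics:

// Hypothetical producer sketch; gobgp's real recvMessage is more involved.
func (h *FSMHandler) recvMessage() error {
	defer h.msgCh.Close() // the state loop's Out() read observes the closure
	for {
		m, err := readBGPMessage(h.conn) // hypothetical wire-format helper
		if err != nil {
			h.errorCh <- FSM_READ_FAILED // assumed FsmStateReason value
			return nil
		}
		// Enqueueing never blocks, even while the state loop is busy.
		h.msgCh.In() <- &FsmMsg{MsgData: m}
	}
}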
Code Example #9
File: fsm.go Project: jojimt/netplugin
func (h *FSMHandler) opensent() (bgp.FSMState, FsmStateReason) {
	fsm := h.fsm
	m := buildopen(fsm.gConf, fsm.pConf)
	b, _ := m.Serialize()
	fsm.conn.Write(b)
	fsm.bgpMessageStateUpdate(m.Header.Type, false)

	h.msgCh = channels.NewInfiniteChannel()
	h.conn = fsm.conn

	h.t.Go(h.recvMessage)

	// RFC 4271 P.60
	// sets its HoldTimer to a large value
	// A HoldTimer value of 4 minutes is suggested as a "large value"
	// for the HoldTimer
	holdTimer := time.NewTimer(time.Second * time.Duration(fsm.opensentHoldTime))

	for {
		select {
		case <-h.t.Dying():
			h.conn.Close()
			return -1, FSM_DYING
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			conn.Close()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.Config.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
		case <-fsm.gracefulRestartTimer.C:
			if fsm.pConf.GracefulRestart.State.PeerRestarting {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.Config.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				return bgp.BGP_FSM_IDLE, FSM_RESTART_TIMER_EXPIRED
			}
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*FsmMsg)
			switch e.MsgData.(type) {
			case *bgp.BGPMessage:
				m := e.MsgData.(*bgp.BGPMessage)
				if m.Header.Type == bgp.BGP_MSG_OPEN {
					fsm.recvOpen = m
					body := m.Body.(*bgp.BGPOpen)
					err := bgp.ValidateOpenMsg(body, fsm.pConf.Config.PeerAs)
					if err != nil {
						fsm.sendNotificationFromErrorMsg(err.(*bgp.MessageError))
						return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
					}
					fsm.peerInfo.ID = body.ID
					fsm.capMap, fsm.rfMap = open2Cap(body, fsm.pConf)

					// calculate HoldTime
					// RFC 4271 P.13
					// a BGP speaker MUST calculate the value of the Hold Timer
					// by using the smaller of its configured Hold Time and the Hold Time
					// received in the OPEN message.
					holdTime := float64(body.HoldTime)
					myHoldTime := fsm.pConf.Timers.Config.HoldTime
					if holdTime > myHoldTime {
						fsm.pConf.Timers.State.NegotiatedHoldTime = myHoldTime
					} else {
						fsm.pConf.Timers.State.NegotiatedHoldTime = holdTime
					}

					keepalive := fsm.pConf.Timers.Config.KeepaliveInterval
					if n := fsm.pConf.Timers.State.NegotiatedHoldTime; n < myHoldTime {
						keepalive = n / 3
					}
					fsm.pConf.Timers.State.KeepaliveInterval = keepalive

					gr, ok := fsm.capMap[bgp.BGP_CAP_GRACEFUL_RESTART]
					if fsm.pConf.GracefulRestart.Config.Enabled && ok {
						state := &fsm.pConf.GracefulRestart.State
						state.Enabled = true
						cap := gr[len(gr)-1].(*bgp.CapGracefulRestart)
						state.PeerRestartTime = uint16(cap.Time)

						for _, t := range cap.Tuples {
							n := bgp.AddressFamilyNameMap[bgp.AfiSafiToRouteFamily(t.AFI, t.SAFI)]
							for i, a := range fsm.pConf.AfiSafis {
								if string(a.Config.AfiSafiName) == n {
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Enabled = true
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Received = true
									break
								}
							}
						}

						// RFC 4724 4.1
						// To re-establish the session with its peer, the Restarting Speaker
						// MUST set the "Restart State" bit in the Graceful Restart Capability
						// of the OPEN message.
						if fsm.pConf.GracefulRestart.State.PeerRestarting && cap.Flags != 0x08 {
							log.WithFields(log.Fields{
								"Topic": "Peer",
								"Key":   fsm.pConf.Config.NeighborAddress,
								"State": fsm.state.String(),
							}).Warn("restart flag is not set")
							// send notification?
							h.conn.Close()
							return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
						}
					}

					msg := bgp.NewBGPKeepAliveMessage()
					b, _ := msg.Serialize()
					fsm.conn.Write(b)
					fsm.bgpMessageStateUpdate(msg.Header.Type, false)
					return bgp.BGP_FSM_OPENCONFIRM, FSM_OPEN_MSG_RECEIVED
				} else {
					// send notification?
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
				}
			case *bgp.MessageError:
				fsm.sendNotificationFromErrorMsg(e.MsgData.(*bgp.MessageError))
				return bgp.BGP_FSM_IDLE, FSM_INVALID_MSG
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.Config.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.errorCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, err
		case <-holdTimer.C:
			fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			h.t.Kill(nil)
			return bgp.BGP_FSM_IDLE, FSM_HOLD_TIMER_EXPIRED
		case s := <-fsm.adminStateCh:
			err := h.changeAdminState(s)
			if err == nil {
				switch s {
				case ADMIN_STATE_DOWN:
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, FSM_ADMIN_DOWN
				case ADMIN_STATE_UP:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.Config.NeighborAddress,
						"State":      fsm.state.String(),
						"AdminState": s.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
Code Example #10
File: smock.go Project: MohamedBassem/Smock
// Start starts the mock server and initiates the requests channel
func (s *MockServer) Start() {
	s.testServer.Start()
	s.Reqs = channels.NewInfiniteChannel()
}
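Because Reqs is unbounded, the server under test can record every incoming request without ever blocking on a slow test reader. A hedged usage sketch; the request payload type is an assumption, since only Start() is shown above:

// Hypothetical test-side consumer; Smock's request type is not shown above.
func drainRequests(s *MockServer) {
	go func() {
		for r := range s.Reqs.Out() {
			fmt.Printf("mock server saw: %v\n", r)
		}
	}()
}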
Code Example #11
func main() {
	tasks := channels.NewInfiniteChannel()

	// Spawn eight worker goroutines to drain the channel.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			// Let the producer get ahead so the unbounded buffer actually grows.
			time.Sleep(time.Second * 2)
			fmt.Println("worker started")
			debug.FreeOSMemory()

			for cmd := range tasks.Out() {
				inter := cmd.([]interface{})
				if inter[0] == 2 {
					fmt.Println(cmd)
				}
			}
		}()
	}

	runtime.GC()
	debug.FreeOSMemory()

	// Generate some tasks; In() never blocks, so this loop only measures
	// how fast the unbounded buffer can grow.
	for i := 0; i < 10000000; i++ {
		tasks.In() <- []interface{}{1, 1, "test", "another test", 1.56, true}
	}
	// Close() stops accepting new input; items already buffered are still
	// delivered on Out() before it closes, so the workers drain everything.
	tasks.Close()

	// Wait for the workers to finish.
	wg.Wait()
}