Example #1
func (n *network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
		wg.Done()
	}()

	n.rl = make([]netlink.Route, 0, 10)
	wg.Add(1)
	go func() {
		n.routeCheck(ctx)
		wg.Done()
	}()

	defer wg.Wait()

	for {
		select {
		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example #2
func (n *Network) Init(ctx context.Context, iface *net.Interface, iaddr net.IP, eaddr net.IP) *backend.SubnetDef {
	var be backend.Backend
	var sn *backend.SubnetDef

	steps := []func() error{
		func() (err error) {
			n.Config, err = n.sm.GetNetworkConfig(ctx, n.Name)
			if err != nil {
				log.Error("Failed to retrieve network config: ", err)
			}
			return
		},

		func() (err error) {
			be, err = newBackend(n.sm, n.Name, n.Config)
			if err != nil {
				log.Error("Failed to create backend: ", err)
			} else {
				n.be = be
			}
			return
		},

		func() (err error) {
			sn, err = be.Init(ctx, iface, iaddr, eaddr)
			if err != nil {
				log.Errorf("Failed to initialize network %v (type %v): %v", n.Name, n.Config.BackendType, err)
				return // sn is nil on failure; don't touch sn.Lease, the step will be retried
			}
			n.lease = sn.Lease
			return
		},

		func() (err error) {
			if n.ipMasq {
				flannelNet := n.Config.Network
				if err = setupIPMasq(flannelNet); err != nil {
					log.Errorf("Failed to set up IP Masquerade for network %v: %v", n.Name, err)
				}
			}
			return
		},
	}

	for _, s := range steps {
		for ; ; time.Sleep(time.Second) {
			select {
			case <-ctx.Done():
				return nil
			default:
			}

			err := s()
			if err == nil {
				break
			}
		}
	}

	return sn
}
Example #3
func (m *UdpBackend) Run(ctx context.Context) {
	// one for each goroutine below
	wg := sync.WaitGroup{}

	wg.Add(1)
	go func() {
		runCProxy(m.tun, m.conn, m.ctl2, m.tunNet.IP, m.mtu)
		wg.Done()
	}()

	log.Info("Watching for new subnet leases")

	evts := make(chan []subnet.Event)

	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, m.sm, m.network, m.lease, evts)
		wg.Done()
	}()

loop:
	for {
		select {
		case evtBatch := <-evts:
			m.processSubnetEvents(evtBatch)

		case <-ctx.Done():
			stopProxy(m.ctl)
			// a plain "break" would only exit the select and leave the loop
			// spinning on the closed Done channel; break out via the label
			break loop
		}
	}

	wg.Wait()
}
Example #4
func (msr *MockSubnetRegistry) watchSubnet(ctx context.Context, network string, since uint64, sn ip.IP4Net) (Event, uint64, error) {
	msr.mux.Lock()
	n, ok := msr.networks[network]
	msr.mux.Unlock()

	if !ok {
		return Event{}, 0, fmt.Errorf("Network %s not found", network)
	}

	for {
		msr.mux.Lock()
		index := msr.index
		msr.mux.Unlock()

		if since < index {
			return Event{}, index, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, index, ctx.Err()

		case e := <-n.subnetEventsChan(sn):
			if e.index > since {
				return e.evt, index, nil
			}
		}
	}
}
Example #5
func (msr *MockSubnetRegistry) watchSubnets(ctx context.Context, network string, since uint64) (Event, uint64, error) {
	n, ok := msr.networks[network]
	if !ok {
		return Event{}, msr.index, fmt.Errorf("Network %s not found", network)
	}

	for {
		if since < msr.index {
			return Event{}, msr.index, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   msr.index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, msr.index, ctx.Err()

		case e := <-n.subnetsEvents:
			if e.index <= since {
				continue
			}
			return e.evt, msr.index, nil
		}
	}
}
Example #6
func (msr *MockSubnetRegistry) watchNetworks(ctx context.Context, since uint64) (Event, uint64, error) {
	msr.mux.Lock()
	index := msr.index
	msr.mux.Unlock()

	for {
		if since < index {
			return Event{}, 0, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, 0, ctx.Err()

		case e := <-msr.networkEvents:
			if e.index > since {
				return e.evt, e.index, nil
			}
		}
	}
}
Example #7
func RunServer(ctx context.Context, sm subnet.Manager, listenAddr, cafile, certfile, keyfile string) {
	// {network} is always required at the API level, but to keep
	// backward compatibility the special "_" network is allowed,
	// meaning "no network"

	r := mux.NewRouter()
	r.HandleFunc("/v1/{network}/config", bindHandler(handleGetNetworkConfig, ctx, sm)).Methods("GET")
	r.HandleFunc("/v1/{network}/leases", bindHandler(handleAcquireLease, ctx, sm)).Methods("POST")
	r.HandleFunc("/v1/{network}/leases/{subnet}", bindHandler(handleRenewLease, ctx, sm)).Methods("PUT")
	r.HandleFunc("/v1/{network}/leases", bindHandler(handleWatchLeases, ctx, sm)).Methods("GET")
	r.HandleFunc("/v1/", bindHandler(handleNetworks, ctx, sm)).Methods("GET")

	l, err := listener(listenAddr, cafile, certfile, keyfile)
	if err != nil {
		log.Errorf("Error listening on %v: %v", listenAddr, err)
		return
	}

	c := make(chan error, 1)
	go func() {
		c <- http.Serve(l, httpLogger(r))
	}()

	select {
	case <-ctx.Done():
		l.Close()
		<-c

	case err := <-c:
		log.Errorf("Error serving on %v: %v", listenAddr, err)
	}
}
Example #8
func (m *EtcdManager) AcquireLease(ctx context.Context, network string, attrs *LeaseAttrs) (*Lease, error) {
	config, err := m.GetNetworkConfig(ctx, network)
	if err != nil {
		return nil, err
	}

	for {
		l, err := m.acquireLeaseOnce(ctx, network, config, attrs)
		switch {
		case err == nil:
			log.Info("Subnet lease acquired: ", l.Subnet)
			return l, nil

		case err == context.Canceled, err == context.DeadlineExceeded:
			return nil, err

		default:
			log.Error("Failed to acquire subnet: ", err)
		}

		select {
		case <-time.After(time.Second):

		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
Example #9
func (esr *etcdSubnetRegistry) watchSubnets(ctx context.Context, network string, since uint64) (*etcd.Response, error) {
	stop := make(chan bool)
	respCh := make(chan watchResp)

	go func() {
		for {
			key := path.Join(esr.etcdCfg.Prefix, network, "subnets")
			rresp, err := esr.client().RawWatch(key, since, true, nil, stop)

			if err != nil {
				respCh <- watchResp{nil, err}
				return
			}

			if len(rresp.Body) == 0 {
				// etcd timed out; retry, but recreate the client first as the underlying
				// http transport gets hosed (http://code.google.com/p/go/issues/detail?id=8648)
				esr.resetClient()
				continue
			}

			resp, err := rresp.Unmarshal()
			respCh <- watchResp{resp, err}
		}
	}()

	select {
	case <-ctx.Done():
		close(stop)
		<-respCh // Wait for the watch goroutine to return.
		return nil, ctx.Err()
	case wr := <-respCh:
		return wr.resp, wr.err
	}
}
Example #10
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-ctx.Done():
		// cancel and wait for request to actually exit before continuing
		c.transport.CancelRequest(req)
		rtresp := <-rtchan
		resp = rtresp.resp
		err = ctx.Err()
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		err = resp.Body.Close()
		<-done
		if err == nil {
			err = ctx.Err()
		}
	case <-done:
	}

	return resp, body, err
}
Example #11
func (n *network) routeCheck(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(routeCheckRetries * time.Second):
			n.checkSubnetExistInRoutes()
		}
	}
}
Example #12
func (rb *HostgwBackend) routeCheck(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(routeCheckRetries * time.Second):
			rb.checkSubnetExistInRoutes()
		}
	}
}
Example #13
func (n *Network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		n.be.Run()
		wg.Done()
	}()

	<-ctx.Done()
	n.be.Stop()

	wg.Wait()
}
Example #14
func (msr *mockSubnetRegistry) watchSubnets(ctx context.Context, network string, since uint64) (*etcd.Response, error) {
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()

		case r := <-msr.events:
			if r.Node.ModifiedIndex <= since {
				continue
			}
			return r, nil
		}
	}
}
Example #15
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		err := c.Sync(ctx)
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
Example #16
func (n *Network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		n.be.Run(ctx)
		wg.Done()
	}()

	wg.Add(1)
	go func() {
		subnet.LeaseRenewer(ctx, n.sm, n.Name, n.lease)
		wg.Done()
	}()

	<-ctx.Done()
	wg.Wait()
}
Example #17
func (m *Manager) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	if m.isMultiNetwork() {
		for {
			// Try adding initial networks
			result, err := m.sm.WatchNetworks(ctx, nil)
			if err == nil {
				for _, n := range result.Snapshot {
					if m.isNetAllowed(n) {
						m.networks[n] = NewNetwork(ctx, m.sm, m.bm, n, m.ipMasq)
					}
				}
				break
			}

			// Otherwise retry in a few seconds
			log.Warning("Failed to retrieve networks (will retry): %v", err)
			select {
			case <-ctx.Done():
				return
			case <-time.After(time.Second):
			}
		}
	} else {
		m.networks[""] = NewNetwork(ctx, m.sm, m.bm, "", m.ipMasq)
	}

	// Run existing networks
	m.forEachNetwork(func(n *Network) {
		wg.Add(1)
		go func(n *Network) {
			m.runNetwork(n)
			wg.Done()
		}(n)
	})

	if opts.watchNetworks {
		m.watchNetworks()
	}

	wg.Wait()
	m.bm.Wait()
}
Example #18
func (w *watcher) Next(ctx context.Context) (*etcd.Response, error) {
	w.parent.mux.Lock()

	// If the event is already in the history log return it from there

	for _, e := range w.parent.events {
		if e.Index > w.after && w.shouldGrabEvent(e) {
			w.after = e.Index
			w.parent.mux.Unlock()
			return e, nil
		}
	}

	// Watch must handle adding and removing itself from the parent when
	// it's done to ensure it can be garbage collected correctly
	w.parent.watchers[w] = struct{}{}

	w.parent.mux.Unlock()

	// Otherwise wait for new events
	for {
		select {
		case e := <-w.events:
			// Might have already been grabbed through the history log
			if e.Index <= w.after {
				continue
			}
			w.after = e.Index

			w.parent.mux.Lock()
			delete(w.parent.watchers, w)
			w.parent.mux.Unlock()

			return e, nil
		case <-ctx.Done():
			w.parent.mux.Lock()
			delete(w.parent.watchers, w)
			w.parent.mux.Unlock()

			return nil, context.Canceled
		}
	}
}
Example #19
func (m *RemoteManager) httpDo(ctx context.Context, req *http.Request) (*http.Response, error) {
	// Run the HTTP request in a goroutine (so it can be canceled) and pass
	// the result via the channel c
	client := &http.Client{Transport: m.transport}
	c := make(chan httpRespErr, 1)
	go func() {
		resp, err := client.Do(req)
		c <- httpRespErr{resp, err}
	}()

	select {
	case <-ctx.Done():
		m.transport.CancelRequest(req)
		<-c // Wait for the request goroutine to return.
		return nil, ctx.Err()
	case r := <-c:
		return r.resp, r.err
	}
}
Example #20
func (n *network) Run(ctx context.Context) {
	log.Info("Watching for L3 misses")
	misses := make(chan *netlink.Neigh, 100)
	// Unfortunately MonitorMisses does not take a cancel channel
	// as there's no way to interrupt a netlink socket recv
	go n.dev.MonitorMisses(misses)

	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.SubnetLease, evts)
		log.Info("WatchLeases exited")
		wg.Done()
	}()

	defer wg.Wait()
	initialEvtsBatch := <-evts
	for {
		err := n.handleInitialSubnetEvents(initialEvtsBatch)
		if err == nil {
			break
		}
		log.Error(err, " About to retry")
		time.Sleep(time.Second)
	}

	for {
		select {
		case miss := <-misses:
			n.handleMiss(miss)

		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example #21
func (m *EtcdManager) acquireLeaseOnce(ctx context.Context, network string, config *Config, attrs *LeaseAttrs) (*Lease, error) {
	for i := 0; i < registerRetries; i++ {
		l, err := m.tryAcquireLease(ctx, network, config, attrs.PublicIP, attrs)
		switch {
		case err != nil:
			return nil, err
		case l != nil:
			return l, nil
		}

		// before moving on, check for cancel
		// TODO(eyakubovich): propagate ctx deeper into registry
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
	}

	return nil, errors.New("Max retries reached trying to acquire a subnet")
}
Example #22
func LeaseRenewer(ctx context.Context, m Manager, network string, lease *Lease) {
	dur := lease.Expiration.Sub(clock.Now()) - renewMargin

	for {
		select {
		case <-time.After(dur):
			err := m.RenewLease(ctx, network, lease)
			if err != nil {
				log.Error("Error renewing lease (trying again in 1 min): ", err)
				dur = time.Minute
				continue
			}

			log.Info("Lease renewed, new expiration: ", lease.Expiration)
			dur = lease.Expiration.Sub(clock.Now()) - renewMargin

		case <-ctx.Done():
			return
		}
	}
}
Example #23
func (n *network) Run(ctx context.Context) {
	defer func() {
		n.tun.Close()
		n.conn.Close()
		n.ctl.Close()
		n.ctl2.Close()
	}()

	// one for each goroutine below
	wg := sync.WaitGroup{}
	defer wg.Wait()

	wg.Add(1)
	go func() {
		runCProxy(n.tun, n.conn, n.ctl2, n.tunNet.IP, n.MTU())
		wg.Done()
	}()

	log.Info("Watching for new subnet leases")

	evts := make(chan []subnet.Event)

	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.SubnetLease, evts)
		wg.Done()
	}()

	for {
		select {
		case evtBatch := <-evts:
			n.processSubnetEvents(evtBatch)

		case <-ctx.Done():
			stopProxy(n.ctl)
			return
		}
	}
}
Example #24
func (msr *MockSubnetRegistry) watch(ctx context.Context, network string, since uint64) (*etcd.Response, error) {
	for {
		if since < msr.index {
			return nil, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   msr.index,
			}
		}

		select {
		case <-ctx.Done():
			return nil, ctx.Err()

		case r := <-msr.events:
			if r.Node.ModifiedIndex <= since {
				continue
			}
			return r, nil
		}
	}
}
Example #25
func (msr *MockSubnetRegistry) watchNetworks(ctx context.Context, since uint64) (Event, uint64, error) {
	for {
		if since < msr.index {
			return Event{}, msr.index, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   msr.index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, msr.index, ctx.Err()

		case e := <-msr.networkEvents:
			if e.index <= since {
				continue
			}
			return e.evt, msr.index, nil
		}
	}
}
Example #26
func (_ *SimpleNetwork) Run(ctx context.Context) {
	<-ctx.Done()
}
Example #27
func (_ *AllocBackend) Run(ctx context.Context) {
	<-ctx.Done()
}
Example #28
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	hctx, hcancel := context.WithCancel(ctx)
	if c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	}
	defer hcancel()

	reqcancel := requestCanceler(c.transport, req)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		reqcancel()
		rtresp := <-rtchan
		resp = rtresp.resp
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}
Example #29
func (g *GCEBackend) Run(ctx context.Context) {
	<-ctx.Done()
}
Example #30
func (_ *HostgwBackend) Run(ctx context.Context) {
	<-ctx.Done()
}
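
All of the snippets above receive a ctx created by their caller. As a complement, here is a minimal, self-contained sketch (not taken from flannel or etcd; the runner type and the timings are invented for illustration) of how a caller typically drives such a Run(ctx) method with context.WithCancel and a sync.WaitGroup:

package main

import (
	"context"
	"sync"
	"time"
)

// runner mirrors the Run(ctx) shape used throughout the examples above:
// it loops until the caller cancels the context.
type runner struct{}

func (runner) Run(ctx context.Context) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return // shut down when the caller cancels
		case <-ticker.C:
			// periodic work would go here
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		runner{}.Run(ctx)
		wg.Done()
	}()

	// Simulate the caller deciding to shut down after some time.
	time.Sleep(300 * time.Millisecond)
	cancel()
	wg.Wait()
}

Calling cancel() closes ctx.Done(), which is what every select in the examples above waits on; wg.Wait() then ensures the goroutine has fully exited before the program returns.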