Example #1
func DefaultOverlay(e xt.EndPointI) (o OverlayI, err error) {
	t := e.Transport()
	switch t {
	case "ip", "udp", "tcp":
		// all of these run over the IP overlay
		t = "ip"
	default:
		return nil, errors.New("not implemented")
	}

	// KLUDGE: assumes a TCP endpoint carrying an IPv4 address
	tcpE, ok := e.(*xt.TcpEndPoint)
	if !ok {
		return nil, errors.New("endpoint is not a TcpEndPoint")
	}
	tcpA := tcpE.GetTcpAddr() // has IP, Port, Zone fields
	v4Addr := tcpA.IP.To4()   // nil unless this is an IPv4 address
	if v4Addr == nil {
		return nil, errors.New("not an IPv4 address")
	}

	// 127/8, 10/8, 172.16/12, and 192.168/16 get named overlays;
	// any other IPv4 address falls into the global overlay.
	var name, cidr string
	switch {
	case v4Addr[0] == 127:
		name, cidr = "localhost", "127.0.0.0/8"
	case v4Addr[0] == 10:
		name, cidr = "privateA", "10.0.0.0/8"
	case v4Addr[0] == 172 && v4Addr[1] >= 16 && v4Addr[1] < 32:
		name, cidr = "privateB", "172.16.0.0/12"
	case v4Addr[0] == 192 && v4Addr[1] == 168:
		name, cidr = "privateC", "192.168.0.0/16"
	default:
		name, cidr = "globalV4", "0.0.0.0/0"
	}
	// err is the named return, so any failure propagates to the caller
	aRange, err := NewCIDRAddrRange(cidr)
	if err == nil {
		o, err = NewIPOverlay(name, aRange, "ip", 1.0)
	}
	return
}
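
DefaultOverlay above maps an endpoint's IPv4 address onto a named overlay by testing the loopback block and the RFC 1918 private blocks in turn, falling back to a global 0.0.0.0/0 overlay. A minimal usage sketch follows; the xt.NewTcpEndPoint constructor and the Name() accessor on OverlayI are assumptions here, not confirmed APIs.

// Sketch only: xt.NewTcpEndPoint and o.Name() are assumed to exist.
e, err := xt.NewTcpEndPoint("192.168.1.10:8080")
if err == nil {
	var o OverlayI
	o, err = DefaultOverlay(e)
	if err == nil {
		fmt.Println(o.Name()) // a 192.168/16 address should map to "privateC"
	}
}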
Example #2
func (o *IPOverlay) IsElement(e xt.EndPointI) bool {
	oT := o.Transport()
	eT := e.Transport()
	if !CompatibleTransports(oT, eT) {
		return false
	}

	eA := e.Address().String()
	host, _, err := net.SplitHostPort(eA)
	if err != nil {
		host = eA // the address carries no port
	}
	ip := net.ParseIP(host) // a net.IP, which is a []byte
	if ip == nil {
		fmt.Printf("could not parse '%s'\n", eA)
		return false
	}
	return o.addrRange.ipNet.Contains(ip)
}
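
IsElement is a two-step membership test: the endpoint's transport must be compatible with the overlay's, and its IP must fall within the overlay's CIDR block. A sketch of the expected behavior, reusing the constructors from Example #1 and assuming IsElement is part of the OverlayI interface (the endpoint constructor is again an assumption):

// Sketch only: builds a 10/8 overlay and tests two endpoints against it.
aRange, err := NewCIDRAddrRange("10.0.0.0/8")
if err == nil {
	var o OverlayI
	o, err = NewIPOverlay("privateA", aRange, "ip", 1.0)
	if err == nil {
		in, _ := xt.NewTcpEndPoint("10.1.2.3:9999")    // inside 10/8
		out, _ := xt.NewTcpEndPoint("192.0.2.1:9999")  // outside 10/8
		fmt.Println(o.IsElement(in), o.IsElement(out)) // expect: true false
	}
}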
Example #3
// Given contact information for a registry and the name of a cluster,
// the client joins the cluster, collects information on the other members,
// and terminates when it has info on the entire membership.
func NewMemberMaker(
	node *xn.Node, attrs uint64,
	regName string, regID *xi.NodeID, regEnd xt.EndPointI,
	regCK, regSK *rsa.PublicKey,
	clusterName string, clusterAttrs uint64, clusterID *xi.NodeID,
	size, epCount uint32, endPoints []xt.EndPointI) (
	mm *MemberMaker, err error) {

	var (
		cm      *xcl.ClusterMember
		isAdmin = (attrs & xcl.ATTR_ADMIN) != 0
		regPeer *xn.Peer
	)
	// sanity checks on parameter list
	if node == nil {
		err = MissingNode
	} else {
		if regName == "" || regID == nil || regEnd == nil ||
			regCK == nil {

			err = MissingServerInfo
		}
		if err == nil {
			// DEBUG
			fmt.Printf("NemMemberMaker: regEnd is %s\n", regEnd.String())
			// END
			if (attrs & xcl.ATTR_SOLO) == uint64(0) {
				if clusterName == "" {
					err = MissingClusterNameOrID
					if err == nil && size < uint32(1) {
						// err = ClusterMustHaveTwo
						err = ClusterMustHaveMember
					}
				}
				if err == nil {
					// if the client is an admin client, epCount applies
					// to the cluster
					if epCount < uint32(1) {
						epCount = uint32(1)
					}
					if !isAdmin {
						// XXX There is some confusion here: we don't require
						// that all members have the same number of endpoints
						actualEPCount := uint32(len(endPoints))
						if actualEPCount == 0 {
							err = MemberMustHaveEndPoint
						} else if epCount > actualEPCount {
							epCount = actualEPCount
						}
						// stop adding endpoints at the first failure
						for i := uint32(0); i < epCount && err == nil; i++ {
							_, err = node.AddEndPoint(endPoints[i])
						}
					}
				}
			}
		}
	}

	if err == nil {
		var ctor xt.ConnectorI
		var ctors []xt.ConnectorI
		ctor, err = xt.NewTcpConnector(regEnd)
		if err == nil {
			ctors = append(ctors, ctor)
			regPeer, err = xn.NewPeer(regName, regID, regCK, regSK,
				nil, ctors)
			if err == nil {
				_, err = node.AddPeer(regPeer)
			}
		}
	}
	if err == nil {
		cm = &xcl.ClusterMember{
			// Attrs gets negotiated
			ClusterName:    clusterName,
			ClusterAttrs:   clusterAttrs,
			ClusterID:      clusterID,
			ClusterMaxSize: size,
			EPCount:        epCount,
			// Members added on the fly
			Members: make([]*xcl.MemberInfo, size),
			Node:    *node,
		}
		mm = &MemberMaker{
			ProposedAttrs: attrs,
			DoneCh:        make(chan error, 1),
			RegPeer:       regPeer,
			ClusterMember: *cm,
		}
	}
	return
}
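
NewMemberMaker validates its parameter list, registers the registry as a peer reachable over a TCP connector, and only then assembles the ClusterMember and MemberMaker structures. A hedged construction sketch; node, regID, regEnd, regCK, and regSK are assumed to be prepared elsewhere, and the Run() driver named below is hypothetical:

// Sketch only: an admin client (ATTR_ADMIN) needs no local endpoints.
mm, err := NewMemberMaker(
	node, xcl.ATTR_ADMIN,
	"testRegistry", regID, regEnd, // registry contact info
	regCK, regSK,                  // registry public keys (comms, sig)
	"demoCluster", uint64(0), nil, // cluster name/attrs; ID not yet known
	uint32(5), uint32(1), nil)     // maxSize 5, one endpoint per member
if err == nil {
	go mm.Run()       // hypothetical driver that talks to the registry
	err = <-mm.DoneCh // blocks until full membership info has arrived
}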