Example #1
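// NewMasterInfo builds a mesos.MasterInfo from an id, a packed IPv4 address and a port.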
func NewMasterInfo(id string, ip, port uint32) *mesos.MasterInfo {
	return &mesos.MasterInfo{
		Id:   proto.String(id),
		Ip:   proto.Uint32(ip),
		Port: proto.Uint32(port),
	}
}
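MasterInfo.Ip carries the IPv4 address packed into a uint32, which is why proto.Uint32 is used here. The helper below is a minimal sketch of how a caller might produce that value; ipToUint32 is hypothetical (not part of the example above), and network byte order is assumed, so the endianness should be checked against the detector code actually in use.

import (
	"encoding/binary"
	"net"
)

// ipToUint32 packs a dotted-quad IPv4 address into a uint32,
// assuming network byte order (big-endian).
func ipToUint32(s string) uint32 {
	ip := net.ParseIP(s).To4()
	if ip == nil {
		return 0 // not a valid IPv4 address
	}
	return binary.BigEndian.Uint32(ip)
}

// e.g. master := NewMasterInfo("master-1", ipToUint32("127.0.0.1"), 5050)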
Example #2
func TestUnmarshalPartiallyPopulatedOptionalFieldsFails(t *testing.T) {
	// Fill in all fields, then randomly remove one.
	dataOut := &test.NinOptNative{
		Field1:  proto.Float64(0),
		Field2:  proto.Float32(0),
		Field3:  proto.Int32(0),
		Field4:  proto.Int64(0),
		Field5:  proto.Uint32(0),
		Field6:  proto.Uint64(0),
		Field7:  proto.Int32(0),
		Field8:  proto.Int64(0),
		Field9:  proto.Uint32(0),
		Field10: proto.Int32(0),
		Field11: proto.Uint64(0),
		Field12: proto.Int64(0),
		Field13: proto.Bool(false),
		Field14: proto.String("0"),
		Field15: []byte("0"),
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	fieldName := "Field" + strconv.Itoa(r.Intn(15)+1)
	field := reflect.ValueOf(dataOut).Elem().FieldByName(fieldName)
	fieldType := field.Type()
	field.Set(reflect.Zero(fieldType))
	encodedMessage, err := proto.Marshal(dataOut)
	if err != nil {
		t.Fatalf("Unexpected error when marshalling dataOut: %v", err)
	}
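	// NidOptNative (unlike test.NinOptNative) declares these fields as required,
	// so unmarshalling a message with one field missing must fail.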
	dataIn := NidOptNative{}
	err = proto.Unmarshal(encodedMessage, &dataIn)
	if err.Error() != `proto: required field "`+fieldName+`" not set` {
		t.Fatalf(`err.Error() != "proto: required field "`+fieldName+`" not set"; was "%s" instead`, err.Error())
	}
}
Example #3
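// sendError wraps err in a Response envelope (using the zrpc status code when available, HTTP 500 otherwise) and writes it back on the socket.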
func sendError(socket *zmq.Socket, req *Request, err error) {
	// Response envelope
	resp := &Response{
		Error: &Response_Error{},
	}

	if req != nil {
		resp.UUID = req.UUID
	}

	// If error is a zrpc error
	if zrpcErr, ok := err.(zrpcError); ok {
		resp.StatusCode = proto.Uint32(uint32(zrpcErr.GetStatusCode()))
		resp.Error.Message = proto.String(zrpcErr.GetMessage())
	} else {
		// Default to internal error
		resp.StatusCode = proto.Uint32(uint32(http.StatusInternalServerError))
		resp.Error.Message = proto.String(err.Error())
	}

	// Encode the response
	buf, protoErr := proto.Marshal(resp)
	if protoErr != nil {
		glog.Error(protoErr)
		return
	}

	// Send the response
	if _, err := socket.SendBytes(buf, 0); err != nil {
		glog.Error(err)
	}
}
Example #4
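// SetBodyChecksum stores the CRC-32 (IEEE) checksum of the body's data on the message; a nil body is left untouched and missing data yields a checksum of 0.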
func SetBodyChecksum(body *TransportBody) {
	if body == nil {
		return
	}
	data := body.GetData()
	if data == nil {
		body.Checksum = proto.Uint32(0)
		return
	}
	body.Checksum = proto.Uint32(crc32.ChecksumIEEE(data))
}
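A receiving side could validate the payload the same way. The helper below is a sketch, not part of the original package, and it assumes the generated GetData and GetChecksum accessors exist on TransportBody.

// VerifyBodyChecksum is a hypothetical counterpart to SetBodyChecksum:
// it recomputes the CRC-32 (IEEE) of the payload and compares it with
// the checksum carried on the message.
func VerifyBodyChecksum(body *TransportBody) bool {
	if body == nil || body.Checksum == nil {
		return false
	}
	return crc32.ChecksumIEEE(body.GetData()) == body.GetChecksum()
}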
Example #5
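// buildPorts assigns host ports from the offer's "ports" ranges to the task's port mappings, returning both the Docker port mappings and the port ranges that were consumed.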
func buildPorts(task eremetic.Task, offer *mesosproto.Offer) ([]*mesosproto.ContainerInfo_DockerInfo_PortMapping, []*mesosproto.Value_Range) {
	var resources []*mesosproto.Value_Range
	var mappings []*mesosproto.ContainerInfo_DockerInfo_PortMapping

	if len(task.Ports) == 0 {
		return mappings, resources
	}

	leftToAssign := len(task.Ports)

	for _, rsrc := range offer.Resources {
		if *rsrc.Name != "ports" {
			continue
		}

		for _, rng := range rsrc.Ranges.Range {
			if leftToAssign == 0 {
				break
			}

			start, end := *rng.Begin, *rng.Begin

			for hport := int(*rng.Begin); hport <= int(*rng.End); hport++ {
				if leftToAssign == 0 {
					break
				}

				leftToAssign--

				tport := &task.Ports[leftToAssign]
				tport.HostPort = uint32(hport)

				if tport.ContainerPort == 0 {
					tport.ContainerPort = tport.HostPort
				}

				end = uint64(hport + 1)

				mappings = append(mappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					ContainerPort: proto.Uint32(tport.ContainerPort),
					HostPort:      proto.Uint32(tport.HostPort),
					Protocol:      proto.String(tport.Protocol),
				})
			}

			if start != end {
				resources = append(resources, mesosutil.NewValueRange(start, end))
			}
		}
	}

	return mappings, resources
}
Example #6
func TestStandalone_pollerFetched(t *testing.T) {
	assert := assert.New(t)
	// presence of an IP address allows the fetcher to be called
	d := NewStandalone(&mesos.MasterInfo{Ip: proto.Uint32(localhost)})
	defer d.Cancel()

	fetched := make(chan struct{})
	pid := &upid.UPID{
		ID:   "master@127.0.0.1:5050",
		Host: "127.0.0.1",
		Port: "5050",
	}
	f := fetcherFunc(func(ctx context.Context, addr string) (*upid.UPID, error) {
		defer close(fetched)
		assert.Equal("127.0.0.1:5050", addr)
		return pid, nil
	})

	go d.poller(f)

	// fetch called
	select {
	case <-fetched: // expected
	case <-time.After(1 * time.Second):
		t.Fatalf("expected fetch")
	}

	// read MasterInfo
	select {
	case mi := <-d.ch:
		assert.Equal(mi, CreateMasterInfo(pid))
	case <-time.After(1 * time.Second):
		t.Fatalf("expected poller to send master info")
	}
}
Example #7
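// encodeBooleanPoint converts a BooleanPoint into its protobuf representation.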
func encodeBooleanPoint(p *BooleanPoint) *internal.Point {
	return &internal.Point{
		Name:       proto.String(p.Name),
		Tags:       proto.String(p.Tags.ID()),
		Time:       proto.Int64(p.Time),
		Nil:        proto.Bool(p.Nil),
		Aux:        encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),

		BooleanValue: proto.Bool(p.Value),
	}
}
Example #8
// marshal serializes to a protobuf representation.
func (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo {
	pb := &internal.RetentionPolicyInfo{
		Name:               proto.String(rpi.Name),
		ReplicaN:           proto.Uint32(uint32(rpi.ReplicaN)),
		Duration:           proto.Int64(int64(rpi.Duration)),
		ShardGroupDuration: proto.Int64(int64(rpi.ShardGroupDuration)),
	}

	pb.ShardGroups = make([]*internal.ShardGroupInfo, len(rpi.ShardGroups))
	for i, sgi := range rpi.ShardGroups {
		pb.ShardGroups[i] = sgi.marshal()
	}

	return pb
}
Example #9
// marshal serializes to a protobuf representation.
func (s *RetentionPolicySpec) marshal() *internal.RetentionPolicySpec {
	pb := &internal.RetentionPolicySpec{}
	if s.Name != "" {
		pb.Name = proto.String(s.Name)
	}
	if s.Duration != nil {
		pb.Duration = proto.Int64(int64(*s.Duration))
	}
	if s.ShardGroupDuration > 0 {
		pb.ShardGroupDuration = proto.Int64(int64(s.ShardGroupDuration))
	}
	if s.ReplicaN != nil {
		pb.ReplicaN = proto.Uint32(uint32(*s.ReplicaN))
	}
	return pb
}
Example #10
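// main repeatedly connects to a local server and sends a login request, pausing briefly between iterations.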
func main() {

	for {

		client := NewClient()

		if !client.Connect("127.0.0.1:80") {
			return
		}

		retCmd := &usercmd.ReqUserLogin{
			Account:  proto.String("abcd"),
			Password: proto.String("123456"),
			Key:      proto.Uint32(100),
		}
		client.SendCmd(usercmd.UserCmd_Login, retCmd)

		time.Sleep(time.Millisecond * 1)
	}
}
Example #11
// InitChecksum initializes a checksum based on the provided key and
// the contents of the value. If the value contains a byte slice, the
// checksum includes it directly.
func (v *Value) InitChecksum(key []byte) {
	if v.Checksum == nil {
		v.Checksum = proto.Uint32(v.computeChecksum(key))
	}
}
Example #12
// Build method builds the task
func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	if t.config.Hostname != "" {
		t.Container.Hostname = proto.String(t.config.Hostname)
		if t.config.Domainname != "" {
			t.Container.Hostname = proto.String(t.config.Hostname + "." + t.config.Domainname)
		}
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "default", "bridge", "":
		var ports []uint64

		for _, offer := range offers {
			ports = append(ports, getPorts(offer)...)
		}

		for containerProtoPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				containerInfo := strings.SplitN(containerProtoPort, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}

				var hostPort uint64

				if binding.HostPort != "" {
					hostPort, err = strconv.ParseUint(binding.HostPort, 10, 32)
					if err != nil {
						log.Warn(err)
						continue
					}
				} else if len(ports) > 0 {
					hostPort = ports[0]
					ports = ports[1:]
				}

				if hostPort == 0 {
					log.Warn("cannot find port to bind on the host")
					continue
				}

				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	for _, value := range t.config.Env {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("env"), Value: proto.String(value)})
	}

	if !t.config.AttachStdin && !t.config.AttachStdout && !t.config.AttachStderr {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=true", cluster.SwarmLabelNamespace+".mesos.detach"))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}
Example #13
	pb "github.com/gogo/protobuf/jsonpb/jsonpb_test_proto"
	"github.com/gogo/protobuf/proto"
)

var (
	marshaller = Marshaller{}

	marshallerAllOptions = Marshaller{
		EnumsAsString: true,
		Indent:        "  ",
	}

	simpleObject = &pb.Simple{
		OInt32:  proto.Int32(-32),
		OInt64:  proto.Int64(-6400000000),
		OUint32: proto.Uint32(32),
		OUint64: proto.Uint64(6400000000),
		OSint32: proto.Int32(-13),
		OSint64: proto.Int64(-2600000000),
		OFloat:  proto.Float32(3.14),
		ODouble: proto.Float64(6.02214179e23),
		OBool:   proto.Bool(true),
		OString: proto.String("hello \"there\""),
		OBytes:  []byte("beep boop"),
	}

	simpleObjectJSON = `{` +
		`"o_bool":true,` +
		`"o_int32":-32,` +
		`"o_int64":"-6400000000",` +
		`"o_uint32":32,` +
Example #14
func TestStandalone_pollerFetchedMulti(t *testing.T) {
	assert := assert.New(t)
	// presence of an IP address allows the fetcher to be called
	d := NewStandalone(&mesos.MasterInfo{Ip: proto.Uint32(localhost)})
	defer d.Cancel()
	d.leaderSyncInterval = 500 * time.Millisecond

	i := 0
	var wg sync.WaitGroup
	wg.Add(4)
	f := fetcherFunc(func(ctx context.Context, addr string) (*upid.UPID, error) {
		defer func() { i++ }()
		switch i {
		case 0:
			wg.Done()
			assert.Equal("127.0.0.1:5050", addr)
			return &upid.UPID{ID: "master@127.0.0.1:5050", Host: "127.0.0.1", Port: "5050"}, nil
		case 1:
			wg.Done()
			assert.Equal("127.0.0.1:5050", addr)
			return &upid.UPID{ID: "master@127.0.0.2:5050", Host: "127.0.0.2", Port: "5050"}, nil
		case 2:
			wg.Done()
			return nil, context.DeadlineExceeded
		case 3:
			wg.Done()
			assert.Equal("127.0.0.1:5050", addr)
			return &upid.UPID{ID: "master@127.0.0.3:5050", Host: "127.0.0.3", Port: "5050"}, nil
		default:
			d.Cancel()
			return nil, context.Canceled
		}
	})

	go d.poller(f)

	// fetches complete
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		wg.Wait()
	}()

	changed := make(chan struct{})
	go func() {
		defer close(changed)
		for i := 0; i < 4; i++ {
			if mi, ok := <-d.ch; !ok {
				t.Fatalf("failed to read master info on cycle %v", i)
				break
			} else {
				switch i {
				case 0:
					assert.Equal(CreateMasterInfo(&upid.UPID{ID: "master@127.0.0.1:5050", Host: "127.0.0.1", Port: "5050"}), mi)
				case 1:
					assert.Equal(CreateMasterInfo(&upid.UPID{ID: "master@127.0.0.2:5050", Host: "127.0.0.2", Port: "5050"}), mi)
				case 2:
					assert.Nil(mi)
				case 3:
					assert.Equal(CreateMasterInfo(&upid.UPID{ID: "master@127.0.0.3:5050", Host: "127.0.0.3", Port: "5050"}), mi)
				}
			}
		}
	}()

	started := time.Now()
	select {
	case <-ch: // expected
	case <-time.After(3 * time.Second):
		t.Fatalf("expected fetches all complete")
	}

	select {
	case <-changed: // expected
	case <-time.After((3 * time.Second) - time.Now().Sub(started)):
		t.Fatalf("expected to have received all master info changes")
	}
}
Example #15
// TODO(tyler) split this long function up!
func (s *EtcdScheduler) launchOne(driver scheduler.SchedulerDriver) {
	// Always ensure we've pruned any dead / unmanaged nodes before
	// launching new ones, or we may overconfigure the ensemble such
	// that it can not make progress if the next launch fails.
	err := s.Prune()
	if err != nil {
		log.Errorf("Failed to remove stale cluster members: %s", err)
		return
	}

	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		return
	}

	// validOffer filters out offers that are no longer
	// desirable, even though they may have been when
	// they were enqueued.
	validOffer := func(offer *mesos.Offer) bool {
		runningCopy := s.RunningCopy()
		for _, etcdConfig := range runningCopy {
			if etcdConfig.SlaveID == offer.SlaveId.GetValue() {
				if s.singleInstancePerSlave {
					log.Info("Skipping offer: already running on this slave.")
					return false
				}
			}
		}
		return true
	}

	// Issue BlockingPop until we get back an offer we can use.
	var offer *mesos.Offer
	for {
		offer = s.offerCache.BlockingPop()
		if validOffer(offer) {
			break
		} else {
			s.decline(driver, offer)
		}
	}

	// Do this again because BlockingPop may have taken a long time.
	if !s.shouldLaunch(driver) {
		log.Infoln("Skipping launch attempt for now.")
		s.decline(driver, offer)
		return
	}

	// TODO(tyler) this is a broken hack
	resources := parseOffer(offer)
	lowest := *resources.ports[0].Begin
	rpcPort := lowest
	clientPort := lowest + 1
	httpPort := lowest + 2

	s.mut.Lock()
	var clusterType string
	if len(s.running) == 0 {
		clusterType = "new"
	} else {
		clusterType = "existing"
	}

	s.highestInstanceID++
	name := "etcd-" + strconv.FormatInt(s.highestInstanceID, 10)

	node := &config.Node{
		Name:       name,
		Host:       *offer.Hostname,
		RPCPort:    rpcPort,
		ClientPort: clientPort,
		ReseedPort: httpPort,
		Type:       clusterType,
		SlaveID:    offer.GetSlaveId().GetValue(),
	}
	running := []*config.Node{node}
	for _, r := range s.running {
		running = append(running, r)
	}
	serializedNodes, err := json.Marshal(running)
	log.Infof("Serialized running: %+v", string(serializedNodes))
	if err != nil {
		log.Errorf("Could not serialize running list: %v", err)
		// This Unlock is not deferred because the test implementation of LaunchTasks
		// calls this scheduler's StatusUpdate method, causing the test to deadlock.
		s.decline(driver, offer)
		s.mut.Unlock()
		return
	}

	configSummary := node.String()
	taskID := &mesos.TaskID{Value: &configSummary}
	executor := s.newExecutorInfo(node, s.executorUris)
	task := &mesos.TaskInfo{
		Data:     serializedNodes,
		Name:     proto.String("etcd-server"),
		TaskId:   taskID,
		SlaveId:  offer.SlaveId,
		Executor: executor,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", s.cpusPerTask),
			util.NewScalarResource("mem", s.memPerTask),
			util.NewScalarResource("disk", s.diskPerTask),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(rpcPort), uint64(httpPort)),
			}),
		},
		Discovery: &mesos.DiscoveryInfo{
			Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
			Name:       proto.String("etcd-server"),
			Ports: &mesos.Ports{
				Ports: []*mesos.Port{
					&mesos.Port{
						Number:   proto.Uint32(uint32(rpcPort)),
						Protocol: proto.String("tcp"),
					},
					// HACK: "client" is not a real SRV protocol.  This is so
					// that we can have etcd proxies use srv discovery on the
					// above tcp name.  Mesos-dns does not yet care about
					// names for DiscoveryInfo.  When it does, we should
					// create a name for clients to use.  We want to keep
					// the rpcPort accessible at _etcd-server._tcp.<fwname>.mesos
					&mesos.Port{
						Number:   proto.Uint32(uint32(clientPort)),
						Protocol: proto.String("client"),
					},
				},
			},
		},
	}

	log.Infof(
		"Prepared task: %s with offer %s for launch",
		task.GetName(),
		offer.Id.GetValue(),
	)
	log.Info("Launching etcd node.")

	tasks := []*mesos.TaskInfo{task}

	s.pending[node.Name] = struct{}{}

	// This Unlock is not deferred because the test implementation of LaunchTasks
	// calls this scheduler's StatusUpdate method, causing the test to deadlock.
	s.mut.Unlock()

	atomic.AddUint32(&s.Stats.LaunchedServers, 1)
	driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		tasks,
		&mesos.Filters{
			RefuseSeconds: proto.Float64(1),
		},
	)
}
Example #16
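// ResourceOffers declines all offers once the desired number of node tasks is running; otherwise it carves CPU, memory and port resources out of each offer and launches as many node tasks as fit.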
func (s *visghsScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	if (s.nodesLaunched - s.nodesErrored) >= s.nodeTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		ids := make([]*mesos.OfferID, len(offers))
		for i, offer := range offers {
			ids[i] = offer.Id
		}
		driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(120)})
		return
	}
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		portResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		ports := []*mesos.Value_Range{}
		portsCount := uint64(0)
		for _, res := range portResources {
			for _, rs := range res.GetRanges().GetRange() {
				ports = append(ports, rs)
				portsCount += 1 + rs.GetEnd() - rs.GetBegin()
			}
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems, " ports=", ports)

		remainingCpus := cpus
		remainingMems := mems
		remainingPorts := ports
		remainingPortsCount := portsCount

		// account for executor resources if there's not an executor already running on the slave
		if len(offer.ExecutorIds) == 0 {
			remainingCpus -= CPUS_PER_EXECUTOR
			remainingMems -= MEM_PER_EXECUTOR
		}

		var tasks []*mesos.TaskInfo
		for (s.nodesLaunched-s.nodesErrored) < s.nodeTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems &&
			PORTS_PER_TASK <= remainingPortsCount {
			log.Infoln("Ports <", remainingPortsCount, remainingPorts)

			s.nodesLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(s.nodesLaunched)),
			}

			taskPorts := []*mesos.Value_Range{}
			leftOverPorts := []*mesos.Value_Range{}
			for t := 0; t < PORTS_PER_TASK; t++ {
				if len(remainingPorts) < 1 {
					// failed to allocate port, oh no!
				}
				ps := remainingPorts[0]
				//take the first port from the first
				pb := ps.GetBegin()
				pe := ps.GetEnd()
				//Create one range per port we need, it's easier this way
				tp := mesos.Value_Range{}
				p := pb
				tp.Begin = &p
				tp.End = &p
				taskPorts = append(taskPorts, &tp)

				pb++
				if pb <= pe {
					rpb := pb
					rpe := pe
					rtp := mesos.Value_Range{Begin: &rpb, End: &rpe}
					leftOverPorts = append(leftOverPorts, &rtp)
				}
				for _, ps := range remainingPorts[1:] {
					leftOverPorts = append(leftOverPorts, ps)
				}
			}

			radiaPort := (uint32)(taskPorts[0].GetBegin())
			task := &mesos.TaskInfo{
				Name:     proto.String("visghs-node-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: s.nexec,
				Discovery: &mesos.DiscoveryInfo{
					Name: proto.String("visghs"),
					//Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
					Visibility: mesos.DiscoveryInfo_FRAMEWORK.Enum(),
					Ports: &mesos.Ports{
						Ports: []*mesos.Port{
							{Protocol: proto.String("UDP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("udpprobe"),
								Number:     proto.Uint32(radiaPort)},
							{Protocol: proto.String("TCP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("radiarpc"),
								Number:     proto.Uint32(radiaPort)},
						},
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
					util.NewRangesResource("ports", taskPorts),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
			remainingPorts = leftOverPorts
			remainingPortsCount -= PORTS_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #17
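// setAnyValue stores an arbitrary Go value in the Any wrapper, dispatching on the value's reflected kind to the matching typed field; unsupported kinds produce an error and recovered panics are converted to errors.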
func setAnyValue(value interface{}, m *Any) (err error) {
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case runtime.Error:
				panic(r)
			case string:
				err = errors.New(x)
			case error:
				err = x
			default:
				err = errors.New("Unknown panic")
			}
		}
	}()
	if m != nil {
		if m.Type != nil {
			m.Reset()
		}
		var val reflect.Value
		if v, ok := value.(reflect.Value); ok {
			val = v
		} else if v, ok := value.(*Any); ok {
			if v != nil {
				*m = *v
			} else {
				anyType := Any_NilType
				m.Type = &anyType
			}
			return
		} else {
			val = reflect.ValueOf(value)
		}
		if !val.IsValid() {
			anyType := Any_NilType
			m.Type = &anyType
			return
		}
		kind := val.Kind()
		if (kind == reflect.Chan || kind == reflect.Func || kind == reflect.Interface || kind == reflect.Map || kind == reflect.Ptr || kind == reflect.Slice) && val.IsNil() {
			anyType := Any_NilType
			m.Type = &anyType
			return
		}
		if kind == reflect.Ptr {
			kind = val.Elem().Kind()
		}
		switch kind {
		case reflect.Interface:
			err = setAnyValue(val.Interface(), m)
		case reflect.Struct:
			if val.Type().String() == "time.Time" {
				t := val.Interface()
				if v, ok := t.(time.Time); ok {
					tStr := v.Format(time.RFC3339Nano)
					err = setData(reflect.ValueOf(&m.StringValue), Any_TimeType, m, reflect.ValueOf(&tStr))
					break
				}
			}
			err = errors.New("Error: Unsupported value type")
		case reflect.String:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.String(val.String()))
			}
			err = setData(reflect.ValueOf(&m.StringValue), Any_StringType, m, val)
		case reflect.Uint:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Uint64(val.Uint()))
			}
			err = setData(reflect.ValueOf(&m.Uint64Value), Any_UintType, m, val)
		case reflect.Uint32:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Uint32(uint32(val.Uint())))
			}
			err = setData(reflect.ValueOf(&m.Uint32Value), Any_Uint32Type, m, val)
		case reflect.Uint64:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Uint64(val.Uint()))
			}
			err = setData(reflect.ValueOf(&m.Uint64Value), Any_Uint64Type, m, val)
		case reflect.Int:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Int64(val.Int()))
			}
			err = setData(reflect.ValueOf(&m.Int64Value), Any_IntType, m, val)
		case reflect.Int32:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Int32(int32(val.Int())))
			}
			err = setData(reflect.ValueOf(&m.Int32Value), Any_Int32Type, m, val)
		case reflect.Int64:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Int64(val.Int()))
			}
			err = setData(reflect.ValueOf(&m.Int64Value), Any_Int64Type, m, val)
		case reflect.Float32:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Float32(float32(val.Float())))
			}
			err = setData(reflect.ValueOf(&m.Float32Value), Any_Float32Type, m, val)
		case reflect.Float64:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Float64(val.Float()))
			}
			err = setData(reflect.ValueOf(&m.Float64Value), Any_Float64Type, m, val)
		case reflect.Bool:
			if kind != reflect.Ptr {
				val = reflect.ValueOf(proto.Bool(val.Bool()))
			}
			err = setData(reflect.ValueOf(&m.BoolValue), Any_BoolType, m, val)
		case reflect.Slice:
			if val.Type() == typeOfBytes {
				err = setData(reflect.ValueOf(&m.ByteValue), Any_ByteType, m, val)
				break
			}
			err = errors.New("Error: Unsupported value type")
		default:
			err = errors.New("Error: Unsupported value type")
		}
	}
	return
}
Example #18
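// GatherServiceStats collects per-port listener statistics plus process-wide rusage, connection and uptime counters into a ResponseStats message.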
func GatherServiceStats() (*badoo_service.ResponseStats, error) {
	ru, err := Getrusage(syscall.RUSAGE_SELF)
	if nil != err {
		return nil, fmt.Errorf("getrusage: %v", err)
	}

	// ports stats first
	ports := make([]*badoo_service.ResponseStatsPortStats, len(StartedServers))

	i := 0
	total_connections := uint32(0)
	for _, srv := range StartedServers {
		port_stats := &badoo_service.ResponseStatsPortStats{}

		stats := srv.Server.Stats

		// listen queue information
		unacked, sacked, err := GetLqInfo(srv)
		if err == nil {
			port_stats.LqCur = proto.Uint32(unacked)
			port_stats.LqMax = proto.Uint32(sacked)
		}

		port_connections := atomic.LoadUint64(&stats.ConnCur)
		total_connections += uint32(port_connections)

		// general stats
		port_stats.Proto = proto.String(srv.Name)
		port_stats.Address = proto.String(srv.Address)
		port_stats.ConnCur = proto.Uint64(port_connections)
		port_stats.ConnTotal = proto.Uint64(atomic.LoadUint64(&stats.ConnTotal))
		port_stats.Requests = proto.Uint64(atomic.LoadUint64(&stats.Requests))
		port_stats.BytesRead = proto.Uint64(atomic.LoadUint64(&stats.BytesRead))
		port_stats.BytesWritten = proto.Uint64(atomic.LoadUint64(&stats.BytesWritten))

		// per request stats
		port_stats.RequestStats = make([]*badoo_service.ResponseStatsPortStatsRequestStatsT, 0, len(badoo_service.RequestMsgid_name))
		for msg_id, msg_name := range srv.Server.Proto.GetRequestIdToNameMap() {
			port_stats.RequestStats = append(port_stats.RequestStats, &badoo_service.ResponseStatsPortStatsRequestStatsT{
				Name:  proto.String(msg_name),
				Count: proto.Uint64(atomic.LoadUint64(&stats.RequestsIdStat[msg_id])),
			})
		}

		ports[i] = port_stats
		i++
	}

	r := &badoo_service.ResponseStats{
		Uptime: proto.Uint32(uint32(time.Since(GetStartupTime()).Seconds())),
		RusageSelf: &badoo_service.ResponseStatsRusage{
			RuUtime:   proto.Float32(timevalToFloat32(&ru.Utime)),
			RuStime:   proto.Float32(timevalToFloat32(&ru.Stime)),
			RuMaxrss:  proto.Uint64(uint64(ru.Maxrss)),
			RuMinflt:  proto.Uint64(uint64(ru.Minflt)),
			RuMajflt:  proto.Uint64(uint64(ru.Majflt)),
			RuInblock: proto.Uint64(uint64(ru.Inblock)),
			RuOublock: proto.Uint64(uint64(ru.Oublock)),
			RuNvcsw:   proto.Uint64(uint64(ru.Nvcsw)),
			RuNivcsw:  proto.Uint64(uint64(ru.Nivcsw)),
		},
		Ports:             ports,
		Connections:       proto.Uint32(total_connections),
		InitPhaseDuration: proto.Uint32(uint32(GetInitPhaseDuration().Seconds())),
	}

	return r, nil
}
Example #19
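// build fills in the task's command, container, network and resource settings from its config for the given slave.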
func (t *task) build(slaveID string) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "bridge", "":
		for containerPort, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				fmt.Println(containerPort)
				containerInfo := strings.SplitN(containerPort, "/", 2)
				fmt.Println(containerInfo[0], containerInfo[1])
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				hostPort, err := strconv.ParseUint(binding.HostPort, 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}

	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}

	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}