Example #1
func write(conn *net.TCPConn) {

	n := protocol.Mode(5)
	reg := &protocol.WMessage{
		MsgType:   proto.String("sendMsg"),
		MsgTypeId: proto.Int32(8),
		UserInfo: &protocol.User{
			Username: proto.String("jim"),
			//Password: proto.String("123456"),
		},
		SendMsg: &protocol.SendMessage{
			Receiver: proto.String("zhang"),
			MsgType:  &n,
			Msg:      proto.String("吃了吗?"),
		},
	}
	buf, err := proto.Marshal(reg)
	if err != nil {
		fmt.Println("failed: %s\n", err)
		return
	}
	fmt.Println("buf: ", len(buf))
	length := len(buf)
	buffer := append(common.IntToBytes(length), buf...)
	conn.Write(buffer)
	//return buffer
	//conn.Write(common.IntToBytes(length))
}
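The write function above frames the marshalled message by prepending its length via common.IntToBytes. A minimal sketch of the receiving side, assuming the prefix is a 4-byte big-endian integer (common.IntToBytes is not shown, so that is an assumption); the returned bytes would then go to proto.Unmarshal with a *protocol.WMessage:
// readFrame reads one length-prefixed message from r. Sketch only: it assumes
// a 4-byte big-endian length prefix and needs the "encoding/binary" and "io"
// packages.
func readFrame(r io.Reader) ([]byte, error) {
	var length uint32
	if err := binary.Read(r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	payload := make([]byte, length)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}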
Example #2
func keyToProto(k *Key) *pb.Key {
	if k == nil {
		return nil
	}

	// TODO(jbd): Eliminate unrequired allocations.
	path := []*pb.Key_PathElement(nil)
	for {
		el := &pb.Key_PathElement{
			Kind: proto.String(k.kind),
		}
		if k.id != 0 {
			el.Id = proto.Int64(k.id)
		}
		if k.name != "" {
			el.Name = proto.String(k.name)
		}
		path = append([]*pb.Key_PathElement{el}, path...)
		if k.parent == nil {
			break
		}
		k = k.parent
	}
	key := &pb.Key{
		PathElement: path,
	}
	if k.namespace != "" {
		key.PartitionId = &pb.PartitionId{
			Namespace: proto.String(k.namespace),
		}
	}
	return key
}
Example #3
func listener(c <-chan []byte, dst *string) {
	for {
		msg := <-c
		var conAuxSlice []ConAux
		if err := json.Unmarshal(msg, &conAuxSlice); err != nil {
			fmt.Println("unmarshal error", err)
			continue
		}

		fmt.Println("unmarshalled", conAuxSlice)

		connections := new(protobee.Connections)
		connections.Connection = []*protobee.Connection{}

		for _, value := range conAuxSlice {
			con := new(protobee.Connection)
			con.Transport = proto.String(value.Transport)
			con.LocalAddress = proto.String(value.LocalAddress)
			con.LocalPort = proto.Uint32(value.LocalPort)
			con.RemoteAddress = proto.String(value.RemoteAddress)
			con.RemotePort = proto.Uint32(value.RemotePort)
			con.Pid = proto.Uint32(value.Pid)
			con.Name = proto.String(value.Name)
			connections.Connection = append(connections.Connection, con)
		}
		//connections
		pb, err := proto.Marshal(connections)
		if err != nil {
			fmt.Println("error", err)
			continue
		}
		sendDataToDest(pb, dst)
		//time.Sleep(time.Second * 2)
	}
}
Example #4
func TestStringSave(t *testing.T) {

	options := MysqlOptions{
		Addr:         "localhost:3306",
		DB:           "kite",
		Username:     "******",
		Password:     "",
		ShardNum:     4,
		BatchUpSize:  100,
		BatchDelSize: 100,
		FlushPeriod:  10 * time.Millisecond,
		MaxIdleConn:  10,
		MaxOpenConn:  10}

	kiteMysql := NewKiteMysql(options, "localhost")
	truncate(kiteMysql)
	for i := 0; i < 16; i++ {
		// create a message
		msg := &protocol.StringMessage{}
		msg.Header = &protocol.Header{
			MessageId:    proto.String("26c03f00665862591f696a980b5a6" + fmt.Sprintf("%x", i)),
			Topic:        proto.String("trade"),
			MessageType:  proto.String("pay-succ"),
			ExpiredTime:  proto.Int64(time.Now().Add(10 * time.Minute).Unix()),
			DeliverLimit: proto.Int32(100),
			GroupId:      proto.String("go-kite-test"),
			Commit:       proto.Bool(false),
			Fly:          proto.Bool(false)}

		msg.Body = proto.String("hello world")
		innerT(kiteMysql, msg, msg.GetHeader().GetMessageId(), t)
	}
	kiteMysql.Stop()
}
Example #5
// keyToProto converts a *Key to a Reference proto.
func keyToProto(defaultAppID string, k *Key) *pb.Reference {
	appID := k.appID
	if appID == "" {
		appID = defaultAppID
	}
	n := 0
	for i := k; i != nil; i = i.parent {
		n++
	}
	e := make([]*pb.Path_Element, n)
	for i := k; i != nil; i = i.parent {
		n--
		e[n] = &pb.Path_Element{
			Type: &i.kind,
		}
		// At most one of {Name,Id} should be set.
		// Neither will be set for incomplete keys.
		if i.stringID != "" {
			e[n].Name = &i.stringID
		} else if i.intID != 0 {
			e[n].Id = &i.intID
		}
	}
	var namespace *string
	if k.namespace != "" {
		namespace = proto.String(k.namespace)
	}
	return &pb.Reference{
		App:       proto.String(appID),
		NameSpace: namespace,
		Path: &pb.Path{
			Element: e,
		},
	}
}
Example #6
// Seal encrypts data for the child. This call also zeroes the data parameter.
func (lh *LinuxHost) Seal(child *LinuxHostChild, data []byte, policy string) ([]byte, error) {
	defer ZeroBytes(data)
	lhsb := &LinuxHostSealedBundle{
		Policy: proto.String(policy),
		Data:   data,
	}

	switch policy {
	case SharedSecretPolicyDefault, SharedSecretPolicyConservative:
		// We are using a master key-deriving key shared among all
		// similar LinuxHost instances. For LinuxHost, the default
		// and conservative policies mean any process running the same
		// program binary as the caller hosted on a similar
		// LinuxHost.
		lhsb.PolicyInfo = proto.String(child.ChildSubprin.String())
	case SharedSecretPolicyLiberal:
		// The most liberal we can do is allow any hosted process
		// running on a similar LinuxHost instance. So, we don't set
		// any policy info.
	default:
		// Try to parse this statement as a tao/auth policy. If it
		// parses, then use it as the policy statement.
		return nil, newError("policy not supported for Seal: " + policy)
	}

	m, err := proto.Marshal(lhsb)
	if err != nil {
		return nil, err
	}
	defer ZeroBytes(m)

	return lh.Host.Encrypt(m)
}
Example #7
func TestAttribute(t *testing.T) {
	attr := Attribute(&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	})
	if attr != "rack:2.00" {
		t.Errorf(`Attribute(&mesos.Attribute{
        Name: proto.String("rack"),
        Type: mesos.Value_SCALAR.Enum(),
        Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
    }) != "rack:2.00"; actual %s`, attr)
	}

	attr = Attribute(&mesos.Attribute{
		Name: proto.String("datacenter"),
		Type: mesos.Value_TEXT.Enum(),
		Text: &mesos.Value_Text{Value: proto.String("DC-1")},
	})
	if attr != "datacenter:DC-1" {
		t.Errorf(`Attribute(&mesos.Attribute{
        Name: proto.String("datacenter"),
        Type: mesos.Value_TEXT.Enum(),
        Text: proto.String("DC-1"),
    }) != "datacenter:DC-1"; actual %s`, attr)
	}
}
Example #8
func (s *MySuite) TestToVerifyXmlContentForDataTableDrivenExecution(c *C) {
	value := gauge_messages.ProtoItem_TableDrivenScenario
	scenario := gauge_messages.ProtoScenario{Failed: proto.Bool(false), ScenarioHeading: proto.String("Scenario")}
	scenario1 := gauge_messages.ProtoScenario{Failed: proto.Bool(false), ScenarioHeading: proto.String("Scenario")}
	item := &gauge_messages.ProtoItem{TableDrivenScenario: &gauge_messages.ProtoTableDrivenScenario{Scenarios: []*gauge_messages.ProtoScenario{&scenario, &scenario1}}, ItemType: &value}
	spec := &gauge_messages.ProtoSpec{SpecHeading: proto.String("HEADING"), FileName: proto.String("FILENAME"), Items: []*gauge_messages.ProtoItem{item}}
	specResult := &gauge_messages.ProtoSpecResult{ProtoSpec: spec, ScenarioCount: proto.Int(1), Failed: proto.Bool(false)}
	suiteResult := &gauge_messages.ProtoSuiteResult{SpecResults: []*gauge_messages.ProtoSpecResult{specResult}}
	message := &gauge_messages.SuiteExecutionResult{SuiteResult: suiteResult}

	builder := &XmlBuilder{currentId: 0}
	bytes, err := builder.getXmlContent(message)
	var suites JUnitTestSuites
	xml.Unmarshal(bytes, &suites)

	c.Assert(err, Equals, nil)
	c.Assert(len(suites.Suites), Equals, 1)
	c.Assert(suites.Suites[0].Errors, Equals, 0)
	c.Assert(suites.Suites[0].Failures, Equals, 0)
	c.Assert(suites.Suites[0].Package, Equals, "FILENAME")
	c.Assert(suites.Suites[0].Name, Equals, "HEADING")
	c.Assert(suites.Suites[0].Tests, Equals, 2)
	c.Assert(suites.Suites[0].Timestamp, Equals, builder.suites.Suites[0].Timestamp)
	c.Assert(suites.Suites[0].SystemError.Contents, Equals, "")
	c.Assert(suites.Suites[0].SystemOutput.Contents, Equals, "")
	c.Assert(len(suites.Suites[0].TestCases), Equals, 2)
	c.Assert(suites.Suites[0].TestCases[0].Name, Equals, "Scenario 0")
	c.Assert(suites.Suites[0].TestCases[1].Name, Equals, "Scenario 1")
}
Example #9
func (s *MySuite) TestToVerifyXmlContentForFailingExecutionResult(c *C) {
	value := gauge_messages.ProtoItem_Scenario
	item := &gauge_messages.ProtoItem{Scenario: &gauge_messages.ProtoScenario{Failed: proto.Bool(true), ScenarioHeading: proto.String("Scenario1")}, ItemType: &value}
	spec := &gauge_messages.ProtoSpec{SpecHeading: proto.String("HEADING"), FileName: proto.String("FILENAME"), Items: []*gauge_messages.ProtoItem{item}}
	specResult := &gauge_messages.ProtoSpecResult{ProtoSpec: spec, ScenarioCount: proto.Int(1), Failed: proto.Bool(true), ScenarioFailedCount: proto.Int(1)}
	suiteResult := &gauge_messages.ProtoSuiteResult{SpecResults: []*gauge_messages.ProtoSpecResult{specResult}}
	message := &gauge_messages.SuiteExecutionResult{SuiteResult: suiteResult}

	builder := &XmlBuilder{currentId: 0}
	bytes, err := builder.getXmlContent(message)
	var suites JUnitTestSuites
	xml.Unmarshal(bytes, &suites)

	c.Assert(err, Equals, nil)
	c.Assert(len(suites.Suites), Equals, 1)
	// spec1 || testSuite
	c.Assert(suites.Suites[0].Errors, Equals, 0)
	c.Assert(suites.Suites[0].Failures, Equals, 1)
	c.Assert(suites.Suites[0].Package, Equals, "FILENAME")
	c.Assert(suites.Suites[0].Name, Equals, "HEADING")
	c.Assert(suites.Suites[0].Tests, Equals, 1)
	c.Assert(suites.Suites[0].Timestamp, Equals, builder.suites.Suites[0].Timestamp)
	c.Assert(suites.Suites[0].SystemError.Contents, Equals, "")
	c.Assert(suites.Suites[0].SystemOutput.Contents, Equals, "")
	// scenario1 of spec1 || testCase
	c.Assert(len(suites.Suites[0].TestCases), Equals, 1)
	c.Assert(suites.Suites[0].TestCases[0].Classname, Equals, "HEADING")
	c.Assert(suites.Suites[0].TestCases[0].Name, Equals, "Scenario1")
	c.Assert(suites.Suites[0].TestCases[0].Failure.Message, Equals, "")
	c.Assert(suites.Suites[0].TestCases[0].Failure.Contents, Equals, "")
}
Example #10
// GetFailoverLogs from projector, for a set of vbuckets.
// - return http errors for transport related failures.
// - return couchbase SDK error if any.
func (client *Client) GetFailoverLogs(
	pooln, bucketn string,
	vbnos []uint32) (*protobuf.FailoverLogResponse, error) {

	req := &protobuf.FailoverLogRequest{
		Pool:   proto.String(pooln),
		Bucket: proto.String(bucketn),
		Vbnos:  vbnos,
	}
	res := &protobuf.FailoverLogResponse{}
	err := client.withRetry(
		func() error {
			err := client.ap.Request(req, res)
			if err != nil {
				return err
			} else if protoerr := res.GetErr(); protoerr != nil {
				return fmt.Errorf("%s", protoerr.GetError())
			}
			return err // nil
		})
	if err != nil {
		return nil, err
	}
	return res, nil
}
Example #11
func convertToProtoStepValue(stepValue *stepValue) *gauge_messages.ProtoStepValue {
	return &gauge_messages.ProtoStepValue{
		StepValue:              proto.String(stepValue.stepValue),
		ParameterizedStepValue: proto.String(stepValue.parameterizedStepValue),
		Parameters:             stepValue.args,
	}
}
Example #12
// CreateFile opens a new file in HDFS with the given replication, block size,
// and permissions, and returns an io.WriteCloser for writing to it. Because of
// the way that HDFS writes are buffered and acknowledged asynchronously, it is
// very important that Close is called after all data has been written.
func (c *Client) CreateFile(name string, replication int, blockSize int64, perm os.FileMode) (*FileWriter, error) {
	createReq := &hdfs.CreateRequestProto{
		Src:          proto.String(name),
		Masked:       &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))},
		ClientName:   proto.String(c.namenode.ClientName()),
		CreateFlag:   proto.Uint32(1),
		CreateParent: proto.Bool(false),
		Replication:  proto.Uint32(uint32(replication)),
		BlockSize:    proto.Uint64(uint64(blockSize)),
	}
	createResp := &hdfs.CreateResponseProto{}

	err := c.namenode.Execute("create", createReq, createResp)
	if err != nil {
		if nnErr, ok := err.(*rpc.NamenodeError); ok {
			err = interpretException(nnErr.Exception, err)
		}

		return nil, &os.PathError{Op: "create", Path: name, Err: err}
	}

	return &FileWriter{
		client:      c,
		name:        name,
		replication: replication,
		blockSize:   blockSize,
	}, nil
}
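A usage sketch for CreateFile (assuming the package is imported as hdfs; the path, replication factor, and block size are illustrative): write the data, then check the error from Close, since acknowledgements arrive asynchronously.
// writeToHDFS shows the intended call pattern for CreateFile. Sketch only:
// the path and sizes are placeholders.
func writeToHDFS(client *hdfs.Client, data []byte) error {
	w, err := client.CreateFile("/tmp/example.txt", 3, 128*1024*1024, 0644)
	if err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	// Close flushes buffered packets and waits for datanode acknowledgements.
	return w.Close()
}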
Example #13
func (f *FileWriter) startNewBlock() error {
	// TODO: we don't need to wait for previous blocks to ack before continuing

	if f.blockWriter != nil {
		err := f.blockWriter.Close()
		if err != nil {
			return err
		}
	}
	var previous *hdfs.ExtendedBlockProto
	if f.block != nil {
		previous = f.block.GetB()
	}

	addBlockReq := &hdfs.AddBlockRequestProto{
		Src:        proto.String(f.name),
		ClientName: proto.String(f.client.namenode.ClientName()),
		Previous:   previous,
	}
	addBlockResp := &hdfs.AddBlockResponseProto{}

	err := f.client.namenode.Execute("addBlock", addBlockReq, addBlockResp)
	if err != nil {
		if nnErr, ok := err.(*rpc.NamenodeError); ok {
			err = interpretException(nnErr.Exception, err)
		}

		return &os.PathError{Op: "create", Path: f.name, Err: err}
	}

	f.block = addBlockResp.GetBlock()
	f.blockWriter = rpc.NewBlockWriter(f.block, f.client.namenode, f.blockSize)
	return nil
}
Example #14
// Close closes the file, writing any remaining data out to disk and waiting
// for acknowledgements from the datanodes. It is important that Close is called
// after all data has been written.
func (f *FileWriter) Close() error {
	if f.closed {
		return io.ErrClosedPipe
	}

	var lastBlock *hdfs.ExtendedBlockProto
	if f.blockWriter != nil {
		// Close the blockWriter, flushing any buffered packets.
		err := f.blockWriter.Close()
		if err != nil {
			return err
		}

		lastBlock = f.block.GetB()
	}

	completeReq := &hdfs.CompleteRequestProto{
		Src:        proto.String(f.name),
		ClientName: proto.String(f.client.namenode.ClientName()),
		Last:       lastBlock,
	}
	completeResp := &hdfs.CompleteResponseProto{}

	err := f.client.namenode.Execute("complete", completeReq, completeResp)
	if err != nil {
		return &os.PathError{Op: "create", Path: f.name, Err: err}
	}

	return nil
}
Example #15
// WriteResponse encodes and sends a net/rpc response header r with body x.
func (c *serverCodec) WriteResponse(r *rpc.Response, x interface{}) error {
	// This is similar to WriteRequest(), above.
	var encodeErr error
	var hdr ProtoRPCResponseHeader
	hdr.Op = proto.String(r.ServiceMethod)
	hdr.Seq = proto.Uint64(r.Seq)
	var body proto.Message
	var ok bool
	if r.Error != "" {
		// Error responses have empty body. In this case, x can be an empty struct
		// from net/rpc.Server, and net/rpc.Client will discard the body in any
		// case, so leave body == nil.
		hdr.Error = proto.String(r.Error)
	} else if body, ok = x.(proto.Message); !ok || body == nil {
		// If x isn't a protobuf, or is a nil protobuf, turn reply into an error and
		// leave body == nil.
		encodeErr = ErrBadResponseType
		msg := encodeErr.Error()
		hdr.Error = &msg
	}

	c.sending.Lock()
	_, err := c.m.WriteMessage(&hdr) // writes htonl(length), marshal(hdr)
	if err == nil {
		_, err = c.m.WriteMessage(body) // writes htonl(length), marshal(body)
	}
	c.sending.Unlock()
	if encodeErr != nil {
		err = encodeErr
	}
	return util.Logged(err)
}
Example #16
func (c *connection) writeConnectionHeader() error {
	buf := iohelper.NewPbBuffer()
	service := pb.String(ServiceString[c.serviceType])

	err := buf.WritePBMessage(&proto.ConnectionHeader{
		UserInfo: &proto.UserInformation{
			EffectiveUser: pb.String("pingcap"),
		},
		ServiceName: service,
	})
	if err != nil {
		return errors.Trace(err)
	}

	err = buf.PrependSize()
	if err != nil {
		return errors.Trace(err)
	}

	_, err = c.conn.Write(buf.Bytes())
	if err != nil {
		return errors.Trace(err)
	}

	return nil
}
Example #17
func loginPacket(appkey []byte, username string, authData []byte,
	authType *Spotify.AuthenticationType, deviceId string) []byte {

	packet := &Spotify.ClientResponseEncrypted{
		LoginCredentials: &Spotify.LoginCredentials{
			Username: proto.String(username),
			Typ:      authType,
			AuthData: authData,
		},
		SystemInfo: &Spotify.SystemInfo{
			CpuFamily: Spotify.CpuFamily_CPU_UNKNOWN.Enum(),
			Os:        Spotify.Os_OS_UNKNOWN.Enum(),
			SystemInformationString: proto.String("librespot"),
			DeviceId:                proto.String(deviceId),
		},
		VersionString: proto.String("librespot-8315e10"),
		Appkey: &Spotify.LibspotifyAppKey{
			Version:      proto.Uint32(uint32(appkey[0])),
			Devkey:       appkey[0x1:0x81],
			Signature:    appkey[0x81:0x141],
			Useragent:    proto.String("librespot-8315e10"),
			CallbackHash: make([]byte, 20),
		},
	}

	packetData, err := proto.Marshal(packet)
	if err != nil {
		log.Fatal("login marshaling error: ", err)
	}
	return packetData
}
Example #18
//
// convert IndexDefn to protobuf format
//
func convertIndexDefnToProtoMsg(indexDefn *common.IndexDefn) *protobuf.IndexDefn {

	using := protobuf.StorageType(
		protobuf.StorageType_value[string(indexDefn.Using)]).Enum()
	exprType := protobuf.ExprType(
		protobuf.ExprType_value[string(indexDefn.ExprType)]).Enum()
	partnScheme := protobuf.PartitionScheme(
		protobuf.PartitionScheme_value[string(indexDefn.PartitionScheme)]).Enum()

	//
	// message IndexDefn {
	//  required uint64          defnID          = 1; // unique index id across the secondary index cluster
	//  required string          bucket          = 2; // bucket on which index is defined
	//  required bool            isPrimary       = 3; // whether index secondary-key == docid
	//  required string          name            = 4; // Name of the index
	//  required StorageType     using           = 5; // indexing algorithm
	//  required PartitionScheme partitionScheme = 6;
	//  required string          partnExpression = 7; // use expressions to evaluate doc
	//  required ExprType        exprType        = 8; // how to interpret `expressions` strings
	//  repeated string          secExpressions  = 9; // use expressions to evaluate doc
	//
	defn := &protobuf.IndexDefn{
		DefnID:          proto.Uint64(uint64(indexDefn.DefnId)),
		Bucket:          proto.String(indexDefn.Bucket),
		IsPrimary:       proto.Bool(indexDefn.IsPrimary),
		Name:            proto.String(indexDefn.Name),
		Using:           using,
		ExprType:        exprType,
		SecExpressions:  indexDefn.SecExprs,
		PartitionScheme: partnScheme,
		PartnExpression: proto.String(indexDefn.PartitionKey),
	}

	return defn
}
Example #19
func (g *generator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) {
	var files []*plugin.CodeGeneratorResponse_File
	for _, file := range targets {
		glog.V(1).Infof("Processing %s", file.GetName())
		code, err := g.generate(file)
		if err == errNoTargetService {
			glog.V(1).Infof("%s: %v", file.GetName(), err)
			continue
		}
		if err != nil {
			return nil, err
		}
		formatted, err := format.Source([]byte(code))
		if err != nil {
			glog.Errorf("%v: %s", err, code)
			return nil, err
		}
		name := file.GetName()
		ext := filepath.Ext(name)
		base := strings.TrimSuffix(name, ext)
		output := fmt.Sprintf("%s.pb.gw.go", base)
		files = append(files, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(output),
			Content: proto.String(string(formatted)),
		})
		glog.V(1).Infof("Will emit %s", output)
	}
	return files, nil
}
Example #20
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #21
func main() {
	d := &datapb.SampleData{
		Label: proto.String("hello"),
		Type:  proto.Int32(17),
		Optionalgroup: &datapb.SampleData_OptionalGroup{
			RequiredField: proto.String("good bye"),
		},
	}
	fmt.Println("d.GetLabel():", d.GetLabel())
	fmt.Println()
	// d.GetLabel(): hello

	data, err := proto.Marshal(d)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	fmt.Printf("data: %+v\n", data)
	fmt.Println()
	// data: [10 5 104 101 108 108 111 ...

	newD := &datapb.SampleData{}
	if err := proto.Unmarshal(data, newD); err != nil {
		log.Fatal("unmarshaling error: ", err)
	}
	fmt.Println("newD.GetLabel():", newD.GetLabel())
	fmt.Println()
	// newD.GetLabel(): hello

	fmt.Printf("newD: %+v\n", newD)
	// newD: label:"hello" type:17 OptionalGroup{RequiredField:"good bye" }
}
Example #22
func (s *Scheduler) createExecutor(offer *mesos.Offer, tcpPort uint64, udpPort uint64) *mesos.ExecutorInfo {
	name := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	id := fmt.Sprintf("%s-%s", name, uuid())

	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}

	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}

	command := fmt.Sprintf("./%s --log.level %s --tcp %d --udp %d --host %s", Config.Executor, Config.LogLevel, tcpPort, udpPort, offer.GetHostname())

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(name),
		Command: &mesos.CommandInfo{
			Value: proto.String(command),
			Uris:  uris,
		},
	}
}
Example #23
func newRequestHeader(methodName string) *hadoop.RequestHeaderProto {
	return &hadoop.RequestHeaderProto{
		MethodName:                 proto.String(methodName),
		DeclaringClassProtocolName: proto.String(protocolClass),
		ClientProtocolVersion:      proto.Uint64(uint64(protocolClassVersion)),
	}
}
Example #24
// New returns a new EventLogger. If cfg.Interval is not set,
// it will be assigned time.Second as the default. cfg.Client
// will likewise be assigned http.DefaultClient by default.
func New(ctx context.Context, cfg Config) EventLogger {
	if cfg.Interval == time.Duration(0) {
		cfg.Interval = time.Second
	}
	if cfg.Client == nil {
		cfg.Client = http.DefaultClient
	}

	ret := &evtLogger{
		cfg:   cfg,
		evtCh: make(chan *crit_event.LogRequestLite_LogEventLite, maxEvts),
		src: &crit_event.InfraEventSource{
			HostName:      proto.String(cfg.Hostname),
			AppengineName: proto.String(cfg.AppengineName),
			ServiceName:   proto.String(cfg.Service),
		},
	}

	go func() {
		if err := ret.start(ctx); err != nil {
			logging.Errorf(ctx, "Error: %v", err)
		}
	}()

	return ret
}
Example #25
func main() {
	regMessage := &example.RegMessage{
		Id:       proto.Int32(10001),
		Username: proto.String("vicky"),
		Password: proto.String("123456"),
		Email:    proto.String("*****@*****.**"),
	}

	fmt.Println(regMessage)

	buffer, err := proto.Marshal(regMessage)
	if err != nil {
		fmt.Printf("failed: %s\n", err)
		return
	}

	pTCPAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:10000")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
		return
	}
	pTCPConn, err := net.DialTCP("tcp", nil, pTCPAddr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
		return
	}
	pTCPConn.Write(buffer)
}
Example #26
// Encodes the SnapshotRecoveryRequest to a buffer. Returns the number of bytes
// written and any error that may have occurred.
func (req *SnapshotRecoveryRequest) Encode(w io.Writer) (int, error) {

	protoPeers := make([]*protobuf.SnapshotRecoveryRequest_Peer, len(req.Peers))

	for i, peer := range req.Peers {
		protoPeers[i] = &protobuf.SnapshotRecoveryRequest_Peer{
			Name:             proto.String(peer.Name),
			ConnectionString: proto.String(peer.ConnectionString),
		}
	}

	pb := &protobuf.SnapshotRecoveryRequest{
		LeaderName: proto.String(req.LeaderName),
		LastIndex:  proto.Uint64(req.LastIndex),
		LastTerm:   proto.Uint64(req.LastTerm),
		Peers:      protoPeers,
		State:      req.State,
	}
	p, err := proto.Marshal(pb)
	if err != nil {
		return -1, err
	}

	return w.Write(p)
}
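The matching Decode reverses these steps: read the bytes, unmarshal into the same protobuf message, and copy the fields back. A rough sketch, assuming the proto2-generated getters and a Peer struct with Name and ConnectionString fields mirroring the code above (it needs "io" and "io/ioutil"):
// Decode reads a SnapshotRecoveryRequest from r. Sketch only: field names are
// assumed to mirror those marshalled in Encode above.
func (req *SnapshotRecoveryRequest) Decode(r io.Reader) (int, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return 0, err
	}

	pb := &protobuf.SnapshotRecoveryRequest{}
	if err := proto.Unmarshal(data, pb); err != nil {
		return -1, err
	}

	req.LeaderName = pb.GetLeaderName()
	req.LastIndex = pb.GetLastIndex()
	req.LastTerm = pb.GetLastTerm()
	req.State = pb.GetState()

	req.Peers = make([]*Peer, len(pb.GetPeers()))
	for i, peer := range pb.GetPeers() {
		req.Peers[i] = &Peer{
			Name:             peer.GetName(),
			ConnectionString: peer.GetConnectionString(),
		}
	}
	return len(data), nil
}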
Example #27
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example #28
func (g *generator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) {
	var files []*plugin.CodeGeneratorResponse_File
	for _, file := range targets {
		glog.V(1).Infof("Processing %s", file.GetName())
		code, err := applyTemplate(param{File: file, reg: g.reg})
		if err == errNoTargetService {
			glog.V(1).Infof("%s: %v", file.GetName(), err)
			continue
		}
		if err != nil {
			return nil, err
		}

		var formatted bytes.Buffer
		if err := json.Indent(&formatted, []byte(code), "", "  "); err != nil {
			glog.Errorf("%v: %s", err, code)
			return nil, err
		}

		name := file.GetName()
		ext := filepath.Ext(name)
		base := strings.TrimSuffix(name, ext)
		output := fmt.Sprintf("%s.swagger.json", base)
		files = append(files, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(output),
			Content: proto.String(formatted.String()),
		})
		glog.V(1).Infof("Will emit %s", output)
	}
	return files, nil
}
Example #29
func main() {
	// Create a Test message
	test := &example.Test{
		// Use helper functions to set the field values
		Label: proto.String("hello"),
		Type:  proto.Int32(17),
		Optionalgroup: &example.Test_OptionalGroup{
			RequiredField: proto.String("good bye"),
		},
	}

	// Encode the message
	data, err := proto.Marshal(test)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}

	// Decode the message
	newTest := &example.Test{}
	err = proto.Unmarshal(data, newTest)

	if err != nil {
		log.Fatal("unmarshaling error: ", err)
	}

	// Check the result
	if test.GetLabel() != newTest.GetLabel() {
		log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
	}
}
Example #30
func NewStartRequest(path string, dir string, args []string, allocated resource.ComputeResource, envs []string, host string, port int32) *cmd.ControlMessage {
	request := &cmd.ControlMessage{
		Type: cmd.ControlMessage_StartRequest.Enum(),
		StartRequest: &cmd.StartRequest{
			Path: proto.String(path),
			Args: args,
			Dir:  proto.String(dir),
			Resource: &cmd.ComputeResource{
				CpuCount: proto.Int32(int32(allocated.CPUCount)),
				CpuLevel: proto.Int32(int32(allocated.CPULevel)),
				Memory:   proto.Int32(int32(allocated.MemoryMB)),
			},
			Envs:     envs,
			Host:     proto.String(host),
			Port:     proto.Int32(port),
			HashCode: proto.Uint32(0),
		},
	}

	// generate a unique hash code for the request
	data, err := proto.Marshal(request)
	if err != nil {
		log.Fatalf("marshaling start request error: %v", err)
		return nil
	}
	request.StartRequest.HashCode = proto.Uint32(uint32(util.Hash(data)))

	return request
}
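A receiver can verify the hash code by mirroring these steps: remember the received value, reset HashCode to zero, re-marshal, and recompute. A sketch under the assumption that the same util.Hash helper is available and that re-marshalling the unchanged message produces the same bytes:
// verifyStartRequest re-derives the hash code of a received request.
// Sketch only: util.Hash is the same hypothetical helper used above.
func verifyStartRequest(msg *cmd.ControlMessage) bool {
	received := msg.StartRequest.GetHashCode()

	// Zero the hash field exactly as the sender did before hashing.
	msg.StartRequest.HashCode = proto.Uint32(0)
	data, err := proto.Marshal(msg)
	if err != nil {
		return false
	}
	// Restore the original value.
	msg.StartRequest.HashCode = proto.Uint32(received)

	return uint32(util.Hash(data)) == received
}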