func writeValueMessage(port int) {
	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%d", port))
	Expect(err).ToNot(HaveOccurred())
	defer conn.Close()

	message := &events.Envelope{
		EventType: events.Envelope_ValueMetric.Enum(),
		Origin:    proto.String("someorigin"),
		ValueMetric: &events.ValueMetric{
			Name:  proto.String("some name"),
			Value: proto.Float64(24.0),
			Unit:  proto.String("some unit"),
		},
	}

	messageBytes, err := proto.Marshal(message)
	Expect(err).ToNot(HaveOccurred())

	// Pad the first 32 bytes of the payload with zeroes.
	// In reality this would be the signature.
	padding := make([]byte, 32)
	payload := append(padding, messageBytes...)
	_, err = conn.Write(payload)
	Expect(err).ToNot(HaveOccurred())
}
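// A minimal sketch of the receiving side of the framing above: the first 32
// bytes are treated as the signature and skipped before unmarshaling the
// Envelope. The function name and the fixed 32-byte signature length are
// assumptions taken from the test helper above, not a confirmed wire format.
func readValueMessage(payload []byte) (*events.Envelope, error) {
	const signatureLength = 32 // assumed from the padding in writeValueMessage
	if len(payload) < signatureLength {
		return nil, fmt.Errorf("payload too short: %d bytes", len(payload))
	}
	var envelope events.Envelope
	if err := proto.Unmarshal(payload[signatureLength:], &envelope); err != nil {
		return nil, err
	}
	return &envelope, nil
}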
func NewFrameworkInfo(user, name string, frameworkId *mesos.FrameworkID) *mesos.FrameworkInfo {
	return &mesos.FrameworkInfo{
		User: proto.String(user),
		Name: proto.String(name),
		Id:   frameworkId,
	}
}
func main() {
	req := command.Read()
	files := req.GetProtoFile()
	files = vanity.FilterFiles(files, vanity.NotInPackageGoogleProtobuf)

	vanity.ForEachFile(files, vanity.TurnOnPopulateAll)
	vanity.ForEachFile(files, vanity.TurnOnGoStringAll)
	vanity.ForEachFile(files, vanity.TurnOnDescriptionAll)

	resp := command.Generate(req)

	// Collect the NewPopulated* constructors generated for the fuzztests package.
	msgs := []string{}
	for _, file := range files {
		if file.GetPackage() == "fuzztests" {
			for _, message := range file.GetMessageType() {
				msgs = append(msgs, "NewPopulated"+message.GetName())
			}
		}
	}

	content := `
// Code generated by protoc-gen-gogopop.
// DO NOT EDIT!

package fuzztests

var popFuncs = []interface{}{
` + strings.Join(msgs, ",\n") + `,
}`

	newFile := &plugin.CodeGeneratorResponse_File{
		Name:    proto.String("./pop.gen.go"),
		Content: proto.String(content),
	}
	resp.File = append(resp.File, newFile)

	command.Write(resp)
}
func createTask(job *Job, offer *mesos.Offer) mesos.TaskInfo {
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("moroccron-task-%d-%s", time.Now().Unix(), job.Id)),
	}
	command_info := job.CreateCommandInfo()
	task := mesos.TaskInfo{
		Name:    proto.String(taskId.GetValue()),
		TaskId:  taskId,
		SlaveId: offer.SlaveId,
		Container: &mesos.ContainerInfo{
			Type:     mesos.ContainerInfo_DOCKER.Enum(),
			Volumes:  nil,
			Hostname: nil,
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   &DOCKER_IMAGE_DEFAULT,
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
		},
		Command:  &command_info,
		Executor: nil,
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", job.CpuResources),
			util.NewScalarResource("mem", job.MemResources),
		},
		//Data: job_json,
	}
	return task
}
// used when the scheduler creates a new TaskInfo for this job.
// TODO: should task creation be in this package?
func (j *Job) CreateCommandInfo() mesos.CommandInfo {
	// FYI we ignore the CommandInfo.Container field. Image information is
	// provided in TaskInfo.Container instead; this will probably change in the future.
	ci := mesos.CommandInfo{
		Shell: proto.Bool(j.Shell),
	}
	if j.Shell {
		// Value is executed by `sh -c 'value'`.
		ci.Value = proto.String(*j.Command)
	} else {
		// Value is the executable; Arguments are varargs passed to it.
		if j.Command != nil {
			ci.Value = proto.String(*j.Command)
		}
		ci.Arguments = j.Arguments
	}

	// Set any environment variables that were passed in. Copy the loop
	// variables before taking their addresses: taking &k/&v directly would
	// leave every entry pointing at the final key/value pair.
	env_vars := make([]*mesos.Environment_Variable, len(j.Environment))
	i := 0
	for k, v := range j.Environment {
		k, v := k, v
		env_vars[i] = &mesos.Environment_Variable{
			Name:  &k,
			Value: &v,
		}
		i++
	}
	ci.Environment = &mesos.Environment{Variables: env_vars}

	return ci
}
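// A minimal sketch of the two CommandInfo modes above, assuming the Job
// fields used here (Shell, Command, Arguments, Environment) match the real
// struct. In shell mode the whole string runs under `sh -c`; otherwise
// Command is the executable and Arguments are passed through as-is.
func exampleCommandInfos() {
	shellCmd := "echo hello && sleep 1"
	shellJob := &Job{
		Shell:   true,
		Command: &shellCmd,
	}
	_ = shellJob.CreateCommandInfo() // Value: "echo hello && sleep 1", Shell: true

	exe := "/usr/bin/env"
	execJob := &Job{
		Shell:       false,
		Command:     &exe,
		Arguments:   []string{"printenv", "PATH"},
		Environment: map[string]string{"FOO": "bar"},
	}
	_ = execJob.CreateCommandInfo() // Value: "/usr/bin/env", Arguments: ["printenv", "PATH"]
}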
// Encode serializes the SnapshotRecoveryRequest to a writer. It returns the
// number of bytes written and any error that may have occurred.
func (req *SnapshotRecoveryRequest) Encode(w io.Writer) (int, error) {
	protoPeers := make([]*protobuf.SnapshotRecoveryRequest_Peer, len(req.Peers))

	for i, peer := range req.Peers {
		protoPeers[i] = &protobuf.SnapshotRecoveryRequest_Peer{
			Name:             proto.String(peer.Name),
			ConnectionString: proto.String(peer.ConnectionString),
		}
	}

	pb := &protobuf.SnapshotRecoveryRequest{
		LeaderName: proto.String(req.LeaderName),
		LastIndex:  proto.Uint64(req.LastIndex),
		LastTerm:   proto.Uint64(req.LastTerm),
		Peers:      protoPeers,
		State:      req.State,
	}
	p, err := proto.Marshal(pb)
	if err != nil {
		return -1, err
	}

	return w.Write(p)
}
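// A minimal sketch of using Encode with an in-memory buffer. The field
// values are made up for illustration, and Peers is left empty to avoid
// assuming the exact peer type.
func exampleEncodeSnapshotRecoveryRequest() ([]byte, error) {
	req := &SnapshotRecoveryRequest{
		LeaderName: "node-1",
		LastIndex:  42,
		LastTerm:   7,
		State:      []byte("snapshot state"),
	}
	var buf bytes.Buffer
	if _, err := req.Encode(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}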
// PrintAsText outputs all metrics in text format.
func (r *Registry) PrintAsText(w io.Writer) error {
	var metricFamily prometheusgo.MetricFamily
	var ret error
	labels := r.getLabels()
	for _, metric := range r.tracked {
		metric.Inspect(func(v interface{}) {
			if ret != nil {
				return
			}
			if prom, ok := v.(PrometheusExportable); ok {
				metricFamily.Reset()
				metricFamily.Name = proto.String(exportedName(metric.GetName()))
				metricFamily.Help = proto.String(metric.GetHelp())
				prom.FillPrometheusMetric(&metricFamily)
				if len(labels) != 0 {
					// Set labels from the registry. We only set one metric in
					// the slice, but loop anyway.
					for _, m := range metricFamily.Metric {
						m.Label = labels
					}
				}
				if l := prom.GetLabels(); len(l) != 0 {
					// Append per-metric labels.
					for _, m := range metricFamily.Metric {
						m.Label = append(m.Label, l...)
					}
				}
				if _, err := expfmt.MetricFamilyToText(w, &metricFamily); err != nil {
					ret = err
				}
			}
		})
	}
	return ret
}
// marshal serializes to a protobuf representation.
func (ni NodeInfo) marshal() *internal.NodeInfo {
	pb := &internal.NodeInfo{}
	pb.ID = proto.Uint64(ni.ID)
	pb.Host = proto.String(ni.Host)
	pb.TCPHost = proto.String(ni.TCPHost)
	return pb
}
func createStartStopMessage(requestId uint64, peerType events.PeerType) *events.Envelope {
	return &events.Envelope{
		Origin:    proto.String("fake-origin-2"),
		EventType: events.Envelope_HttpStartStop.Enum(),
		HttpStartStop: &events.HttpStartStop{
			StartTimestamp: proto.Int64(1),
			StopTimestamp:  proto.Int64(100),
			RequestId: &events.UUID{
				Low:  proto.Uint64(requestId),
				High: proto.Uint64(requestId + 1),
			},
			PeerType:      &peerType,
			Method:        events.Method_GET.Enum(),
			Uri:           proto.String("fake-uri-1"),
			RemoteAddress: proto.String("fake-remote-addr-1"),
			UserAgent:     proto.String("fake-user-agent-1"),
			StatusCode:    proto.Int32(103),
			ContentLength: proto.Int64(104),
			ParentRequestId: &events.UUID{
				Low:  proto.Uint64(2),
				High: proto.Uint64(3),
			},
			ApplicationId: &events.UUID{
				Low:  proto.Uint64(105),
				High: proto.Uint64(106),
			},
			InstanceIndex: proto.Int32(6),
			InstanceId:    proto.String("fake-instance-id-1"),
		},
	}
}
func NewHttpStart(req *http.Request, peerType events.PeerType, requestId *uuid.UUID) *events.HttpStart {
	httpStart := &events.HttpStart{
		Timestamp:     proto.Int64(time.Now().UnixNano()),
		RequestId:     NewUUID(requestId),
		PeerType:      &peerType,
		Method:        events.Method(events.Method_value[req.Method]).Enum(),
		Uri:           proto.String(fmt.Sprintf("%s%s", req.Host, req.URL.Path)),
		RemoteAddress: proto.String(req.RemoteAddr),
		UserAgent:     proto.String(req.UserAgent()),
	}

	if applicationId, err := uuid.ParseHex(req.Header.Get("X-CF-ApplicationID")); err == nil {
		httpStart.ApplicationId = NewUUID(applicationId)
	}

	if instanceIndex, err := strconv.Atoi(req.Header.Get("X-CF-InstanceIndex")); err == nil {
		httpStart.InstanceIndex = proto.Int(instanceIndex)
	}

	if instanceId := req.Header.Get("X-CF-InstanceID"); instanceId != "" {
		httpStart.InstanceId = &instanceId
	}

	return httpStart
}
func TranslateDropsondeToLegacyLogMessage(message []byte) ([]byte, error) {
	var receivedEnvelope events.Envelope
	err := proto.Unmarshal(message, &receivedEnvelope)
	if err != nil {
		return nil, fmt.Errorf("TranslateDropsondeToLegacyLogMessage: Unable to unmarshal bytes as Envelope: %v", err)
	}

	if receivedEnvelope.GetEventType() != events.Envelope_LogMessage {
		return nil, fmt.Errorf("TranslateDropsondeToLegacyLogMessage: Envelope contained %s instead of LogMessage", receivedEnvelope.GetEventType().String())
	}

	logMessage := receivedEnvelope.GetLogMessage()
	if logMessage == nil {
		return nil, fmt.Errorf("TranslateDropsondeToLegacyLogMessage: Envelope's LogMessage was nil: %v", receivedEnvelope)
	}

	messageBytes, err := proto.Marshal(
		&logmessage.LogMessage{
			Message:     logMessage.GetMessage(),
			MessageType: (*logmessage.LogMessage_MessageType)(logMessage.MessageType),
			Timestamp:   proto.Int64(logMessage.GetTimestamp()),
			AppId:       proto.String(logMessage.GetAppId()),
			SourceId:    proto.String(logMessage.GetSourceInstance()),
			SourceName:  proto.String(logMessage.GetSourceType()),
		},
	)
	if err != nil {
		return nil, fmt.Errorf("TranslateDropsondeToLegacyLogMessage: Failed marshalling converted dropsonde message: %v", err)
	}

	return messageBytes, nil
}
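// A minimal sketch of driving the translation above end to end: wrap a
// LogMessage in a dropsonde Envelope, translate it, and decode the legacy
// message. The field values are made up for illustration.
func exampleTranslate() (*logmessage.LogMessage, error) {
	envelope := &events.Envelope{
		Origin:    proto.String("example-origin"),
		EventType: events.Envelope_LogMessage.Enum(),
		LogMessage: &events.LogMessage{
			Message:        []byte("hello"),
			MessageType:    events.LogMessage_OUT.Enum(),
			Timestamp:      proto.Int64(time.Now().UnixNano()),
			AppId:          proto.String("example-app"),
			SourceType:     proto.String("APP"),
			SourceInstance: proto.String("0"),
		},
	}
	envelopeBytes, err := proto.Marshal(envelope)
	if err != nil {
		return nil, err
	}
	legacyBytes, err := TranslateDropsondeToLegacyLogMessage(envelopeBytes)
	if err != nil {
		return nil, err
	}
	var legacy logmessage.LogMessage
	if err := proto.Unmarshal(legacyBytes, &legacy); err != nil {
		return nil, err
	}
	return &legacy, nil
}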
func prepareExecutorInfo(id string) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: execUri, Executable: proto.Bool(true)})

	// Forward the value of the scheduler's -v flag to the executor.
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d", execCmd, v)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Describe the executor that will run the tasks.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
	}
}
// Super-useful utility func that attempts to build a mesos.MasterInfo from
// an upid.UPID specification. An attempt is made to determine the IP address
// of the UPID's Host, and any errors during such resolution result in a nil
// returned result. A nil result is also returned upon errors parsing the
// Port specification of the UPID.
//
// TODO(jdef) make this a func of upid.UPID so that callers can invoke somePid.MasterInfo()?
func CreateMasterInfo(pid *upid.UPID) *mesos.MasterInfo {
	if pid == nil {
		return nil
	}
	port, err := strconv.Atoi(pid.Port)
	if err != nil {
		log.Errorf("failed to parse port: %v", err)
		return nil
	}
	//TODO(jdef) what about (future) ipv6 support?
	var ipv4 net.IP
	if addrs, err := net.LookupIP(pid.Host); err == nil {
		for _, ip := range addrs {
			if ip = ip.To4(); ip != nil {
				ipv4 = ip
				break
			}
		}
		if ipv4 == nil {
			log.Errorf("host does not resolve to an IPv4 address: %v", pid.Host)
			return nil
		}
	} else {
		log.Errorf("failed to lookup IPs for host '%v': %v", pid.Host, err)
		return nil
	}
	packedip := binary.BigEndian.Uint32(ipv4) // network byte order is big-endian
	mi := util.NewMasterInfo(pid.ID, packedip, uint32(port))
	mi.Pid = proto.String(pid.String())
	if pid.Host != "" {
		mi.Hostname = proto.String(pid.Host)
	}
	return mi
}
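// A minimal sketch of building a MasterInfo from a UPID string; the
// "master@..." address below is made up for illustration.
func exampleCreateMasterInfo() {
	pid, err := upid.Parse("master@127.0.0.1:5050")
	if err != nil {
		log.Errorf("failed to parse UPID: %v", err)
		return
	}
	if mi := CreateMasterInfo(pid); mi != nil {
		// The IPv4 address is packed into a uint32 in network byte order.
		log.V(2).Infof("master id=%s ip=%d port=%d", mi.GetId(), mi.GetIp(), mi.GetPort())
	}
}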
func prepareExecutorInfo(gt net.Addr) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri := serveSelf()
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// Forward the value of the scheduler's -v flag to the executor.
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	nodeCommand := fmt.Sprintf("./executor -logtostderr=true -v=%d -node -tracerAddr %s", v, gt.String())
	log.V(2).Info("nodeCommand: ", nodeCommand)

	// Describe the executor that will run the tasks.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("visghs-node"),
		Source:     proto.String("visghs"),
		Command: &mesos.CommandInfo{
			Value: proto.String(nodeCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
func (self *manager) Start() error {
	if !self.verifyConnectionWithMesos() {
		return errors.New("Mesos unreachable")
	}

	self.frameworkInfo.User = proto.String("root")

	// Set a default hostname if none was configured.
	if self.frameworkInfo.GetHostname() == "" {
		host, err := os.Hostname()
		if err != nil || host == "" {
			host = "unknown"
		}
		self.frameworkInfo.Hostname = proto.String(host)
	}

	m, err := upid.Parse("master@" + self.master)
	if err != nil {
		return err
	}
	self.masterUPID = m

	self.selfUPID = &upid.UPID{
		ID:   "scheduler",
		Host: self.GetListenerIP(),
		Port: fmt.Sprintf("%d", self.GetListenerPortForScheduler()),
	}

	communication.InitRestHandler(self)
	redis.InitRedisUpdater(self)
	self.announceFramework()

	return nil
}
func (d *LauncherData) terminate(row *RunQueueEntry, action string) {
	if d.killedRecently[row.Id] {
		return
	}

	if action == KILL_ACTION_NO_ACTION {
		d.call(&badoo_phproxyd.RequestTerminate{Hash: proto.Uint64(row.Id)})
	} else {
		params := []string{
			`\ScriptFramework\Script_Kill`,
			fmt.Sprintf("--force-sf-db=%s", db.GetDbName()),
			fmt.Sprintf("--kill-run-id=%d", row.Id),
			fmt.Sprintf("--kill-action=%s", action),
			fmt.Sprintf("--kill-class-name=%s", row.ClassName),
			fmt.Sprintf("--kill-timetable-id=%d", row.timetable_id.Int64),
		}

		d.call(&badoo_phproxyd.RequestRun{
			Script:       proto.String(getScriptPath(row.settings)),
			Hash:         proto.Uint64(0),
			Tag:          proto.String(PHPROXY_TAG),
			Force:        proto.Int32(1),
			Params:       params,
			Store:        badoo_phproxyd.StoreT_MEMORY.Enum(),
			FreeAfterRun: proto.Bool(true),
		})
	}

	d.killedRecently[row.Id] = true
}
// encodeFileSnapshot encodes a snapshot file into a protobuf object.
func encodeFileSnapshot(f *fileSnapshot) *internal.FileSnapshot {
	return &internal.FileSnapshot{
		Name:    proto.String(f.name),
		Hash:    proto.String(f.hash),
		Content: proto.String(f.content),
	}
}
// encodeTargetSnapshot encodes a snapshot target into a protobuf object.
func encodeTargetSnapshot(t *targetSnapshot) *internal.TargetSnapshot {
	return &internal.TargetSnapshot{
		Name:   proto.String(t.name),
		Hash:   proto.String(t.hash),
		Inputs: encodeFileSnapshots(t.inputs),
	}
}
func sendError(socket *zmq.Socket, req *Request, err error) {
	// Response envelope.
	resp := &Response{
		Error: &Response_Error{},
	}

	if req != nil {
		resp.UUID = req.UUID
	}

	// If the error is a zrpc error, propagate its status code and message.
	if zrpcErr, ok := err.(zrpcError); ok {
		resp.StatusCode = proto.Uint32(uint32(zrpcErr.GetStatusCode()))
		resp.Error.Message = proto.String(zrpcErr.GetMessage())
	} else {
		// Default to internal error.
		resp.StatusCode = proto.Uint32(uint32(http.StatusInternalServerError))
		resp.Error.Message = proto.String(err.Error())
	}

	// Encode the response.
	buf, protoErr := proto.Marshal(resp)
	if protoErr != nil {
		glog.Error(protoErr)
		return
	}

	// Send the response.
	if _, err := socket.SendBytes(buf, 0); err != nil {
		glog.Error(err)
	}
}
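// A minimal sketch of the interface the type assertion above implies: any
// error carrying an explicit status code and message. The shape is inferred
// from the GetStatusCode/GetMessage calls, not taken from the zrpc source;
// the status code's concrete type is assumed since the caller converts it.
type zrpcError interface {
	error
	GetStatusCode() int // assumed; sendError converts the result to uint32
	GetMessage() string
}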
func NewHttpStartStop(req *http.Request, statusCode int, contentLength int64, peerType events.PeerType, requestId *uuid.UUID) *events.HttpStartStop {
	now := proto.Int64(time.Now().UnixNano())
	httpStartStop := &events.HttpStartStop{
		StartTimestamp: now,
		StopTimestamp:  now,
		RequestId:      NewUUID(requestId),
		PeerType:       &peerType,
		Method:         events.Method(events.Method_value[req.Method]).Enum(),
		Uri:            proto.String(fmt.Sprintf("%s://%s%s", scheme(req), req.Host, req.URL.Path)),
		RemoteAddress:  proto.String(req.RemoteAddr),
		UserAgent:      proto.String(req.UserAgent()),
		StatusCode:     proto.Int(statusCode),
		ContentLength:  proto.Int64(contentLength),
	}

	if applicationId, err := uuid.ParseHex(req.Header.Get("X-CF-ApplicationID")); err == nil {
		httpStartStop.ApplicationId = NewUUID(applicationId)
	}

	if instanceIndex, err := strconv.Atoi(req.Header.Get("X-CF-InstanceIndex")); err == nil {
		httpStartStop.InstanceIndex = proto.Int(instanceIndex)
	}

	if instanceId := req.Header.Get("X-CF-InstanceID"); instanceId != "" {
		httpStartStop.InstanceId = proto.String(instanceId)
	}

	allForwards := req.Header[http.CanonicalHeaderKey("X-Forwarded-For")]
	for _, forwarded := range allForwards {
		httpStartStop.Forwarded = append(httpStartStop.Forwarded, parseXForwarded(forwarded)...)
	}

	return httpStartStop
}
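// A minimal sketch of emitting an HttpStartStop from an HTTP handler. The
// fixed status code and zero content length stand in for values a recording
// ResponseWriter would capture, and the final hand-off to an emitter is
// elided since the transport is package-specific.
func instrumentedHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requestId, err := uuid.NewV4()
		if err != nil {
			http.Error(w, "could not generate request id", http.StatusInternalServerError)
			return
		}
		next.ServeHTTP(w, r)
		event := NewHttpStartStop(r, http.StatusOK, 0, events.PeerType_Server, requestId)
		_ = event // hand off to the dropsonde emitter in real code
	})
}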
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// Forward the value of the scheduler's -v flag to the executor.
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d -slow_tasks=%v", executorCmd, v, *slowTasks)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Describe the executor that will run the tasks.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
// UpdateRetentionPolicy updates an existing retention policy.
func (s *Store) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error {
	var newName *string
	if rpu.Name != nil {
		newName = rpu.Name
	}

	var duration *int64
	if rpu.Duration != nil {
		value := int64(*rpu.Duration)
		duration = &value
	}

	var replicaN *uint32
	if rpu.ReplicaN != nil {
		value := uint32(*rpu.ReplicaN)
		replicaN = &value
	}

	return s.exec(internal.Command_UpdateRetentionPolicyCommand, internal.E_UpdateRetentionPolicyCommand_Command,
		&internal.UpdateRetentionPolicyCommand{
			Database: proto.String(database),
			Name:     proto.String(name),
			NewName:  newName,
			Duration: duration,
			ReplicaN: replicaN,
		},
	)
}
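// A minimal sketch of invoking the update above. The database, policy name,
// and values are made up, and the RetentionPolicyUpdate field types (pointers
// to string, time.Duration, and int) are assumed from the conversions the
// method performs. Nil fields are left unchanged by the update.
func exampleUpdateRetentionPolicy(s *Store) error {
	newName := "one_week"
	duration := 7 * 24 * time.Hour
	replicaN := 2
	return s.UpdateRetentionPolicy("mydb", "default", &RetentionPolicyUpdate{
		Name:     &newName,
		Duration: &duration,
		ReplicaN: &replicaN,
	})
}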
func basicValueMetric(name string, value float64, unit string) *events.ValueMetric {
	return &events.ValueMetric{
		Name:  proto.String(name),
		Value: proto.Float64(value),
		Unit:  proto.String(unit),
	}
}
// AddLabel adds a label/value pair for this metric.
func (m *Metadata) AddLabel(name, value string) {
	m.labels = append(m.labels, &prometheusgo.LabelPair{
		Name:  proto.String(exportedLabel(name)),
		Value: proto.String(value),
	})
}
func (c *Client) DropContinuousQuery(database, name string) error {
	return c.retryUntilExec(internal.Command_DropContinuousQueryCommand, internal.E_DropContinuousQueryCommand_Command,
		&internal.DropContinuousQueryCommand{
			Database: proto.String(database),
			Name:     proto.String(name),
		},
	)
}
// DropContinuousQuery removes a continuous query from the store.
func (s *Store) DropContinuousQuery(database, name string) error {
	return s.exec(internal.Command_DropContinuousQueryCommand, internal.E_DropContinuousQueryCommand_Command,
		&internal.DropContinuousQueryCommand{
			Database: proto.String(database),
			Name:     proto.String(name),
		},
	)
}
// DropRetentionPolicy removes a policy from a database by name.
func (s *Store) DropRetentionPolicy(database, name string) error {
	return s.exec(internal.Command_DropRetentionPolicyCommand, internal.E_DropRetentionPolicyCommand_Command,
		&internal.DropRetentionPolicyCommand{
			Database: proto.String(database),
			Name:     proto.String(name),
		},
	)
}
func addDefaultTags(envelope *events.Envelope) *events.Envelope {
	envelope.Deployment = proto.String("deployment-name")
	envelope.Job = proto.String("test-component")
	envelope.Index = proto.String("42")
	envelope.Ip = proto.String(localIPAddress)
	return envelope
}
func NewError(source string, code int32, message string) *events.Error {
	err := &events.Error{
		Source:  proto.String(source),
		Code:    proto.Int32(code),
		Message: proto.String(message),
	}
	return err
}