// NewBoolTerminal is a parser utility function that returns a Terminal of type bool given a bool.
func NewBoolTerminal(v interface{}) *Terminal {
	b := v.(bool)
	if b {
		return &Terminal{BoolValue: proto.Bool(b), Literal: "true"}
	}
	return &Terminal{BoolValue: proto.Bool(b), Literal: "false"}
}
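// A minimal usage sketch for NewBoolTerminal (illustrative only; it assumes the
// Terminal type defined above and the proto import from the surrounding package):
//
//	boolTerm := NewBoolTerminal(true) // -> &Terminal{BoolValue: proto.Bool(true), Literal: "true"}
//	_ = boolTerm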
func (r *rpc) handlePromoteRaftRequest(req *internal.PromoteRaftRequest) (*internal.PromoteRaftResponse, error) {
	r.traceCluster("promote raft request from: %v", *req.Addr)

	// Need to set the local store peers to match what we are about to join
	if err := r.store.SetPeers(req.RaftNodes); err != nil {
		return nil, err
	}

	if err := r.store.enableLocalRaft(); err != nil {
		return nil, err
	}

	if !contains(req.RaftNodes, *req.Addr) {
		req.RaftNodes = append(req.RaftNodes, *req.Addr)
	}

	if err := r.store.SetPeers(req.RaftNodes); err != nil {
		return nil, err
	}

	return &internal.PromoteRaftResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		Success: proto.Bool(true),
	}, nil
}
// handleJoinRequest handles a request to join the cluster
func (r *rpc) handleJoinRequest(req *internal.JoinRequest) (*internal.JoinResponse, error) {
	r.traceCluster("join request from: %v", *req.Addr)

	node, err := func() (*NodeInfo, error) {
		// attempt to create the node
		node, err := r.store.CreateNode(*req.Addr)
		// if it exists, return the existing node
		if err == ErrNodeExists {
			node, err = r.store.NodeByHost(*req.Addr)
			if err != nil {
				return node, err
			}
			r.logger.Printf("existing node re-joined: id=%v addr=%v", node.ID, node.Host)
		} else if err != nil {
			return nil, fmt.Errorf("create node: %v", err)
		}

		peers, err := r.store.Peers()
		if err != nil {
			return nil, fmt.Errorf("list peers: %v", err)
		}

		// If we have less than 3 nodes, add them as raft peers if they are not
		// already a peer
		if len(peers) < MaxRaftNodes && !raft.PeerContained(peers, *req.Addr) {
			r.logger.Printf("adding new raft peer: nodeId=%v addr=%v", node.ID, *req.Addr)
			if err = r.store.AddPeer(*req.Addr); err != nil {
				return node, fmt.Errorf("add peer: %v", err)
			}
		}

		return node, err
	}()

	nodeID := uint64(0)
	if node != nil {
		nodeID = node.ID
	}

	if err != nil {
		return nil, err
	}

	// get the current raft peers
	peers, err := r.store.Peers()
	if err != nil {
		return nil, fmt.Errorf("list peers: %v", err)
	}

	return &internal.JoinResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		EnableRaft: proto.Bool(raft.PeerContained(peers, *req.Addr)),
		RaftNodes:  peers,
		NodeID:     proto.Uint64(nodeID),
	}, err
}
func prepareExecutorInfo() *mesos.ExecutorInfo {
	containerType := mesos.ContainerInfo_DOCKER
	containerNetwork := mesos.ContainerInfo_DockerInfo_HOST
	vcapDataVolumeMode := mesos.Volume_RW

	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("diego-executor"),
		Name:       proto.String("Diego Executor"),
		Source:     proto.String("diego-executor"),
		Container: &mesos.ContainerInfo{
			Type: &containerType,
			Volumes: []*mesos.Volume{
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/var/vcap/data"),
					HostPath:      proto.String("data"),
				},
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/var/vcap/sys/log"),
					HostPath:      proto.String("log"),
				},
				&mesos.Volume{
					Mode:          &vcapDataVolumeMode,
					ContainerPath: proto.String("/sys/fs/cgroup"),
					HostPath:      proto.String("/sys/fs/cgroup"),
				},
			},
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:          executorImage,
				Network:        &containerNetwork,
				Privileged:     proto.Bool(true),
				ForcePullImage: proto.Bool(true),
			},
		},
		Command: &mesos.CommandInfo{
			Environment: &mesos.Environment{
				Variables: []*mesos.Environment_Variable{
					&mesos.Environment_Variable{
						Name:  proto.String("CONSUL_SERVER"),
						Value: consulServer,
					},
					&mesos.Environment_Variable{
						Name:  proto.String("ETCD_URL"),
						Value: etcdUrl,
					},
				},
			},
			Shell:     proto.Bool(false),
			User:      proto.String("root"),
			Value:     proto.String("/executor"),
			Arguments: []string{"-logtostderr=true"},
		},
	}
}
func encodeBooleanPoint(p *BooleanPoint) *internal.Point {
	return &internal.Point{
		Name:         proto.String(p.Name),
		Tags:         proto.String(p.Tags.ID()),
		Time:         proto.Int64(p.Time),
		Nil:          proto.Bool(p.Nil),
		Aux:          encodeAux(p.Aux),
		BooleanValue: proto.Bool(p.Value),
	}
}
func TestComposeListBool(t *testing.T) {
	expr := &ast.Expr{
		Function: &ast.Function{
			Name: "eq",
			Params: []*ast.Expr{
				{
					Function: &ast.Function{
						Name: "length",
						Params: []*ast.Expr{
							{
								List: &ast.List{
									Type: types.LIST_BOOL,
									Elems: []*ast.Expr{
										{
											Terminal: &ast.Terminal{
												BoolValue: proto.Bool(true),
											},
										},
										{
											Terminal: &ast.Terminal{
												BoolValue: proto.Bool(false),
											},
										},
									},
								},
							},
						},
					},
				},
				{
					Terminal: &ast.Terminal{
						Int64Value: proto.Int64(2),
					},
				},
			},
		},
	}
	b, err := NewBool(expr)
	if err != nil {
		panic(err)
	}
	r, err := b.Eval(nil)
	if err != nil {
		panic(err)
	}
	if r != true {
		t.Fatalf("expected true")
	}
	str := funcs.Sprint(b.Func)
	if str != "true" {
		t.Fatalf("trimming did not work: %s", str)
	}
}
func buildURIs(task eremetic.Task) []*mesosproto.CommandInfo_URI {
	var uris []*mesosproto.CommandInfo_URI
	for _, v := range task.FetchURIs {
		uris = append(uris, &mesosproto.CommandInfo_URI{
			Value:      proto.String(v.URI),
			Extract:    proto.Bool(v.Extract),
			Executable: proto.Bool(v.Executable),
			Cache:      proto.Bool(v.Cache),
		})
	}

	return uris
}
// tryAuthentication expects to be guarded by eventLock
func (driver *MesosSchedulerDriver) tryAuthentication() {
	if driver.authenticated {
		// programming error
		panic("already authenticated")
	}

	masterPid := driver.masterPid // save for referencing later in goroutine
	if masterPid == nil {
		log.Info("skipping authentication attempt because we lost the master")
		return
	}

	if driver.authenticating.inProgress() {
		// authentication is in progress, try to cancel it (we may be too late already)
		driver.authenticating.cancel()
		driver.reauthenticate = true
		return
	}

	if driver.credential != nil {
		// authentication can block and we don't want to hold up the messenger loop
		authenticating := &authenticationAttempt{done: make(chan struct{})}
		go func() {
			defer authenticating.cancel()
			result := &mesos.InternalAuthenticationResult{
				//TODO(jdef): is this really needed?
				Success:   proto.Bool(false),
				Completed: proto.Bool(false),
				Pid:       proto.String(masterPid.String()),
			}
			// don't reference driver.authenticating here since it may have changed
			if err := driver.authenticate(masterPid, authenticating); err != nil {
				log.Errorf("Scheduler failed to authenticate: %v\n", err)
				if err == auth.AuthenticationFailed {
					result.Completed = proto.Bool(true)
				}
			} else {
				result.Completed = proto.Bool(true)
				result.Success = proto.Bool(true)
			}
			pid := driver.messenger.UPID()
			driver.messenger.Route(context.TODO(), &pid, result)
		}()
		driver.authenticating = authenticating
	} else {
		log.Infoln("No credentials were provided. " +
			"Attempting to register scheduler without authentication.")
		driver.authenticated = true
		go driver.doReliableRegistration(float64(registrationBackoffFactor))
	}
}
func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions {
	pb := &internal.IteratorOptions{
		Interval:   encodeInterval(opt.Interval),
		Dimensions: opt.Dimensions,
		Fill:       proto.Int32(int32(opt.Fill)),
		StartTime:  proto.Int64(opt.StartTime),
		EndTime:    proto.Int64(opt.EndTime),
		Ascending:  proto.Bool(opt.Ascending),
		Limit:      proto.Int64(int64(opt.Limit)),
		Offset:     proto.Int64(int64(opt.Offset)),
		SLimit:     proto.Int64(int64(opt.SLimit)),
		SOffset:    proto.Int64(int64(opt.SOffset)),
		Dedupe:     proto.Bool(opt.Dedupe),
	}

	// Set expression, if set.
	if opt.Expr != nil {
		pb.Expr = proto.String(opt.Expr.String())
	}

	// Convert and encode aux fields as variable references.
	pb.Fields = make([]*internal.VarRef, len(opt.Aux))
	pb.Aux = make([]string, len(opt.Aux))
	for i, ref := range opt.Aux {
		pb.Fields[i] = encodeVarRef(ref)
		pb.Aux[i] = ref.Val
	}

	// Convert and encode sources to measurements.
	sources := make([]*internal.Measurement, len(opt.Sources))
	for i, source := range opt.Sources {
		mm := source.(*Measurement)
		sources[i] = encodeMeasurement(mm)
	}
	pb.Sources = sources

	// Fill value can only be a number. Set it if available.
	if v, ok := opt.FillValue.(float64); ok {
		pb.FillValue = proto.Float64(v)
	}

	// Set condition, if set.
	if opt.Condition != nil {
		pb.Condition = proto.String(opt.Condition.String())
	}

	return pb
}
func TestUnmarshalPartiallyPopulatedOptionalFieldsFails(t *testing.T) {
	// Fill in all fields, then randomly remove one.
	dataOut := &test.NinOptNative{
		Field1:  proto.Float64(0),
		Field2:  proto.Float32(0),
		Field3:  proto.Int32(0),
		Field4:  proto.Int64(0),
		Field5:  proto.Uint32(0),
		Field6:  proto.Uint64(0),
		Field7:  proto.Int32(0),
		Field8:  proto.Int64(0),
		Field9:  proto.Uint32(0),
		Field10: proto.Int32(0),
		Field11: proto.Uint64(0),
		Field12: proto.Int64(0),
		Field13: proto.Bool(false),
		Field14: proto.String("0"),
		Field15: []byte("0"),
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	fieldName := "Field" + strconv.Itoa(r.Intn(15)+1)
	field := reflect.ValueOf(dataOut).Elem().FieldByName(fieldName)
	fieldType := field.Type()
	field.Set(reflect.Zero(fieldType))
	encodedMessage, err := proto.Marshal(dataOut)
	if err != nil {
		t.Fatalf("Unexpected error when marshalling dataOut: %v", err)
	}
	dataIn := NidOptNative{}
	err = proto.Unmarshal(encodedMessage, &dataIn)
	if err.Error() != `proto: required field "`+fieldName+`" not set` {
		t.Fatalf(`err.Error() != "proto: required field "`+fieldName+`" not set"; was "%s" instead`, err.Error())
	}
}
func TestComposeNot(t *testing.T) {
	expr := &ast.Expr{
		Function: &ast.Function{
			Name: "not",
			Params: []*ast.Expr{
				{
					Terminal: &ast.Terminal{
						BoolValue: proto.Bool(false),
					},
				},
			},
		},
	}
	b, err := NewBool(expr)
	if err != nil {
		panic(err)
	}
	f, err := NewBoolFunc(b)
	if err != nil {
		panic(err)
	}
	r, err := f.Eval(nil)
	if err != nil {
		panic(err)
	}
	if r != true {
		t.Fatalf("expected true")
	}
	str := funcs.Sprint(f.(*composedBool).Func)
	if str != "->true" {
		t.Fatalf("trimming did not work: %s", str)
	}
}
// CreateCommandInfo is used when the scheduler creates a new TaskInfo for this job.
// TODO: should task creation be in this package?
func (j *Job) CreateCommandInfo() mesos.CommandInfo {
	// FYI we ignore the CommandInfo.Container field. Image information is
	// provided in the TaskInfo.Container instead; this will probably change
	// in the future.
	ci := mesos.CommandInfo{
		Shell: proto.Bool(j.Shell),
	}
	if j.Shell {
		// value is executed by sh -c 'value'
		ci.Value = proto.String(*j.Command)
	} else {
		// value is the executable, arguments are vararg passed to it
		if j.Command != nil {
			ci.Value = proto.String(*j.Command)
		}
		ci.Arguments = j.Arguments
	}

	// set any environment variables that were passed in
	envVars := make([]*mesos.Environment_Variable, len(j.Environment))
	i := 0
	for k, v := range j.Environment {
		// copy the loop variables before taking their addresses; otherwise every
		// entry would point at the same (final) key and value
		k, v := k, v
		envVars[i] = &mesos.Environment_Variable{
			Name:  &k,
			Value: &v,
		}
		i++
	}
	ci.Environment = &mesos.Environment{Variables: envVars}
	return ci
}
func (d *LauncherData) terminate(row *RunQueueEntry, action string) {
	if d.killedRecently[row.Id] {
		return
	}

	if action == KILL_ACTION_NO_ACTION {
		d.call(&badoo_phproxyd.RequestTerminate{Hash: proto.Uint64(row.Id)})
	} else {
		params := []string{
			`\ScriptFramework\Script_Kill`,
			fmt.Sprintf("--force-sf-db=%s", db.GetDbName()),
			fmt.Sprintf("--kill-run-id=%d", row.Id),
			fmt.Sprintf("--kill-action=%s", action),
			fmt.Sprintf("--kill-class-name=%s", row.ClassName),
			fmt.Sprintf("--kill-timetable-id=%d", row.timetable_id.Int64),
		}

		d.call(&badoo_phproxyd.RequestRun{
			Script:       proto.String(getScriptPath(row.settings)),
			Hash:         proto.Uint64(0),
			Tag:          proto.String(PHPROXY_TAG),
			Force:        proto.Int32(1),
			Params:       params,
			Store:        badoo_phproxyd.StoreT_MEMORY.Enum(),
			FreeAfterRun: proto.Bool(true),
		})
	}

	d.killedRecently[row.Id] = true
}
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
func marshalWay(way *Way, ss *stringSet, includeChangeset bool) *osmpb.Way {
	keys, vals := way.Tags.keyValues(ss)
	encoded := &osmpb.Way{
		Id:   int64(way.ID),
		Keys: keys,
		Vals: vals,
		Info: &osmpb.Info{
			Version:   int32(way.Version),
			Timestamp: timeToUnix(way.Timestamp),
			Visible:   proto.Bool(way.Visible),
		},
	}

	if len(way.Nodes) > 0 {
		// legacy simple refs encoding.
		if way.Nodes[0].Version == 0 {
			encoded.Refs = encodeWayNodes(way.Nodes)
		} else {
			encoded.DenseMembers = encodeDenseWayNodes(way.Nodes)
		}
	}

	if len(way.Minors) > 0 {
		encoded.MinorVersion = encodeMinorWays(way.Minors)
	}

	if includeChangeset {
		encoded.Info.ChangesetId = int64(way.ChangesetID)
		encoded.Info.UserId = int32(way.UserID)
		encoded.Info.UserSid = ss.Add(way.User)
	}

	return encoded
}
func prepareExecutorInfo() *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri, executorCmd := serveExecutorArtifact(*executorPath)
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	executorCommand := fmt.Sprintf("./%s -logtostderr=true -v=%d -slow_tasks=%v", executorCmd, v, *slowTasks)

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.V(2).Info("Serving executor artifacts...")

	// Build the executor info used by the mesos scheduler driver.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("Test Executor (Go)"),
		Source:     proto.String("go_test"),
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
func encodeAux(aux []interface{}) []*internal.Aux {
	pb := make([]*internal.Aux, len(aux))
	for i := range aux {
		switch v := aux[i].(type) {
		case float64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Float), FloatValue: proto.Float64(v)}
		case *float64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Float)}
		case int64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Integer), IntegerValue: proto.Int64(v)}
		case *int64:
			pb[i] = &internal.Aux{DataType: proto.Int32(Integer)}
		case string:
			pb[i] = &internal.Aux{DataType: proto.Int32(String), StringValue: proto.String(v)}
		case *string:
			pb[i] = &internal.Aux{DataType: proto.Int32(String)}
		case bool:
			pb[i] = &internal.Aux{DataType: proto.Int32(Boolean), BooleanValue: proto.Bool(v)}
		case *bool:
			pb[i] = &internal.Aux{DataType: proto.Int32(Boolean)}
		default:
			pb[i] = &internal.Aux{DataType: proto.Int32(int32(Unknown))}
		}
	}
	return pb
}
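// A hedged sketch of calling encodeAux; the slice literal is illustrative, and the
// expected DataType constants are the ones used in the switch above:
//
//	aux := encodeAux([]interface{}{3.14, int64(7), "host", true})
//	// aux[0].DataType -> Float, aux[1].DataType -> Integer,
//	// aux[2].DataType -> String, aux[3].DataType -> Boolean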
func TestMarshalRace(t *testing.T) {
	// unregistered extension
	desc := &proto.ExtensionDesc{
		ExtendedType:  (*pb.MyMessage)(nil),
		ExtensionType: (*bool)(nil),
		Field:         101010100,
		Name:          "emptyextension",
		Tag:           "varint,0,opt",
	}

	m := &pb.MyMessage{Count: proto.Int32(4)}
	if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
		t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
	}

	errChan := make(chan error, 3)
	for n := 3; n > 0; n-- {
		go func() {
			_, err := proto.Marshal(m)
			errChan <- err
		}()
	}
	for i := 0; i < 3; i++ {
		err := <-errChan
		if err != nil {
			t.Fatal(err)
		}
	}
}
// handleFetchData handles a request for the current node's meta data
func (r *rpc) handleFetchData(req *internal.FetchDataRequest) (*internal.FetchDataResponse, error) {
	var (
		b    []byte
		data *Data
		err  error
	)

	for {
		data = r.store.cachedData()
		if data.Index != req.GetIndex() {
			b, err = data.MarshalBinary()
			if err != nil {
				return nil, err
			}
			break
		}

		if !req.GetBlocking() {
			break
		}

		if err := r.store.WaitForDataChanged(); err != nil {
			return nil, err
		}
	}

	return &internal.FetchDataResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		Index: proto.Uint64(data.Index),
		Term:  proto.Uint64(data.Term),
		Data:  b,
	}, nil
}
func prepareExecutorInfo(gt net.Addr) *mesos.ExecutorInfo {
	executorUris := []*mesos.CommandInfo_URI{}
	uri := serveSelf()
	executorUris = append(executorUris, &mesos.CommandInfo_URI{Value: uri, Executable: proto.Bool(true)})

	// forward the value of the scheduler's -v flag to the executor
	v := 0
	if f := flag.Lookup("v"); f != nil && f.Value != nil {
		if vstr := f.Value.String(); vstr != "" {
			if vi, err := strconv.ParseInt(vstr, 10, 32); err == nil {
				v = int(vi)
			}
		}
	}
	nodeCommand := fmt.Sprintf("./executor -logtostderr=true -v=%d -node -tracerAddr %s", v, gt.String())
	log.V(2).Info("nodeCommand: ", nodeCommand)

	// Build the executor info used by the mesos scheduler driver.
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID("default"),
		Name:       proto.String("visghs-node"),
		Source:     proto.String("visghs"),
		Command: &mesos.CommandInfo{
			Value: proto.String(nodeCommand),
			Uris:  executorUris,
		},
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", CPUS_PER_EXECUTOR),
			util.NewScalarResource("mem", MEM_PER_EXECUTOR),
		},
	}
}
// handleJoinRequest handles a request to join the cluster
func (r *rpc) handleJoinRequest(req *internal.JoinRequest) (*internal.JoinResponse, error) {
	r.traceCluster("join request from: %v", *req.Addr)

	node, err := func() (*NodeInfo, error) {
		// attempt to create the node
		node, err := r.store.CreateNode(*req.Addr)
		// if it exists, return the existing node
		if err == ErrNodeExists {
			return r.store.NodeByHost(*req.Addr)
		} else if err != nil {
			return nil, fmt.Errorf("create node: %v", err)
		}

		// FIXME: jwilder: adding raft nodes is tricky since going
		// from 1 node (leader) to two kills the cluster because
		// quorum is lost after adding the second node. For now,
		// can only add non-raft enabled nodes

		// If we have less than 3 nodes, add them as raft peers
		// if len(r.store.Peers()) < MaxRaftNodes {
		// 	if err = r.store.AddPeer(*req.Addr); err != nil {
		// 		return node, fmt.Errorf("add peer: %v", err)
		// 	}
		// }
		return node, err
	}()

	nodeID := uint64(0)
	if node != nil {
		nodeID = node.ID
	}

	if err != nil {
		return nil, err
	}

	return &internal.JoinResponse{
		Header: &internal.ResponseHeader{
			OK: proto.Bool(true),
		},
		//EnableRaft: proto.Bool(contains(r.store.Peers(), *req.Addr)),
		EnableRaft: proto.Bool(false),
		RaftNodes:  r.store.Peers(),
		NodeID:     proto.Uint64(nodeID),
	}, err
}
func createAccount(conn *transport.Conn, inBuf []byte, outBuf []byte, t *testing.T) {
	command := &proto.ClientToServer{
		CreateAccount: protobuf.Bool(true),
	}
	writeProtobuf(conn, outBuf, command, t)
	receiveProtobuf(conn, inBuf, t)
}
func buildCommandInfo(task eremetic.Task, env *mesosproto.Environment) *mesosproto.CommandInfo {
	commandInfo := &mesosproto.CommandInfo{
		User:        proto.String(task.User),
		Environment: env,
		Uris:        buildURIs(task),
	}

	if task.Command != "" {
		commandInfo.Shell = proto.Bool(true)
		commandInfo.Value = &task.Command
	} else {
		commandInfo.Shell = proto.Bool(false)
		commandInfo.Arguments = task.Args
	}

	return commandInfo
}
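// A hedged usage sketch for buildCommandInfo; the eremetic.Task literal below is
// illustrative and only sets fields referenced by the function above:
//
//	cmd := buildCommandInfo(eremetic.Task{Command: "echo hello"}, &mesosproto.Environment{})
//	// cmd.GetShell() -> true, cmd.GetValue() -> "echo hello"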
// SetAdminPrivilege sets the admin privilege for a user.
func (s *Store) SetAdminPrivilege(username string, admin bool) error {
	return s.exec(internal.Command_SetAdminPrivilegeCommand, internal.E_SetAdminPrivilegeCommand_Command,
		&internal.SetAdminPrivilegeCommand{
			Username: proto.String(username),
			Admin:    proto.Bool(admin),
		},
	)
}
// SetAdminPrivilege sets the admin privilege for a user.
func (c *Client) SetAdminPrivilege(username string, admin bool) error {
	return c.retryUntilExec(internal.Command_SetAdminPrivilegeCommand, internal.E_SetAdminPrivilegeCommand_Command,
		&internal.SetAdminPrivilegeCommand{
			Username: proto.String(username),
			Admin:    proto.Bool(admin),
		},
	)
}
func newTestMessage() *pb.MyMessage {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
		Quote: proto.String(`"I didn't want to go."`),
		Pet:   []string{"bunny", "kitty", "horsey"},
		Inner: &pb.InnerMessage{
			Host:      proto.String("footrest.syd"),
			Port:      proto.Int32(7001),
			Connected: proto.Bool(true),
		},
		Others: []*pb.OtherMessage{
			{
				Key:   proto.Int64(0xdeadbeef),
				Value: []byte{1, 65, 7, 12},
			},
			{
				Weight: proto.Float32(6.022),
				Inner: &pb.InnerMessage{
					Host: proto.String("lesha.mtv"),
					Port: proto.Int32(8002),
				},
			},
		},
		Bikeshed: pb.MyMessage_BLUE.Enum(),
		Somegroup: &pb.MyMessage_SomeGroup{
			GroupField: proto.Int32(8),
		},
		// One normally wouldn't do this.
		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
		XXX_unrecognized: []byte{13<<3 | 0, 4},
	}
	ext := &pb.Ext{
		Data: proto.String("Big gobs for big rats"),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
		panic(err)
	}
	greetings := []string{"adg", "easy", "cow"}
	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
		panic(err)
	}

	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
	if err != nil {
		panic(err)
	}
	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
	proto.SetRawExtension(msg, 201, b)

	// Extensions can be plain fields, too, so let's test that.
	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
	proto.SetRawExtension(msg, 202, b)

	return msg
}
func TestNestedIfExpr(t *testing.T) {
	expr := &ast.Expr{
		Function: &ast.Function{
			Name: "not",
			Params: []*ast.Expr{
				{
					Terminal: &ast.Terminal{
						BoolValue: proto.Bool(false),
					},
				},
			},
		},
	}
	ifexpr := &ast.IfExpr{
		Condition: expr,
		ThenClause: &ast.StateExpr{
			IfExpr: &ast.IfExpr{
				Condition: expr,
				ThenClause: &ast.StateExpr{
					State: proto.String("true"),
				},
				ElseClause: &ast.StateExpr{
					State: proto.String("falser"),
				},
			},
		},
		ElseClause: &ast.StateExpr{
			State: proto.String("false"),
		},
	}
	states := nameToState(map[string]int{
		"true":   1,
		"false":  2,
		"falser": 3,
	})
	t.Logf("states = %v", states)
	c := funcs.NewCatcher(false)
	stateExpr, err := Compile(ifexpr, states, c)
	if err != nil {
		panic(err)
	}
	res := stateExpr.Eval(nil)
	if res != 1 {
		t.Fatalf("Expected true state, but got %d", res)
	}
	if err := c.GetError(); err != nil {
		panic(err)
	}
}
func getNumKeys(conn *transport.Conn, inBuf []byte, outBuf []byte, t *testing.T, pk *[32]byte) int64 {
	getNumKeys := &proto.ClientToServer{
		GetNumKeys: protobuf.Bool(true),
	}
	writeProtobuf(conn, outBuf, getNumKeys, t)
	response := receiveProtobuf(conn, inBuf, t)
	return *response.NumKeys
}
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		// bail out if the offer carries no usable port range
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}

		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}
		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

		tasks := []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
func main() {
	fs := flag.NewFlagSet("fuzzlr", flag.ExitOnError)
	master := fs.String("master", "localhost:5050", "Location of leading Mesos master.")
	servingPath := fs.String("binpath", "./bin/", "Directory where go-fuzz binaries are located.")
	bin := fs.String("bin", "./fuzz.zip", "Archive generated by go-fuzz-build.")
	corpus := fs.String("corpus", "./corpus.zip", "Corpus zip archive to seed the fuzzing with.")
	artifactPort := fs.Int("artifactPort", 12300, "Binding port for artifact server")
	address := fs.String("address", "127.0.0.1", "Binding address for artifact server")
	shutdownTimeout := fs.Duration("shutdown.timeout", 10*time.Second, "Shutdown timeout")
	fs.Parse(os.Args[1:])

	binpaths, err := filepath.Glob(*servingPath)
	if err != nil {
		log.Printf("Unable to read files from provided serving path %s: %v", *servingPath, err)
		return
	}

	commandInfos := []*mesos.CommandInfo_URI{}
	uris := []string{}
	for _, path := range binpaths {
		uri := serving.ServeExecutorArtifact(path, *address, *artifactPort)
		commandInfos = append(commandInfos, &mesos.CommandInfo_URI{
			Value:      uri,
			Executable: proto.Bool(true),
		})
		uris = append(uris, *uri)
	}

	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, *artifactPort), nil)
	log.Printf("Serving executor artifacts...")

	sched := scheduler.New(*bin, *corpus, uris...)
	driver, err := scheduler.NewDriver(*master, sched)
	if err != nil {
		log.Printf("Unable to create scheduler driver: %s", err)
		return
	}

	go func() {
		sigch := make(chan os.Signal, 1)
		signal.Notify(sigch, os.Interrupt, os.Kill)
		if s := <-sigch; s != os.Interrupt {
			return
		}
		log.Println("Fuzzlr is shutting down")
		if err := sched.Shutdown(*shutdownTimeout); err != nil {
			log.Print(err)
		}
		driver.Stop(false)
	}()

	if status, err := driver.Run(); err != nil {
		log.Printf("Framework stopped with status %s and error: %s", status, err)
	}
	log.Println("Exiting...")
}