func TestUpdate(t *testing.T) {
	neuralNetwork := CreateSimpleNetwork(t)
	inputs := mat64.NewDense(1, 2, []float64{0.05, 0.10})
	neuralNetwork.Forward(inputs)
	values := mat64.NewDense(1, 2, []float64{0.01, 0.99})
	neuralNetwork.Backward(values)
	learningConfiguration := neural.LearningConfiguration{
		Epochs:    proto.Int32(1),
		Rate:      proto.Float64(0.5),
		Decay:     proto.Float64(0),
		BatchSize: proto.Int32(1),
	}
	neuralNetwork.Update(learningConfiguration)
	expected_weights_0 := mat64.NewDense(
		3, 2, []float64{0.149780716, 0.24975114, 0.19956143, 0.29950229, 0.35, 0.35})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[0].Weight, expected_weights_0, 0.0001) {
		t.Errorf("weights 0 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[0].Weight))
	}
	expected_weights_1 := mat64.NewDense(
		3, 2, []float64{0.35891648, 0.51130127, 0.408666186, 0.561370121, 0.6, 0.6})
	if !mat64.EqualApprox(
		neuralNetwork.Layers[1].Weight, expected_weights_1, 0.0001) {
		t.Errorf("weights 1 unexpected:\n%v",
			mat64.Formatted(neuralNetwork.Layers[1].Weight))
	}
}
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	if len(s.hotBuf) != 0 {
		s.swapBufs(time.Now())
	}
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(s.headStream.Query(rank)),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs

	return nil
}
func makeServerRequest(fix fixture, resID string, clients []clientWants, has float64) (*pb.GetServerCapacityResponse, error) {
	var wants []*pb.PriorityBandAggregate
	for _, client := range clients {
		wants = append(wants, &pb.PriorityBandAggregate{
			Priority:   proto.Int64(0),
			NumClients: proto.Int64(client.numClients),
			Wants:      proto.Float64(client.wants),
		})
	}

	req := &pb.GetServerCapacityRequest{
		ServerId: proto.String("server"),
		Resource: []*pb.ServerCapacityResourceRequest{
			{
				ResourceId: proto.String(resID),
				Has: &pb.Lease{
					ExpiryTime:      proto.Int64(0),
					RefreshInterval: proto.Int64(0),
					Capacity:        proto.Float64(0),
				},
				Wants: wants,
			},
		},
	}

	if has > 0 {
		req.Resource[0].Has = &pb.Lease{
			ExpiryTime:      proto.Int64(time.Now().Add(1 * time.Minute).Unix()),
			RefreshInterval: proto.Int64(1),
			Capacity:        proto.Float64(has),
		}
	}

	return fix.client.GetServerCapacity(context.Background(), req)
}
func makeRequest(fix fixture, wants, has float64) (*pb.GetCapacityResponse, error) {
	req := &pb.GetCapacityRequest{
		ClientId: proto.String("client"),
		Resource: []*pb.ResourceRequest{
			{
				ResourceId: proto.String("resource"),
				Priority:   proto.Int64(1),
				Has: &pb.Lease{
					ExpiryTime:      proto.Int64(0),
					RefreshInterval: proto.Int64(0),
					Capacity:        proto.Float64(0),
				},
				Wants: proto.Float64(wants),
			},
		},
	}

	if has > 0 {
		req.Resource[0].Has = &pb.Lease{
			ExpiryTime:      proto.Int64(time.Now().Add(1 * time.Minute).Unix()),
			RefreshInterval: proto.Int64(5),
			Capacity:        proto.Float64(has),
		}
	}

	return fix.client.GetCapacity(context.Background(), req)
}
func TestParseTextFiles(t *testing.T) {
	tests := []struct {
		path string
		out  string
	}{
		{
			path: "fixtures/textfile/no_metric_files",
			out:  "fixtures/textfile/no_metric_files.out",
		},
		{
			path: "fixtures/textfile/two_metric_files",
			out:  "fixtures/textfile/two_metric_files.out",
		},
		{
			path: "fixtures/textfile/nonexistent_path",
			out:  "fixtures/textfile/nonexistent_path.out",
		},
	}

	for i, test := range tests {
		c := textFileCollector{
			path: test.path,
		}

		// Suppress a log message about `nonexistent_path` not existing; it is
		// expected and clutters the test output.
		err := flag.Set("log.level", "fatal")
		if err != nil {
			t.Fatal(err)
		}

		mfs := c.parseTextFiles()
		textMFs := make([]string, 0, len(mfs))
		for _, mf := range mfs {
			if mf.GetName() == "node_textfile_mtime" {
				mf.GetMetric()[0].GetGauge().Value = proto.Float64(1)
				mf.GetMetric()[1].GetGauge().Value = proto.Float64(2)
			}
			textMFs = append(textMFs, proto.MarshalTextString(mf))
		}

		sort.Strings(textMFs)
		got := strings.Join(textMFs, "")

		want, err := ioutil.ReadFile(test.out)
		if err != nil {
			t.Fatalf("%d. error reading fixture file %s: %s", i, test.out, err)
		}

		if string(want) != got {
			t.Fatalf("%d. want:\n\n%s\n\ngot:\n\n%s", i, string(want), got)
		}
	}
}
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"),
		util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")
	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{
		util.NewScalarResource("cpus", 4),
		util.NewScalarResource("mem", 512),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}),
	}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
func TestAttribute(t *testing.T) {
	attr := Attribute(&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	})
	if attr != "rack:2.00" {
		t.Errorf(`Attribute(&mesos.Attribute{
	Name:   proto.String("rack"),
	Type:   mesos.Value_SCALAR.Enum(),
	Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
}) != "rack:2.00"; actual %s`, attr)
	}

	attr = Attribute(&mesos.Attribute{
		Name: proto.String("datacenter"),
		Type: mesos.Value_TEXT.Enum(),
		Text: &mesos.Value_Text{Value: proto.String("DC-1")},
	})
	if attr != "datacenter:DC-1" {
		t.Errorf(`Attribute(&mesos.Attribute{
	Name: proto.String("datacenter"),
	Type: mesos.Value_TEXT.Enum(),
	Text: &mesos.Value_Text{Value: proto.String("DC-1")},
}) != "datacenter:DC-1"; actual %s`, attr)
	}
}
func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
	if queueName == "" {
		queueName = "default"
	}
	req := &pb.TaskQueueQueryAndOwnTasksRequest{
		QueueName:    []byte(queueName),
		LeaseSeconds: proto.Float64(float64(leaseTime)),
		MaxTasks:     proto.Int64(int64(maxTasks)),
		GroupByTag:   proto.Bool(groupByTag),
		Tag:          tag,
	}
	res := &pb.TaskQueueQueryAndOwnTasksResponse{}
	if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil {
		return nil, err
	}
	tasks := make([]*Task, len(res.Task))
	for i, t := range res.Task {
		tasks[i] = &Task{
			Payload:    t.Body,
			Name:       string(t.TaskName),
			Method:     "PULL",
			ETA:        time.Unix(0, *t.EtaUsec*1e3),
			RetryCount: *t.RetryCount,
			Tag:        string(t.Tag),
		}
	}
	return tasks, nil
}
// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is the same as the parsed
// number.
func Float64P(val string) (*float64, error) {
	f, err := Float64(val)
	if err != nil {
		return nil, err
	}
	return proto.Float64(f), nil
}
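// A minimal usage sketch for Float64P (the caller below is hypothetical and
// not part of the original source): on success the returned pointer can be
// assigned directly to an optional proto2 field.
func exampleFloat64P() {
	p, err := Float64P("0.75")
	if err != nil {
		log.Fatalf("Float64P: %v", err)
	}
	fmt.Printf("parsed %v\n", *p) // prints: parsed 0.75
}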
func (cn *Conn) Write(b []byte) (n int, err error) {
	const lim = 1 << 20 // max per chunk

	for n < len(b) {
		chunk := b[n:]
		if len(chunk) > lim {
			chunk = chunk[:lim]
		}

		req := &pb.SendRequest{
			SocketDescriptor: &cn.desc,
			Data:             chunk,
			StreamOffset:     &cn.offset,
		}
		res := &pb.SendReply{}
		if !cn.writeDeadline.IsZero() {
			req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
		}
		if err = cn.c.Call("remote_socket", "Send", req, res, opts(cn.writeDeadline)); err != nil {
			// assume zero bytes were sent in this RPC
			break
		}
		n += int(res.GetDataSent())
	}

	cn.offset += int64(n)
	return
}
// SetSafeCapacity sets the safe capacity in a response.
func (res *Resource) SetSafeCapacity(resp *pb.ResourceResponse) {
	res.mu.RLock()
	defer res.mu.RUnlock()

	// If the resource configuration does not have a safe capacity
	// configured, we return a dynamic safe capacity, which equals
	// the capacity divided by the number of clients that we
	// know about.
	// TODO(josv): The calculation of the dynamic safe capacity
	// needs to take sub clients into account (in a multi-server tree).
	if res.config.SafeCapacity == nil {
		resp.SafeCapacity = proto.Float64(*res.config.Capacity / float64(res.store.Count()))
	} else {
		resp.SafeCapacity = proto.Float64(*res.config.SafeCapacity)
	}
}
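// Hypothetical illustration of the dynamic fallback above (the values are
// assumptions, not from the original source): with Capacity=100, no
// SafeCapacity configured, and two clients counted by the store, the
// response carries 100/2 = 50. GetSafeCapacity is the usual proto2
// generated accessor.
func exampleDynamicSafeCapacity(res *Resource) float64 {
	resp := &pb.ResourceResponse{}
	res.SetSafeCapacity(resp)
	return resp.GetSafeCapacity() // 50 under the assumptions above
}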
func createDriver(scheduler *Scheduler, settings *Settings) (*mesossched.MesosSchedulerDriver, error) {
	publishedAddr := net.ParseIP(settings.MessengerAddress)
	bindingPort := settings.MessengerPort
	credential, err := getCredential(settings)
	if err != nil {
		return nil, err
	}

	return mesossched.NewMesosSchedulerDriver(mesossched.DriverConfig{
		Master: settings.Master,
		Framework: &mesosproto.FrameworkInfo{
			Id:              getFrameworkID(settings),
			Name:            proto.String(settings.Name),
			User:            proto.String(settings.User),
			Checkpoint:      proto.Bool(settings.Checkpoint),
			FailoverTimeout: proto.Float64(settings.FailoverTimeout),
			Principal:       getPrincipalID(credential),
		},
		Scheduler:        scheduler,
		BindingAddress:   net.ParseIP("0.0.0.0"),
		PublishedAddress: publishedAddr,
		BindingPort:      bindingPort,
		Credential:       credential,
		WithAuthContext:  getAuthContext,
	})
}
func testSafeCapacity(t *testing.T) {
	template := makeResourceTemplate("res_with_safe_capacity", pb.Algorithm_FAIR_SHARE)
	template.SafeCapacity = proto.Float64(10.0)

	s, err := MakeTestServer(template, makeResourceTemplate("*", pb.Algorithm_FAIR_SHARE))
	if err != nil {
		t.Errorf("MakeTestServer: %v", err)
	}

	res := s.getOrCreateResource("res_with_safe_capacity")
	res.store.Assign("client1", fiveMinutes, fiveSeconds, 10, 100, 1)
	res.store.Assign("client2", fiveMinutes, fiveSeconds, 10, 100, 1)

	resp := &pb.ResourceResponse{}
	res.SetSafeCapacity(resp)
	if *resp.SafeCapacity != 10 {
		t.Errorf("*resp.SafeCapacity: want:10, got:%v", *resp.SafeCapacity)
	}

	// No explicit safe capacity configured: expect the dynamic value
	// capacity/clients = 100/2 = 50.
	res = s.getOrCreateResource("res_without_safe_capacity")
	res.store.Assign("client1", fiveMinutes, fiveSeconds, 10, 100, 1)
	res.store.Assign("client2", fiveMinutes, fiveSeconds, 10, 100, 1)

	resp = &pb.ResourceResponse{}
	res.SetSafeCapacity(resp)
	if *resp.SafeCapacity != 50 {
		t.Errorf("*resp.SafeCapacity: want:50, got:%v", *resp.SafeCapacity)
	}
}
func (cn *Conn) Read(b []byte) (n int, err error) {
	const maxRead = 1 << 20
	if len(b) > maxRead {
		b = b[:maxRead]
	}

	req := &pb.ReceiveRequest{
		SocketDescriptor: &cn.desc,
		DataSize:         proto.Int32(int32(len(b))),
	}
	res := &pb.ReceiveReply{}
	if !cn.readDeadline.IsZero() {
		req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
	}
	if err := cn.c.Call("remote_socket", "Receive", req, res, opts(cn.readDeadline)); err != nil {
		return 0, err
	}
	if len(res.Data) == 0 {
		return 0, io.EOF
	}
	if len(res.Data) > len(b) {
		return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
	}
	return copy(b, res.Data), nil
}
func NewNotification(ty string, m map[string]interface{}) *Notification {
	res := &Notification{}
	res.Type = ty

	for k, v := range m {
		variant := &attribute.Variant{}
		switch v := v.(type) {
		case bool:
			variant.BoolValue = proto.Bool(v)
		// Minor annoyance here to have to do these casts, but it would be
		// a huge annoyance elsewhere:
		case int:
			variant.IntValue = proto.Int64(int64(v))
		case int32:
			variant.IntValue = proto.Int64(int64(v))
		case int64:
			variant.IntValue = proto.Int64(v)
		case uint:
			variant.UintValue = proto.Uint64(uint64(v))
		case uint32:
			variant.UintValue = proto.Uint64(uint64(v))
		case uint64:
			variant.UintValue = proto.Uint64(v)
		case float32:
			variant.FloatValue = proto.Float64(float64(v))
		case float64:
			variant.FloatValue = proto.Float64(v)
		case string:
			variant.StringValue = proto.String(v)
		case []byte:
			variant.BlobValue = v
		case MessageValue:
			variant.MessageValue = v.Value
		case FourccValue:
			variant.FourccValue = proto.String(v.Value)
		case entity.EntityId:
			variant.EntityidValue = &v
		default:
			log.Panicf("error: can't convert %s: %T to attribute", k, v)
		}
		res.Attributes = append(res.Attributes, &attribute.Attribute{
			Name:  proto.String(k),
			Value: variant,
		})
	}
	return res
}
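// A minimal usage sketch for NewNotification (the type name and attribute
// values are hypothetical): each map entry lands in the Variant field that
// matches its dynamic type, e.g. the float64 goes through proto.Float64 into
// FloatValue.
func exampleNewNotification() *Notification {
	return NewNotification("player_joined", map[string]interface{}{
		"name":  "alice",   // string  -> StringValue
		"score": int64(42), // int64   -> IntValue
		"ratio": 0.5,       // float64 -> FloatValue
		"admin": false,     // bool    -> BoolValue
	})
}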
func TestAttributes(t *testing.T) {
	attributes := Attributes([]*mesos.Attribute{{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}, {
		Name:   proto.String("floor"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(1)},
	}})
	if !strings.Contains(attributes, "rack") {
		t.Errorf(`%s does not contain "rack"`, attributes)
	}
	if !strings.Contains(attributes, "floor") {
		t.Errorf(`%s does not contain "floor"`, attributes)
	}
}
func populateMetric(
	t ValueType,
	v float64,
	labelPairs []*dto.LabelPair,
	m *dto.Metric,
) error {
	m.Label = labelPairs
	switch t {
	case CounterValue:
		m.Counter = &dto.Counter{Value: proto.Float64(v)}
	case GaugeValue:
		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
	case UntypedValue:
		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
	default:
		return fmt.Errorf("encountered unknown type %v", t)
	}
	return nil
}
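// A minimal usage sketch for populateMetric (the caller is hypothetical,
// assuming it lives in the same package as the ValueType constants): fill a
// dto.Metric with a gauge reading of 0.5.
func examplePopulateMetric() (*dto.Metric, error) {
	m := &dto.Metric{}
	if err := populateMetric(GaugeValue, 0.5, nil, m); err != nil {
		return nil, err
	}
	return m, nil // m.Gauge.Value now points at 0.5
}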
// setUpIntermediate sets up a test intermediate server.
func setUpIntermediate(name string, addr string) (fixture, error) {
	var (
		fix fixture
		err error
	)
	fix.server, err = MakeTestIntermediateServer(
		name, addr,
		&pb.ResourceTemplate{
			IdentifierGlob: proto.String("*"),
			Capacity:       proto.Float64(100),
			SafeCapacity:   proto.Float64(2),
			Algorithm: &pb.Algorithm{
				Kind:            pb.Algorithm_PROPORTIONAL_SHARE.Enum(),
				RefreshInterval: proto.Int64(1),
				LeaseLength:     proto.Int64(2),
			},
		})
	if err != nil {
		return fixture{}, err
	}

	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		return fixture{}, err
	}
	fix.lis = lis

	fix.rpcServer = rpc.NewServer()
	pb.RegisterCapacityServer(fix.rpcServer, fix.server)
	go fix.rpcServer.Serve(lis)

	conn, err := rpc.Dial(fix.Addr(), rpc.WithInsecure())
	if err != nil {
		return fixture{}, err
	}
	fix.client = pb.NewCapacityClient(conn)

	return fix, nil
}
func (h *histogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{}
	buckets := make([]*dto.Bucket, len(h.upperBounds))

	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
	var count uint64
	for i, upperBound := range h.upperBounds {
		count += atomic.LoadUint64(&h.counts[i])
		buckets[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		}
	}
	his.Bucket = buckets
	out.Histogram = his
	out.Label = h.labelPairs
	return nil
}
// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
	params := &pb.TaskQueueRetryParameters{}
	if opt.RetryLimit > 0 {
		params.RetryLimit = proto.Int32(opt.RetryLimit)
	}
	if opt.AgeLimit > 0 {
		params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
	}
	if opt.MinBackoff > 0 {
		params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
	}
	if opt.MaxBackoff > 0 {
		params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
	}
	if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
		params.MaxDoublings = proto.Int32(opt.MaxDoublings)
	}
	return params
}
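// A minimal usage sketch for toRetryParameters (the option values are
// hypothetical): durations are converted to seconds, so a MinBackoff of two
// seconds becomes a MinBackoffSec of proto.Float64(2).
func exampleRetryParameters() *pb.TaskQueueRetryParameters {
	opt := &RetryOptions{
		RetryLimit: 5,
		MinBackoff: 2 * time.Second,
		MaxBackoff: time.Minute,
	}
	return opt.toRetryParameters()
}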
func (s *StackDeployScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debug("[ResourceOffers] %s", pretty.Offers(offers))

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(10)})
			Logger.Debug("Declined offer %s: %s", pretty.Offer(offer), declineReason)
		}
	}
}
// mesos.Scheduler interface method.
// Invoked when resources have been offered to this framework.
func (this *ElodinaTransportScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	log.Logger.Info("Received offers")
	offersAndTasks := make(map[*mesos.Offer][]*mesos.TaskInfo)

	remainingPartitions, err := this.GetTopicPartitions()
	if err != nil {
		return
	}
	remainingPartitions.RemoveAll(this.TakenTopicPartitions.GetArray())
	log.Logger.Debug("%v", remainingPartitions)

	tps := remainingPartitions.GetArray()
	offersAndResources := this.wrapInOfferAndResources(offers)
	for !remainingPartitions.IsEmpty() {
		log.Logger.Debug("Iteration %v", remainingPartitions)
		if this.hasEnoughInstances() {
			for _, transfer := range this.taskIdToTaskState {
				if len(transfer.assignment) < this.config.ThreadsPerTask {
					transfer.assignment = append(transfer.assignment, tps[0])
					remainingPartitions.Remove(tps[0])
					this.TakenTopicPartitions.Add(tps[0])
					if len(tps) > 1 {
						tps = tps[1:]
					} else {
						tps = []consumer.TopicAndPartition{}
					}
				}
			}
		} else {
			log.Logger.Debug("Trying to launch new task")
			offer, task := this.launchNewTask(offersAndResources)
			if offer != nil && task != nil {
				offersAndTasks[offer] = append(offersAndTasks[offer], task)
			} else {
				for _, offer := range offers {
					if _, exists := offersAndTasks[offer]; !exists {
						offersAndTasks[offer] = make([]*mesos.TaskInfo, 0)
					}
				}
				break
			}
		}
	}

	this.assignPendingPartitions()

	for _, offer := range offers {
		if tasks, ok := offersAndTasks[offer]; ok {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		} else {
			driver.DeclineOffer(offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
		}
	}
}
func makeResourceTemplate(name string, algo pb.Algorithm_Kind) *pb.ResourceTemplate {
	return &pb.ResourceTemplate{
		IdentifierGlob: proto.String(name),
		Capacity:       proto.Float64(100),
		Algorithm: &pb.Algorithm{
			Kind:                 algo.Enum(),
			RefreshInterval:      proto.Int64(5),
			LeaseLength:          proto.Int64(20),
			LearningModeDuration: proto.Int64(0),
		},
	}
}
func (s *Scheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debugf("[ResourceOffers] %s", offersString(offers))

	s.activeLock.Lock()
	defer s.activeLock.Unlock()
	if !s.active {
		Logger.Debug("Scheduler is inactive. Declining all offers.")
		for _, offer := range offers {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		}
		return
	}

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			Logger.Debugf("Declined offer: %s", declineReason)
		}
	}
}
func TestValidateGetCapacityRequest(t *testing.T) {
	invalid := []*pb.GetCapacityRequest{
		// No ClientId.
		{
			ClientId: proto.String(""),
			Resource: nil,
		},
		// No ResourceId.
		{
			ClientId: proto.String("client"),
			Resource: []*pb.ResourceRequest{
				{
					ResourceId: proto.String(""),
					Priority:   proto.Int64(1),
					Has:        new(pb.Lease),
					Wants:      proto.Float64(1),
				},
			},
		},
		// Requests negative capacity.
		{
			ClientId: proto.String("client"),
			Resource: []*pb.ResourceRequest{
				{
					ResourceId: proto.String("resource"),
					Priority:   proto.Int64(1),
					Has:        new(pb.Lease),
					Wants:      proto.Float64(-10),
				},
			},
		},
	}

	for _, p := range invalid {
		if err := validateGetCapacityRequest(p); err == nil {
			t.Errorf("no validation error raised for invalid %v", p)
		}
	}
}
func parseLine(str []string) (*userinfo.UserAddr, error) {
	if len(str) != 3 || str[1] == "NULL" || str[2] == "NULL" {
		return nil, errors.New("invalid line")
	}
	start := strings.Split(str[1], "#")
	end := strings.Split(str[2], "#")
	if len(start) != 2 || len(end) != 2 {
		return nil, errors.New("invalid coordinate pair")
	}

	pid, err := strconv.ParseInt(str[0], 10, 64)
	if err != nil {
		return nil, err
	}
	slng, err := strconv.ParseFloat(start[0], 64)
	if err != nil {
		return nil, err
	}
	slat, err := strconv.ParseFloat(start[1], 64)
	if err != nil {
		return nil, err
	}
	elng, err := strconv.ParseFloat(end[0], 64)
	if err != nil {
		return nil, err
	}
	elat, err := strconv.ParseFloat(end[1], 64)
	if err != nil {
		return nil, err
	}

	addr := &userinfo.UserAddr{}
	addr.Userid = proto.Int64(pid)
	addr.Homelng = proto.Float64(slng)
	addr.Homelat = proto.Float64(slat)
	addr.Corplng = proto.Float64(elng)
	addr.Corplat = proto.Float64(elat)
	return addr, nil
}
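// A minimal usage sketch for parseLine (the input values are hypothetical):
// the expected shape is {"<userid>", "<homeLng>#<homeLat>", "<corpLng>#<corpLat>"}.
func exampleParseLine() {
	addr, err := parseLine([]string{"12345", "116.40#39.90", "121.47#31.23"})
	if err != nil {
		log.Fatalf("parseLine: %v", err)
	}
	fmt.Printf("user %d home (%v, %v)\n", *addr.Userid, *addr.Homelng, *addr.Homelat)
}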
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs

	return nil
}
func (s *Scheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debugf("[ResourceOffers] %s", pretty.Offers(offers))

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(10)})
			Logger.Debugf("Declined offer: %s", declineReason)
		}
	}

	s.reconcileTasks(false)
	s.cluster.Save()
}
func interfaceToProto(iv interface{}) (p *pb.Value, errStr string) {
	val := new(pb.Value)
	switch v := iv.(type) {
	case int:
		val.IntegerValue = proto.Int64(int64(v))
	case int32:
		val.IntegerValue = proto.Int64(int64(v))
	case int64:
		val.IntegerValue = proto.Int64(v)
	case bool:
		val.BooleanValue = proto.Bool(v)
	case string:
		val.StringValue = proto.String(v)
	case float32:
		val.DoubleValue = proto.Float64(float64(v))
	case float64:
		val.DoubleValue = proto.Float64(v)
	case *Key:
		if v != nil {
			val.KeyValue = keyToProto(v)
		}
	case time.Time:
		if v.Before(minTime) || v.After(maxTime) {
			return nil, "time value out of range"
		}
		val.TimestampMicrosecondsValue = proto.Int64(toUnixMicro(v))
	case []byte:
		val.BlobValue = v
	default:
		if iv != nil {
			// %T (not %t) prints the dynamic type of the value.
			return nil, fmt.Sprintf("invalid Value type %T", iv)
		}
	}
	// TODO(jbd): Support ListValue and EntityValue.
	// TODO(jbd): Support types whose underlying type is one of the types above.
	return val, ""
}
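// A minimal usage sketch for interfaceToProto (the caller is hypothetical):
// both float32 and float64 end up in DoubleValue via proto.Float64.
func exampleInterfaceToProto() {
	val, errStr := interfaceToProto(3.14)
	if errStr != "" {
		log.Fatal(errStr)
	}
	fmt.Println(val.GetDoubleValue()) // 3.14
}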
func TestGetServerCapacity(t *testing.T) {
	fix, err := setUp()
	if err != nil {
		t.Errorf("setUp: %v", err)
	}
	defer fix.tearDown()

	// capacity is the maximum capacity of the resource.
	capacity := 100.0

	// Create a resource template.
	if err := fix.server.LoadConfig(context.Background(), &pb.ResourceRepository{
		Resources: []*pb.ResourceTemplate{
			{
				IdentifierGlob: proto.String("*"),
				Capacity:       proto.Float64(capacity),
				Algorithm: &pb.Algorithm{
					Kind:                 pb.Algorithm_FAIR_SHARE.Enum(),
					RefreshInterval:      proto.Int64(5),
					LeaseLength:          proto.Int64(20),
					LearningModeDuration: proto.Int64(0),
				},
			},
		},
	}, map[string]*time.Time{}); err != nil {
		t.Fatalf("fix.server.LoadConfig: %v", err)
	}

	subclients := 5
	wantsPerSubclient := 200.0
	var clients []clientWants
	// Form pairs of wants capacity and number of subclients.
	for s := 1; s <= subclients; s++ {
		clients = append(clients, clientWants{wantsPerSubclient, int64(s)})
	}

	out, err := makeServerRequest(fix, "resource", clients, 0)
	if err != nil {
		t.Fatalf("s.GetServerCapacity: %v", err)
	}

	// We expect to receive the maximum available capacity.
	lease := out.Response[0].GetGets()
	if got, want := lease.GetCapacity(), capacity; got != want {
		t.Errorf("lease.GetCapacity() = %v, want %v", got, want)
	}
}