func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	if len(s.hotBuf) != 0 {
		s.swapBufs(time.Now())
	}
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(s.headStream.Query(rank)),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
func computeEpochResult(e Evaluator, examples Examples) pb.EpochResult {
	l := make([]labelledPrediction, 0, len(examples))
	boolLabel := func(example *pb.Example) bool {
		return example.GetLabel() > 0
	}
	for _, ex := range examples {
		l = append(l, labelledPrediction{
			Label:      boolLabel(ex),
			Prediction: e.Evaluate(ex.GetFeatures()),
		})
	}
	lp := labelledPredictions(l)
	return pb.EpochResult{
		Roc:               proto.Float64(lp.ROC()),
		LogScore:          proto.Float64(lp.LogScore()),
		NormalizedEntropy: proto.Float64(lp.NormalizedEntropy()),
		Calibration:       proto.Float64(lp.Calibration()),
	}
}
func NewIntervalDatum(key string, timestamp int64, minmax [2]float64) (d pb.TelemetryDatum) {
	d.Key = proto.String(key)
	d.Timestamp = proto.Int64(timestamp)
	d.IntervalMin = proto.Float64(minmax[0])
	d.IntervalMax = proto.Float64(minmax[1])
	return d
}
func fieldsToProto(src []Field) ([]*pb.Field, error) {
	// Maps to catch duplicate time or numeric fields.
	timeFields, numericFields := make(map[string]bool), make(map[string]bool)
	dst := make([]*pb.Field, 0, len(src))
	for _, f := range src {
		if !validFieldName(f.Name) {
			return nil, fmt.Errorf("search: invalid field name %q", f.Name)
		}
		fieldValue := &pb.FieldValue{}
		switch x := f.Value.(type) {
		case string:
			fieldValue.Type = pb.FieldValue_TEXT.Enum()
			fieldValue.StringValue = proto.String(x)
		case Atom:
			fieldValue.Type = pb.FieldValue_ATOM.Enum()
			fieldValue.StringValue = proto.String(string(x))
		case HTML:
			fieldValue.Type = pb.FieldValue_HTML.Enum()
			fieldValue.StringValue = proto.String(string(x))
		case time.Time:
			if timeFields[f.Name] {
				return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
			}
			timeFields[f.Name] = true
			fieldValue.Type = pb.FieldValue_DATE.Enum()
			fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
		case float64:
			if numericFields[f.Name] {
				return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
			}
			numericFields[f.Name] = true
			fieldValue.Type = pb.FieldValue_NUMBER.Enum()
			fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
		case appengine.GeoPoint:
			if !x.Valid() {
				return nil, fmt.Errorf("search: GeoPoint field %q with invalid value %v", f.Name, x)
			}
			fieldValue.Type = pb.FieldValue_GEO.Enum()
			fieldValue.Geo = &pb.FieldValue_Geo{
				Lat: proto.Float64(x.Lat),
				Lng: proto.Float64(x.Lng),
			}
		default:
			return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
		}
		if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
			return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
		}
		dst = append(dst, &pb.Field{
			Name:  proto.String(f.Name),
			Value: fieldValue,
		})
	}
	return dst, nil
}
func (l leastAbsoluteDeviationLoss) UpdateWeightedLabels(e Examples) {
	for _, ex := range e {
		prediction := l.evaluator.Evaluate(ex.Features)
		if ex.GetLabel()-prediction > 0 {
			ex.WeightedLabel = proto.Float64(1.0)
		} else {
			ex.WeightedLabel = proto.Float64(-1.0)
		}
	}
}
// Tests that we split correctly on a trivial example
// label == f[0] > 0.5
func TestBestSplit(t *testing.T) {
	examples := []*pb.Example{
		{
			Features:      []float64{0.0},
			Label:         proto.Float64(0.0),
			WeightedLabel: proto.Float64(0.0),
		},
		{
			Features:      []float64{1.0},
			Label:         proto.Float64(1.0),
			WeightedLabel: proto.Float64(1.0),
		},
		{
			Features:      []float64{1.0},
			Label:         proto.Float64(1.0),
			WeightedLabel: proto.Float64(1.0),
		},
		{
			Features:      []float64{0.0},
			Label:         proto.Float64(0.0),
			WeightedLabel: proto.Float64(0.0),
		},
	}
	bestSplit := getBestSplit(examples, 0 /* feature */)
	if bestSplit.feature != 0 {
		t.Fatal(bestSplit)
	}
	if bestSplit.index != 2 {
		t.Fatal(bestSplit)
	}
	if math.Abs(bestSplit.gain-1.0) > 0.001 {
		t.Fatal(bestSplit)
	}
}
func (h huberLoss) UpdateWeightedLabels(e Examples) {
	// Sort by residual so the huberAlpha-quantile residual can be read off
	// directly and used as the clipping threshold delta.
	by(func(e1, e2 *pb.Example) bool {
		return h.residual(e1) < h.residual(e2)
	}).Sort(e)
	marginalExample := e[int64(float64(len(e))*h.huberAlpha)]
	delta := h.residual(marginalExample)
	for _, ex := range e {
		divergence := h.residual(ex)
		if divergence <= delta {
			ex.WeightedLabel = proto.Float64(divergence)
		} else {
			// Clip to delta while keeping the sign of the residual.
			ex.WeightedLabel = proto.Float64(delta * divergence / math.Abs(divergence))
		}
	}
}
func NewDoubleDatum(key string, timestamp int64, v float64) (d pb.TelemetryDatum) {
	d.Key = proto.String(key)
	d.Timestamp = proto.Int64(timestamp)
	d.Double = proto.Float64(v)
	return d
}
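// Usage sketch for the two constructors above (a minimal illustration; the
// key names and values are made up, not taken from the original code):
// emit one point-in-time double datum and one min/max interval datum.
func exampleTelemetryData(now int64) []pb.TelemetryDatum {
	return []pb.TelemetryDatum{
		NewDoubleDatum("cache.hit_rate", now, 0.93),
		NewIntervalDatum("rpc.latency_ms", now, [2]float64{1.2, 87.5}),
	}
}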
func (cn *Conn) Write(b []byte) (n int, err error) {
	const lim = 1 << 20 // max per chunk

	for n < len(b) {
		chunk := b[n:]
		if len(chunk) > lim {
			chunk = chunk[:lim]
		}

		req := &pb.SendRequest{
			SocketDescriptor: &cn.desc,
			Data:             chunk,
			StreamOffset:     &cn.offset,
		}
		res := &pb.SendReply{}
		if !cn.writeDeadline.IsZero() {
			req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
		}
		if err = cn.c.Call("remote_socket", "Send", req, res, nil); err != nil {
			// assume zero bytes were sent in this RPC
			break
		}
		n += int(res.GetDataSent())
	}

	cn.offset += int64(n)
	return
}
func (cn *Conn) Read(b []byte) (n int, err error) {
	const maxRead = 1 << 20
	if len(b) > maxRead {
		b = b[:maxRead]
	}

	req := &pb.ReceiveRequest{
		SocketDescriptor: &cn.desc,
		DataSize:         proto.Int32(int32(len(b))),
	}
	res := &pb.ReceiveReply{}
	if !cn.readDeadline.IsZero() {
		req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
	}
	if err := cn.c.Call("remote_socket", "Receive", req, res, nil); err != nil {
		return 0, err
	}
	if len(res.Data) == 0 {
		return 0, io.EOF
	}
	if len(res.Data) > len(b) {
		return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
	}
	return copy(b, res.Data), nil
}
func NewScalarResource(name string, val float64) *mesos.Resource {
	return &mesos.Resource{
		Name:   proto.String(name),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(val)},
	}
}
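// Usage sketch (assumes the same package and mesos-go generated types as
// NewScalarResource above; the resource names and amounts are illustrative):
// declare the scalar resources a task would request.
func exampleTaskResources() []*mesos.Resource {
	return []*mesos.Resource{
		NewScalarResource("cpus", 0.5),
		NewScalarResource("mem", 128),
	}
}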
func (p *pruner) pruneTree(t *pb.TreeNode, e Examples) prunedStage {
	// Find the weakest link: the node with the smallest per-leaf squared
	// divergence, i.e. the cheapest subtree to collapse into a single leaf.
	bestNode, bestCost, bestLeaves := &pb.TreeNode{}, math.MaxFloat64, 0
	mapTree(t, e, TreeMapperFunc(func(n *pb.TreeNode, ex Examples) (*pb.TreeNode, bool) {
		nodeSquaredDivergence, nodeLeaves := weakestLinkCostFunction(n, ex)
		nodeCost := nodeSquaredDivergence / float64(nodeLeaves)
		if nodeCost < bestCost {
			bestNode = n
			bestCost = nodeCost
			bestLeaves = nodeLeaves
		}
		return proto.Clone(n).(*pb.TreeNode), true
	}))

	prunedTree := mapTree(t, e, TreeMapperFunc(func(n *pb.TreeNode, ex Examples) (*pb.TreeNode, bool) {
		if n != bestNode {
			return proto.Clone(n).(*pb.TreeNode), true
		}
		// Otherwise, return the leaf constructed by pruning all subtrees.
		leafWeight := p.lossFunction.GetLeafWeight(ex)
		prior := p.lossFunction.GetPrior(ex)
		return &pb.TreeNode{
			LeafValue: proto.Float64(leafWeight * prior),
		}, false
	}))

	rootCost, rootLeaves := weakestLinkCostFunction(t, e)
	alpha := (rootCost - bestCost) / float64(rootLeaves-bestLeaves)
	return prunedStage{
		alpha: alpha,
		tree:  prunedTree,
	}
}
func TestStatusUpdateMessageHandling(t *testing.T) {
	sched := NewMesosScheduler()
	sched.StatusUpdate = func(schedDriver *SchedulerDriver, taskStatus *mesos.TaskStatus) {
		if taskStatus.GetState() != mesos.TaskState_TASK_RUNNING {
			log.Fatal("Scheduler.StatusUpdate expected State value not received.")
		}
		if string(taskStatus.GetData()) != "World!" {
			log.Fatal("Scheduler.StatusUpdate expected Status.Data not received.")
		}
	}

	msg := &mesos.StatusUpdateMessage{
		Update: &mesos.StatusUpdate{
			FrameworkId: &mesos.FrameworkID{Value: proto.String("test-framework-1")},
			Status: &mesos.TaskStatus{
				TaskId:  &mesos.TaskID{Value: proto.String("test-task-1")},
				State:   mesos.TaskState_TASK_RUNNING.Enum(),
				Message: proto.String("Hello"),
				Data:    []byte("World!"),
			},
			Timestamp: proto.Float64(1234567.2),
			Uuid:      []byte("abcd-efg1-2345-6789-abcd-efg1"),
		},
	}

	driver, err := NewSchedDriver(sched, &mesos.FrameworkInfo{}, "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	driver.schedMsgQ <- msg
}
func lease(c appengine.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
	req := &taskqueue_proto.TaskQueueQueryAndOwnTasksRequest{
		QueueName:    []byte(queueName),
		LeaseSeconds: proto.Float64(float64(leaseTime)),
		MaxTasks:     proto.Int64(int64(maxTasks)),
		GroupByTag:   proto.Bool(groupByTag),
		Tag:          tag,
	}
	res := &taskqueue_proto.TaskQueueQueryAndOwnTasksResponse{}
	callOpts := &appengine_internal.CallOptions{
		Timeout: 10 * time.Second,
	}
	if err := c.Call("taskqueue", "QueryAndOwnTasks", req, res, callOpts); err != nil {
		return nil, err
	}
	tasks := make([]*Task, len(res.Task))
	for i, t := range res.Task {
		tasks[i] = &Task{
			Payload:    t.Body,
			Name:       string(t.TaskName),
			Method:     "PULL",
			ETA:        time.Unix(0, *t.EtaUsec*1e3),
			RetryCount: *t.RetryCount,
			Tag:        string(t.Tag),
		}
	}
	return tasks, nil
}
func KeyValueEncode(key int64, value float64) ([]byte, error) {
	kv := &KeyValue{
		Timestamp: proto.Int64(key),
		Value:     proto.Float64(value),
	}
	return proto.Marshal(kv)
}
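// Hypothetical round-trip counterpart to KeyValueEncode (not part of the
// original code): decode a record back into a KeyValue with proto.Unmarshal
// and read the fields through the generated getters, which are assumed to
// exist for the optional Timestamp and Value fields.
func keyValueDecode(record []byte) (int64, float64, error) {
	kv := &KeyValue{}
	if err := proto.Unmarshal(record, kv); err != nil {
		return 0, 0, err
	}
	return kv.GetTimestamp(), kv.GetValue(), nil
}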
func setOptionalFloat(s string, v **float64) {
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		*v = proto.Float64(f)
	} else {
		*v = nil
	}
}
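// Minimal usage sketch for setOptionalFloat (input strings are illustrative):
// the target pointer is set when the string parses as a float and reset to
// nil when it does not.
func exampleSetOptionalFloat() *float64 {
	var v *float64
	setOptionalFloat("3.14", &v) // v now points at 3.14
	setOptionalFloat("oops", &v) // v is reset to nil
	return v
}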
func NewStatusUpdate(frameworkId *mesos.FrameworkID, taskStatus *mesos.TaskStatus, timestamp float64, uuid []byte) *mesos.StatusUpdate {
	return &mesos.StatusUpdate{
		FrameworkId: frameworkId,
		Status:      taskStatus,
		Timestamp:   proto.Float64(timestamp),
		Uuid:        uuid,
	}
}
func TestStatusUpdateMessage(t *testing.T) {
	eventQ := make(chan interface{})
	go func() {
		for msg := range eventQ {
			val, ok := msg.(*mesos.StatusUpdateMessage)
			if !ok {
				// t.Error, not t.Fatal: Fatal must only be called from the
				// test goroutine, and these checks run in a separate goroutine.
				t.Error("Failed to receive msg of type StatusUpdateMessage")
				continue
			}
			if val.Update.FrameworkId.GetValue() != "test-framework-1" {
				t.Error("Expected StatusUpdateMessage.Update.FrameworkId not received.")
			}
			if val.Update.Status.GetState() != mesos.TaskState_TASK_RUNNING {
				t.Error("Expected StatusUpdateMessage.Update.Status.State not received.")
			}
			if string(val.Update.Status.GetData()) != "World!" {
				t.Error("Expected StatusUpdateMessage.Update.Status.Data not received.")
			}
		}
	}()

	proc, err := newSchedulerProcess(eventQ)
	if err != nil {
		t.Fatal(err)
	}
	proc.started = true
	proc.aborted = false

	msg := &mesos.StatusUpdateMessage{
		Update: &mesos.StatusUpdate{
			FrameworkId: &mesos.FrameworkID{Value: proto.String("test-framework-1")},
			Status: &mesos.TaskStatus{
				TaskId:  &mesos.TaskID{Value: proto.String("test-task-1")},
				State:   mesos.TaskState_TASK_RUNNING.Enum(),
				Message: proto.String("Hello"),
				Data:    []byte("World!"),
			},
			Timestamp: proto.Float64(1234567.2),
			Uuid:      []byte("abcd-efg1-2345-6789-abcd-efg1"),
		},
	}
	data, err := proto.Marshal(msg)
	if err != nil {
		t.Fatalf("Unable to marshal StatusUpdateMessage, %v", err)
	}

	req := buildHttpRequest(t, "StatusUpdateMessage", data)
	resp := httptest.NewRecorder()
	proc.ServeHTTP(resp, req)

	if resp.Code != http.StatusAccepted {
		t.Fatalf("Expecting server status %d but got status %d", http.StatusAccepted, resp.Code)
	}
}
func populateMetric(
	t ValueType,
	v float64,
	labelPairs []*dto.LabelPair,
	m *dto.Metric,
) {
	m.Label = labelPairs
	switch t {
	case CounterValue:
		m.Counter = &dto.Counter{Value: proto.Float64(v)}
	case GaugeValue:
		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
	case UntypedValue:
		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
	default:
		panic(fmt.Errorf("encountered unknown type %v", t))
	}
}
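// Usage sketch for populateMetric (the label names and value are made up):
// fill a dto.Metric for a gauge sample carrying a single label pair.
func exampleGaugeMetric() *dto.Metric {
	m := &dto.Metric{}
	populateMetric(GaugeValue, 42.0, []*dto.LabelPair{
		{Name: proto.String("job"), Value: proto.String("worker")},
	}, m)
	return m
}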
// toRetryParameters converts RetryOptions to taskqueue_proto.TaskQueueRetryParameters.
func (opt *RetryOptions) toRetryParameters() *taskqueue_proto.TaskQueueRetryParameters {
	params := &taskqueue_proto.TaskQueueRetryParameters{}
	if opt.RetryLimit > 0 {
		params.RetryLimit = proto.Int32(opt.RetryLimit)
	}
	if opt.AgeLimit > 0 {
		params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
	}
	if opt.MinBackoff > 0 {
		params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
	}
	if opt.MaxBackoff > 0 {
		params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
	}
	if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
		params.MaxDoublings = proto.Int32(opt.MaxDoublings)
	}
	return params
}
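// Usage sketch: a RetryOptions value with a bounded backoff window converted
// to the wire form by toRetryParameters (the specific limits are illustrative,
// and only fields referenced above are used).
func exampleRetryParameters() *taskqueue_proto.TaskQueueRetryParameters {
	opt := &RetryOptions{
		RetryLimit: 5,
		MinBackoff: 2 * time.Second,
		MaxBackoff: 30 * time.Second,
	}
	return opt.toRetryParameters()
}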
func makeAnnotatedTree(level int, numFeatures int) *pb.TreeNode {
	if level == 0 {
		return &pb.TreeNode{
			LeafValue: proto.Float64(rand.Float64()),
		}
	}
	splittingFeature := rand.Int63n(int64(numFeatures))
	splittingValue := rand.Float64()
	t := &pb.TreeNode{
		Feature:    proto.Int64(splittingFeature),
		SplitValue: proto.Float64(splittingValue),
		Left:       makeAnnotatedTree(level-1, numFeatures),
		Right:      makeAnnotatedTree(level-1, numFeatures),
		Annotation: &pb.Annotation{
			LeftFraction: proto.Float64(rand.Float64()),
		},
	}
	return t
}
func (b *boostingTreeGenerator) initializeForest(e Examples) {
	b.forest = &pb.Forest{
		Trees:     make([]*pb.TreeNode, 0, b.forestConfig.GetNumWeakLearners()),
		Rescaling: b.getRescaling().Enum(),
	}

	// Initial prior
	b.forest.Trees = append(b.forest.Trees, &pb.TreeNode{
		LeafValue: proto.Float64(b.getLossFunction().GetPrior(e)),
	})
}
func constructSmallExamples(numExamples int, numFeatures int) Examples {
	result := make([]*pb.Example, 0, numExamples)
	for i := 0; i < numExamples; i++ {
		example := &pb.Example{
			Features: make([]float64, numFeatures),
		}
		sample := rand.NormFloat64()
		example.Features[rand.Intn(numFeatures)] = sample
		if sample < 0.5 {
			example.Label = proto.Float64(1.0)
			example.WeightedLabel = proto.Float64(1.0)
		} else {
			example.Label = proto.Float64(-1.0)
			example.WeightedLabel = proto.Float64(-1.0)
		}
		result = append(result, example)
	}
	return result
}
func makeTree(level int, numFeatures int) *pb.TreeNode {
	if level == 0 {
		return &pb.TreeNode{
			LeafValue: proto.Float64(rand.Float64()),
		}
	}
	splittingFeature := rand.Int63n(int64(numFeatures))
	splittingValue := rand.Float64()
	t := &pb.TreeNode{
		Feature:    proto.Int64(splittingFeature),
		SplitValue: proto.Float64(splittingValue),
		Left:       makeTree(level-1, numFeatures),
		Right:      makeTree(level-1, numFeatures),
	}
	if err := validateTree(t); err != nil {
		glog.Fatal("Invalid tree: ", err)
	}
	return t
}
func constructBenchmarkExamples(numExamples int, numFeatures int, threshold float64) Examples {
	glog.Info("Num examples: ", numExamples)
	result := make([]*pb.Example, 0, numExamples)
	for i := 0; i < numExamples; i++ {
		example := &pb.Example{
			Features: make([]float64, numFeatures),
		}
		sum := 0.0
		for j := 0; j < numFeatures; j++ {
			sample := rand.NormFloat64()
			sum += sample
			example.Features[j] = sample
		}
		if sum < threshold {
			example.Label = proto.Float64(1.0)
		} else {
			example.Label = proto.Float64(-1.0)
		}
		result = append(result, example)
	}
	return result
}
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *Parser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// infamous special case of a summary, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value:    proto.Float64(value),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}
func prepareBuf(b *testing.B) *proto.Buffer {
	buf := proto.NewBuffer(make([]byte, 0, 4096))
	v := &SampleValueSeries{Value: make([]*SampleValueSeries_Value, 0, numSamples)}
	for i := 0; i < numSamples; i++ {
		v.Value = append(v.Value, &SampleValueSeries_Value{
			Timestamp: proto.Int64(rand.Int63()),
			Value:     proto.Float64(rand.NormFloat64()),
		})
	}
	if err := buf.Marshal(v); err != nil {
		b.Fatal(err)
	}
	return buf
}
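// Hedged sketch of how prepareBuf might be exercised: a decode benchmark that
// unmarshals the prepared bytes into a fresh SampleValueSeries on every
// iteration. (BenchmarkUnmarshal is a hypothetical name, not necessarily part
// of the original suite.)
func BenchmarkUnmarshal(b *testing.B) {
	buf := prepareBuf(b)
	data := buf.Bytes()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		out := &SampleValueSeries{}
		if err := proto.Unmarshal(data, out); err != nil {
			b.Fatal(err)
		}
	}
}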
// ModifyLease modifies the lease of a task.
// Used to request more processing time, or to abandon processing.
// leaseTime is in seconds and must not be negative.
func ModifyLease(c appengine.Context, task *Task, queueName string, leaseTime int) error {
	req := &taskqueue_proto.TaskQueueModifyTaskLeaseRequest{
		QueueName:    []byte(queueName),
		TaskName:     []byte(task.Name),
		EtaUsec:      proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
		LeaseSeconds: proto.Float64(float64(leaseTime)),
	}
	res := &taskqueue_proto.TaskQueueModifyTaskLeaseResponse{}
	if err := c.Call("taskqueue", "ModifyTaskLease", req, res, nil); err != nil {
		return err
	}
	task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
	return nil
}
func eventToPbEvent(event *Event) (*proto.Event, error) {
	var e proto.Event

	if event.Host == "" {
		event.Host, _ = os.Hostname()
	}
	t := reflect.ValueOf(&e).Elem()
	s := reflect.ValueOf(event).Elem()
	typeOfEvent := s.Type()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		value := reflect.ValueOf(f.Interface())
		if reflect.Zero(f.Type()) != value && f.Interface() != nil {
			name := typeOfEvent.Field(i).Name
			switch name {
			case "State", "Service", "Host", "Description":
				tmp := reflect.ValueOf(pb.String(value.String()))
				t.FieldByName(name).Set(tmp)
			case "Ttl":
				tmp := reflect.ValueOf(pb.Float32(float32(value.Float())))
				t.FieldByName(name).Set(tmp)
			case "Time":
				tmp := reflect.ValueOf(pb.Int64(value.Int()))
				t.FieldByName(name).Set(tmp)
			case "Tags":
				tmp := reflect.ValueOf(value.Interface().([]string))
				t.FieldByName(name).Set(tmp)
			case "Metric":
				switch reflect.TypeOf(f.Interface()).Kind() {
				case reflect.Int:
					tmp := reflect.ValueOf(pb.Int64(int64(value.Int())))
					t.FieldByName("MetricSint64").Set(tmp)
				case reflect.Float32:
					tmp := reflect.ValueOf(pb.Float32(float32(value.Float())))
					t.FieldByName("MetricF").Set(tmp)
				case reflect.Float64:
					tmp := reflect.ValueOf(pb.Float64(value.Float()))
					t.FieldByName("MetricD").Set(tmp)
				default:
					return nil, fmt.Errorf("Metric of invalid type (type %v)", reflect.TypeOf(f.Interface()).Kind())
				}
			}
		}
	}
	return &e, nil
}
//message TransactionValue {
//	optional int64 int_value = 1;
//	optional bool bool_value = 2;
//	optional double double_value = 3;
//	optional string string_value = 4;
//	optional bytes bytes_value = 5;
//	optional TransactionCollection array = 6;
//	optional TransactionCollection map = 7;
//}
func toTransactionValue(o interface{}) *TransactionValue {
	// TODO: do it by reflection to catch map and arrays
	switch o.(type) {
	case *TransactionValue:
		return o.(*TransactionValue)
	case string:
		return &TransactionValue{
			StringValue: pb.String(o.(string)),
		}
	case int:
		return &TransactionValue{
			IntValue: pb.Int64(int64(o.(int))),
		}
	case int64:
		return &TransactionValue{
			IntValue: pb.Int64(o.(int64)),
		}
	case bool:
		return &TransactionValue{
			BoolValue: pb.Bool(o.(bool)),
		}
	case float32:
		return &TransactionValue{
			DoubleValue: pb.Float64(float64(o.(float32))),
		}
	case float64:
		return &TransactionValue{
			DoubleValue: pb.Float64(o.(float64)),
		}
	case nrv.Map, map[string]interface{}:
		var mp map[string]interface{}
		if val, ok := o.(map[string]interface{}); ok {
			mp = val
		} else {
			mp = map[string]interface{}(o.(nrv.Map))
		}
		values := make([]*TransactionCollectionValue, len(mp))
		i := 0
		for k, v := range mp {
			values[i] = &TransactionCollectionValue{
				Key:   pb.String(k),
				Value: toTransactionValue(v),
			}
			i++
		}
		return &TransactionValue{
			Map: &TransactionCollection{
				Values: values,
			},
		}
	case nrv.Array, []interface{}:
		var ar []interface{}
		if val, ok := o.([]interface{}); ok {
			ar = val
		} else {
			ar = []interface{}(o.(nrv.Array))
		}
		values := make([]*TransactionCollectionValue, len(ar))
		for i, v := range ar {
			values[i] = &TransactionCollectionValue{
				Value: toTransactionValue(v),
			}
		}
		return &TransactionValue{
			Array: &TransactionCollection{
				Values: values,
			},
		}
	case nil:
		return &TransactionValue{}
	}
	panic(fmt.Sprintf("Value not supported: %s", reflect.TypeOf(o)))
}
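// Hypothetical usage sketch for toTransactionValue (not from the original
// code): wrap a nested Go map and read one encoded string back out. The
// GetMap/GetValues/GetKey/GetValue/GetStringValue calls assume the standard
// generated getters for the message fields commented above.
func exampleTransactionValue() string {
	tv := toTransactionValue(map[string]interface{}{
		"name":  "alice",
		"score": 12.5,
	})
	for _, kv := range tv.GetMap().GetValues() {
		if kv.GetKey() == "name" {
			return kv.GetValue().GetStringValue()
		}
	}
	return ""
}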