// Utility method for loading data in for tests func (m *MockConnectionPool) LoadCF(cf string, dump CFDump) { rows := []*Row{} t := thrift.Int64Ptr(now()) for key, columns := range dump { cols := Columns{} for name, value := range columns { cols = append(cols, &Column{ Name: []byte(name), Value: value, Timestamp: t, }) } sort.Sort(cols) row := &Row{ Key: []byte(key), Columns: cols, } // Insert row in sorted order i := sort.Search(len(rows), func(i int) bool { return bytes.Compare(rows[i].Key, row.Key) >= 0 }) rows = append(rows, row) copy(rows[i+1:], rows[i:]) rows[i] = row } m.Data[cf] = rows }
func (w *writer) Delete(cf string, key []byte) Writer { tm := w.addWriter(cf, key) d := cassandra.NewDeletion() d.Timestamp = thrift.Int64Ptr(now()) tm.Deletion = d return w }
func toThrift(s *trace.Span) *zipkincore.Span { span := &zipkincore.Span{ TraceID: toInt64(s.TraceId), Name: s.Name, ID: toInt64(s.Id), ParentID: thrift.Int64Ptr(toInt64(s.ParentId)), Debug: s.Debug, Timestamp: thrift.Int64Ptr(s.Timestamp.UnixNano() / 1e3), Duration: thrift.Int64Ptr(s.Duration.Nanoseconds() / 1e3), } for _, a := range s.Annotations { if len(a.Value) > 0 || a.Debug != nil { span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{ Key: a.Key, Value: a.Value, AnnotationType: zipkincore.AnnotationType_BYTES, Host: toEndpoint(a.Service), }) } else { var val string switch a.Type { case trace.AnnClientRequest: val = zipkincore.CLIENT_SEND case trace.AnnClientResponse: val = zipkincore.CLIENT_RECV case trace.AnnServerRequest: val = zipkincore.SERVER_SEND case trace.AnnServerResponse: val = zipkincore.SERVER_RECV default: val = a.Key } if len(val) == 0 { continue } span.Annotations = append(span.Annotations, &zipkincore.Annotation{ Timestamp: a.Timestamp.UnixNano() / 1e3, Value: val, Host: toEndpoint(a.Service), }) } } return span }
// InsertTtl inserts row into the mock column family cf, applying ttl
// (seconds) to every column when ttl > 0. Columns without an explicit
// timestamp are stamped with the current time. If the row key already
// exists, columns are merged one by one with the greater timestamp
// winning; otherwise the row is inserted in sorted key order. Returns
// the Writer for chaining.
func (w *MockWriter) InsertTtl(cf string, row *Row, ttl int) Writer {
	rows := w.pool.Rows(cf)
	t := thrift.Int64Ptr(now())
	for _, c := range row.Columns {
		if c.Timestamp == nil {
			c.Timestamp = t
		}
		if ttl > 0 {
			c.Ttl = thrift.Int32Ptr(int32(ttl))
		}
		if c.Ttl != nil {
			// reset to the actual time to expire
			// (now()/1e6 converts the pool's clock to seconds, so Ttl
			// becomes an absolute expiry time rather than a duration)
			c.Ttl = thrift.Int32Ptr(int32(now()/1e6) + *c.Ttl)
		}
	}
	// Binary-search for the row's sorted position by key.
	i := sort.Search(len(rows), func(i int) bool {
		return bytes.Compare(rows[i].Key, row.Key) >= 0
	})
	if i < len(rows) && bytes.Equal(rows[i].Key, row.Key) {
		// Row already exists, merge the columns
		e := rows[i]
		checkExpired(e)
		cols := e.Columns
		for _, c := range row.Columns {
			// Binary-search for the column's sorted position by name.
			j := sort.Search(len(cols), func(j int) bool {
				return bytes.Compare(cols[j].Name, c.Name) >= 0
			})
			if j < len(cols) && bytes.Equal(cols[j].Name, c.Name) {
				// Column already exists, pick the one with the greater timestamp
				ec := cols[j]
				// NOTE(review): et falls back to the current write time when
				// ec is nil, but sort.Search + bytes.Equal should guarantee a
				// non-nil column here — presumably defensive; confirm whether
				// nil entries can ever appear in Columns.
				et := *t
				if ec != nil {
					et = *ec.Timestamp
				}
				if *c.Timestamp >= et {
					ec.Value = c.Value
					ec.Ttl = c.Ttl
					ec.Timestamp = c.Timestamp
				}
			} else {
				// New column, insert sorted
				cols = append(cols, c)
				copy(cols[j+1:], cols[j:])
				cols[j] = c
			}
		}
		e.Columns = cols
	} else {
		// New row, insert sorted
		sort.Sort(Columns(row.Columns))
		rows = append(rows, row)
		copy(rows[i+1:], rows[i:])
		rows[i] = row
		// Only the new-row branch must write back: the merge branch mutates
		// the existing *Row in place, so the stored slice is already current.
		w.pool.Data[cf] = rows
	}
	return w
}
func (w *writer) DeleteColumns(cf string, key []byte, columns [][]byte) Writer { tm := w.addWriter(cf, key) d := cassandra.NewDeletion() d.Timestamp = thrift.Int64Ptr(now()) sp := cassandra.NewSlicePredicate() sp.ColumnNames = columns d.Predicate = sp tm.Deletion = d return w }
// TestCassandraRestart verifies that ConnectionPool keeps working across a
// Cassandra server restart: it primes a reader pool and a writer pool, then
// restarts the local Cassandra service via sudo and exercises both pools
// again. Skipped unless the -cassandra-restart flag is passed, since it has
// heavy external side effects.
func TestCassandraRestart(t *testing.T) {
	if !*cassandraRestart {
		t.Skipf("use -cassandra-restart to ensure ConnectionPool tolerates Cassandra restarting")
	}
	// Single-connection pools so each of the read and write paths exercises
	// exactly one connection through the restart.
	rPool, err := NewConnectionPool(
		localEndpointPool,
		keyspace,
		PoolOptions{Size: 1, Timeout: standardTimeout},
	)
	if err != nil {
		t.Fatal(err)
	}
	wPool, err := NewConnectionPool(
		localEndpointPool,
		keyspace,
		PoolOptions{Size: 1, Timeout: standardTimeout},
	)
	if err != nil {
		t.Fatal(err)
	}
	row := &Row{
		Key: []byte("test"),
		Columns: []*cassandra.Column{
			&cassandra.Column{
				Name:      []byte("test"),
				Value:     []byte("test"),
				Timestamp: thrift.Int64Ptr(time.Now().UnixNano()),
			},
		},
	}
	// Prime both pools with a live connection before the restart.
	if _, err := rPool.Reader().Cf("AllTypes").Get([]byte("test")); err != nil {
		t.Fatal(err)
	}
	if err := wPool.Writer().Insert("AllTypes", row).Run(); err != nil {
		t.Fatal(err)
	}
	var argv []string
	// NOTE(review): this condition is hard-coded to false, so the launchctl
	// (macOS) variant is dead code and the init.d path is always taken.
	// Presumably this was once a platform check (e.g. runtime.GOOS ==
	// "darwin") — confirm and either restore the check or drop the branch.
	if false {
		argv = []string{"sh", "-c", "launchctl unload /Library/LaunchDaemons/org.apache.cassandra.plist; sudo launchctl load /Library/LaunchDaemons/org.apache.cassandra.plist"}
	} else {
		argv = []string{"/etc/init.d/cassandra", "restart"}
	}
	log.Println("restarting Cassandra...")
	if err := exec.Command("sudo", argv...).Run(); err != nil {
		t.Fatal(err)
	}
	log.Println("restarted Cassandra; sleeping...")
	// Give Cassandra time to come back up before reusing the pools.
	time.Sleep(10 * time.Second)
	// Both pools must transparently reconnect after the restart.
	if _, err := rPool.Reader().Cf("AllTypes").Get([]byte("test")); err != nil {
		t.Fatal(err)
	}
	if err := wPool.Writer().Insert("AllTypes", row).Run(); err != nil {
		t.Fatal(err)
	}
}
// Child creates a span that is a child of t's current span: it shares the
// trace ID, endpoint, and collectors, receives a fresh random span ID, and
// records t's span ID as its parent.
func (t *Trace) Child(name string) *Trace {
	childSpan := &zipkin.Span{
		TraceId:  t.span.TraceId,
		Name:     name,
		ParentId: thrift.Int64Ptr(t.span.Id),
		Id:       randomID(),
	}
	return &Trace{
		Endpoint:   t.Endpoint,
		Collectors: t.Collectors,
		span:       childSpan,
	}
}
func NewTraceForHTTPHeader(traceName string, h http.Header, collectors []SpanCollector) *Trace { var traceID, spanID int64 var parentSpanID *int64 if s := maybeReadID(h.Get("X-B3-TraceId")); s != nil { traceID = int64(*s) } else { traceID = randomID() } if s := maybeReadID(h.Get("X-B3-SpanId")); s != nil { spanID = int64(*s) } else { spanID = randomID() } if s := maybeReadID(h.Get("X-B3-ParentSpanId")); s != nil { parentSpanID = thrift.Int64Ptr(int64(*s)) } return NewTraceForIDs(traceName, traceID, spanID, parentSpanID, collectors) }