func prepare() {
	// group objects by type
	byType = make([]bucket, len(d.FTList))
	for i := 0; i < d.NumObjects(); i++ {
		x := read.ObjId(i)
		tid := d.Ft(x).Id
		b := byType[tid]
		b.bytes += d.Size(x)
		b.objects = append(b.objects, x)
		byType[tid] = b
	}

	// compute referrers: ref1 holds the first referrer of each object,
	// ref2 holds further referrers (immediate repeats are skipped).
	ref1 = make([]read.ObjId, d.NumObjects())
	for i := 0; i < d.NumObjects(); i++ {
		ref1[i] = read.ObjNil
	}
	ref2 = map[read.ObjId][]read.ObjId{}
	for i := 0; i < d.NumObjects(); i++ {
		x := read.ObjId(i)
		for _, e := range d.Edges(x) {
			r := ref1[e.To]
			if r == read.ObjNil {
				ref1[e.To] = x
			} else if x != r {
				s := ref2[e.To]
				if len(s) == 0 || x != s[len(s)-1] {
					ref2[e.To] = append(s, x)
				}
			}
		}
	}

	dom()
}
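prepare() keeps the referrer index in two tiers because most objects have exactly one referrer: ref1 stores that single referrer inline, and only objects with additional referrers get an entry in the ref2 map. A minimal sketch of how such an index is read back, using a hypothetical referrersOf helper (not part of the tool; dom() below does the equivalent inline when it builds redges):

// Hypothetical sketch, not part of the original tool: read back the
// two-tier referrer index built by prepare().
func referrersOf(x read.ObjId) []read.ObjId {
	var r []read.ObjId
	if ref1[x] != read.ObjNil {
		r = append(r, ref1[x])   // first referrer, stored inline
		r = append(r, ref2[x]...) // overflow referrers, if any
	}
	return r
}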
func objHandler(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	v := q["id"]
	if len(v) != 1 {
		http.Error(w, "id parameter missing", http.StatusBadRequest)
		return
	}
	id, err := strconv.ParseUint(v[0], 10, 64)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if int(id) >= d.NumObjects() {
		http.Error(w, "object not found", http.StatusNotFound)
		return
	}
	x := read.ObjId(id)

	fld := getFields(d.Contents(x), d.Ft(x).Fields, d.Edges(x))
	if len(fld) > maxFields {
		msg := fmt.Sprintf("<font color=Red>elided for display: %d fields</font>", len(fld)-(maxFields-1))
		fld = fld[:maxFields-1]
		fld = append(fld, Field{msg, "", ""})
	}

	ref := getReferrers(x)
	if len(ref) > maxFields {
		msg := fmt.Sprintf("<font color=Red>elided for display: %d referrers</font>", len(ref)-(maxFields-1))
		ref = ref[:maxFields-1]
		ref = append(ref, msg)
	}

	info := objInfo{
		d.Addr(x),
		typeLink(d.Ft(x)),
		d.Size(x),
		fld,
		ref,
		domsize[x],
	}

	if err := objTemplate.Execute(w, info); err != nil {
		log.Print(err)
	}
}
func main() {
	flag.Parse()
	args := flag.Args()
	var d *read.Dump
	if len(args) == 2 {
		d = read.Read(args[0], args[1])
	} else {
		d = read.Read(args[0], "")
	}

	// eliminate unreachable objects
	// TODO: have reader do this?
	reachable := make([]bool, d.NumObjects())
	var q []read.ObjId
	for _, f := range d.Frames {
		for _, e := range f.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	for _, x := range []*read.Data{d.Data, d.Bss} {
		for _, e := range x.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	for _, r := range d.Otherroots {
		for _, e := range r.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	for _, f := range d.QFinal {
		for _, e := range f.Edges {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}
	for _, g := range d.Goroutines {
		if g.Ctxt != read.ObjNil {
			if !reachable[g.Ctxt] {
				reachable[g.Ctxt] = true
				q = append(q, g.Ctxt)
			}
		}
	}
	for len(q) > 0 {
		x := q[0]
		q = q[1:]
		for _, e := range d.Edges(x) {
			if !reachable[e.To] {
				reachable[e.To] = true
				q = append(q, e.To)
			}
		}
	}

	fmt.Printf("digraph {\n")

	// print object graph
	for i := 0; i < d.NumObjects(); i++ {
		x := read.ObjId(i)
		if !reachable[x] {
			fmt.Printf(" v%d [style=filled fillcolor=gray];\n", x)
		}
		fmt.Printf(" v%d [label=\"%s\\n%d\"];\n", x, d.Ft(x).Name, d.Size(x))
		for _, e := range d.Edges(x) {
			var taillabel, headlabel string
			if e.FieldName != "" {
				taillabel = fmt.Sprintf(" [taillabel=\"%s\"]", e.FieldName)
			} else if e.FromOffset != 0 {
				taillabel = fmt.Sprintf(" [taillabel=\"%d\"]", e.FromOffset)
			}
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" v%d -> v%d%s%s;\n", x, e.To, taillabel, headlabel)
		}
	}

	// goroutines and stacks
	for _, t := range d.Goroutines {
		fmt.Printf(" \"goroutines\" [shape=diamond];\n")
		fmt.Printf(" \"goroutines\" -> f%x_0;\n", t.Bos.Addr)
	}

	// stack frames
	for _, f := range d.Frames {
		fmt.Printf(" f%x_%d [label=\"%s\\n%d\" shape=rectangle];\n", f.Addr, f.Depth, f.Name, len(f.Data))
		if f.Parent != nil {
			fmt.Printf(" f%x_%d -> f%x_%d;\n", f.Addr, f.Depth, f.Parent.Addr, f.Parent.Depth)
		}
		for _, e := range f.Edges {
			if e.To != read.ObjNil {
				var taillabel, headlabel string
				if e.FieldName != "" {
					taillabel = fmt.Sprintf(" [taillabel=\"%s\"]", e.FieldName)
				} else if e.FromOffset != 0 {
					taillabel = fmt.Sprintf(" [taillabel=\"%d\"]", e.FromOffset)
				}
				if e.ToOffset != 0 {
					headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
				}
				fmt.Printf(" f%x_%d -> v%d%s%s;\n", f.Addr, f.Depth, e.To, taillabel, headlabel)
			}
		}
	}

	for _, x := range []*read.Data{d.Data, d.Bss} {
		for _, e := range x.Edges {
			if e.To != read.ObjNil {
				var headlabel string
				if e.ToOffset != 0 {
					headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
				}
				fmt.Printf(" \"%s\" [shape=diamond];\n", e.FieldName)
				fmt.Printf(" \"%s\" -> v%d%s;\n", e.FieldName, e.To, headlabel)
			}
		}
	}

	for _, r := range d.Otherroots {
		for _, e := range r.Edges {
			var headlabel string
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" \"%s\" [shape=diamond];\n", r.Description)
			fmt.Printf(" \"%s\" -> v%d%s;\n", r.Description, e.To, headlabel)
		}
	}

	for _, f := range d.QFinal {
		for _, e := range f.Edges {
			var headlabel string
			if e.ToOffset != 0 {
				headlabel = fmt.Sprintf(" [headlabel=\"%d\"]", e.ToOffset)
			}
			fmt.Printf(" \"queued finalizers\" [shape=diamond];\n")
			fmt.Printf(" \"queued finalizers\" -> v%d%s;\n", e.To, headlabel)
		}
	}

	fmt.Printf("}\n")
}
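The program writes the graph to standard output in Graphviz dot syntax. As a usage illustration only (file names are placeholders and it assumes the Graphviz dot binary is on PATH), a saved graph could be rendered from Go like this:

// Hypothetical sketch: render a previously saved dot file with Graphviz.
package main

import (
	"log"
	"os/exec"
)

func main() {
	// "heap.dot" and "heap.svg" are placeholder file names.
	cmd := exec.Command("dot", "-Tsvg", "-o", "heap.svg", "heap.dot")
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("dot failed: %v\n%s", err, out)
	}
}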
func main() {
	flag.Parse()
	args := flag.Args()
	var outfile string
	if len(args) == 2 {
		d = read.Read(args[0], "")
		outfile = args[1]
	} else {
		d = read.Read(args[0], args[1])
		outfile = args[2]
	}

	// some setup
	usedIds = make(map[uint64]struct{}, 0)
	for _, typ := range d.Types {
		usedIds[typ.Addr] = struct{}{}
	}
	for i := 0; i < d.NumObjects(); i++ {
		usedIds[d.Addr(read.ObjId(i))] = struct{}{}
	}
	stringCache = make(map[string]uint64, 0)
	threadSerialNumbers = make(map[*read.GoRoutine]uint32, 0)
	stackTraceSerialNumbers = make(map[*read.GoRoutine]uint32, 0)

	// std header
	hprof = append(hprof, []byte("JAVA PROFILE 1.0.1\x00")...)
	hprof = append32(hprof, 8) // IDs are 8 bytes (TODO: d.PtrSize?)
	hprof = append32(hprof, 0) // dummy base time, high 32 bits
	hprof = append32(hprof, 0) // dummy base time, low 32 bits

	// fake entries to make java tools happy
	java_lang_class, _ = addLoadClass("java.lang.Class")
	java_lang_classloader, _ = addLoadClass("java.lang.ClassLoader")
	java_lang_object, java_lang_object_ser = addLoadClass("java.lang.Object")
	java_lang_string, _ = addLoadClass("java.lang.String")
	java_lang_objectarray, _ = addLoadClass("Object[]")
	go_class, go_class_ser = addLoadClass("go")
	addLoadClass("bool[]")
	addLoadClass("char[]")
	addLoadClass("float[]")
	addLoadClass("double[]")
	addLoadClass("byte[]")
	addLoadClass("short[]")
	addLoadClass("int[]")
	addLoadClass("long[]")

	addDummyThread() // must come after addLoadClass(java.lang.Object)
	addThreads()

	// the full heap is one big tag
	addHeapDump()

	// write final file to output
	file, err := os.Create(outfile)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := file.Write(hprof); err != nil {
		log.Fatal(err)
	}
	file.Close()
}
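The append32 and appendId helpers are defined elsewhere in the tool. Since the HPROF format is big-endian throughout, they presumably behave roughly like the following sketch (the signatures and bodies here are assumptions, not the tool's actual code):

// Sketch only (assumed implementations): append big-endian values as the
// HPROF format requires. appendId assumes the 8-byte ID size declared in
// the header above.
func append32(b []byte, x uint32) []byte {
	return append(b, byte(x>>24), byte(x>>16), byte(x>>8), byte(x))
}

func appendId(b []byte, x uint64) []byte {
	return append(b, byte(x>>56), byte(x>>48), byte(x>>40), byte(x>>32),
		byte(x>>24), byte(x>>16), byte(x>>8), byte(x))
}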
func addHeapDump() {
	// a few fake class dumps to keep java tools happy
	dump = append(dump, fakeClassDump(java_lang_object, 0)...)
	dump = append(dump, fakeClassDump(java_lang_class, java_lang_object)...)
	dump = append(dump, fakeClassDump(java_lang_classloader, java_lang_object)...)
	dump = append(dump, fakeClassDump(java_lang_string, java_lang_object)...)

	// scratch space for modifying object data
	var data []byte

	// output each object as an instance
	for i := 0; i < d.NumObjects(); i++ {
		x := read.ObjId(i)
		if d.Size(x) >= 8<<32 {
			// file format can't record objects this big. TODO: error/warning? Truncate?
			continue
		}

		// figure out what class to use for this object
		var c uint64
		if d.Ft(x).Typ == nil {
			c = NoPtrClass(d.Size(x))
		} else {
			switch d.Ft(x).Kind {
			case read.TypeKindObject:
				c = StdClass(d.Ft(x).Typ, d.Size(x))
			case read.TypeKindArray:
				c = ArrayClass(d.Ft(x).Typ, d.Size(x))
			case read.TypeKindChan:
				c = ChanClass(d.Ft(x).Typ, d.Size(x))
			// TODO: TypeKindConservative
			default:
				log.Fatal("unhandled kind")
			}
		}

		// make a copy of the object data so we can modify it
		data = append(data[:0], d.Contents(x)...)

		// Any pointers to objects get adjusted to point to the object head.
		for _, e := range d.Edges(x) {
			writePtr(data[e.FromOffset:], d.Addr(e.To))
		}

		// convert to big-endian representation
		if c == bigNoPtrArray {
			for i := uint64(0); i < uint64(len(data)); i += 8 {
				bigEndian8(data[i:])
			}
		} else if c == bigPtrArray {
			for i := uint64(0); i < uint64(len(data)); i += d.PtrSize {
				bigEndianP(data[i:])
			}
		} else {
			off := uint64(0)
			for _, f := range javaFields[c] {
				switch f.kind {
				case T_CLASS:
					bigEndianP(data[off:])
					off += d.PtrSize
				case T_BOOLEAN:
					off++
				case T_FLOAT:
					bigEndian4(data[off:])
					off += 4
				case T_DOUBLE:
					bigEndian8(data[off:])
					off += 8
				case T_BYTE:
					off++
				case T_SHORT:
					bigEndian2(data[off:])
					off += 2
				case T_INT:
					bigEndian4(data[off:])
					off += 4
				case T_LONG:
					bigEndian8(data[off:])
					off += 8
				default:
					log.Fatalf("bad type %d\n", f.kind)
				}
			}
		}

		// dump object header
		if c == bigNoPtrArray {
			dump = append(dump, HPROF_GC_PRIM_ARRAY_DUMP)
			dump = appendId(dump, d.Addr(x))
			dump = append32(dump, stack_trace_serial_number)
			dump = append32(dump, uint32(d.Size(x)/8))
			dump = append(dump, T_LONG)
		} else if c == bigPtrArray {
			dump = append(dump, HPROF_GC_OBJ_ARRAY_DUMP)
			dump = appendId(dump, d.Addr(x))
			dump = append32(dump, stack_trace_serial_number)
			dump = append32(dump, uint32(d.Size(x)/8))
			dump = appendId(dump, java_lang_objectarray)
		} else {
			dump = append(dump, HPROF_GC_INSTANCE_DUMP)
			dump = appendId(dump, d.Addr(x))
			dump = append32(dump, stack_trace_serial_number)
			dump = appendId(dump, c)
			dump = append32(dump, uint32(d.Size(x)))
		}

		// dump object data
		dump = append(dump, data...)
	}

	// output threads
	for _, t := range d.Goroutines {
		dump = append(dump, HPROF_GC_ROOT_THREAD_OBJ)
		dump = appendId(dump, t.Addr)
		dump = append32(dump, threadSerialNumbers[t])
		dump = append32(dump, stackTraceSerialNumbers[t])
	}

	// stack roots
	for _, t := range d.Goroutines {
		for f := t.Bos; f != nil; f = f.Parent {
			for _, e := range f.Edges {
				// we make one "thread" per field, because the roots
				// get identified by "thread" in jhat.
				id := newId()      // id of thread object
				cid := newId()     // id of class of thread object
				tid := newSerial() // thread serial number

				// this is the class of the thread object. Its name
				// is what gets displayed with the root entry.
				addClass(cid, 0, f.Name+"."+e.FieldName, nil)

				// new thread object
				dump = append(dump, HPROF_GC_INSTANCE_DUMP)
				dump = appendId(dump, id)
				dump = append32(dump, stack_trace_serial_number)
				dump = appendId(dump, cid)
				dump = append32(dump, 0) // no data

				// mark it as a thread
				dump = append(dump, HPROF_GC_ROOT_THREAD_OBJ)
				dump = appendId(dump, id)
				dump = append32(dump, tid)
				dump = append32(dump, stack_trace_serial_number)

				// finally, make root come from this thread
				dump = append(dump, HPROF_GC_ROOT_JAVA_FRAME)
				dump = appendId(dump, d.Addr(e.To))
				dump = append32(dump, tid)
				dump = append32(dump, 0) // depth
			}
		}
	}

	// data roots
	for _, x := range []*read.Data{d.Data, d.Bss} {
		// adjust edges to point to object beginnings
		for _, e := range x.Edges {
			writePtr(x.Data[e.FromOffset:], d.Addr(e.To))
		}
		for _, f := range x.Fields {
			addGlobal(f.Name, f.Kind, x.Data[f.Offset:])
		}
	}
	for _, t := range d.Otherroots {
		for _, e := range t.Edges {
			dump = append(dump, HPROF_GC_ROOT_UNKNOWN)
			dump = appendId(dump, d.Addr(e.To))
		}
	}

	addTag(HPROF_HEAP_DUMP, dump)
}
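The bigEndian2/4/8 and bigEndianP calls in addHeapDump byte-swap individual fields in place so the copied object data matches HPROF's big-endian layout. As a rough illustration only (assuming the dump being converted is little-endian; the tool's real helpers may also consult the dump's recorded byte order and pointer size), bigEndian8 could look like:

// Sketch only (uses encoding/binary, assumes a little-endian source dump):
// rewrite an 8-byte field in place in the big-endian layout HPROF expects.
func bigEndian8(b []byte) {
	binary.BigEndian.PutUint64(b, binary.LittleEndian.Uint64(b))
}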
func dom() {
	fmt.Println("Computing dominators...")
	n := d.NumObjects()

	// make list of roots
	// TODO: have loader compute this?
	roots := map[read.ObjId]struct{}{}
	for _, s := range []*read.Data{d.Data, d.Bss} {
		for _, e := range s.Edges {
			roots[e.To] = struct{}{}
		}
	}
	for _, f := range d.Frames {
		for _, e := range f.Edges {
			roots[e.To] = struct{}{}
		}
	}
	for _, x := range d.Otherroots {
		for _, e := range x.Edges {
			roots[e.To] = struct{}{}
		}
	}

	// compute postorder traversal
	// object states:
	//   0 - not seen yet
	//   1 - seen, added to queue, not yet expanded children
	//   2 - seen, already expanded children
	//   3 - added to postorder
	postorder := make([]read.ObjId, 0, n)
	postnum := make([]int, n+1)
	state := make([]byte, n)
	var q []read.ObjId // stack of work to do, holds state 1 and 2 objects
	for x := range roots {
		if state[x] != 0 {
			if state[x] != 3 {
				log.Fatal("bad state found")
			}
			continue
		}
		state[x] = 1
		q = q[:0]
		q = append(q, x)
		for len(q) > 0 {
			y := q[len(q)-1]
			if state[y] == 2 {
				state[y] = 3
				q = q[:len(q)-1]
				postnum[y] = len(postorder)
				postorder = append(postorder, y)
			} else {
				if state[y] != 1 {
					log.Fatal("bad state")
				}
				state[y] = 2
				for _, e := range d.Edges(y) {
					z := e.To
					if state[z] == 0 {
						state[z] = 1
						q = append(q, z)
					}
				}
			}
		}
	}
	postnum[n] = n // virtual start node

	// compute immediate dominators
	// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
	idom := make([]read.ObjId, n+1)
	for i := 0; i < n; i++ {
		idom[i] = read.ObjNil
	}
	idom[n] = read.ObjId(n)
	for r := range roots {
		idom[r] = read.ObjId(n)
	}
	var redges []read.ObjId
	change := true
	for change {
		change = false
		for i := len(postorder) - 1; i >= 0; i-- {
			x := postorder[i]
			// get list of incoming edges
			redges = redges[:0]
			if ref1[x] != read.ObjNil {
				redges = append(redges, ref1[x])
				redges = append(redges, ref2[x]...)
			}
			a := read.ObjNil
			for _, b := range redges {
				if idom[b] == read.ObjNil {
					continue
				}
				if a == read.ObjNil {
					a = b
					continue
				}
				for a != b {
					if postnum[a] < postnum[b] {
						a = idom[a]
					} else {
						b = idom[b]
					}
				}
			}
			if _, ok := roots[x]; ok {
				a = read.ObjId(n)
			}
			if a != idom[x] {
				idom[x] = a
				change = true
			}
		}
	}

	domsize = make([]uint64, n+1)
	for _, x := range postorder {
		domsize[x] += d.Size(x)
		domsize[idom[x]] += domsize[x]
	}
	// Note: unreachable objects will have domsize of 0.
}
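The inner loop that walks a and b up the dominator tree is the "intersect" step of the Cooper/Harvey/Kennedy algorithm cited above. Factored out on its own (a sketch for clarity, not part of the original code), it reads:

// Sketch: the intersection step of the Cooper/Harvey/Kennedy dominator
// algorithm, written as a standalone function over the same postnum and
// idom arrays that dom() maintains. It returns the nearest common
// dominator of a and b.
func intersect(a, b read.ObjId, postnum []int, idom []read.ObjId) read.ObjId {
	for a != b {
		if postnum[a] < postnum[b] {
			a = idom[a]
		} else {
			b = idom[b]
		}
	}
	return a
}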