func (u *Usercorn) mapBinary(f *os.File, isInterp bool, arch string) (interpBase, entry, base, realEntry uint64, err error) {
	var l models.Loader
	l, err = loader.LoadArch(f, arch)
	if err != nil {
		return
	}
	if isInterp {
		u.interpLoader = l
	}
	var dynamic bool
	switch l.Type() {
	case loader.EXEC:
		dynamic = false
	case loader.DYN:
		dynamic = true
	default:
		err = errors.New("Unsupported file load type.")
		return
	}
	// find segment bounds
	segments, err := l.Segments()
	if err != nil {
		return
	}
	var low, high uint64
	for _, seg := range segments {
		if seg.Addr < low {
			low = seg.Addr
		}
		h := seg.Addr + seg.Size
		if h > high {
			high = h
		}
	}
	// map contiguous binary
	loadBias := u.config.ForceBase
	if isInterp {
		loadBias = u.config.ForceInterpBase
	}
	if dynamic {
		mapLow := low
		if loadBias > 0 {
			mapLow = loadBias
		} else if mapLow == 0 {
			mapLow = 0x1000000
		}
		var mmap *models.Mmap
		mmap, err = u.Mmap(mapLow, high-low)
		if err != nil {
			return
		}
		loadBias = mmap.Addr - low
		if isInterp {
			mmap.Desc = "interp"
		} else {
			mmap.Desc = "exe"
		}
	}
	// merge overlapping segments
	merged := make([]*models.Segment, 0, len(segments))
outer:
	for _, seg := range segments {
		addr, size := align(seg.Addr, seg.Size, true)
		s := &models.Segment{addr, addr + size, seg.Prot}
		for _, s2 := range merged {
			if s2.Overlaps(s) {
				s2.Merge(s)
				continue outer
			}
		}
		merged = append(merged, s)
	}
	// map merged segments
	for _, seg := range merged {
		prot := seg.Prot
		if prot == 0 {
			prot = uc.PROT_ALL
		}
		if !dynamic {
			err = u.MemMapProt(loadBias+seg.Start, seg.End-seg.Start, prot)
			if mmap := u.mapping(loadBias+seg.Start, loadBias+seg.End); mmap != nil {
				mmap.Desc = "exe"
			}
		}
		// register binary for symbolication
		u.RegisterAddr(f, loadBias+seg.Start, seg.End-seg.Start, int64(seg.Start))
		if err != nil {
			return
		}
	}
	// write segment memory
	var data []byte
	for _, seg := range segments {
		if data, err = seg.Data(); err != nil {
			return
		}
		if err = u.MemWrite(loadBias+seg.Addr, data); err != nil {
			return
		}
	}
	entry = loadBias + l.Entry()
	// load interpreter if present
	interp := l.Interp()
	if interp != "" && !isInterp && !u.config.SkipInterp {
		f, err = os.Open(u.PrefixPath(interp, true))
		if err != nil {
			return
		}
		defer f.Close()
		var interpBias, interpEntry uint64
		_, _, interpBias, interpEntry, err = u.mapBinary(f, true, l.Arch())
		if err != nil {
			// bail out before touching u.interpLoader, which may not be set on failure
			return
		}
		if u.interpLoader.Arch() != l.Arch() {
			err = fmt.Errorf("Interpreter arch mismatch: %s != %s", l.Arch(), u.interpLoader.Arch())
			return
		}
		return interpBias, interpEntry, loadBias, entry, err
	} else {
		return 0, entry, loadBias, entry, nil
	}
}
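// mapBinary relies on an align() helper (not shown in this excerpt) to round
// segment addresses down and sizes up to page boundaries before merging.
// The sketch below illustrates the behavior assumed by align(seg.Addr, seg.Size, true);
// the 4KB page size and the name alignSketch are illustrative assumptions,
// not the project's actual helper.
func alignSketch(addr, size uint64, grow bool) (uint64, uint64) {
	const pageMask = uint64(0xfff)
	if grow {
		// round addr down and size up so the page range covers [addr, addr+size)
		off := addr & pageMask
		return addr - off, (size + off + pageMask) &^ pageMask
	}
	// otherwise keep only the pages fully contained in the range
	start := (addr + pageMask) &^ pageMask
	if end := (addr + size) &^ pageMask; end > start {
		return start, end - start
	}
	return start, 0
}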
func NewUsercornRaw(l models.Loader, config *models.Config) (*Usercorn, error) {
	config = config.Init()
	a, OS, err := arch.GetArch(l.Arch(), l.OS())
	if err != nil {
		return nil, err
	}
	unicorn, err := NewUnicorn(a, OS, l.ByteOrder())
	if err != nil {
		return nil, err
	}
	u := &Usercorn{
		Unicorn:       unicorn,
		traceMatching: true,
		config:        config,
		loader:        l,
		exit:          0xffffffffffffffff,
	}
	if config.Output == os.Stderr && readline.IsTerminal(int(os.Stderr.Fd())) {
		config.Color = true
	}
	u.memio = memio.NewMemIO(
		// ReadAt() callback
		func(p []byte, addr uint64) (int, error) {
			if err := u.MemReadInto(p, addr); err != nil {
				return 0, err
			}
			if u.config.TraceMemBatch {
				u.memlog.UpdateBytes(addr, p, false)
			}
			return len(p), nil
		},
		// WriteAt() callback
		func(p []byte, addr uint64) (int, error) {
			if err := u.MemWrite(addr, p); err != nil {
				return 0, err
			}
			if u.config.TraceMemBatch {
				u.memlog.UpdateBytes(addr, p, true)
			}
			return len(p), nil
		},
	)
	// load kernels
	// the array cast is a trick to work around circular imports
	if OS.Kernels != nil {
		kernelI := OS.Kernels(u)
		kernels := make([]co.Kernel, len(kernelI))
		for i, k := range kernelI {
			kernels[i] = k.(co.Kernel)
		}
		u.kernels = kernels
	}
	u.status = models.StatusDiff{U: u}
	if u.config.LoopCollapse > 0 {
		u.blockloop = models.NewLoopDetect(u.config.LoopCollapse)
	}
	// TODO: if we error, should close Usercorn/Unicorn instance?
	// GC might take its time
	if err := u.mapStack(); err != nil {
		return nil, err
	}
	if err := u.addHooks(); err != nil {
		return nil, err
	}
	return u, nil
}
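// For context, a rough sketch of driving this constructor end to end.
// loader.LoadFileArch mirrors the loader entry point used in mapBinary below;
// the "x86_64" arch string, the empty Config (config.Init() above presumably
// fills defaults), and the function name are illustrative assumptions rather
// than the project's actual setup sequence.
func newUsercornSketch(path string) (*Usercorn, error) {
	l, err := loader.LoadFileArch(path, "x86_64") // hypothetical arch choice
	if err != nil {
		return nil, err
	}
	return NewUsercornRaw(l, &models.Config{})
}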
func (u *Usercorn) mapBinary(l models.Loader, isInterp bool) (interpBase, entry, base, realEntry uint64, err error) {
	var dynamic bool
	switch l.Type() {
	case loader.EXEC:
		dynamic = false
	case loader.DYN:
		dynamic = true
	default:
		err = errors.New("Unsupported file load type.")
		return
	}
	segments, err := l.Segments()
	if err != nil {
		return
	}
	// merge overlapping segments
	merged := make([]*models.Segment, 0, len(segments))
outer:
	for _, seg := range segments {
		addr, size := align(seg.Addr, seg.Size, true)
		s := &models.Segment{addr, addr + size, seg.Prot}
		for _, s2 := range merged {
			if s2.Overlaps(s) {
				s2.Merge(s)
				continue outer
			}
		}
		merged = append(merged, s)
	}
	// map merged segments
	loadBias := u.ForceBase
	if isInterp {
		loadBias = u.ForceInterpBase
	}
	for _, seg := range merged {
		size := seg.End - seg.Start
		if dynamic && seg.Start == 0 && loadBias == 0 {
			loadBias, err = u.Mmap(0x1000000, size)
		} else {
			prot := seg.Prot
			if prot == 0 {
				prot = uc.PROT_ALL
			}
			err = u.MemMapProt(loadBias+seg.Start, size, prot)
		}
		if err != nil {
			return
		}
	}
	// write segment memory
	var data []byte
	for _, seg := range segments {
		if data, err = seg.Data(); err != nil {
			return
		}
		if err = u.MemWrite(loadBias+seg.Addr, data); err != nil {
			return
		}
	}
	entry = loadBias + l.Entry()
	// load interpreter if present
	interp := l.Interp()
	if interp != "" && !isInterp {
		var bin models.Loader
		bin, err = loader.LoadFileArch(u.PrefixPath(interp, true), l.Arch())
		if err != nil {
			return
		}
		if bin.Arch() != l.Arch() {
			err = fmt.Errorf("Interpreter arch mismatch: %s != %s", l.Arch(), bin.Arch())
			return
		}
		u.interpLoader = bin
		_, _, interpBias, interpEntry, err := u.mapBinary(bin, true)
		return interpBias, interpEntry, loadBias, entry, err
	} else {
		return 0, entry, loadBias, entry, nil
	}
}
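// Both mapBinary variants above assume models.Segment exposes Overlaps and
// Merge for coalescing page-aligned ranges. A minimal sketch of that behavior,
// assuming half-open ranges and that protections are OR'd together when
// segments are combined; the real models.Segment may differ.
type segmentSketch struct {
	Start, End uint64
	Prot       int
}

// Overlaps reports whether the half-open ranges [Start, End) intersect.
func (s *segmentSketch) Overlaps(o *segmentSketch) bool {
	return s.Start < o.End && o.Start < s.End
}

// Merge expands s to cover o and unions their protection flags.
func (s *segmentSketch) Merge(o *segmentSketch) {
	if o.Start < s.Start {
		s.Start = o.Start
	}
	if o.End > s.End {
		s.End = o.End
	}
	s.Prot |= o.Prot
}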