/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}
			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		case gc.OINDREG:
			// nothing
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
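// A standalone sketch of the arithmetic in the OLITERAL arm above: a 64-bit
// constant splits into an unsigned low half and a high half whose signedness
// follows the original type. splitConst is a hypothetical helper, not part
// of the backend.
package main

import "fmt"

func splitConst(i int64, signed bool) (lo uint32, hi int64) {
	lo = uint32(i) // low 32 bits, always treated as unsigned
	i >>= 32
	if signed {
		return lo, int64(int32(i)) // TINT64: high half keeps the sign
	}
	return lo, int64(uint32(i)) // TUINT64: high half is unsigned
}

func main() {
	lo, hi := splitConst(-1, true)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xffffffff hi=-1
}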
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))

	// arm64 requires that the frame size (not counting saved LR)
	// be zero or be 8 mod 16. If not, pad it.
	if frame != 0 && frame%16 != 8 {
		frame += 8
	}

	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}
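// A minimal, runnable sketch of the padding rule above: the saved LR occupies
// 8 bytes, so keeping a nonzero frame at 8 mod 16 makes frame+8 a multiple of
// 16, the arm64 stack alignment (padFrame is an illustrative helper, not
// backend code).
package main

import "fmt"

func padFrame(frame uint32) uint32 {
	if frame != 0 && frame%16 != 8 {
		frame += 8 // frame is already 8-byte aligned, so one extra slot suffices
	}
	return frame
}

func main() {
	for _, f := range []uint32{0, 8, 16, 24} {
		fmt.Printf("%d -> %d\n", f, padFrame(f)) // 0->0, 8->8, 16->24, 24->24
	}
}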
// generate one instruction:
//	as f, t
func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
	// self move check
	// TODO(mundaym): use sized math and extend to MOVB, MOVWZ etc.
	switch as {
	case s390x.AMOVD, s390x.AFMOVS, s390x.AFMOVD:
		if f != nil && t != nil &&
			f.Op == gc.OREGISTER && t.Op == gc.OREGISTER &&
			f.Reg == t.Reg {
			return nil
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	// Bad things the front end has done to us. Crash to find call stack.
	case s390x.ACMP, s390x.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case s390x.AMOVB, s390x.AMOVBZ:
		w = 1

	case s390x.AMOVH, s390x.AMOVHZ:
		w = 2

	case s390x.AMOVW, s390x.AMOVWZ:
		w = 4

	case s390x.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	// Bad things the front end has done to us. Crash to find call stack.
	case s390x.AAND, s390x.AMULLD:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

	case s390x.ACMP, s390x.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case s390x.AMOVB, s390x.AMOVBZ:
		w = 1

	case s390x.AMOVH, s390x.AMOVHZ:
		w = 2

	case s390x.AMOVW, s390x.AMOVWZ:
		w = 4

	case s390x.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", n.Op)
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}
func defframe(ptxt *obj.Prog) {
	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	ax := uint32(0)
	x0 := uint32(0)

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for _, n := range gc.Curfn.Func.Dcl {
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &ax, &x0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &ax, &x0)
}
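// The merging loop above fuses nearby variables into one zeroed span when the
// gap is at most 2*Widthreg, so a short run of stores (or one duffzero) can
// cover them all. A standalone model of just that decision; span, mergeRanges,
// and the offsets are illustrative, not backend types.
package main

import "fmt"

type span struct{ off, width int64 }

func mergeRanges(spans []span, widthreg int64) [][2]int64 {
	var out [][2]int64
	lo, hi := int64(0), int64(0)
	for _, s := range spans { // visited in decreasing offset order, like Dcl
		if lo != hi && s.off+s.width >= lo-2*widthreg {
			lo = s.off // close enough: extend the current range downward
			continue
		}
		if lo != hi {
			out = append(out, [2]int64{lo, hi}) // "zero old range"
		}
		hi = s.off + s.width // "set new range"
		lo = s.off
	}
	if lo != hi {
		out = append(out, [2]int64{lo, hi}) // "zero final range"
	}
	return out
}

func main() {
	spans := []span{{64, 8}, {48, 8}, {0, 8}}
	fmt.Println(mergeRanges(spans, 8)) // [[48 72] [0 8]]
}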
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	r0 := uint32(0)
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As&obj.AMask]
	if info.Flags == 0 {
		gc.Fatalf("unknown instruction %v", p)
	}

	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
		info.Reguse |= CX
	}

	if info.Flags&gc.ImulAXDX != 0 {
		if p.To.Type == obj.TYPE_NONE {
			info.Reguse |= AX
			info.Regset |= AX | DX
		} else {
			info.Flags |= RightRdwr
		}
	}

	// Addressing makes some registers used.
	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.From.Reg))
	}
	if p.From.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.From.Index))
	}
	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.To.Reg))
	}
	if p.To.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.To.Index))
	}
}
/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}
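// Why ginscon2 falls back to a register: CMP takes a signed 16-bit immediate
// and CMPU an unsigned one, so only small constants can be encoded directly.
// A sketch of the two range tests; the bound is assumed to match ppc64.BIG
// (32768 - 8) from cmd/internal/obj/ppc64, reproduced here as an assumption.
package main

import "fmt"

const big = 32768 - 8 // assumed value of ppc64.BIG

func fitsCMP(c int64) bool  { return -big <= c && c <= big }
func fitsCMPU(c int64) bool { return 0 <= c && c <= 2*big }

func main() {
	fmt.Println(fitsCMP(-40000)) // false: must be materialized in a register
	fmt.Println(fitsCMPU(50000)) // true: fits CMPU's wider unsigned range
}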
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatalf("unknown instruction %v", p)
	}

	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.CanRegRead | gc.RightRead
	}

	if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
		info.Flags |= gc.RightRead
	}

	switch p.As {
	case arm.ADIV, arm.ADIVU, arm.AMOD, arm.AMODU:
		info.Regset |= RtoB(arm.REG_R12)
	}
}
// RightShiftWithCarry generates a constant unsigned
// right shift with carry.
//
// res = n >> shift // with carry
func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) {
	// Extra 1 is for carry bit.
	maxshift := uint(n.Type.Width*8 + 1)
	if shift == 0 {
		gmove(n, res)
	} else if shift < maxshift {
		// 1. clear rightmost bit of target
		var n1 gc.Node
		gc.Nodconst(&n1, n.Type, 1)
		gins(optoas(gc.ORSH, n.Type), &n1, n)
		gins(optoas(gc.OLSH, n.Type), &n1, n)
		// 2. add carry flag to target
		var n2 gc.Node
		gc.Nodconst(&n1, n.Type, 0)
		gc.Regalloc(&n2, n.Type, nil)
		gins(optoas(gc.OAS, n.Type), &n1, &n2)
		gins(arm64.AADC, &n2, n)
		// 3. right rotate 1 bit
		gc.Nodconst(&n1, n.Type, 1)
		gins(arm64.AROR, &n1, n)

		// The ARM64 backend doesn't eliminate shifts by 0, so emit the
		// remaining shift only when it is nonzero.
		if shift > 1 {
			var n3 gc.Node
			gc.Nodconst(&n3, n.Type, int64(shift-1))
			cgen_shift(gc.ORSH, true, n, &n3, res)
		} else {
			gmove(n, res)
		}
		gc.Regfree(&n2)
	} else {
		gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift)
	}
}
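// The three steps above amount to a 65-bit shift: the carry flag becomes the
// top bit of the result. A runnable sketch of the value computed, assuming a
// 64-bit operand; shiftWithCarry is a hypothetical stand-in for what the
// backend does with ADC and ROR.
package main

import "fmt"

func shiftWithCarry(n, carry uint64, shift uint) uint64 {
	if shift == 0 {
		return n
	}
	return n>>shift | carry<<(64-shift) // carry fills the vacated top bit(s)
}

func main() {
	// One natural use: halving a sum whose carry-out would otherwise be lost.
	a, b := uint64(1)<<63, uint64(1)<<63
	sum := a + b                           // wraps to 0; the carry-out is 1
	fmt.Println(shiftWithCarry(sum, 1, 1)) // 9223372036854775808 = 2^63
}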
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as obj.As) int {
	for i, v := range varianttable[as&obj.AMask] {
		if v&obj.AMask == as&obj.AMask {
			return i
		}
	}
	gc.Fatalf("as2variant: instruction %v is not a variant of itself", as&obj.AMask)
	return 0
}
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}
func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatalf("bad operands to gcmp: %v %v", lhs.Op, rhs.Op)
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}
func splitclean() {
	if nsclean <= 0 {
		gc.Fatalf("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}
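// split64 and splitclean form a small LIFO protocol: each split64 pushes a
// cleanup slot (OEMPTY when no temporary was allocated) and each splitclean
// pops one, freeing any temporary. A standalone model of that discipline;
// the bool slots and needsTemp flag are illustrative, not backend types.
package main

import "fmt"

var (
	sclean  [2]bool // true = temporary to free; depth mirrors the backend's array
	nsclean int
)

func split64(needsTemp bool) {
	if nsclean >= len(sclean) {
		panic("split64 clean") // same depth check as the backend
	}
	sclean[nsclean] = needsTemp
	nsclean++
}

func splitclean() {
	if nsclean <= 0 {
		panic("splitclean")
	}
	nsclean--
	if sclean[nsclean] {
		fmt.Println("freeing temporary") // stands in for gc.Regfree
	}
}

func main() {
	split64(true)  // operand needed an Igen temporary
	split64(false) // operand was directly addressable
	splitclean()   // prints nothing
	splitclean()   // prints "freeing temporary"
}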
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as int) int {
	initvariants()
	for i := 0; i < len(varianttable[as]); i++ {
		if varianttable[as][i] == as {
			return i
		}
	}
	gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
	return 0
}
/*
 * generate high multiply:
 *	res = (nl * nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := t.Width * 8
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16, gc.TINT32:
		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &n1)
		p := gins(mips.ASRAV, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TUINT8, gc.TUINT16, gc.TUINT32:
		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &n1)
		p := gins(mips.ASRLV, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TINT64, gc.TUINT64:
		if t.IsSigned() {
			gins3(mips.AMULV, &n2, &n1, nil)
		} else {
			gins3(mips.AMULVU, &n2, &n1, nil)
		}
		var hi gc.Node
		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
		gins(mips.AMOVV, &hi, &n1)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * direct reference,
 * could be set/use depending on
 * semantics
 */
func copyas(a *obj.Addr, v *obj.Addr) bool {
	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
		gc.Fatalf("use of byte register")
	}
	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
		gc.Fatalf("use of byte register")
	}

	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
		return false
	}
	if regtyp(v) {
		return true
	}
	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
		if v.Offset == a.Offset {
			return true
		}
	}
	return false
}
/*
 * generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatalf("bad shift value: %d", sval)
	}

	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}
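// What gshift packs into From.Offset mirrors the ARM shifted-operand
// encoding: bits 0-3 name the source register, bits 5-6 select the shift
// type (the arm package's SHIFT_* constants are pre-shifted into place),
// and bits 7-11 hold the amount, with 32 encoded as 0. A standalone sketch
// of the encoding arithmetic only; shiftLL stands in for arm.SHIFT_LL.
package main

import "fmt"

const shiftLL = 0 << 5 // mirrors arm.SHIFT_LL; 1<<5=LR, 2<<5=AR, 3<<5=RR

func shiftOffset(stype int64, sval int32, reg int64) int64 {
	sval &= 0x1f // a shift by 32 wraps to 0, per the encoding
	return stype | int64(sval)<<7 | reg&15
}

func main() {
	fmt.Printf("%#x\n", shiftOffset(shiftLL, 4, 1)) // 0x201: R1 << 4
}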
func proginfo(p *obj.Prog) gc.ProgInfo {
	info := progtable[p.As&obj.AMask]
	if info.Flags == 0 {
		gc.Fatalf("unknown instruction %v", p)
	}

	if info.Flags&gc.ImulAXDX != 0 && p.To.Type != obj.TYPE_NONE {
		info.Flags |= RightRdwr
	}

	return info
}
/*
 * generate high multiply
 * res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8, gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32, gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg

		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
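// Every cgen_hmul variant in this collection computes the same value: the
// high word of a widened product. For the 32-bit case above, that value is
// simply the following (hmul32 is an illustrative helper, not backend code):
package main

import "fmt"

func hmul32(a, b int32) int32 {
	return int32((int64(a) * int64(b)) >> 32) // what MULL leaves in its high result register
}

func main() {
	fmt.Println(hmul32(1<<30, 6)) // (2^30*6)>>32 = 1
}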
/*
 * generate high multiply:
 *	res = (nl * nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16, gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(arm64.AASR, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8, gc.TUINT16, gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(arm64.ALSR, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64, gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(arm64.ASMULH, &n2, &n1)
		} else {
			gins(arm64.AUMULH, &n2, &n1)
		}

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatalf("unknown instruction %v", p)
	}

	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
		info.Reguse |= CX
	}

	if info.Flags&gc.ImulAXDX != 0 {
		if p.To.Type == obj.TYPE_NONE {
			info.Reguse |= AX
			info.Regset |= AX | DX
		} else {
			info.Flags |= RightRdwr
		}
	}

	// Addressing makes some registers used.
	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.From.Reg))
	}
	if p.From.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.From.Index))
	}
	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.To.Reg))
	}
	if p.To.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.To.Index))
	}

	if gc.Ctxt.Flag_dynlink {
		// When -dynlink is passed, many operations on external names (and
		// also calling duffzero/duffcopy) use R15 as a scratch register.
		if p.As == x86.ALEAQ || info.Flags == gc.Pseudo || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
			return
		}
		if p.As == obj.ADUFFZERO || p.As == obj.ADUFFCOPY || (p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local) || (p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local) {
			info.Reguse |= R15
			info.Regset |= R15
			return
		}
	}
}
/*
 * generate high multiply:
 *	res = (nl * nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := t.Width * 8
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16, gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TUINT8, gc.TUINT16, gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

	case gc.TINT64, gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
func proginfo(p *obj.Prog) {
	initproginfo()

	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatalf("proginfo: unknown instruction %v", p)
	}

	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.RightRead /*CanRegRead |*/
	}

	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
		info.Regindex |= RtoB(int(p.From.Reg))
		if info.Flags&gc.PostInc != 0 {
			info.Regset |= RtoB(int(p.From.Reg))
		}
	}

	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
		info.Regindex |= RtoB(int(p.To.Reg))
		if info.Flags&gc.PostInc != 0 {
			info.Regset |= RtoB(int(p.To.Reg))
		}
	}

	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	if p.As == obj.ADUFFZERO {
		info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
		info.Regset |= RtoB(ppc64.REG_R3)
	}

	if p.As == obj.ADUFFCOPY {
		// TODO(austin) Revisit when duffcopy is implemented
		info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)

		info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
	}
}
func proginfo(p *obj.Prog) gc.ProgInfo {
	info := progtable[p.As&obj.AMask]
	if info.Flags == 0 {
		gc.Fatalf("proginfo: unknown instruction %v", p)
	}

	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.RightRead /*CanRegRead |*/
	}

	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	return info
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(p.Lineno, "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	CMPBNE arg, $0, 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 := gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p.As = s390x.ACMPBNE
		p.From3 = new(obj.Addr)
		p.From3.Type = obj.TYPE_CONST
		p.From3.Offset = 0

		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = s390x.AMOVD

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = s390x.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = s390x.REGZERO
		p1.To.Offset = 0
	}
}
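// The expanded sequence branches over a faulting store: CMPBNE skips the MOVD
// when the pointer is non-zero, and a zero pointer falls through to a store at
// address 0, which the runtime reports as a nil-pointer panic. A runnable
// sketch of the equivalent control flow in plain Go (expandedNilCheck is an
// illustrative name):
package main

import "fmt"

func expandedNilCheck(p *int) {
	if p == nil { // CMPBNE arg, $0, 2(PC): taken when arg != 0
		var zero *int
		*zero = 0 // MOVD R0, 0(R0): the faulting store
	}
}

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	x := 1
	expandedNilCheck(&x) // fine
	expandedNilCheck(nil) // panics: invalid memory address or nil pointer dereference
}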
/*
 * generate division according to op, one of:
 *	res = nl / nr
 *	res = nl % nr
 */
func cgen_div(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if gc.Is64(nl.Type) {
		gc.Fatalf("cgen_div %v", nl.Type)
	}

	var t *gc.Type
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	} else {
		t = gc.Types[gc.TUINT32]
	}
	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, res, t)
	var olddx gc.Node
	var dx gc.Node
	savex(x86.REG_DX, &dx, &olddx, res, t)
	dodiv(op, nl, nr, res, &ax, &dx)
	restx(&dx, &olddx)
	restx(&ax, &oldax)
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	CBNZ arg, 2(PC)
		//	MOVD ZR, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999

		p.As = arm64.ACBNZ
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = arm64.AMOVD

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = arm64.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = p.From.Reg
		p1.To.Offset = 0
	}
}