/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", n.Type)
	}
	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}
			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
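// A standalone sketch (not compiler code) of the arithmetic in the
// gc.OLITERAL case of split64 above: the low half of a 64-bit constant is
// always the bottom 32 bits reinterpreted as unsigned, while the high half
// keeps the signedness of the original type. split64Const is a hypothetical
// helper used only for illustration.
func split64Const(n int64) (lo uint32, hi int32) {
	lo = uint32(n)      // low word: bottom 32 bits, always unsigned
	hi = int32(n >> 32) // high word: top 32 bits, signed for TINT64
	return
}

// For example, split64Const(-1) yields lo = 0xffffffff and hi = -1,
// matching the two Nodconst calls in the literal case.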
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE
	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))

	// arm64 requires that the frame size (not counting saved LR)
	// be empty or be 8 mod 16. If not, pad it.
	if frame != 0 && frame%16 != 8 {
		frame += 8
	}
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt
	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset
			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width
		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}
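// A standalone sketch of the arm64 frame-padding rule applied above: after
// rounding, a non-empty frame must be 8 mod 16, so that the frame plus the
// 8-byte saved LR stays 16-byte aligned. padframe is a hypothetical helper
// for illustration only.
func padframe(frame uint32) uint32 {
	if frame != 0 && frame%16 != 8 {
		frame += 8
	}
	return frame
}

// padframe(0) == 0, padframe(16) == 24, padframe(24) == 24.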
/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatal("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE
	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt
	hi := int64(0)
	lo := hi
	ax := uint32(0)
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

		if lo != hi && n.Xoffset+n.Type.Width == lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = n.Xoffset
			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &ax)

		// set new range
		hi = n.Xoffset + n.Type.Width
		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &ax)
}
/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("unknown instruction %v", p)
	}

	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
		info.Reguse |= CX
	}

	if info.Flags&gc.ImulAXDX != 0 {
		if p.To.Type == obj.TYPE_NONE {
			info.Reguse |= AX
			info.Regset |= AX | DX
		} else {
			info.Flags |= RightRdwr
		}
	}

	// Addressing makes some registers used.
	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.From.Reg))
	}
	if p.From.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.From.Index))
	}
	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.To.Reg))
	}
	if p.To.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.To.Index))
	}
}
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("unknown instruction %v", p)
	}

	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.CanRegRead | gc.RightRead
	}

	if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
		info.Flags |= gc.RightRead
	}

	switch p.As {
	case arm.ADIV, arm.ADIVU, arm.AMOD, arm.AMODU:
		info.Regset |= RtoB(arm.REG_R12)
	}
}
func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
	}

	p := rawgins(as, rhs, nil)
	raddr(lhs, p)
	return p
}
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as int) int {
	initvariants()
	for i := int(0); i < len(varianttable[as]); i++ {
		if varianttable[as][i] == as {
			return i
		}
	}
	gc.Fatal("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
	return 0
}
/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatal("bad shift value: %d", sval)
	}

	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}
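// A standalone sketch of the shift operand gshift packs into From.Offset:
// the low four bits name the source register, the stype constant supplies
// the shift-type bits, and bits 7-11 hold the count (with 32 encoded as 0,
// which is why the value is masked with 0x1f above). shiftOperand is a
// hypothetical helper for illustration only.
func shiftOperand(stype int64, sval int32, reg int64) int64 {
	return stype | int64(sval&0x1f)<<7 | reg&15
}

// For a count of 32 the mask leaves 0 in the count field, which is exactly
// how the hardware encodes a 32-bit shift.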
/*
 * direct reference,
 * could be set/use depending on
 * semantics
 */
func copyas(a *obj.Addr, v *obj.Addr) bool {
	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
		gc.Fatal("use of byte register")
	}
	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
		gc.Fatal("use of byte register")
	}

	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
		return false
	}
	if regtyp(v) {
		return true
	}
	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
		if v.Offset == a.Offset {
			return true
		}
	}
	return false
}
/*
 * generate high multiply
 *  res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8, gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32, gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8, gc.TINT16, gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8, gc.TUINT16, gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64, gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
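// A standalone sketch of the "high multiply" both cgen_hmul implementations
// compute: the result is the upper half of the double-width product, which
// the generated code obtains either with an explicit shift of the widened
// product (the SRAD/SRD and ARM gshift sequences) or with a dedicated MULH
// instruction. hmul32 is a hypothetical helper for illustration only.
func hmul32(a, b int32) int32 {
	return int32((int64(a) * int64(b)) >> 32) // widen, multiply, keep the top word
}

// For example, hmul32(1<<30, 8) == 2, since the 64-bit product 1<<33 has 2
// in its upper 32 bits.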
func proginfo(p *obj.Prog) {
	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("unknown instruction %v", p)
	}

	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
		info.Reguse |= CX
	}

	if info.Flags&gc.ImulAXDX != 0 {
		if p.To.Type == obj.TYPE_NONE {
			info.Reguse |= AX
			info.Regset |= AX | DX
		} else {
			info.Flags |= RightRdwr
		}
	}

	// Addressing makes some registers used.
	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.From.Reg))
	}
	if p.From.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.From.Index))
	}
	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.To.Reg))
	}
	if p.To.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.To.Index))
	}

	if gc.Ctxt.Flag_dynlink {
		// When -dynlink is passed, many operations on external names (and
		// also calling duffzero/duffcopy) use R15 as a scratch register.
		if p.As == x86.ALEAQ || info.Flags == gc.Pseudo || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
			return
		}
		if p.As == obj.ADUFFZERO || p.As == obj.ADUFFCOPY || (p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local) || (p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local) {
			info.Reguse |= R15
			info.Regset |= R15
			return
		}
	}
}
func proginfo(p *obj.Prog) {
	initproginfo()

	info := &p.Info
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("proginfo: unknown instruction %v", p)
	}

	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.RightRead /*CanRegRead |*/
	}

	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
		info.Regindex |= RtoB(int(p.From.Reg))
		if info.Flags&gc.PostInc != 0 {
			info.Regset |= RtoB(int(p.From.Reg))
		}
	}

	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
		info.Regindex |= RtoB(int(p.To.Reg))
		if info.Flags&gc.PostInc != 0 {
			info.Regset |= RtoB(int(p.To.Reg))
		}
	}

	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	if p.As == obj.ADUFFZERO {
		info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
		info.Regset |= RtoB(ppc64.REG_R3)
	}

	if p.As == obj.ADUFFCOPY {
		// TODO(austin) Revisit when duffcopy is implemented
		info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
		info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
	}
}
/*
 * generate division according to op, one of:
 *	res = nl / nr
 *	res = nl % nr
 */
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if gc.Is64(nl.Type) {
		gc.Fatal("cgen_div %v", nl.Type)
	}

	var t *gc.Type
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	} else {
		t = gc.Types[gc.TUINT32]
	}
	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, res, t)
	var olddx gc.Node
	var dx gc.Node
	savex(x86.REG_DX, &dx, &olddx, res, t)
	dodiv(op, nl, nr, res, &ax, &dx)
	restx(&dx, &olddx)
	restx(&ax, &oldax)
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}

		// check is
		//	CBNZ arg, 2(PC)
		//	MOVD ZR, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p.As = arm64.ACBNZ
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = arm64.AMOVD
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = arm64.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = p.From.Reg
		p1.To.Offset = 0
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}
// jmptoset returns ASETxx for AJxx.
func jmptoset(jmp int) int {
	switch jmp {
	case x86.AJEQ:
		return x86.ASETEQ
	case x86.AJNE:
		return x86.ASETNE
	case x86.AJLT:
		return x86.ASETLT
	case x86.AJCS:
		return x86.ASETCS
	case x86.AJLE:
		return x86.ASETLE
	case x86.AJLS:
		return x86.ASETLS
	case x86.AJGT:
		return x86.ASETGT
	case x86.AJHI:
		return x86.ASETHI
	case x86.AJGE:
		return x86.ASETGE
	case x86.AJCC:
		return x86.ASETCC
	case x86.AJMI:
		return x86.ASETMI
	case x86.AJOC:
		return x86.ASETOC
	case x86.AJOS:
		return x86.ASETOS
	case x86.AJPC:
		return x86.ASETPC
	case x86.AJPL:
		return x86.ASETPL
	case x86.AJPS:
		return x86.ASETPS
	}
	gc.Fatal("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
	panic("unreachable")
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *ax == 0 {
		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
		*ax = 1
	}

	if cnt%int64(gc.Widthreg) != 0 {
		// should only happen with nacl
		if cnt%int64(gc.Widthptr) != 0 {
			gc.Fatal("zerorange count not a multiple of widthptr %d", cnt)
		}
		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
		lo += int64(gc.Widthptr)
		cnt -= int64(gc.Widthptr)
	}

	if cnt <= int64(4*gc.Widthreg) {
		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
			p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
		q := cnt / int64(gc.Widthreg)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
	} else {
		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
	}

	return p
}
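// A standalone sketch of the size strategy zerorange chooses above, assuming
// Widthreg is 8 on amd64 and ignoring the nacl special cases: tiny ranges are
// zeroed with unrolled MOVQs, medium ranges via runtime.duffzero, and large
// ranges with REP STOSQ. zeroStrategy is a hypothetical helper for
// illustration only.
func zeroStrategy(cnt int64) string {
	const widthreg = 8
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt <= 4*widthreg:
		return "unrolled MOVQ stores"
	case cnt <= 128*widthreg:
		return "DUFFZERO"
	default:
		return "REP STOSQ"
	}
}

// zeroStrategy(16) == "unrolled MOVQ stores", zeroStrategy(1024) == "DUFFZERO",
// zeroStrategy(4096) == "REP STOSQ".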
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := uint64(w % 8) // bytes
	q := uint64(w / 8) // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
	} else if q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}

		/*
			// check is
			//	TD $4, R0, arg (R0 is always zero)
			// eqv. to:
			// 	tdeq r0, arg
			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
			reg = p->from.reg;
			p->as = ATD;
			p->from = p->to = p->from3 = zprog.from;
			p->from.type = TYPE_CONST;
			p->from.offset = 4;
			p->from.reg = 0;
			p->reg = REGZERO;
			p->to.type = TYPE_REG;
			p->to.reg = reg;
		*/

		// check is
		//	CMP arg, R0
		//	BNE 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 = gc.Ctxt.NewProg()
		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = ppc64.ACMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGZERO
		p1.As = ppc64.ABNE

		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = ppc64.AMOVD
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = ppc64.REGZERO
		p2.To.Offset = 0
	}
}
/* * return Axxx for Oxxx on type t. */ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]]) /* case CASE(OADDR, TPTR32): a = ALEAL; break; case CASE(OADDR, TPTR64): a = ALEAQ; break; */ // TODO(kaib): make sure the conditional branches work on all edge cases case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = arm.ABEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = arm.ABNE case gc.OLT<<16 | gc.TINT8, gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64, gc.OLT<<16 | gc.TFLOAT32, gc.OLT<<16 | gc.TFLOAT64: a = arm.ABLT case gc.OLT<<16 | gc.TUINT8, gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64: a = arm.ABLO case gc.OLE<<16 | gc.TINT8, gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64, gc.OLE<<16 | gc.TFLOAT32, gc.OLE<<16 | gc.TFLOAT64: a = arm.ABLE case gc.OLE<<16 | gc.TUINT8, gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64: a = arm.ABLS case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64, gc.OGT<<16 | gc.TFLOAT32, gc.OGT<<16 | gc.TFLOAT64: a = arm.ABGT case gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64: a = arm.ABHI case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64, gc.OGE<<16 | gc.TFLOAT32, gc.OGE<<16 | gc.TFLOAT64: a = arm.ABGE case gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64: a = arm.ABHS case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TUINT8, gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TUINT16, gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TPTR32: a = arm.ACMP case gc.OCMP<<16 | gc.TFLOAT32: a = arm.ACMPF case gc.OCMP<<16 | gc.TFLOAT64: a = arm.ACMPD case gc.OPS<<16 | gc.TFLOAT32, gc.OPS<<16 | gc.TFLOAT64: a = arm.ABVS case gc.OAS<<16 | gc.TBOOL: a = arm.AMOVB case gc.OAS<<16 | gc.TINT8: a = arm.AMOVBS case gc.OAS<<16 | gc.TUINT8: a = arm.AMOVBU case gc.OAS<<16 | gc.TINT16: a = arm.AMOVHS case gc.OAS<<16 | gc.TUINT16: a = arm.AMOVHU case gc.OAS<<16 | gc.TINT32, gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = arm.AMOVW case gc.OAS<<16 | gc.TFLOAT32: a = arm.AMOVF case gc.OAS<<16 | gc.TFLOAT64: a = arm.AMOVD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8, gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16, gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32: a = arm.AADD case gc.OADD<<16 | gc.TFLOAT32: a = arm.AADDF case gc.OADD<<16 | gc.TFLOAT64: a = arm.AADDD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8, gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16, gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32: a = arm.ASUB 
case gc.OSUB<<16 | gc.TFLOAT32: a = arm.ASUBF case gc.OSUB<<16 | gc.TFLOAT64: a = arm.ASUBD case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8, gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16, gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32: a = arm.ARSB case gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8, gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16, gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32: a = arm.AAND case gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8, gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16, gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32: a = arm.AORR case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8, gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16, gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32: a = arm.AEOR case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8, gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16, gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32: a = arm.ASLL case gc.ORSH<<16 | gc.TUINT8, gc.ORSH<<16 | gc.TUINT16, gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32: a = arm.ASRL case gc.ORSH<<16 | gc.TINT8, gc.ORSH<<16 | gc.TINT16, gc.ORSH<<16 | gc.TINT32: a = arm.ASRA case gc.OMUL<<16 | gc.TUINT8, gc.OMUL<<16 | gc.TUINT16, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32: a = arm.AMULU case gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT32: a = arm.AMUL case gc.OMUL<<16 | gc.TFLOAT32: a = arm.AMULF case gc.OMUL<<16 | gc.TFLOAT64: a = arm.AMULD case gc.ODIV<<16 | gc.TUINT8, gc.ODIV<<16 | gc.TUINT16, gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32: a = arm.ADIVU case gc.ODIV<<16 | gc.TINT8, gc.ODIV<<16 | gc.TINT16, gc.ODIV<<16 | gc.TINT32: a = arm.ADIV case gc.OMOD<<16 | gc.TUINT8, gc.OMOD<<16 | gc.TUINT16, gc.OMOD<<16 | gc.TUINT32, gc.OMOD<<16 | gc.TPTR32: a = arm.AMODU case gc.OMOD<<16 | gc.TINT8, gc.OMOD<<16 | gc.TINT16, gc.OMOD<<16 | gc.TINT32: a = arm.AMOD // case CASE(OEXTEND, TINT16): // a = ACWD; // break; // case CASE(OEXTEND, TINT32): // a = ACDQ; // break; // case CASE(OEXTEND, TINT64): // a = ACQO; // break; case gc.ODIV<<16 | gc.TFLOAT32: a = arm.ADIVF case gc.ODIV<<16 | gc.TFLOAT64: a = arm.ADIVD case gc.OSQRT<<16 | gc.TFLOAT64: a = arm.ASQRTD } return a }
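// A standalone sketch of the switch key optoas builds above: the operator
// and the simplified type code are packed into a single uint32 so that one
// switch can dispatch on the (op, type) pair. caseKey is a hypothetical
// helper; the case labels above are the same expression written out with
// constants.
func caseKey(op int, etype uint8) uint32 {
	return uint32(op)<<16 | uint32(etype)
}

// A case label such as gc.OADD<<16 | gc.TINT32 matches exactly when
// caseKey(gc.OADD, gc.Simtype[t.Etype]) evaluates to that packed value.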
/* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // Node nod; // if(f != N && f->op == OINDEX) { // gc.Regalloc(&nod, ®node, Z); // v = constnode.vconst; // gc.Cgen(f->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // gc.Regfree(&nod); // } // if(t != N && t->op == OINDEX) { // gc.Regalloc(&nod, ®node, Z); // v = constnode.vconst; // gc.Cgen(t->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // gc.Regfree(&nod); // } if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) { // Turn MOVL $xxx into LEAL xxx. // These should be equivalent but most of the backend // only expects to see LEAL, because that's what we had // historically generated. Various hidden assumptions are baked in by now. if as == x86.AMOVL { as = x86.ALEAL } else { as = x86.ALEAQ } f = f.Left } switch as { case x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ, x86.AMOVSS, x86.AMOVSD: if f != nil && t != nil && samaddr(f, t) { return nil } case x86.ALEAQ: if f != nil && gc.Isconst(f, gc.CTNIL) { gc.Fatal("gins LEAQ nil %v", f.Type) } } p := gc.Prog(as) gc.Naddr(&p.From, f) gc.Naddr(&p.To, t) if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := int32(0) switch as { case x86.AMOVB: w = 1 case x86.AMOVW: w = 2 case x86.AMOVL: w = 4 case x86.AMOVQ: w = 8 } if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) { gc.Dump("f", f) gc.Dump("t", t) gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width) } if p.To.Type == obj.TYPE_ADDR && w > 0 { gc.Fatal("bad use of addr: %v", p) } return p }
/* * return Axxx for Oxxx on type t. */ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t) case gc.OADDR<<16 | gc.TPTR32: a = x86.ALEAL case gc.OADDR<<16 | gc.TPTR64: a = x86.ALEAQ case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = x86.AJEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = x86.AJNE case gc.OPS<<16 | gc.TBOOL, gc.OPS<<16 | gc.TINT8, gc.OPS<<16 | gc.TUINT8, gc.OPS<<16 | gc.TINT16, gc.OPS<<16 | gc.TUINT16, gc.OPS<<16 | gc.TINT32, gc.OPS<<16 | gc.TUINT32, gc.OPS<<16 | gc.TINT64, gc.OPS<<16 | gc.TUINT64, gc.OPS<<16 | gc.TPTR32, gc.OPS<<16 | gc.TPTR64, gc.OPS<<16 | gc.TFLOAT32, gc.OPS<<16 | gc.TFLOAT64: a = x86.AJPS case gc.OPC<<16 | gc.TBOOL, gc.OPC<<16 | gc.TINT8, gc.OPC<<16 | gc.TUINT8, gc.OPC<<16 | gc.TINT16, gc.OPC<<16 | gc.TUINT16, gc.OPC<<16 | gc.TINT32, gc.OPC<<16 | gc.TUINT32, gc.OPC<<16 | gc.TINT64, gc.OPC<<16 | gc.TUINT64, gc.OPC<<16 | gc.TPTR32, gc.OPC<<16 | gc.TPTR64, gc.OPC<<16 | gc.TFLOAT32, gc.OPC<<16 | gc.TFLOAT64: a = x86.AJPC case gc.OLT<<16 | gc.TINT8, gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64: a = x86.AJLT case gc.OLT<<16 | gc.TUINT8, gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64: a = x86.AJCS case gc.OLE<<16 | gc.TINT8, gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64: a = x86.AJLE case gc.OLE<<16 | gc.TUINT8, gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64: a = x86.AJLS case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64: a = x86.AJGT case gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64, gc.OLT<<16 | gc.TFLOAT32, gc.OLT<<16 | gc.TFLOAT64: a = x86.AJHI case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64: a = x86.AJGE case gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64, gc.OLE<<16 | gc.TFLOAT32, gc.OLE<<16 | gc.TFLOAT64: a = x86.AJCC case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TUINT8: a = x86.ACMPB case gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TUINT16: a = x86.ACMPW case gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TPTR32: a = x86.ACMPL case gc.OCMP<<16 | gc.TINT64, gc.OCMP<<16 | gc.TUINT64, gc.OCMP<<16 | gc.TPTR64: a = x86.ACMPQ case gc.OCMP<<16 | gc.TFLOAT32: a = x86.AUCOMISS case gc.OCMP<<16 | gc.TFLOAT64: a = x86.AUCOMISD case gc.OAS<<16 | gc.TBOOL, gc.OAS<<16 | gc.TINT8, gc.OAS<<16 | gc.TUINT8: a = x86.AMOVB case gc.OAS<<16 | gc.TINT16, gc.OAS<<16 | gc.TUINT16: a = x86.AMOVW case gc.OAS<<16 | gc.TINT32, gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = x86.AMOVL case gc.OAS<<16 | gc.TINT64, gc.OAS<<16 | gc.TUINT64, gc.OAS<<16 | gc.TPTR64: a = x86.AMOVQ case gc.OAS<<16 | gc.TFLOAT32: a = x86.AMOVSS 
case gc.OAS<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8: a = x86.AADDB case gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16: a = x86.AADDW case gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32: a = x86.AADDL case gc.OADD<<16 | gc.TINT64, gc.OADD<<16 | gc.TUINT64, gc.OADD<<16 | gc.TPTR64: a = x86.AADDQ case gc.OADD<<16 | gc.TFLOAT32: a = x86.AADDSS case gc.OADD<<16 | gc.TFLOAT64: a = x86.AADDSD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8: a = x86.ASUBB case gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16: a = x86.ASUBW case gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32: a = x86.ASUBL case gc.OSUB<<16 | gc.TINT64, gc.OSUB<<16 | gc.TUINT64, gc.OSUB<<16 | gc.TPTR64: a = x86.ASUBQ case gc.OSUB<<16 | gc.TFLOAT32: a = x86.ASUBSS case gc.OSUB<<16 | gc.TFLOAT64: a = x86.ASUBSD case gc.OINC<<16 | gc.TINT8, gc.OINC<<16 | gc.TUINT8: a = x86.AINCB case gc.OINC<<16 | gc.TINT16, gc.OINC<<16 | gc.TUINT16: a = x86.AINCW case gc.OINC<<16 | gc.TINT32, gc.OINC<<16 | gc.TUINT32, gc.OINC<<16 | gc.TPTR32: a = x86.AINCL case gc.OINC<<16 | gc.TINT64, gc.OINC<<16 | gc.TUINT64, gc.OINC<<16 | gc.TPTR64: a = x86.AINCQ case gc.ODEC<<16 | gc.TINT8, gc.ODEC<<16 | gc.TUINT8: a = x86.ADECB case gc.ODEC<<16 | gc.TINT16, gc.ODEC<<16 | gc.TUINT16: a = x86.ADECW case gc.ODEC<<16 | gc.TINT32, gc.ODEC<<16 | gc.TUINT32, gc.ODEC<<16 | gc.TPTR32: a = x86.ADECL case gc.ODEC<<16 | gc.TINT64, gc.ODEC<<16 | gc.TUINT64, gc.ODEC<<16 | gc.TPTR64: a = x86.ADECQ case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8: a = x86.ANEGB case gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16: a = x86.ANEGW case gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32: a = x86.ANEGL case gc.OMINUS<<16 | gc.TINT64, gc.OMINUS<<16 | gc.TUINT64, gc.OMINUS<<16 | gc.TPTR64: a = x86.ANEGQ case gc.OAND<<16 | gc.TBOOL, gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8: a = x86.AANDB case gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16: a = x86.AANDW case gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32: a = x86.AANDL case gc.OAND<<16 | gc.TINT64, gc.OAND<<16 | gc.TUINT64, gc.OAND<<16 | gc.TPTR64: a = x86.AANDQ case gc.OOR<<16 | gc.TBOOL, gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8: a = x86.AORB case gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16: a = x86.AORW case gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32: a = x86.AORL case gc.OOR<<16 | gc.TINT64, gc.OOR<<16 | gc.TUINT64, gc.OOR<<16 | gc.TPTR64: a = x86.AORQ case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8: a = x86.AXORB case gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16: a = x86.AXORW case gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32: a = x86.AXORL case gc.OXOR<<16 | gc.TINT64, gc.OXOR<<16 | gc.TUINT64, gc.OXOR<<16 | gc.TPTR64: a = x86.AXORQ case gc.OLROT<<16 | gc.TINT8, gc.OLROT<<16 | gc.TUINT8: a = x86.AROLB case gc.OLROT<<16 | gc.TINT16, gc.OLROT<<16 | gc.TUINT16: a = x86.AROLW case gc.OLROT<<16 | gc.TINT32, gc.OLROT<<16 | gc.TUINT32, gc.OLROT<<16 | gc.TPTR32: a = x86.AROLL case gc.OLROT<<16 | gc.TINT64, gc.OLROT<<16 | gc.TUINT64, gc.OLROT<<16 | gc.TPTR64: a = x86.AROLQ case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8: a = x86.ASHLB case gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16: a = x86.ASHLW case gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32: a = x86.ASHLL case gc.OLSH<<16 | gc.TINT64, gc.OLSH<<16 | gc.TUINT64, gc.OLSH<<16 | 
gc.TPTR64: a = x86.ASHLQ case gc.ORSH<<16 | gc.TUINT8: a = x86.ASHRB case gc.ORSH<<16 | gc.TUINT16: a = x86.ASHRW case gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32: a = x86.ASHRL case gc.ORSH<<16 | gc.TUINT64, gc.ORSH<<16 | gc.TPTR64: a = x86.ASHRQ case gc.ORSH<<16 | gc.TINT8: a = x86.ASARB case gc.ORSH<<16 | gc.TINT16: a = x86.ASARW case gc.ORSH<<16 | gc.TINT32: a = x86.ASARL case gc.ORSH<<16 | gc.TINT64: a = x86.ASARQ case gc.ORROTC<<16 | gc.TINT8, gc.ORROTC<<16 | gc.TUINT8: a = x86.ARCRB case gc.ORROTC<<16 | gc.TINT16, gc.ORROTC<<16 | gc.TUINT16: a = x86.ARCRW case gc.ORROTC<<16 | gc.TINT32, gc.ORROTC<<16 | gc.TUINT32: a = x86.ARCRL case gc.ORROTC<<16 | gc.TINT64, gc.ORROTC<<16 | gc.TUINT64: a = x86.ARCRQ case gc.OHMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TUINT8: a = x86.AIMULB case gc.OHMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TUINT16: a = x86.AIMULW case gc.OHMUL<<16 | gc.TINT32, gc.OMUL<<16 | gc.TINT32, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32: a = x86.AIMULL case gc.OHMUL<<16 | gc.TINT64, gc.OMUL<<16 | gc.TINT64, gc.OMUL<<16 | gc.TUINT64, gc.OMUL<<16 | gc.TPTR64: a = x86.AIMULQ case gc.OHMUL<<16 | gc.TUINT8: a = x86.AMULB case gc.OHMUL<<16 | gc.TUINT16: a = x86.AMULW case gc.OHMUL<<16 | gc.TUINT32, gc.OHMUL<<16 | gc.TPTR32: a = x86.AMULL case gc.OHMUL<<16 | gc.TUINT64, gc.OHMUL<<16 | gc.TPTR64: a = x86.AMULQ case gc.OMUL<<16 | gc.TFLOAT32: a = x86.AMULSS case gc.OMUL<<16 | gc.TFLOAT64: a = x86.AMULSD case gc.ODIV<<16 | gc.TINT8, gc.OMOD<<16 | gc.TINT8: a = x86.AIDIVB case gc.ODIV<<16 | gc.TUINT8, gc.OMOD<<16 | gc.TUINT8: a = x86.ADIVB case gc.ODIV<<16 | gc.TINT16, gc.OMOD<<16 | gc.TINT16: a = x86.AIDIVW case gc.ODIV<<16 | gc.TUINT16, gc.OMOD<<16 | gc.TUINT16: a = x86.ADIVW case gc.ODIV<<16 | gc.TINT32, gc.OMOD<<16 | gc.TINT32: a = x86.AIDIVL case gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32, gc.OMOD<<16 | gc.TUINT32, gc.OMOD<<16 | gc.TPTR32: a = x86.ADIVL case gc.ODIV<<16 | gc.TINT64, gc.OMOD<<16 | gc.TINT64: a = x86.AIDIVQ case gc.ODIV<<16 | gc.TUINT64, gc.ODIV<<16 | gc.TPTR64, gc.OMOD<<16 | gc.TUINT64, gc.OMOD<<16 | gc.TPTR64: a = x86.ADIVQ case gc.OEXTEND<<16 | gc.TINT16: a = x86.ACWD case gc.OEXTEND<<16 | gc.TINT32: a = x86.ACDQ case gc.OEXTEND<<16 | gc.TINT64: a = x86.ACQO case gc.ODIV<<16 | gc.TFLOAT32: a = x86.ADIVSS case gc.ODIV<<16 | gc.TFLOAT64: a = x86.ADIVSD case gc.OSQRT<<16 | gc.TFLOAT64: a = x86.ASQRTSD } return a }
func blockcopy(n, res *gc.Node, osrc, odst, w int64) { // determine alignment. // want to avoid unaligned access, so have to use // smaller operations for less aligned types. // for example moving [4]byte must use 4 MOVB not 1 MOVW. align := int(n.Type.Align) var op int switch align { default: gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type) case 1: op = arm.AMOVB case 2: op = arm.AMOVH case 4: op = arm.AMOVW } if w%int64(align) != 0 { gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type) } c := int32(w / int64(align)) if osrc%int64(align) != 0 || odst%int64(align) != 0 { gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align) } // if we are copying forward on the stack and // the src and dst overlap, then reverse direction dir := align if osrc < odst && int64(odst) < int64(osrc)+w { dir = -dir } if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 { var r0 gc.Node r0.Op = gc.OREGISTER r0.Reg = arm.REG_R0 var r1 gc.Node r1.Op = gc.OREGISTER r1.Reg = arm.REG_R0 + 1 var r2 gc.Node r2.Op = gc.OREGISTER r2.Reg = arm.REG_R0 + 2 var src gc.Node gc.Regalloc(&src, gc.Types[gc.Tptr], &r1) var dst gc.Node gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2) if n.Ullman >= res.Ullman { // eval n first gc.Agen(n, &src) if res.Op == gc.ONAME { gc.Gvardef(res) } gc.Agen(res, &dst) } else { // eval res first if res.Op == gc.ONAME { gc.Gvardef(res) } gc.Agen(res, &dst) gc.Agen(n, &src) } var tmp gc.Node gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0) f := gc.Sysfunc("duffcopy") p := gins(obj.ADUFFCOPY, nil, f) gc.Afunclit(&p.To, f) // 8 and 128 = magic constants: see ../../runtime/asm_arm.s p.To.Offset = 8 * (128 - int64(c)) gc.Regfree(&tmp) gc.Regfree(&src) gc.Regfree(&dst) return } var dst gc.Node var src gc.Node if n.Ullman >= res.Ullman { gc.Agenr(n, &dst, res) // temporarily use dst gc.Regalloc(&src, gc.Types[gc.Tptr], nil) gins(arm.AMOVW, &dst, &src) if res.Op == gc.ONAME { gc.Gvardef(res) } gc.Agen(res, &dst) } else { if res.Op == gc.ONAME { gc.Gvardef(res) } gc.Agenr(res, &dst, res) gc.Agenr(n, &src, nil) } var tmp gc.Node gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil) // set up end marker var nend gc.Node if c >= 4 { gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil) p := gins(arm.AMOVW, &src, &nend) p.From.Type = obj.TYPE_ADDR if dir < 0 { p.From.Offset = int64(dir) } else { p.From.Offset = w } } // move src and dest to the end of block if necessary if dir < 0 { p := gins(arm.AMOVW, &src, &src) p.From.Type = obj.TYPE_ADDR p.From.Offset = w + int64(dir) p = gins(arm.AMOVW, &dst, &dst) p.From.Type = obj.TYPE_ADDR p.From.Offset = w + int64(dir) } // move if c >= 4 { p := gins(op, &src, &tmp) p.From.Type = obj.TYPE_MEM p.From.Offset = int64(dir) p.Scond |= arm.C_PBIT ploop := p p = gins(op, &tmp, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(dir) p.Scond |= arm.C_PBIT p = gins(arm.ACMP, &src, nil) raddr(&nend, p) gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop) gc.Regfree(&nend) } else { var p *obj.Prog for { tmp14 := c c-- if tmp14 <= 0 { break } p = gins(op, &src, &tmp) p.From.Type = obj.TYPE_MEM p.From.Offset = int64(dir) p.Scond |= arm.C_PBIT p = gins(op, &tmp, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(dir) p.Scond |= arm.C_PBIT } } gc.Regfree(&dst) gc.Regfree(&src) gc.Regfree(&tmp) }
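// A standalone sketch of the overlap test blockcopy uses to pick a copy
// direction: when the destination starts inside the source range, a forward
// word-by-word copy would clobber bytes it has not yet read, so the loop runs
// backwards instead. mustCopyBackwards is a hypothetical helper for
// illustration only.
func mustCopyBackwards(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}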
/* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { break } v := n.Int() if v >= 32000 || v <= -32000 { break } switch as { default: return false case arm.AADD, arm.ASUB, arm.AAND, arm.AORR, arm.AEOR, arm.AMOVB, arm.AMOVBS, arm.AMOVBU, arm.AMOVH, arm.AMOVHS, arm.AMOVHU, arm.AMOVW: break } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY gc.Naddr(a, n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] gc.Naddr(a, &n1) return true } gc.Regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { gc.Agen(nn, reg) n1.Xoffset = oary[0] } else { gc.Cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatal("can't happen") } gins(arm.AMOVW, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Name = obj.NAME_NONE n1.Type = n.Type gc.Naddr(a, &n1) return true case gc.OINDEX: return false } return false }
/* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // Node nod; // int32 v; if f != nil && f.Op == gc.OINDEX { gc.Fatal("gins OINDEX not implemented") } // gc.Regalloc(&nod, ®node, Z); // v = constnode.vconst; // gc.Cgen(f->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // gc.Regfree(&nod); if t != nil && t.Op == gc.OINDEX { gc.Fatal("gins OINDEX not implemented") } // gc.Regalloc(&nod, ®node, Z); // v = constnode.vconst; // gc.Cgen(t->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // gc.Regfree(&nod); p := gc.Prog(as) gc.Naddr(&p.From, f) gc.Naddr(&p.To, t) switch as { case arm.ABL: if p.To.Type == obj.TYPE_REG { p.To.Type = obj.TYPE_MEM } case arm.ACMP, arm.ACMPF, arm.ACMPD: if t != nil { if f.Op != gc.OREGISTER { /* generate a comparison TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites. */ gc.Fatal("bad operands to gcmp") } p.From = p.To p.To = obj.Addr{} raddr(f, p) } case arm.AMULU: if f != nil && f.Op != gc.OREGISTER { gc.Fatal("bad operands to mul") } case arm.AMOVW: if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) { gc.Fatal("gins double memory") } case arm.AADD: if p.To.Type == obj.TYPE_MEM { gc.Fatal("gins arith to mem") } case arm.ARSB: if p.From.Type == obj.TYPE_NONE { gc.Fatal("rsb with no from") } } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } return p }
func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", f, t) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands; // except 64-bit, which always copies via registers anyway. var a int var r1 gc.Node if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: f.Convconst(&con, t.Type) case gc.TINT16, gc.TINT8: var con gc.Node f.Convconst(&con, gc.Types[gc.TINT32]) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return case gc.TUINT16, gc.TUINT8: var con gc.Node f.Convconst(&con, gc.Types[gc.TUINT32]) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return } f = &con ft = gc.Simsimtype(con.Type) // constants can't move directly to memory if gc.Ismem(t) && !gc.Is64(t.Type) { goto hard } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", f, t) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8: // same size if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8: a = arm.AMOVBS case gc.TUINT8<<16 | gc.TUINT8: if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = arm.AMOVBU case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8: a = arm.AMOVBS goto trunc64 case gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = arm.AMOVBU goto trunc64 case gc.TINT16<<16 | gc.TINT16: // same size if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16: a = arm.AMOVHS case gc.TUINT16<<16 | gc.TUINT16: if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = arm.AMOVHU case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16: a = arm.AMOVHS goto trunc64 case gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = arm.AMOVHU goto trunc64 case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = arm.AMOVW case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Regalloc(&r1, t.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &r1, t) gc.Regfree(&r1) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) var r1 gc.Node gc.Regalloc(&r1, flo.Type, nil) var r2 gc.Node 
gc.Regalloc(&r2, fhi.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &fhi, &r2) gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) gc.Regfree(&r1) gc.Regfree(&r2) splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = arm.AMOVBS goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = arm.AMOVBU goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = arm.AMOVHS goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = arm.AMOVHU goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) var r1 gc.Node gc.Regalloc(&r1, tlo.Type, nil) var r2 gc.Node gc.Regalloc(&r2, thi.Type, nil) gmove(f, &r1) p1 := gins(arm.AMOVW, &r1, &r2) p1.From.Type = obj.TYPE_SHIFT p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31 p1.From.Reg = 0 //print("gmove: %v\n", p1); gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) gc.Regfree(&r1) gc.Regfree(&r2) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) var r1 gc.Node gc.Regalloc(&r1, thi.Type, nil) gins(arm.AMOVW, ncon(0), &r1) gins(arm.AMOVW, &r1, &thi) gc.Regfree(&r1) splitclean() return // case CASE(TFLOAT64, TUINT64): /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TUINT32, // case CASE(TFLOAT32, TUINT64): gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TUINT32: fa := arm.AMOVF a := arm.AMOVFW if ft == gc.TFLOAT64 { fa = arm.AMOVD a = arm.AMOVDW } ta := arm.AMOVW switch tt { case gc.TINT8: ta = arm.AMOVBS case gc.TUINT8: ta = arm.AMOVBU case gc.TINT16: ta = arm.AMOVHS case gc.TUINT16: ta = arm.AMOVHU } var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], f) var r2 gc.Node gc.Regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to fpu p1 := gins(a, &r1, &r1) // convert to w switch tt { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(arm.AMOVW, &r1, &r2) // copy to cpu gins(ta, &r2, t) // store gc.Regfree(&r1) gc.Regfree(&r2) return /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT64: fa := arm.AMOVW switch ft { case gc.TINT8: fa = arm.AMOVBS case gc.TUINT8: fa = 
arm.AMOVBU case gc.TINT16: fa = arm.AMOVHS case gc.TUINT16: fa = arm.AMOVHU } a := arm.AMOVWF ta := arm.AMOVF if tt == gc.TFLOAT64 { a = arm.AMOVWD ta = arm.AMOVD } var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], f) var r2 gc.Node gc.Regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to cpu gins(arm.AMOVW, &r1, &r2) // copy to fpu p1 := gins(a, &r2, &r2) // convert switch ft { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(ta, &r2, t) // store gc.Regfree(&r1) gc.Regfree(&r2) return case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: gc.Fatal("gmove UINT64, TFLOAT not implemented") return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = arm.AMOVF case gc.TFLOAT64<<16 | gc.TFLOAT64: a = arm.AMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: var r1 gc.Node gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVF, f, &r1) gins(arm.AMOVFD, &r1, &r1) gins(arm.AMOVD, &r1, t) gc.Regfree(&r1) return case gc.TFLOAT64<<16 | gc.TFLOAT32: var r1 gc.Node gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVD, f, &r1) gins(arm.AMOVDF, &r1, &r1) gins(arm.AMOVF, &r1, t) gc.Regfree(&r1) return } gins(a, f, t) return // TODO(kaib): we almost always require a register dest anyway, this can probably be // removed. // requires register destination rdst: { gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return // truncate 64 bit integer trunc64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) gc.Regalloc(&r1, t.Type, nil) gins(a, &flo, &r1) gins(a, &r1, t) gc.Regfree(&r1) splitclean() return }