func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		rawgins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
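// Illustrative sketch (not compiler code): swapping the operands of a
// comparison, as the constant-last rewrite above does, requires reversing the
// operator (what gc.Brrev computes), not negating it: a < b is the same test
// as b > a, not b <= a. The helper below is hypothetical.
func reverseCmp(op string) string {
	switch op {
	case "<":
		return ">"
	case "<=":
		return ">="
	case ">":
		return "<"
	case ">=":
		return "<="
	}
	return op // == and != are symmetric
}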
/*
 * call to n has already been generated.
 * generate:
 *	res = &return value from call.
 */
func cgen_aret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if gc.Isptr[t.Etype] {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_aret: nil")
	}

	var nod1 gc.Node
	nod1.Op = gc.OINDREG
	nod1.Val.U.Reg = x86.REG_SP
	nod1.Addable = 1
	nod1.Xoffset = fp.Width
	nod1.Type = fp.Type

	if res.Op != gc.OREGISTER {
		var nod2 gc.Node
		regalloc(&nod2, gc.Types[gc.Tptr], res)
		gins(x86.ALEAL, &nod1, &nod2)
		gins(x86.AMOVL, &nod2, res)
		regfree(&nod2)
	} else {
		gins(x86.ALEAL, &nod1, res)
	}
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
/*
 * call to n has already been generated.
 * generate:
 *	res = &return value from call.
 */
func cgen_aret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if gc.Isptr[t.Etype] {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_aret: nil")
	}

	var nod1 gc.Node
	nod1.Op = gc.OINDREG
	nod1.Val.U.Reg = ppc64.REGSP
	nod1.Addable = 1
	nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
	nod1.Type = fp.Type

	if res.Op != gc.OREGISTER {
		var nod2 gc.Node
		regalloc(&nod2, gc.Types[gc.Tptr], res)
		agen(&nod1, &nod2)
		gins(ppc64.AMOVD, &nod2, res)
		regfree(&nod2)
	} else {
		agen(&nod1, res)
	}
}
/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		gc.Convconst(&n1, n.Type, &n.Val)
		i := gc.Mpgetfix(n1.Val.U.Xval)
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
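// Illustrative sketch (not compiler code): the arithmetic the OLITERAL case
// above performs when splitting a 64-bit constant. The low half is always
// unsigned; the high half keeps the operand's signedness. The helper name is
// hypothetical.
func splitConst64(v int64, signedType bool) (lo, hi int64) {
	lo = int64(uint32(v)) // low 32 bits
	h := v >> 32          // arithmetic shift, as in the code above
	if signedType {
		return lo, int64(int32(h))
	}
	return lo, int64(uint32(h))
}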
func intLiteral(n *gc.Node) (x int64, ok bool) {
	switch {
	case n == nil:
		return
	case gc.Isconst(n, gc.CTINT):
		return n.Int(), true
	case gc.Isconst(n, gc.CTBOOL):
		return int64(obj.Bool2int(n.Bool())), true
	}
	return
}
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}
/*
 * register dr is one of the special ones (AX, CX, DI, SI, etc.).
 * we need to use it. if it is already allocated as a temporary
 * (r > 1; can only happen if a routine like sgen passed a
 * special as cgen's res and then cgen used regalloc to reuse
 * it as its own temporary), then move it for now to another
 * register. caller must call restx to move it back.
 * the move is not necessary if dr == res, because res is
 * known to be dead.
 */
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := int(reg[dr])

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	gc.Nodreg(x, t, dr)
	if r > 1 && !gc.Samereg(x, res) {
		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
		x.Type = gc.Types[gc.TINT64]
		gmove(x, oldx)
		x.Type = t
		oldx.Ostk = int32(r) // squirrel away old r value
		reg[dr] = 1
	}
}
func restx(x *gc.Node, oldx *gc.Node) {
	if oldx.Op != 0 {
		x.Type = gc.Types[gc.TINT64]
		reg[x.Reg] = uint8(oldx.Ostk)
		gmove(oldx, x)
		gc.Regfree(oldx)
	}
}
func restx(x *gc.Node, oldx *gc.Node) {
	gc.Regfree(x)

	if oldx.Op != 0 {
		x.Type = gc.Types[gc.TINT32]
		gmove(oldx, x)
	}
}
/*
 * generate function call;
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 */
func cgen_call(n *gc.Node, proc int) {
	if n == nil {
		return
	}

	var afun gc.Node
	if n.Left.Ullman >= gc.UINF {
		// if name involves a fn call
		// precompute the address of the fn
		gc.Tempname(&afun, gc.Types[gc.Tptr])

		cgen(n.Left, &afun)
	}

	gc.Genlist(n.List) // assign the args
	t := n.Left.Type

	// call tempname pointer
	if n.Left.Ullman >= gc.UINF {
		var nod gc.Node
		regalloc(&nod, gc.Types[gc.Tptr], nil)
		gc.Cgen_as(&nod, &afun)
		nod.Type = t
		ginscall(&nod, proc)
		regfree(&nod)
		return
	}

	// call pointer
	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
		var nod gc.Node
		regalloc(&nod, gc.Types[gc.Tptr], nil)
		gc.Cgen_as(&nod, n.Left)
		nod.Type = t
		ginscall(&nod, proc)
		regfree(&nod)
		return
	}

	// call direct
	n.Left.Method = 1

	ginscall(n.Left, proc)
}
/*
 * call to n has already been generated.
 * generate:
 *	res = return value from call.
 */
func cgen_callret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_callret: nil")
	}

	var nod gc.Node
	nod.Op = gc.OINDREG
	nod.Val.U.Reg = x86.REG_SP
	nod.Addable = 1
	nod.Xoffset = fp.Width
	nod.Type = fp.Type
	gc.Cgen_as(res, &nod)
}
func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if -4096 <= n.Xoffset && n.Xoffset < 4096 {
		return
	}
	a := gc.Node(*n)
	a.Op = gc.OREGISTER
	a.Type = gc.Types[gc.Tptr]
	a.Xoffset = 0
	gc.Cgen_checknil(&a)
	ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
	n.Xoffset = 0
}
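// Illustrative sketch (not compiler code): the immediate-offset range test
// used above. An offset outside [-4096, 4096) does not fit the load/store
// addressing mode, so it is added into the base register and the node's
// offset reset to zero. The helper name is hypothetical.
func offsetFitsImmediate(off int64) bool {
	return -4096 <= off && off < 4096
}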
/*
 * peep.c
 */
func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
	n1.Op = gc.OEMPTY

	if n.Addable != 0 {
		*n1 = *n
		if n1.Op == gc.OREGISTER || n1.Op == gc.OINDREG {
			reg[n.Val.U.Reg]++
		}
		return
	}

	gc.Tempname(n1, n.Type)
	cgen(n, n1)
	if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] {
		n2 := *n1
		regalloc(n1, n.Type, rg)
		gmove(&n2, n1)
	}
}
func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large
		return
	}
	if n.Xoffset != int64(int32(n.Xoffset)) {
		// TODO(minux): offset too large, move into R31 and add to R31 instead.
		// this is used only in test/fixedbugs/issue6036.go.
		gc.Fatal("offset too large: %v", gc.Nconv(n, 0))

		a := gc.Node(*n)
		a.Op = gc.OREGISTER
		a.Type = gc.Types[gc.Tptr]
		a.Xoffset = 0
		gc.Cgen_checknil(&a)
		ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
		n.Xoffset = 0
	}
}
/* * generate: * res = n; * simplifies and calls gmove. */ func cgen(n *gc.Node, res *gc.Node) { //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable); if gc.Debug['g'] != 0 { gc.Dump("\ncgen-n", n) gc.Dump("cgen-res", res) } if n == nil || n.Type == nil { return } if res == nil || res.Type == nil { gc.Fatal("cgen: res nil") } for n.Op == gc.OCONVNOP { n = n.Left } switch n.Op { case gc.OSLICE, gc.OSLICEARR, gc.OSLICESTR, gc.OSLICE3, gc.OSLICE3ARR: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_slice(n, &n1) cgen(&n1, res) } else { gc.Cgen_slice(n, res) } return case gc.OEFACE: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_eface(n, &n1) cgen(&n1, res) } else { gc.Cgen_eface(n, res) } return } if n.Ullman >= gc.UINF { if n.Op == gc.OINDREG { gc.Fatal("cgen: this is going to misscompile") } if res.Ullman >= gc.UINF { var n1 gc.Node gc.Tempname(&n1, n.Type) cgen(n, &n1) cgen(&n1, res) return } } if gc.Isfat(n.Type) { if n.Type.Width < 0 { gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0)) } sgen(n, res, n.Type.Width) return } if res.Addable == 0 { if n.Ullman > res.Ullman { var n1 gc.Node regalloc(&n1, n.Type, res) cgen(n, &n1) if n1.Ullman > res.Ullman { gc.Dump("n1", &n1) gc.Dump("res", res) gc.Fatal("loop in cgen") } cgen(&n1, res) regfree(&n1) return } var f int if res.Ullman >= gc.UINF { goto gen } if gc.Complexop(n, res) { gc.Complexgen(n, res) return } f = 1 // gen thru register switch n.Op { case gc.OLITERAL: if gc.Smallintconst(n) { f = 0 } case gc.OREGISTER: f = 0 } if !gc.Iscomplex[n.Type.Etype] { a := optoas(gc.OAS, res.Type) var addr obj.Addr if sudoaddable(a, res, &addr) { var p1 *obj.Prog if f != 0 { var n2 gc.Node regalloc(&n2, res.Type, nil) cgen(n, &n2) p1 = gins(a, &n2, nil) regfree(&n2) } else { p1 = gins(a, n, nil) } p1.To = addr if gc.Debug['g'] != 0 { fmt.Printf("%v [ignore previous line]\n", p1) } sudoclean() return } } gen: var n1 gc.Node igen(res, &n1, nil) cgen(n, &n1) regfree(&n1) return } // update addressability for string, slice // can't do in walk because n->left->addable // changes if n->left is an escaping local variable. switch n.Op { case gc.OSPTR, gc.OLEN: if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) { n.Addable = n.Left.Addable } case gc.OCAP: if gc.Isslice(n.Left.Type) { n.Addable = n.Left.Addable } case gc.OITAB: n.Addable = n.Left.Addable } if gc.Complexop(n, res) { gc.Complexgen(n, res) return } // if both are addressable, move if n.Addable != 0 { if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER { gmove(n, res) } else { var n1 gc.Node regalloc(&n1, n.Type, nil) gmove(n, &n1) cgen(&n1, res) regfree(&n1) } return } nl := n.Left nr := n.Right if nl != nil && nl.Ullman >= gc.UINF { if nr != nil && nr.Ullman >= gc.UINF { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) n2 := *n n2.Left = &n1 cgen(&n2, res) return } } if !gc.Iscomplex[n.Type.Etype] { a := optoas(gc.OAS, n.Type) var addr obj.Addr if sudoaddable(a, n, &addr) { if res.Op == gc.OREGISTER { p1 := gins(a, nil, res) p1.From = addr } else { var n2 gc.Node regalloc(&n2, n.Type, nil) p1 := gins(a, nil, &n2) p1.From = addr gins(a, &n2, res) regfree(&n2) } sudoclean() return } } // TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize // OGE, OLE, and ONE ourselves. 
// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt; var a int switch n.Op { default: gc.Dump("cgen", n) gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign)) // these call bgen to get a bool value case gc.OOROR, gc.OANDAND, gc.OEQ, gc.ONE, gc.OLT, gc.OLE, gc.OGE, gc.OGT, gc.ONOT: p1 := gc.Gbranch(ppc64.ABR, nil, 0) p2 := gc.Pc gmove(gc.Nodbool(true), res) p3 := gc.Gbranch(ppc64.ABR, nil, 0) gc.Patch(p1, gc.Pc) bgen(n, true, 0, p2) gmove(gc.Nodbool(false), res) gc.Patch(p3, gc.Pc) return case gc.OPLUS: cgen(nl, res) return // unary case gc.OCOM: a := optoas(gc.OXOR, nl.Type) var n1 gc.Node regalloc(&n1, nl.Type, nil) cgen(nl, &n1) var n2 gc.Node gc.Nodconst(&n2, nl.Type, -1) gins(a, &n2, &n1) gmove(&n1, res) regfree(&n1) return case gc.OMINUS: if gc.Isfloat[nl.Type.Etype] { nr = gc.Nodintconst(-1) gc.Convlit(&nr, n.Type) a = optoas(gc.OMUL, nl.Type) goto sbop } a := optoas(int(n.Op), nl.Type) // unary var n1 gc.Node regalloc(&n1, nl.Type, res) cgen(nl, &n1) gins(a, nil, &n1) gmove(&n1, res) regfree(&n1) return // symmetric binary case gc.OAND, gc.OOR, gc.OXOR, gc.OADD, gc.OMUL: a = optoas(int(n.Op), nl.Type) goto sbop // asymmetric binary case gc.OSUB: a = optoas(int(n.Op), nl.Type) goto abop case gc.OHMUL: cgen_hmul(nl, nr, res) case gc.OCONV: if n.Type.Width > nl.Type.Width { // If loading from memory, do conversion during load, // so as to avoid use of 8-bit register in, say, int(*byteptr). switch nl.Op { case gc.ODOT, gc.ODOTPTR, gc.OINDEX, gc.OIND, gc.ONAME: var n1 gc.Node igen(nl, &n1, res) var n2 gc.Node regalloc(&n2, n.Type, res) gmove(&n1, &n2) gmove(&n2, res) regfree(&n2) regfree(&n1) return } } var n1 gc.Node regalloc(&n1, nl.Type, res) var n2 gc.Node regalloc(&n2, n.Type, &n1) cgen(nl, &n1) // if we do the conversion n1 -> n2 here // reusing the register, then gmove won't // have to allocate its own register. gmove(&n1, &n2) gmove(&n2, res) regfree(&n2) regfree(&n1) case gc.ODOT, gc.ODOTPTR, gc.OINDEX, gc.OIND, gc.ONAME: // PHEAP or PPARAMREF var var n1 gc.Node igen(n, &n1, res) gmove(&n1, res) regfree(&n1) // interface table is first word of interface value case gc.OITAB: var n1 gc.Node igen(nl, &n1, res) n1.Type = n.Type gmove(&n1, res) regfree(&n1) // pointer is the first word of string or slice. case gc.OSPTR: if gc.Isconst(nl, gc.CTSTR) { var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) p1 := gins(ppc64.AMOVD, nil, &n1) gc.Datastring(nl.Val.U.Sval, &p1.From) gmove(&n1, res) regfree(&n1) break } var n1 gc.Node igen(nl, &n1, res) n1.Type = n.Type gmove(&n1, res) regfree(&n1) case gc.OLEN: if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) { // map and chan have len in the first int-sized word. // a zero pointer means zero length var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) cgen(nl, &n1) var n2 gc.Node gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0) n2 = n1 n2.Op = gc.OINDREG n2.Type = gc.Types[gc.Simtype[gc.TINT]] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) { // both slice and string have len one pointer into the struct. 
// a zero pointer means zero length var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.Simtype[gc.TUINT]] n1.Xoffset += int64(gc.Array_nel) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OCAP: if gc.Istype(nl.Type, gc.TCHAN) { // chan has cap in the second int-sized word. // a zero pointer means zero length var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) cgen(nl, &n1) var n2 gc.Node gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0) n2 = n1 n2.Op = gc.OINDREG n2.Xoffset = int64(gc.Widthint) n2.Type = gc.Types[gc.Simtype[gc.TINT]] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Isslice(nl.Type) { var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.Simtype[gc.TUINT]] n1.Xoffset += int64(gc.Array_cap) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OADDR: if n.Bounded { // let race detector avoid nil checks gc.Disable_checknil++ } agen(nl, res) if n.Bounded { gc.Disable_checknil-- } case gc.OCALLMETH: gc.Cgen_callmeth(n, 0) cgen_callret(n, res) case gc.OCALLINTER: cgen_callinter(n, res, 0) cgen_callret(n, res) case gc.OCALLFUNC: cgen_call(n, 0) cgen_callret(n, res) case gc.OMOD, gc.ODIV: if gc.Isfloat[n.Type.Etype] { a = optoas(int(n.Op), nl.Type) goto abop } if nl.Ullman >= nr.Ullman { var n1 gc.Node regalloc(&n1, nl.Type, res) cgen(nl, &n1) cgen_div(int(n.Op), &n1, nr, res) regfree(&n1) } else { var n2 gc.Node if !gc.Smallintconst(nr) { regalloc(&n2, nr.Type, res) cgen(nr, &n2) } else { n2 = *nr } cgen_div(int(n.Op), nl, &n2, res) if n2.Op != gc.OLITERAL { regfree(&n2) } } case gc.OLSH, gc.ORSH, gc.OLROT: cgen_shift(int(n.Op), n.Bounded, nl, nr, res) } return /* * put simplest on right - we'll generate into left * and then adjust it using the computation of right. * constants and variables have the same ullman * count, so look for constants specially. * * an integer constant we can use as an immediate * is simpler than a variable - we can use the immediate * in the adjustment instruction directly - so it goes * on the right. * * other constants, like big integers or floating point * constants, require a mov into a register, so those * might as well go on the left, so we can reuse that * register for the computation. */ sbop: // symmetric binary if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) { r := nl nl = nr nr = r } abop: // asymmetric binary var n1 gc.Node var n2 gc.Node if nl.Ullman >= nr.Ullman { regalloc(&n1, nl.Type, res) cgen(nl, &n1) /* * This generates smaller code - it avoids a MOV - but it's * easily 10% slower due to not being able to * optimize/manipulate the move. * To see, run: go test -bench . crypto/md5 * with and without. * if(sudoaddable(a, nr, &addr)) { p1 = gins(a, N, &n1); p1->from = addr; gmove(&n1, res); sudoclean(); regfree(&n1); goto ret; } * */ // TODO(minux): enable using constants directly in certain instructions. //if(smallintconst(nr)) // n2 = *nr; //else { regalloc(&n2, nr.Type, nil) cgen(nr, &n2) } else //} { //if(smallintconst(nr)) // n2 = *nr; //else { regalloc(&n2, nr.Type, res) cgen(nr, &n2) //} regalloc(&n1, nl.Type, nil) cgen(nl, &n1) } gins(a, &n2, &n1) // Normalize result for types smaller than word. 
if n.Type.Width < int64(gc.Widthreg) { switch n.Op { case gc.OADD, gc.OSUB, gc.OMUL, gc.OLSH: gins(optoas(gc.OAS, n.Type), &n1, &n1) } } gmove(&n1, res) regfree(&n1) if n2.Op != gc.OLITERAL { regfree(&n2) } return }
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { gc.Dump("\nclearfat", nl) } w := uint32(nl.Type.Width) // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := w % 4 // bytes q := w / 4 // quads if q < 4 { // Write sequence of MOV 0, off(base) instead of using STOSL. // The hope is that although the code will be slightly longer, // the MOVs will have no dependencies and pipeline better // than the unrolled STOSL loop. // NOTE: Must use agen, not igen, so that optimizer sees address // being taken. We are not writing on field boundaries. var n1 gc.Node gc.Regalloc(&n1, gc.Types[gc.Tptr], nil) gc.Agen(nl, &n1) n1.Op = gc.OINDREG var z gc.Node gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) for { tmp14 := q q-- if tmp14 <= 0 { break } n1.Type = z.Type gins(x86.AMOVL, &z, &n1) n1.Xoffset += 4 } gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) for { tmp15 := c c-- if tmp15 <= 0 { break } n1.Type = z.Type gins(x86.AMOVB, &z, &n1) n1.Xoffset++ } gc.Regfree(&n1) return } var n1 gc.Node gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI) gc.Agen(nl, &n1) gconreg(x86.AMOVL, 0, x86.REG_AX) if q > 128 || (q >= 4 && gc.Nacl) { gconreg(x86.AMOVL, int64(q), x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+ } else if q >= 4 { p := gins(obj.ADUFFZERO, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) // 1 and 128 = magic constants: see ../../runtime/asm_386.s p.To.Offset = 1 * (128 - int64(q)) } else { for q > 0 { gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+ q-- } } for c > 0 { gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+ c-- } }
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { gc.Dump("\nclearfat", nl) } w := nl.Type.Width // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := w % 8 // bytes q := w / 8 // quads if q < 4 { // Write sequence of MOV 0, off(base) instead of using STOSQ. // The hope is that although the code will be slightly longer, // the MOVs will have no dependencies and pipeline better // than the unrolled STOSQ loop. // NOTE: Must use agen, not igen, so that optimizer sees address // being taken. We are not writing on field boundaries. var n1 gc.Node gc.Agenr(nl, &n1, nil) n1.Op = gc.OINDREG var z gc.Node gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) for { tmp14 := q q-- if tmp14 <= 0 { break } n1.Type = z.Type gins(x86.AMOVQ, &z, &n1) n1.Xoffset += 8 } if c >= 4 { gc.Nodconst(&z, gc.Types[gc.TUINT32], 0) n1.Type = z.Type gins(x86.AMOVL, &z, &n1) n1.Xoffset += 4 c -= 4 } gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) for { tmp15 := c c-- if tmp15 <= 0 { break } n1.Type = z.Type gins(x86.AMOVB, &z, &n1) n1.Xoffset++ } gc.Regfree(&n1) return } var oldn1 gc.Node var n1 gc.Node savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr]) gc.Agen(nl, &n1) var ax gc.Node var oldax gc.Node savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr]) gconreg(x86.AMOVL, 0, x86.REG_AX) if q > 128 || gc.Nacl { gconreg(movptr, q, x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+ } else { if di := dzDI(q); di != 0 { gconreg(addptr, di, x86.REG_DI) } p := gins(obj.ADUFFZERO, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) p.To.Offset = dzOff(q) } z := ax di := n1 if w >= 8 && c >= 4 { di.Op = gc.OINDREG z.Type = gc.Types[gc.TINT64] di.Type = z.Type p := gins(x86.AMOVQ, &z, &di) p.To.Scale = 1 p.To.Offset = c - 8 } else if c >= 4 { di.Op = gc.OINDREG z.Type = gc.Types[gc.TINT32] di.Type = z.Type gins(x86.AMOVL, &z, &di) if c > 4 { p := gins(x86.AMOVL, &z, &di) p.To.Scale = 1 p.To.Offset = c - 4 } } else { for c > 0 { gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+ c-- } } restx(&n1, &oldn1) restx(&ax, &oldax) }
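// Illustrative sketch (not compiler code): the strategy selection made by the
// amd64 clearfat above, restated as plain Go with hypothetical names. The
// thresholds mirror the code: fewer than 4 quadwords are cleared with
// unrolled MOVs, very large objects (or NaCl targets) use REP STOSQ, and
// everything in between goes through DUFFZERO.
func clearStrategy(w int64, nacl bool) string {
	q := w / 8 // quadwords to clear
	switch {
	case q < 4:
		return "unrolled MOV"
	case q > 128 || nacl:
		return "REP STOSQ"
	default:
		return "DUFFZERO"
	}
}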
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gc.Cgen(nl, &n1) sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 var n3 gc.Node gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n1) gins(a, &n3, &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } if nl.Ullman >= gc.UINF { var n4 gc.Node gc.Tempname(&n4, nl.Type) gc.Cgen(nl, &n4) nl = &n4 } if nr.Ullman >= gc.UINF { var n5 gc.Node gc.Tempname(&n5, nr.Type) gc.Cgen(nr, &n5) nr = &n5 } rcx := int(reg[x86.REG_CX]) var n1 gc.Node gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) // Allow either uint32 or uint64 as shift type, // to avoid unnecessary conversion from uint32 to uint64 // just to do the comparison. tcount := gc.Types[gc.Simtype[nr.Type.Etype]] if tcount.Etype < gc.TUINT32 { tcount = gc.Types[gc.TUINT32] } gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX var n3 gc.Node gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX) var oldcx gc.Node if rcx > 0 && !gc.Samereg(&cx, res) { gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil) gmove(&cx, &oldcx) } cx.Type = tcount var n2 gc.Node if gc.Samereg(&cx, res) { gc.Regalloc(&n2, nl.Type, nil) } else { gc.Regalloc(&n2, nl.Type, res) } if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) gmove(&n1, &n3) } else { gc.Cgen(nr, &n1) gmove(&n1, &n3) gc.Cgen(nl, &n2) } gc.Regfree(&n3) // test and fix up large shifts if !bounded { gc.Nodconst(&n3, tcount, nl.Type.Width*8) gins(optoas(gc.OCMP, tcount), &n1, &n3) p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1) if op == gc.ORSH && gc.Issigned[nl.Type.Etype] { gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n2) } else { gc.Nodconst(&n3, nl.Type, 0) gmove(&n3, &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) if oldcx.Op != 0 { cx.Type = gc.Types[gc.TUINT64] gmove(&oldcx, &cx) gc.Regfree(&oldcx) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
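// Illustrative sketch (not compiler code): what the "large shift gets 2
// shifts by width-1" trick above computes for a 64-bit operand when the
// constant count is >= the operand width. Two arithmetic right shifts by 63
// leave only copies of the sign bit; two left shifts by 63 leave zero,
// matching Go's shift semantics.
func largeShiftRight(x int64) int64 {
	return (x >> 63) >> 63 // -1 if x is negative, 0 otherwise
}

func largeShiftLeft(x int64) int64 {
	return (x << 63) << 63 // always 0
}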
/* * n is call to interface method. * generate res = n. */ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) { i := n.Left if i.Op != gc.ODOTINTER { gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0)) } f := i.Right // field if f.Op != gc.ONAME { gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0)) } i = i.Left // interface if i.Addable == 0 { var tmpi gc.Node gc.Tempname(&tmpi, i.Type) cgen(i, &tmpi) i = &tmpi } gc.Genlist(n.List) // assign the args // i is now addable, prepare an indirected // register to hold its address. var nodi gc.Node igen(i, &nodi, res) // REG = &inter var nodsp gc.Node gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP) nodsp.Xoffset = 0 if proc != 0 { nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn } nodi.Type = gc.Types[gc.Tptr] nodi.Xoffset += int64(gc.Widthptr) cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data var nodo gc.Node regalloc(&nodo, gc.Types[gc.Tptr], res) nodi.Type = gc.Types[gc.Tptr] nodi.Xoffset -= int64(gc.Widthptr) cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab regfree(&nodi) var nodr gc.Node regalloc(&nodr, gc.Types[gc.Tptr], &nodo) if n.Left.Xoffset == gc.BADWIDTH { gc.Fatal("cgen_callinter: badwidth") } gc.Cgen_checknil(&nodo) nodo.Op = gc.OINDREG nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8 if proc == 0 { // plain call: use direct c function pointer - more efficient cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f] proc = 3 } else { // go/defer. generate go func value. gins(x86.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f] } nodr.Type = n.Left.Type ginscall(&nodr, proc) regfree(&nodr) regfree(&nodo) }
func stackcopy(n, res *gc.Node, osrc, odst, w int64) { var dst gc.Node gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI) var src gc.Node gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI) var tsrc gc.Node gc.Tempname(&tsrc, gc.Types[gc.Tptr]) var tdst gc.Node gc.Tempname(&tdst, gc.Types[gc.Tptr]) if n.Addable == 0 { gc.Agen(n, &tsrc) } if res.Addable == 0 { gc.Agen(res, &tdst) } if n.Addable != 0 { gc.Agen(n, &src) } else { gmove(&tsrc, &src) } if res.Op == gc.ONAME { gc.Gvardef(res) } if res.Addable != 0 { gc.Agen(res, &dst) } else { gmove(&tdst, &dst) } c := int32(w % 4) // bytes q := int32(w / 4) // doublewords // if we are copying forward on the stack and // the src and dst overlap, then reverse direction if osrc < odst && int64(odst) < int64(osrc)+w { // reverse direction gins(x86.ASTD, nil, nil) // set direction flag if c > 0 { gconreg(x86.AADDL, w-1, x86.REG_SI) gconreg(x86.AADDL, w-1, x86.REG_DI) gconreg(x86.AMOVL, int64(c), x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)- } if q > 0 { if c > 0 { gconreg(x86.AADDL, -3, x86.REG_SI) gconreg(x86.AADDL, -3, x86.REG_DI) } else { gconreg(x86.AADDL, w-4, x86.REG_SI) gconreg(x86.AADDL, w-4, x86.REG_DI) } gconreg(x86.AMOVL, int64(q), x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)- } // we leave with the flag clear gins(x86.ACLD, nil, nil) } else { gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove? // normal direction if q > 128 || (q >= 4 && gc.Nacl) { gconreg(x86.AMOVL, int64(q), x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+ } else if q >= 4 { p := gins(obj.ADUFFCOPY, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg)) // 10 and 128 = magic constants: see ../../runtime/asm_386.s p.To.Offset = 10 * (128 - int64(q)) } else if !gc.Nacl && c == 0 { var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX) // We don't need the MOVSL side-effect of updating SI and DI, // and issuing a sequence of MOVLs directly is faster. src.Op = gc.OINDREG dst.Op = gc.OINDREG for q > 0 { gmove(&src, &cx) // MOVL x+(SI),CX gmove(&cx, &dst) // MOVL CX,x+(DI) src.Xoffset += 4 dst.Xoffset += 4 q-- } } else { for q > 0 { gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+ q-- } } for c > 0 { gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+ c-- } } }
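// Illustrative sketch (not compiler code): the overlap test both stackcopy
// variants use to pick a copy direction. A forward copy would clobber source
// bytes not yet read when the destination starts inside the source range, so
// the code sets the direction flag and copies backward instead. The helper
// name is hypothetical.
func mustCopyBackward(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}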
/* * generate division. * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will trap. // Also the byte divide instruction needs AH, // which we otherwise don't have to deal with. // Easiest way to avoid for int8, int16: use int32. // For int32 and int64, use explicit test. // Could use int64 hw for int32. t := nl.Type t0 := t check := 0 if gc.Issigned[t.Etype] { check = 1 if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { check = 0 } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { check = 0 } } if t.Width < 4 { if gc.Issigned[t.Etype] { t = gc.Types[gc.TINT32] } else { t = gc.Types[gc.TUINT32] } check = 0 } a := optoas(op, t) var n3 gc.Node gc.Regalloc(&n3, t0, nil) var ax gc.Node var oldax gc.Node if nl.Ullman >= nr.Ullman { savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen gc.Cgen(nr, &n3) gc.Regfree(&ax) } else { gc.Cgen(nr, &n3) savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) } if t != t0 { // Convert ax1 := ax n31 := n3 ax.Type = t n3.Type = t gmove(&ax1, &ax) gmove(&n31, &n3) } var n4 gc.Node if gc.Nacl { // Native Client does not relay the divide-by-zero trap // to the executing program, so we must insert a check // for ourselves. gc.Nodconst(&n4, t, 0) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) } var p2 *obj.Prog if check != 0 { gc.Nodconst(&n4, t, -1) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, &ax) gmove(&ax, res) } else { // a % (-1) is 0. gc.Nodconst(&n4, t, 0) gmove(&n4, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } var olddx gc.Node var dx gc.Node savex(x86.REG_DX, &dx, &olddx, res, t) if !gc.Issigned[t.Etype] { gc.Nodconst(&n4, t, 0) gmove(&n4, &dx) } else { gins(optoas(gc.OEXTEND, t), nil, nil) } gins(a, &n3, nil) gc.Regfree(&n3) if op == gc.ODIV { gmove(&ax, res) } else { gmove(&dx, res) } restx(&dx, &olddx) if check != 0 { gc.Patch(p2, gc.Pc) } restx(&ax, &oldax) }
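// Illustrative sketch (not compiler code): the divisor == -1 special case the
// code above branches around, because the hardware divide traps when the most
// negative value is divided by -1. Go defines MinInt64 / -1 to wrap back to
// MinInt64 and MinInt64 % -1 to be 0, which is exactly what negating the
// dividend produces.
func div64(a, b int64) (q, r int64) {
	if b == -1 {
		return -a, 0 // a / -1 == -a (wrapping), a % -1 == 0
	}
	return a / b, a % b
}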
func memname(n *gc.Node, t *gc.Type) {
	gc.Tempname(n, t)
	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
	n.Orig.Sym = n.Sym
}
/* * generate: * newreg = &n; * res = newreg * * on exit, a has been changed to be *newreg. * caller must regfree(a). * The generated code checks that the result is not *nil. */ func igen(n *gc.Node, a *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("\nigen-n", n) } switch n.Op { case gc.ONAME: if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF { break } *a = *n return // Increase the refcount of the register so that igen's caller // has to call regfree. case gc.OINDREG: if n.Val.U.Reg != ppc64.REGSP { reg[n.Val.U.Reg]++ } *a = *n return case gc.ODOT: igen(n.Left, a, res) a.Xoffset += n.Xoffset a.Type = n.Type fixlargeoffset(a) return case gc.ODOTPTR: cgenr(n.Left, a, res) gc.Cgen_checknil(a) a.Op = gc.OINDREG a.Xoffset += n.Xoffset a.Type = n.Type fixlargeoffset(a) return case gc.OCALLFUNC, gc.OCALLMETH, gc.OCALLINTER: switch n.Op { case gc.OCALLFUNC: cgen_call(n, 0) case gc.OCALLMETH: gc.Cgen_callmeth(n, 0) case gc.OCALLINTER: cgen_callinter(n, nil, 0) } var flist gc.Iter fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type)) *a = gc.Node{} a.Op = gc.OINDREG a.Val.U.Reg = ppc64.REGSP a.Addable = 1 a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP) a.Type = n.Type return // Index of fixed-size array by constant can // put the offset in the addressing. // Could do the same for slice except that we need // to use the real index for the bounds checking. case gc.OINDEX: if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) { if gc.Isconst(n.Right, gc.CTINT) { // Compute &a. if !gc.Isptr[n.Left.Type.Etype] { igen(n.Left, a, res) } else { var n1 gc.Node igen(n.Left, &n1, res) gc.Cgen_checknil(&n1) regalloc(a, gc.Types[gc.Tptr], res) gmove(&n1, a) regfree(&n1) a.Op = gc.OINDREG } // Compute &a[i] as &a + i*width. a.Type = n.Type a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width fixlargeoffset(a) return } } } agenr(n, a, res) a.Op = gc.OINDREG a.Type = n.Type }
/* * generate division. * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will generate undefined result. // Also need to explicitly trap on division on zero, // the hardware will silently generate undefined result. // DIVW will leave unpredicable result in higher 32-bit, // so always use DIVD/DIVDU. t := nl.Type t0 := t check := 0 if gc.Issigned[t.Etype] { check = 1 if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { check = 0 } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { check = 0 } } if t.Width < 8 { if gc.Issigned[t.Etype] { t = gc.Types[gc.TINT64] } else { t = gc.Types[gc.TUINT64] } check = 0 } a := optoas(gc.ODIV, t) var tl gc.Node gc.Regalloc(&tl, t0, nil) var tr gc.Node gc.Regalloc(&tr, t0, nil) if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &tl) gc.Cgen(nr, &tr) } else { gc.Cgen(nr, &tr) gc.Cgen(nl, &tl) } if t != t0 { // Convert tl2 := tl tr2 := tr tl.Type = t tr.Type = t gmove(&tl2, &tl) gmove(&tr2, &tr) } // Handle divide-by-zero panic. p1 := gins(optoas(gc.OCMP, t), &tr, nil) p1.To.Type = obj.TYPE_REG p1.To.Reg = ppc64.REGZERO p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) var p2 *obj.Prog if check != 0 { var nm1 gc.Node gc.Nodconst(&nm1, t, -1) gins(optoas(gc.OCMP, t), &tr, &nm1) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, &tl) gmove(&tl, res) } else { // a % (-1) is 0. var nz gc.Node gc.Nodconst(&nz, t, 0) gmove(&nz, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } p1 = gins(a, &tr, &tl) if op == gc.ODIV { gc.Regfree(&tr) gmove(&tl, res) } else { // A%B = A-(A/B*B) var tm gc.Node gc.Regalloc(&tm, t, nil) // patch div to use the 3 register form // TODO(minux): add gins3? p1.Reg = p1.To.Reg p1.To.Reg = tm.Val.U.Reg gins(optoas(gc.OMUL, t), &tr, &tm) gc.Regfree(&tr) gins(optoas(gc.OSUB, t), &tm, &tl) gc.Regfree(&tm) gmove(&tl, res) } gc.Regfree(&tl) if check != 0 { gc.Patch(p2, gc.Pc) } }
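// Illustrative sketch (not compiler code): the checks the ppc64 dodiv above
// must emit explicitly, because the hardware neither traps on division by
// zero nor defines the most-negative-value / -1 case; the generated code
// branches to panicdivide and around the DIVD respectively.
func checkedDiv64(a, b int64) (q, r int64) {
	if b == 0 {
		panic("integer divide by zero") // stands in for the panicdivide call
	}
	if b == -1 {
		return -a, 0 // a / -1 is -a, a % -1 is 0
	}
	return a / b, a % b
}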
func stackcopy(n, ns *gc.Node, osrc, odst, w int64) { var noddi gc.Node gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI) var nodsi gc.Node gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI) var nodl gc.Node var nodr gc.Node if n.Ullman >= ns.Ullman { gc.Agenr(n, &nodr, &nodsi) if ns.Op == gc.ONAME { gc.Gvardef(ns) } gc.Agenr(ns, &nodl, &noddi) } else { if ns.Op == gc.ONAME { gc.Gvardef(ns) } gc.Agenr(ns, &nodl, &noddi) gc.Agenr(n, &nodr, &nodsi) } if nodl.Val.U.Reg != x86.REG_DI { gmove(&nodl, &noddi) } if nodr.Val.U.Reg != x86.REG_SI { gmove(&nodr, &nodsi) } gc.Regfree(&nodl) gc.Regfree(&nodr) c := w % 8 // bytes q := w / 8 // quads var oldcx gc.Node var cx gc.Node savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64]) // if we are copying forward on the stack and // the src and dst overlap, then reverse direction if osrc < odst && odst < osrc+w { // reverse direction gins(x86.ASTD, nil, nil) // set direction flag if c > 0 { gconreg(addptr, w-1, x86.REG_SI) gconreg(addptr, w-1, x86.REG_DI) gconreg(movptr, c, x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)- } if q > 0 { if c > 0 { gconreg(addptr, -7, x86.REG_SI) gconreg(addptr, -7, x86.REG_DI) } else { gconreg(addptr, w-8, x86.REG_SI) gconreg(addptr, w-8, x86.REG_DI) } gconreg(movptr, q, x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)- } // we leave with the flag clear gins(x86.ACLD, nil, nil) } else { // normal direction if q > 128 || (gc.Nacl && q >= 4) { gconreg(movptr, q, x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+ } else if q >= 4 { p := gins(obj.ADUFFCOPY, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg)) // 14 and 128 = magic constants: see ../../runtime/asm_amd64.s p.To.Offset = 14 * (128 - q) } else if !gc.Nacl && c == 0 { // We don't need the MOVSQ side-effect of updating SI and DI, // and issuing a sequence of MOVQs directly is faster. nodsi.Op = gc.OINDREG noddi.Op = gc.OINDREG for q > 0 { gmove(&nodsi, &cx) // MOVQ x+(SI),CX gmove(&cx, &noddi) // MOVQ CX,x+(DI) nodsi.Xoffset += 8 noddi.Xoffset += 8 q-- } } else { for q > 0 { gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+ q-- } } // copy the remaining c bytes if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) { for c > 0 { gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+ c-- } } else if w < 8 || c <= 4 { nodsi.Op = gc.OINDREG noddi.Op = gc.OINDREG cx.Type = gc.Types[gc.TINT32] nodsi.Type = gc.Types[gc.TINT32] noddi.Type = gc.Types[gc.TINT32] if c > 4 { nodsi.Xoffset = 0 noddi.Xoffset = 0 gmove(&nodsi, &cx) gmove(&cx, &noddi) } nodsi.Xoffset = c - 4 noddi.Xoffset = c - 4 gmove(&nodsi, &cx) gmove(&cx, &noddi) } else { nodsi.Op = gc.OINDREG noddi.Op = gc.OINDREG cx.Type = gc.Types[gc.TINT64] nodsi.Type = gc.Types[gc.TINT64] noddi.Type = gc.Types[gc.TINT64] nodsi.Xoffset = c - 8 noddi.Xoffset = c - 8 gmove(&nodsi, &cx) gmove(&cx, &noddi) } } restx(&cx, &oldcx) }
/* * generate: * res = &n; * The generated code checks that the result is not nil. */ func agen(n *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("\nagen-res", res) gc.Dump("agen-r", n) } if n == nil || n.Type == nil { return } for n.Op == gc.OCONVNOP { n = n.Left } if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) { // Use of a nil interface or nil slice. // Create a temporary we can take the address of and read. // The generated code is just going to panic, so it need not // be terribly efficient. See issue 3670. var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Gvardef(&n1) clearfat(&n1) var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], res) var n3 gc.Node n3.Op = gc.OADDR n3.Left = &n1 gins(ppc64.AMOVD, &n3, &n2) gmove(&n2, res) regfree(&n2) return } if n.Addable != 0 { var n1 gc.Node n1.Op = gc.OADDR n1.Left = n var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], res) gins(ppc64.AMOVD, &n1, &n2) gmove(&n2, res) regfree(&n2) return } nl := n.Left switch n.Op { default: gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign)) // TODO(minux): 5g has this: Release res so that it is available for cgen_call. // Pick it up again after the call for OCALLMETH and OCALLFUNC. case gc.OCALLMETH: gc.Cgen_callmeth(n, 0) cgen_aret(n, res) case gc.OCALLINTER: cgen_callinter(n, res, 0) cgen_aret(n, res) case gc.OCALLFUNC: cgen_call(n, 0) cgen_aret(n, res) case gc.OSLICE, gc.OSLICEARR, gc.OSLICESTR, gc.OSLICE3, gc.OSLICE3ARR: var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_slice(n, &n1) agen(&n1, res) case gc.OEFACE: var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_eface(n, &n1) agen(&n1, res) case gc.OINDEX: var n1 gc.Node agenr(n, &n1, res) gmove(&n1, res) regfree(&n1) // should only get here with names in this func. case gc.ONAME: if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth { gc.Dump("bad agen", n) gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth) } // should only get here for heap vars or paramref if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF { gc.Dump("bad agen", n) gc.Fatal("agen: bad ONAME class %#x", n.Class) } cgen(n.Heapaddr, res) if n.Xoffset != 0 { ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res) } case gc.OIND: cgen(nl, res) gc.Cgen_checknil(res) case gc.ODOT: agen(nl, res) if n.Xoffset != 0 { ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res) } case gc.ODOTPTR: cgen(nl, res) gc.Cgen_checknil(res) if n.Xoffset != 0 { ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res) } } }
/* * allocate a register (reusing res if possible) and generate * a = &n * The caller must call regfree(a). * The generated code checks that the result is not nil. */ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("agenr-n", n) } nl := n.Left nr := n.Right switch n.Op { case gc.ODOT, gc.ODOTPTR, gc.OCALLFUNC, gc.OCALLMETH, gc.OCALLINTER: var n1 gc.Node igen(n, &n1, res) regalloc(a, gc.Types[gc.Tptr], &n1) agen(&n1, a) regfree(&n1) case gc.OIND: cgenr(n.Left, a, res) gc.Cgen_checknil(a) case gc.OINDEX: var p2 *obj.Prog // to be patched to panicindex. w := uint32(n.Type.Width) //bounded = debug['B'] || n->bounded; var n3 gc.Node var n1 gc.Node if nr.Addable != 0 { var tmp gc.Node if !gc.Isconst(nr, gc.CTINT) { gc.Tempname(&tmp, gc.Types[gc.TINT64]) } if !gc.Isconst(nl, gc.CTSTR) { agenr(nl, &n3, res) } if !gc.Isconst(nr, gc.CTINT) { cgen(nr, &tmp) regalloc(&n1, tmp.Type, nil) gmove(&tmp, &n1) } } else if nl.Addable != 0 { if !gc.Isconst(nr, gc.CTINT) { var tmp gc.Node gc.Tempname(&tmp, gc.Types[gc.TINT64]) cgen(nr, &tmp) regalloc(&n1, tmp.Type, nil) gmove(&tmp, &n1) } if !gc.Isconst(nl, gc.CTSTR) { agenr(nl, &n3, res) } } else { var tmp gc.Node gc.Tempname(&tmp, gc.Types[gc.TINT64]) cgen(nr, &tmp) nr = &tmp if !gc.Isconst(nl, gc.CTSTR) { agenr(nl, &n3, res) } regalloc(&n1, tmp.Type, nil) gins(optoas(gc.OAS, tmp.Type), &tmp, &n1) } // &a is in &n3 (allocated in res) // i is in &n1 (if not constant) // w is width // constant index if gc.Isconst(nr, gc.CTINT) { if gc.Isconst(nl, gc.CTSTR) { gc.Fatal("constant string constant index") } v := uint64(gc.Mpgetfix(nr.Val.U.Xval)) if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING { if gc.Debug['B'] == 0 && !n.Bounded { n1 = n3 n1.Op = gc.OINDREG n1.Type = gc.Types[gc.Tptr] n1.Xoffset = int64(gc.Array_nel) var n4 gc.Node regalloc(&n4, n1.Type, nil) gmove(&n1, &n4) ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v)) regfree(&n4) p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1) ginscall(gc.Panicindex, 0) gc.Patch(p1, gc.Pc) } n1 = n3 n1.Op = gc.OINDREG n1.Type = gc.Types[gc.Tptr] n1.Xoffset = int64(gc.Array_array) gmove(&n1, &n3) } if v*uint64(w) != 0 { ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3) } *a = n3 break } var n2 gc.Node regalloc(&n2, gc.Types[gc.TINT64], &n1) // i gmove(&n1, &n2) regfree(&n1) var n4 gc.Node if gc.Debug['B'] == 0 && !n.Bounded { // check bounds if gc.Isconst(nl, gc.CTSTR) { gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval))) } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING { n1 = n3 n1.Op = gc.OINDREG n1.Type = gc.Types[gc.Tptr] n1.Xoffset = int64(gc.Array_nel) regalloc(&n4, gc.Types[gc.TUINT64], nil) gmove(&n1, &n4) } else { if nl.Type.Bound < (1<<15)-1 { gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound) } else { regalloc(&n4, gc.Types[gc.TUINT64], nil) p1 := gins(ppc64.AMOVD, nil, &n4) p1.From.Type = obj.TYPE_CONST p1.From.Offset = nl.Type.Bound } } gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4) if n4.Op == gc.OREGISTER { regfree(&n4) } p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) if p2 != nil { gc.Patch(p2, gc.Pc) } ginscall(gc.Panicindex, 0) gc.Patch(p1, gc.Pc) } if gc.Isconst(nl, gc.CTSTR) { regalloc(&n3, gc.Types[gc.Tptr], res) p1 := gins(ppc64.AMOVD, nil, &n3) gc.Datastring(nl.Val.U.Sval, &p1.From) p1.From.Type = obj.TYPE_ADDR } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING { n1 = n3 n1.Op = gc.OINDREG n1.Type = gc.Types[gc.Tptr] n1.Xoffset = int64(gc.Array_array) 
gmove(&n1, &n3) } if w == 0 { } else // nothing to do if w == 1 { /* w already scaled */ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3) /* else if(w == 2 || w == 4 || w == 8) { // TODO(minux): scale using shift } */ } else { regalloc(&n4, gc.Types[gc.TUINT64], nil) gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w)) gmove(&n1, &n4) gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2) gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3) regfree(&n4) } *a = n3 regfree(&n2) default: regalloc(a, gc.Types[gc.Tptr], res) agen(n, a) } }
/* * generate: * if(n == true) goto to; */ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) { if gc.Debug['g'] != 0 { gc.Dump("\nbgen", n) } if n == nil { n = gc.Nodbool(true) } if n.Ninit != nil { gc.Genlist(n.Ninit) } if n.Type == nil { gc.Convlit(&n, gc.Types[gc.TBOOL]) if n.Type == nil { return } } et := int(n.Type.Etype) if et != gc.TBOOL { gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0)) gc.Patch(gins(obj.AEND, nil, nil), to) return } var nr *gc.Node for n.Op == gc.OCONVNOP { n = n.Left if n.Ninit != nil { gc.Genlist(n.Ninit) } } var nl *gc.Node switch n.Op { default: var n1 gc.Node regalloc(&n1, n.Type, nil) cgen(n, &n1) var n2 gc.Node gc.Nodconst(&n2, n.Type, 0) gins(optoas(gc.OCMP, n.Type), &n1, &n2) a := ppc64.ABNE if !true_ { a = ppc64.ABEQ } gc.Patch(gc.Gbranch(a, n.Type, likely), to) regfree(&n1) return // need to ask if it is bool? case gc.OLITERAL: if !true_ == (n.Val.U.Bval == 0) { gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to) } return case gc.OANDAND, gc.OOROR: if (n.Op == gc.OANDAND) == true_ { p1 := gc.Gbranch(obj.AJMP, nil, 0) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) bgen(n.Left, !true_, -likely, p2) bgen(n.Right, !true_, -likely, p2) p1 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, to) gc.Patch(p2, gc.Pc) } else { bgen(n.Left, true_, likely, to) bgen(n.Right, true_, likely, to) } return case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: nr = n.Right if nr == nil || nr.Type == nil { return } fallthrough case gc.ONOT: // unary nl = n.Left if nl == nil || nl.Type == nil { return } } switch n.Op { case gc.ONOT: bgen(nl, !true_, likely, to) return case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: a := int(n.Op) if !true_ { if gc.Isfloat[nr.Type.Etype] { // brcom is not valid on floats when NaN is involved. 
p1 := gc.Gbranch(ppc64.ABR, nil, 0) p2 := gc.Gbranch(ppc64.ABR, nil, 0) gc.Patch(p1, gc.Pc) ll := n.Ninit // avoid re-genning ninit n.Ninit = nil bgen(n, true, -likely, p2) n.Ninit = ll gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to) gc.Patch(p2, gc.Pc) return } a = gc.Brcom(a) true_ = !true_ } // make simplest on right if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) { a = gc.Brrev(a) r := nl nl = nr nr = r } if gc.Isslice(nl.Type) { // front end should only leave cmp to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal slice comparison") break } a = optoas(a, gc.Types[gc.Tptr]) var n1 gc.Node igen(nl, &n1, nil) n1.Xoffset += int64(gc.Array_array) n1.Type = gc.Types[gc.Tptr] var tmp gc.Node gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0) var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], &n1) gmove(&n1, &n2) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp) regfree(&n2) gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to) regfree(&n1) break } if gc.Isinter(nl.Type) { // front end should only leave cmp to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal interface comparison") break } a = optoas(a, gc.Types[gc.Tptr]) var n1 gc.Node igen(nl, &n1, nil) n1.Type = gc.Types[gc.Tptr] var tmp gc.Node gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0) var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], &n1) gmove(&n1, &n2) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp) regfree(&n2) gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to) regfree(&n1) break } if gc.Iscomplex[nl.Type.Etype] { gc.Complexbool(a, nl, nr, true_, likely, to) break } var n1 gc.Node var n2 gc.Node if nr.Ullman >= gc.UINF { regalloc(&n1, nl.Type, nil) cgen(nl, &n1) var tmp gc.Node gc.Tempname(&tmp, nl.Type) gmove(&n1, &tmp) regfree(&n1) regalloc(&n2, nr.Type, nil) cgen(nr, &n2) regalloc(&n1, nl.Type, nil) cgen(&tmp, &n1) goto cmp } regalloc(&n1, nl.Type, nil) cgen(nl, &n1) // TODO(minux): cmpi does accept 16-bit signed immediate as p->to. // and cmpli accepts 16-bit unsigned immediate. //if(smallintconst(nr)) { // gins(optoas(OCMP, nr->type), &n1, nr); // patch(gbranch(optoas(a, nr->type), nr->type, likely), to); // regfree(&n1); // break; //} regalloc(&n2, nr.Type, nil) cgen(nr, &n2) cmp: l := &n1 r := &n2 gins(optoas(gc.OCMP, nr.Type), l, r) if gc.Isfloat[nr.Type.Etype] && (a == gc.OLE || a == gc.OGE) { // To get NaN right, must rewrite x <= y into separate x < y or x = y. switch a { case gc.OLE: a = gc.OLT case gc.OGE: a = gc.OGT } gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to) gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to) } else { gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to) } regfree(&n1) regfree(&n2) } return }
/* * copy a composite value by moving its individual components. * Slices, strings and interfaces are supported. * Small structs or arrays with elements of basic type are * also supported. * nr is N when assigning a zero value. * return 1 if can do, 0 if can't. */ func componentgen(nr *gc.Node, nl *gc.Node) bool { var nodl gc.Node var nodr gc.Node freel := 0 freer := 0 switch nl.Type.Etype { default: goto no case gc.TARRAY: t := nl.Type // Slices are ok. if gc.Isslice(t) { break } // Small arrays are ok. if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) { break } goto no // Small structs with non-fat types are ok. // Zero-sized structs are treated separately elsewhere. case gc.TSTRUCT: fldcount := int64(0) for t := nl.Type.Type; t != nil; t = t.Down { if gc.Isfat(t.Type) { goto no } if t.Etype != gc.TFIELD { gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong)) } fldcount++ } if fldcount == 0 || fldcount > 4 { goto no } case gc.TSTRING, gc.TINTER: break } nodl = *nl if !cadable(nl) { if nr != nil && !cadable(nr) { goto no } igen(nl, &nodl, nil) freel = 1 } if nr != nil { nodr = *nr if !cadable(nr) { igen(nr, &nodr, nil) freer = 1 } } else { // When zeroing, prepare a register containing zero. var tmp gc.Node gc.Nodconst(&tmp, nl.Type, 0) regalloc(&nodr, gc.Types[gc.TUINT], nil) gmove(&tmp, &nodr) freer = 1 } // nl and nr are 'cadable' which basically means they are names (variables) now. // If they are the same variable, don't generate any code, because the // VARDEF we generate will mark the old value as dead incorrectly. // (And also the assignments are useless.) if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr { goto yes } switch nl.Type.Etype { // componentgen for arrays. case gc.TARRAY: if nl.Op == gc.ONAME { gc.Gvardef(nl) } t := nl.Type if !gc.Isslice(t) { nodl.Type = t.Type nodr.Type = nodl.Type for fldcount := int64(0); fldcount < t.Bound; fldcount++ { if nr == nil { gc.Clearslim(&nodl) } else { gmove(&nodr, &nodl) } nodl.Xoffset += t.Type.Width nodr.Xoffset += t.Type.Width } goto yes } // componentgen for slices. 
nodl.Xoffset += int64(gc.Array_array) nodl.Type = gc.Ptrto(nl.Type.Type) if nr != nil { nodr.Xoffset += int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodl.Type = gc.Types[gc.Simtype[gc.TUINT]] if nr != nil { nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel) nodl.Type = gc.Types[gc.Simtype[gc.TUINT]] if nr != nil { nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel) nodr.Type = nodl.Type } gmove(&nodr, &nodl) goto yes case gc.TSTRING: if nl.Op == gc.ONAME { gc.Gvardef(nl) } nodl.Xoffset += int64(gc.Array_array) nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8]) if nr != nil { nodr.Xoffset += int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodl.Type = gc.Types[gc.Simtype[gc.TUINT]] if nr != nil { nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) goto yes case gc.TINTER: if nl.Op == gc.ONAME { gc.Gvardef(nl) } nodl.Xoffset += int64(gc.Array_array) nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8]) if nr != nil { nodr.Xoffset += int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8]) if nr != nil { nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) nodr.Type = nodl.Type } gmove(&nodr, &nodl) goto yes case gc.TSTRUCT: if nl.Op == gc.ONAME { gc.Gvardef(nl) } loffset := nodl.Xoffset roffset := nodr.Xoffset // funarg structs may not begin at offset zero. if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil { loffset -= nl.Type.Type.Width } if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil { roffset -= nr.Type.Type.Width } for t := nl.Type.Type; t != nil; t = t.Down { nodl.Xoffset = loffset + t.Width nodl.Type = t.Type if nr == nil { gc.Clearslim(&nodl) } else { nodr.Xoffset = roffset + t.Width nodr.Type = nodl.Type gmove(&nodr, &nodl) } } goto yes } no: if freer != 0 { regfree(&nodr) } if freel != 0 { regfree(&nodl) } return false yes: if freer != 0 { regfree(&nodr) } if freel != 0 { regfree(&nodl) } return true }
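// Illustrative sketch (not compiler code): the eligibility test componentgen
// applies above, restated over a simplified type description. The type and
// field names are hypothetical.
type componentKind struct {
	kind       string // "slice", "array", "struct", "string", "interface", ...
	arrayLen   int64  // for arrays
	numFields  int64  // for structs
	hasFatPart bool   // an element or field that is itself a fat value
}

func canComponentgen(t componentKind) bool {
	switch t.kind {
	case "slice", "string", "interface":
		return true
	case "array":
		return t.arrayLen > 0 && t.arrayLen <= 3 && !t.hasFatPart
	case "struct":
		return t.numFields > 0 && t.numFields <= 4 && !t.hasFatPart
	}
	return false
}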