func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog) *obj.Prog {
	// MOVD	LR, R5
	p = obj.Appendp(ctxt, p)
	pPre.Pcond = p
	p.As = AMOVD
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REG_LR
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R5
	if pPreempt != nil {
		pPreempt.Pcond = p
	}

	// BL	runtime.morestack(SB)
	p = obj.Appendp(ctxt, p)

	p.As = ABL
	p.To.Type = obj.TYPE_BRANCH
	if ctxt.Cursym.Cfunc {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
	} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	} else {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
	}

	// BR	start
	p = obj.Appendp(ctxt, p)

	p.As = ABR
	p.To.Type = obj.TYPE_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	return p
}
func initdiv(ctxt *obj.Link) {
	if ctxt.Sym_div != nil {
		return
	}
	ctxt.Sym_div = obj.Linklookup(ctxt, "_div", 0)
	ctxt.Sym_divu = obj.Linklookup(ctxt, "_divu", 0)
	ctxt.Sym_mod = obj.Linklookup(ctxt, "_mod", 0)
	ctxt.Sym_modu = obj.Linklookup(ctxt, "_modu", 0)
}
func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, framesize int32) *obj.Prog {
	// Now we are at the end of the function, but logically
	// we are still in function prologue. We need to fix the
	// SP data and PCDATA.
	spfix := obj.Appendp(ctxt, p)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	pcdata := obj.Appendp(ctxt, spfix)
	pcdata.Lineno = ctxt.Cursym.Text.Lineno
	pcdata.Mode = ctxt.Cursym.Text.Mode
	pcdata.As = obj.APCDATA
	pcdata.From.Type = obj.TYPE_CONST
	pcdata.From.Offset = obj.PCDATA_StackMapIndex
	pcdata.To.Type = obj.TYPE_CONST
	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry

	// MOVD	LR, R5
	p = obj.Appendp(ctxt, pcdata)
	pPre.Pcond = p
	p.As = AMOVD
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REG_LR
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R5
	if pPreempt != nil {
		pPreempt.Pcond = p
	}

	// BL	runtime.morestack(SB)
	p = obj.Appendp(ctxt, p)

	p.As = ABL
	p.To.Type = obj.TYPE_BRANCH
	if ctxt.Cursym.Cfunc {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
	} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	} else {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
	}

	// BR	start
	p = obj.Appendp(ctxt, p)

	p.As = ABR
	p.To.Type = obj.TYPE_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	return p
}
func stringsym(s string) (data *obj.LSym) {
	var symname string
	if len(s) > 100 {
		// Huge strings are hashed to avoid long names in object files.
		// Indulge in some paranoia by writing the length of s, too,
		// as protection against length extension attacks.
		h := sha256.New()
		io.WriteString(h, s)
		symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
	} else {
		// Small strings get named directly by their contents.
		symname = strconv.Quote(s)
	}

	const prefix = "go.string."
	symdataname := prefix + symname

	symdata := obj.Linklookup(Ctxt, symdataname, 0)

	if !symdata.SeenGlobl() {
		// string data
		off := dsnameLSym(symdata, 0, s)
		ggloblLSym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}

	return symdata
}
func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
	// RegTo2 is set on the instructions we insert here so they don't get
	// processed twice.
	if p.RegTo2 != 0 {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	// Any Prog (aside from the above special cases) with an Addr with Name ==
	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.cx
	// inserted before it.
	isName := func(a *obj.Addr) bool {
		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
			return false
		}
		if a.Sym.Type == obj.STLSBSS {
			return false
		}
		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
	}

	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
		// respectively.
		if p.To.Type != obj.TYPE_REG {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_CX
			q.To = p.To
			p.As = AMOVL
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_CX
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
	}

	if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
		return
	}
	q := obj.Appendp(ctxt, p)
	q.RegTo2 = 1
	r := obj.Appendp(ctxt, q)
	r.RegTo2 = 1
	q.As = obj.ACALL
	q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
	q.To.Type = obj.TYPE_MEM
	q.To.Name = obj.NAME_EXTERN
	q.To.Sym.Local = true
	r.As = p.As
	r.Scond = p.Scond
	r.From = p.From
	r.From3 = p.From3
	r.Reg = p.Reg
	r.To = p.To
	obj.Nopout(p)
}
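// For reference, a hypothetical before/after of the rewrite above for 386
// position-independent code (the final PC-relative addressing is produced
// later by the assembler, using the PC value the thunk leaves in CX):
//
//	MOVL sym, AX
//
// becomes
//
//	CALL __x86.get_pc_thunk.cx
//	MOVL sym, AX // now assembled relative to the CX base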
// symbolReference parses a symbol that is known not to be a register.
func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
	// Identifier is a name.
	switch prefix {
	case 0:
		a.Type = obj.TYPE_MEM
	case '$':
		a.Type = obj.TYPE_ADDR
	case '*':
		a.Type = obj.TYPE_INDIR
	}
	// Weirdness with statics: Might now have "<>".
	isStatic := 0 // TODO: Really a boolean, but Linklookup wants a "version" integer.
	if p.peek() == '<' {
		isStatic = 1
		p.next()
		p.get('>')
	}
	if p.peek() == '+' || p.peek() == '-' {
		a.Offset = int64(p.expr())
	}
	a.Sym = obj.Linklookup(p.ctxt, name, isStatic)
	if p.peek() == scanner.EOF {
		if prefix != 0 {
			p.errorf("illegal addressing mode for symbol %s", name)
		}
		return
	}
	// Expect (SB), (FP), (PC), or (SP).
	p.get('(')
	reg := p.get(scanner.Ident).String()
	p.get(')')
	p.setPseudoRegister(a, reg, isStatic != 0, prefix)
}
func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := obj.Linklookup(Ctxt, "type..importpath."+p.Prefix+".", 0)
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}
// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			sname += name + "." + tag
		}
	} else {
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := obj.Linklookup(Ctxt, sname, 0)
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}
// dname dumps a reflect.name for a struct field or method.
func dname(s *Sym, ot int, name, tag string, pkg *Pkg, exported bool) int {
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	// Very few names require a pkgPath *string (only those
	// defined in a different package than their type). So if
	// there is no pkgPath, we treat the name contents as string
	// data that duplicates across packages.
	var bsym *obj.LSym
	if pkg == nil {
		_, bsym = stringsym(string(b))
	} else {
		// Write out data as "type.." to signal two things to the
		// linker, first that when dynamically linking, the symbol
		// should be moved to a relro section, and second that the
		// contents should not be decoded as a type.
		bsymname := fmt.Sprintf(`type..methodname."".%d`, dnameCount)
		dnameCount++
		bsym = obj.Linklookup(Ctxt, bsymname, 0)
		bsym.P = b
		boff := len(b)
		boff = int(Rnd(int64(boff), int64(Widthptr)))
		boff = dgopkgpathLSym(bsym, boff, pkg)
		ggloblLSym(bsym, int32(boff), obj.RODATA|obj.LOCAL)
	}

	ot = dsymptrLSym(Linksym(s), ot, bsym, 0)

	return ot
}
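// A minimal, self-contained sketch of the name/tag layout written above,
// assuming only the encoding described in the comments (flag bits for
// exported/tagged, big-endian 2-byte lengths). encodeReflectName is a
// hypothetical helper, not part of the compiler.
func encodeReflectName(name, tag string, exported bool) []byte {
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}
	return b
}

// For example, encodeReflectName("Foo", `json:"foo"`, true) yields
// 03 00 03 'F' 'o' 'o' 00 0a 'j' 's' 'o' 'n' ':' '"' 'f' 'o' 'o' '"':
// flag byte 0x03 (exported|tagged), then each string length-prefixed.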
func stringsym(s string) (hdr, data *obj.LSym) {
	var symname string
	if len(s) > 100 {
		// Huge strings are hashed to avoid long names in object files.
		// Indulge in some paranoia by writing the length of s, too,
		// as protection against length extension attacks.
		h := sha256.New()
		io.WriteString(h, s)
		symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
	} else {
		// Small strings get named directly by their contents.
		symname = strconv.Quote(s)
	}

	const prefix = "go.string."
	symdataname := prefix + symname

	// All the strings have the same prefix, so ignore it for map
	// purposes, but use a slice of the symbol name string to
	// reduce long-term memory overhead.
	key := symdataname[len(prefix):]

	if syms, ok := stringConstants[key]; ok {
		return syms.hdr, syms.data
	}

	symhdrname := "go.string.hdr." + symname

	symhdr := obj.Linklookup(Ctxt, symhdrname, 0)
	symdata := obj.Linklookup(Ctxt, symdataname, 0)

	stringConstants[key] = stringConstantSyms{symhdr, symdata}

	// string header
	off := 0
	off = dsymptrLSym(symhdr, off, symdata, 0)
	off = duintxxLSym(symhdr, off, uint64(len(s)), Widthint)
	ggloblLSym(symhdr, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)

	// string data
	off = dsnameLSym(symdata, 0, s)
	ggloblLSym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)

	return symhdr, symdata
}
// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return duintxxLSym(s, ot, 0, 4)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
		return dsymptrOffLSym(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOffLSym(s, ot, pkg.Pathsym, 0)
}
func Linksym(s *Sym) *obj.LSym {
	if s == nil {
		return nil
	}
	if s.Lsym != nil {
		return s.Lsym
	}
	var name string
	if isblanksym(s) {
		name = "_"
	} else if s.Linkname != "" {
		name = s.Linkname
	} else {
		name = s.Pkg.Prefix + "." + s.Name
	}

	ls := obj.Linklookup(Ctxt, name, 0)
	s.Lsym = ls
	return ls
}
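// A hypothetical distillation of the naming rule applied above (blank,
// linkname, and pkgPrefix stand in for the corresponding Sym fields):
func linkName(blank bool, linkname, pkgPrefix, name string) string {
	switch {
	case blank:
		return "_"
	case linkname != "":
		return linkname // e.g. set by //go:linkname
	default:
		return pkgPrefix + "." + name
	}
}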
func compile(fn *Node) {
	if Newproc == nil {
		Newproc = Sysfunc("newproc")
		Deferproc = Sysfunc("deferproc")
		Deferreturn = Sysfunc("deferreturn")
		panicindex = Sysfunc("panicindex")
		panicslice = Sysfunc("panicslice")
		panicdivide = Sysfunc("panicdivide")
		growslice = Sysfunc("growslice")
		panicdottype = Sysfunc("panicdottype")
	}

	defer func(lno int32) {
		lineno = lno
	}(setlineno(fn))

	Curfn = fn
	dowidth(Curfn.Type)

	if fn.Nbody.Len() == 0 {
		if pure_go || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") {
			yyerror("missing function body for %q", fn.Func.Nname.Sym.Name)
			return
		}

		emitptrargsmap()
		return
	}

	saveerrors()

	if Curfn.Type.FuncType().Outnamed {
		// add clearing of the output parameters
		for _, t := range Curfn.Type.Results().Fields().Slice() {
			if t.Nname != nil {
				n := nod(OAS, t.Nname, nil)
				n = typecheck(n, Etop)
				Curfn.Nbody.Prepend(n)
			}
		}
	}

	order(Curfn)
	if nerrors != 0 {
		return
	}

	hasdefer = false
	walk(Curfn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(Curfn)
	}
	if nerrors != 0 {
		return
	}

	// Build an SSA backend function.
	ssafn := buildssa(Curfn)
	if nerrors != 0 {
		return
	}

	newplist()
	setlineno(Curfn)

	nam := Curfn.Func.Nname
	if isblank(nam) {
		nam = nil
	}
	ptxt := Gins(obj.ATEXT, nam, nil)
	ptxt.From3 = new(obj.Addr)
	if fn.Func.Dupok {
		ptxt.From3.Offset |= obj.DUPOK
	}
	if fn.Func.Wrapper {
		ptxt.From3.Offset |= obj.WRAPPER
	}
	if fn.Func.Needctxt {
		ptxt.From3.Offset |= obj.NEEDCTXT
	}
	if fn.Func.Pragma&Nosplit != 0 {
		ptxt.From3.Offset |= obj.NOSPLIT
	}
	if fn.Func.ReflectMethod {
		ptxt.From3.Offset |= obj.REFLECTMETHOD
	}
	if fn.Func.Pragma&Systemstack != 0 {
		ptxt.From.Sym.Set(obj.AttrCFunc, true)
	}

	// Clumsy but important.
	// See test/recover.go for test cases and src/reflect/value.go
	// for the actual functions being considered.
	if myimportpath == "reflect" {
		if Curfn.Func.Nname.Sym.Name == "callReflect" || Curfn.Func.Nname.Sym.Name == "callMethod" {
			ptxt.From3.Offset |= obj.WRAPPER
		}
	}

	gcargs := makefuncdatasym("gcargs·", obj.FUNCDATA_ArgsPointerMaps)
	gclocals := makefuncdatasym("gclocals·", obj.FUNCDATA_LocalsPointerMaps)

	if obj.Fieldtrack_enabled != 0 && len(Curfn.Func.FieldTrack) > 0 {
		trackSyms := make([]*Sym, 0, len(Curfn.Func.FieldTrack))
		for sym := range Curfn.Func.FieldTrack {
			trackSyms = append(trackSyms, sym)
		}
		sort.Sort(symByName(trackSyms))
		for _, sym := range trackSyms {
			gtrack(sym)
		}
	}

	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		switch n.Class {
		case PAUTO:
			if !n.Used {
				continue
			}
			fallthrough
		case PPARAM, PPARAMOUT:
			p := Gins(obj.ATYPE, n, nil)
			p.From.Sym = obj.Linklookup(Ctxt, n.Sym.Name, 0)
			p.To.Type = obj.TYPE_MEM
			p.To.Name = obj.NAME_EXTERN
			p.To.Sym = Linksym(ngotype(n))
		}
	}

	genssa(ssafn, ptxt, gcargs, gclocals)
	ssafn.Free()
}
func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
	var yyn int
	var yylval yySymType
	var yyVAL yySymType
	var yyDollar []yySymType
	yyS := make([]yySymType, yyMaxDepth)

	Nerrs := 0   /* number of errors */
	Errflag := 0 /* error recovery flag */
	yystate := 0
	yychar := -1
	yytoken := -1 // yychar translated into internal numbering
	yyrcvr.lookahead = func() int { return yychar }
	defer func() {
		// Make sure we report no lookahead when not parsing.
		yystate = -1
		yychar = -1
		yytoken = -1
	}()
	yyp := -1
	goto yystack

ret0:
	return 0

ret1:
	return 1

yystack:
	/* put a state and value onto the stack */
	if yyDebug >= 4 {
		__yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
	}

	yyp++
	if yyp >= len(yyS) {
		nyys := make([]yySymType, len(yyS)*2)
		copy(nyys, yyS)
		yyS = nyys
	}
	yyS[yyp] = yyVAL
	yyS[yyp].yys = yystate

yynewstate:
	yyn = yyPact[yystate]
	if yyn <= yyFlag {
		goto yydefault /* simple state */
	}
	if yychar < 0 {
		yychar, yytoken = yylex1(yylex, &yylval)
	}
	yyn += yytoken
	if yyn < 0 || yyn >= yyLast {
		goto yydefault
	}
	yyn = yyAct[yyn]
	if yyChk[yyn] == yytoken { /* valid shift */
		yychar = -1
		yytoken = -1
		yyVAL = yylval
		yystate = yyn
		if Errflag > 0 {
			Errflag--
		}
		goto yystack
	}

yydefault:
	/* default state action */
	yyn = yyDef[yystate]
	if yyn == -2 {
		if yychar < 0 {
			yychar, yytoken = yylex1(yylex, &yylval)
		}

		/* look through exception table */
		xi := 0
		for {
			if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
				break
			}
			xi += 2
		}
		for xi += 2; ; xi += 2 {
			yyn = yyExca[xi+0]
			if yyn < 0 || yyn == yytoken {
				break
			}
		}
		yyn = yyExca[xi+1]
		if yyn < 0 {
			goto ret0
		}
	}
	if yyn == 0 {
		/* error ... attempt to resume parsing */
		switch Errflag {
		case 0: /* brand new error */
			yylex.Error(yyErrorMessage(yystate, yytoken))
			Nerrs++
			if yyDebug >= 1 {
				__yyfmt__.Printf("%s", yyStatname(yystate))
				__yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
			}
			fallthrough

		case 1, 2: /* incompletely recovered error ... try again */
			Errflag = 3

			/* find a state where "error" is a legal shift action */
			for yyp >= 0 {
				yyn = yyPact[yyS[yyp].yys] + yyErrCode
				if yyn >= 0 && yyn < yyLast {
					yystate = yyAct[yyn] /* simulate a shift of "error" */
					if yyChk[yystate] == yyErrCode {
						goto yystack
					}
				}

				/* the current p has no shift on "error", pop stack */
				if yyDebug >= 2 {
					__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
				}
				yyp--
			}
			/* there is no state on the stack with an error shift ... abort */
			goto ret1

		case 3: /* no shift yet; clobber input char */
			if yyDebug >= 2 {
				__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
			}
			if yytoken == yyEofCode {
				goto ret1
			}
			yychar = -1
			yytoken = -1
			goto yynewstate /* try again in the same state */
		}
	}

	/* reduction by production yyn */
	if yyDebug >= 2 {
		__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
	}

	yynt := yyn
	yypt := yyp
	_ = yypt // guard against "declared and not used"

	yyp -= yyR2[yyn]
	// yyp is now the index of $0. Perform the default action. Iff the
	// reduced production is ε, $1 is possibly out of range.
	if yyp+1 >= len(yyS) {
		nyys := make([]yySymType, len(yyS)*2)
		copy(nyys, yyS)
		yyS = nyys
	}
	yyVAL = yyS[yyp+1]

	/* consult goto table to find next state */
	yyn = yyR1[yyn]
	yyg := yyPgo[yyn]
	yyj := yyg + yyS[yyp].yys + 1

	if yyj >= yyLast {
		yystate = yyAct[yyg]
	} else {
		yystate = yyAct[yyj]
		if yyChk[yystate] != -yyn {
			yystate = yyAct[yyg]
		}
	}
	// dummy call; replaced with literal code
	switch yynt {

	case 2:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:73
		{
			stmtline = asm.Lineno
		}
	case 4:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:80
		{
			yyDollar[1].sym = asm.LabelLookup(yyDollar[1].sym)
			if yyDollar[1].sym.Type == LLAB && yyDollar[1].sym.Value != int64(asm.PC) {
				yyerror("redeclaration of %s", yyDollar[1].sym.Labelname)
			}
			yyDollar[1].sym.Type = LLAB
			yyDollar[1].sym.Value = int64(asm.PC)
		}
	case 6:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:90
		{
			yyDollar[1].sym.Type = LVAR
			yyDollar[1].sym.Value = int64(yyDollar[3].lval)
		}
	case 7:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:95
		{
			if yyDollar[1].sym.Value != int64(yyDollar[3].lval) {
				yyerror("redeclaration of %s", yyDollar[1].sym.Name)
			}
			yyDollar[1].sym.Value = int64(yyDollar[3].lval)
		}
	case 11:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:110
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, yyDollar[5].lval, &yyDollar[7].addr)
		}
	case 12:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:114
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, yyDollar[5].lval, &nullgen)
		}
	case 13:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:118
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &yyDollar[5].addr)
		}
	case 14:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:125
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &yyDollar[5].addr)
		}
	case 15:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:132
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &yyDollar[5].addr)
		}
	case 16:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:139
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &nullgen, 0, &yyDollar[4].addr)
		}
	case 17:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:143
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &nullgen, 0, &yyDollar[4].addr)
		}
	case 18:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:150
		{
			outcode(yyDollar[1].lval, Always, &nullgen, 0, &yyDollar[3].addr)
		}
	case 19:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:157
		{
			outcode(yyDollar[1].lval, Always, &nullgen, 0, &yyDollar[3].addr)
		}
	case 20:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:164
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &nullgen, 0, &yyDollar[4].addr)
		}
	case 21:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:171
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, yyDollar[5].lval, &nullgen)
		}
	case 22:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:178
		{
			var g obj.Addr

			g = nullgen
			g.Type = obj.TYPE_REGLIST
			g.Offset = int64(yyDollar[6].lval)
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &g)
		}
	case 23:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:187
		{
			var g obj.Addr

			g = nullgen
			g.Type = obj.TYPE_REGLIST
			g.Offset = int64(yyDollar[4].lval)
			outcode(yyDollar[1].lval, yyDollar[2].lval, &g, 0, &yyDollar[7].addr)
		}
	case 24:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:199
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[5].addr, int32(yyDollar[3].addr.Reg), &yyDollar[7].addr)
		}
	case 25:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:203
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[5].addr, int32(yyDollar[3].addr.Reg), &yyDollar[3].addr)
		}
	case 26:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:207
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[4].addr, int32(yyDollar[6].addr.Reg), &yyDollar[6].addr)
		}
	case 27:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:214
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &nullgen, 0, &nullgen)
		}
	case 28:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:221
		{
			asm.Settext(yyDollar[2].addr.Sym)
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[5].addr)
			if asm.Pass > 1 {
				lastpc.From3 = new(obj.Addr)
			}
		}
	case 29:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:229
		{
			asm.Settext(yyDollar[2].addr.Sym)
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[7].addr)
			if asm.Pass > 1 {
				lastpc.From3 = new(obj.Addr)
				lastpc.From3.Type = obj.TYPE_CONST
				lastpc.From3.Offset = int64(yyDollar[4].lval)
			}
		}
	case 30:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:242
		{
			asm.Settext(yyDollar[2].addr.Sym)
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[4].addr)
			if asm.Pass > 1 {
				lastpc.From3 = new(obj.Addr)
			}
		}
	case 31:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:250
		{
			asm.Settext(yyDollar[2].addr.Sym)
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[6].addr)
			if asm.Pass > 1 {
				lastpc.From3 = new(obj.Addr)
				lastpc.From3.Type = obj.TYPE_CONST
				lastpc.From3.Offset = int64(yyDollar[4].lval)
			}
		}
	case 32:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:264
		{
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[6].addr)
			if asm.Pass > 1 {
				lastpc.From3 = new(obj.Addr)
				lastpc.From3.Type = obj.TYPE_CONST
				lastpc.From3.Offset = int64(yyDollar[4].lval)
			}
		}
	case 33:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:276
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &nullgen)
		}
	case 34:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:283
		{
			outcode(yyDollar[1].lval, Always, &nullgen, 0, &yyDollar[3].addr)
		}
	case 35:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:290
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &yyDollar[5].addr)
		}
	case 36:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:294
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, 0, &yyDollar[5].addr)
		}
	case 37:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:298
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, yyDollar[5].lval, &yyDollar[7].addr)
		}
	case 38:
		yyDollar = yyS[yypt-6 : yypt+1]
		//line a.y:302
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, int32(yyDollar[5].addr.Reg), &nullgen)
		}
	case 39:
		yyDollar = yyS[yypt-12 : yypt+1]
		//line a.y:309
		{
			var g obj.Addr

			g = nullgen
			g.Type = obj.TYPE_CONST
			g.Offset = int64(
				(0xe << 24) | /* opcode */
					(yyDollar[1].lval << 20) | /* MCR/MRC */
					((yyDollar[2].lval ^ C_SCOND_XOR) << 28) | /* scond */
					((yyDollar[3].lval & 15) << 8) | /* coprocessor number */
					((yyDollar[5].lval & 7) << 21) | /* coprocessor operation */
					((yyDollar[7].lval & 15) << 12) | /* arm register */
					((yyDollar[9].lval & 15) << 16) | /* Crn */
					((yyDollar[11].lval & 15) << 0) | /* Crm */
					((yyDollar[12].lval & 7) << 5) | /* coprocessor information */
					(1 << 4)) /* must be set */
			outcode(AMRC, Always, &nullgen, 0, &g)
		}
	case 40:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:321
		{
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, int32(yyDollar[5].addr.Reg), &yyDollar[7].addr)
		}
	case 41:
		yyDollar = yyS[yypt-9 : yypt+1]
		//line a.y:329
		{
			yyDollar[7].addr.Type = obj.TYPE_REGREG2
			yyDollar[7].addr.Offset = int64(yyDollar[9].lval)
			outcode(yyDollar[1].lval, yyDollar[2].lval, &yyDollar[3].addr, int32(yyDollar[5].addr.Reg), &yyDollar[7].addr)
		}
	case 42:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:338
		{
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &nullgen)
		}
	case 43:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:345
		{
			if yyDollar[2].addr.Type != obj.TYPE_CONST || yyDollar[4].addr.Type != obj.TYPE_CONST {
				yyerror("arguments to PCDATA must be integer constants")
			}
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[4].addr)
		}
	case 44:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:355
		{
			if yyDollar[2].addr.Type != obj.TYPE_CONST {
				yyerror("index for FUNCDATA must be integer constant")
			}
			if yyDollar[4].addr.Type != obj.NAME_EXTERN && yyDollar[4].addr.Type != obj.NAME_STATIC && yyDollar[4].addr.Type != obj.TYPE_MEM {
				yyerror("value for FUNCDATA must be symbol reference")
			}
			outcode(yyDollar[1].lval, Always, &yyDollar[2].addr, 0, &yyDollar[4].addr)
		}
	case 45:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:368
		{
			outcode(yyDollar[1].lval, Always, &nullgen, 0, &nullgen)
		}
	case 46:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:374
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_TEXTSIZE
			yyVAL.addr.Offset = int64(yyDollar[1].lval)
			yyVAL.addr.Val = int32(obj.ArgsSizeUnknown)
		}
	case 47:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:381
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_TEXTSIZE
			yyVAL.addr.Offset = -int64(yyDollar[2].lval)
			yyVAL.addr.Val = int32(obj.ArgsSizeUnknown)
		}
	case 48:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:388
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_TEXTSIZE
			yyVAL.addr.Offset = int64(yyDollar[1].lval)
			yyVAL.addr.Val = int32(yyDollar[3].lval)
		}
	case 49:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:395
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_TEXTSIZE
			yyVAL.addr.Offset = -int64(yyDollar[2].lval)
			yyVAL.addr.Val = int32(yyDollar[4].lval)
		}
	case 50:
		yyDollar = yyS[yypt-0 : yypt+1]
		//line a.y:403
		{
			yyVAL.lval = Always
		}
	case 51:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:407
		{
			yyVAL.lval = (yyDollar[1].lval & ^C_SCOND) | yyDollar[2].lval
		}
	case 52:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:411
		{
			yyVAL.lval = yyDollar[1].lval | yyDollar[2].lval
		}
	case 55:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:420
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_BRANCH
			yyVAL.addr.Offset = int64(yyDollar[1].lval) + int64(asm.PC)
		}
	case 56:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:426
		{
			yyDollar[1].sym = asm.LabelLookup(yyDollar[1].sym)
			yyVAL.addr = nullgen
			if asm.Pass == 2 && yyDollar[1].sym.Type != LLAB {
				yyerror("undefined label: %s", yyDollar[1].sym.Labelname)
			}
			yyVAL.addr.Type = obj.TYPE_BRANCH
			yyVAL.addr.Offset = yyDollar[1].sym.Value + int64(yyDollar[2].lval)
		}
	case 57:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:437
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_CONST
			yyVAL.addr.Offset = int64(yyDollar[2].lval)
		}
	case 58:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:443
		{
			yyVAL.addr = yyDollar[2].addr
			yyVAL.addr.Type = obj.TYPE_ADDR
		}
	case 59:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:448
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_SCONST
			yyVAL.addr.Val = yyDollar[2].sval
		}
	case 61:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:457
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_FCONST
			yyVAL.addr.Val = yyDollar[2].dval
		}
	case 62:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:463
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_FCONST
			yyVAL.addr.Val = -yyDollar[3].dval
		}
	case 63:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:471
		{
			yyVAL.lval = 1 << uint(yyDollar[1].lval&15)
		}
	case 64:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:475
		{
			yyVAL.lval = 0
			for i := yyDollar[1].lval; i <= yyDollar[3].lval; i++ {
				yyVAL.lval |= 1 << uint(i&15)
			}
			for i := yyDollar[3].lval; i <= yyDollar[1].lval; i++ {
				yyVAL.lval |= 1 << uint(i&15)
			}
		}
	case 65:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:485
		{
			yyVAL.lval = (1 << uint(yyDollar[1].lval&15)) | yyDollar[3].lval
		}
	case 69:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:494
		{
			yyVAL.addr = yyDollar[1].addr
			yyVAL.addr.Reg = int16(yyDollar[3].lval)
		}
	case 70:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:499
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REG
			yyVAL.addr.Reg = int16(yyDollar[1].lval)
		}
	case 71:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:505
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REG
			yyVAL.addr.Reg = int16(yyDollar[1].lval)
		}
	case 72:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:511
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Offset = int64(yyDollar[1].lval)
		}
	case 76:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:522
		{
			yyVAL.addr = yyDollar[1].addr
			if yyDollar[1].addr.Name != obj.NAME_EXTERN && yyDollar[1].addr.Name != obj.NAME_STATIC {
			}
		}
	case 77:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:530
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Reg = int16(yyDollar[2].lval)
			yyVAL.addr.Offset = 0
		}
	case 79:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:540
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Reg = int16(yyDollar[3].lval)
			yyVAL.addr.Offset = int64(yyDollar[1].lval)
		}
	case 81:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:550
		{
			yyVAL.addr = yyDollar[1].addr
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Reg = int16(yyDollar[3].lval)
		}
	case 86:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:563
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_CONST
			yyVAL.addr.Offset = int64(yyDollar[2].lval)
		}
	case 87:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:571
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REG
			yyVAL.addr.Reg = int16(yyDollar[1].lval)
		}
	case 88:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:579
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REGREG
			yyVAL.addr.Reg = int16(yyDollar[2].lval)
			yyVAL.addr.Offset = int64(yyDollar[4].lval)
		}
	case 89:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:588
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_SHIFT
			yyVAL.addr.Offset = int64(yyDollar[1].lval&15) | int64(yyDollar[4].lval) | (0 << 5)
		}
	case 90:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:594
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_SHIFT
			yyVAL.addr.Offset = int64(yyDollar[1].lval&15) | int64(yyDollar[4].lval) | (1 << 5)
		}
	case 91:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:600
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_SHIFT
			yyVAL.addr.Offset = int64(yyDollar[1].lval&15) | int64(yyDollar[4].lval) | (2 << 5)
		}
	case 92:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:606
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_SHIFT
			yyVAL.addr.Offset = int64(yyDollar[1].lval&15) | int64(yyDollar[4].lval) | (3 << 5)
		}
	case 93:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:614
		{
			if yyVAL.lval < REG_R0 || yyVAL.lval > REG_R15 {
				print("register value out of range\n")
			}
			yyVAL.lval = ((yyDollar[1].lval & 15) << 8) | (1 << 4)
		}
	case 94:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:621
		{
			if yyVAL.lval < 0 || yyVAL.lval >= 32 {
				print("shift value out of range\n")
			}
			yyVAL.lval = (yyDollar[1].lval & 31) << 7
		}
	case 96:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:631
		{
			yyVAL.lval = REGPC
		}
	case 97:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:635
		{
			if yyDollar[3].lval < 0 || yyDollar[3].lval >= NREG {
				print("register value out of range\n")
			}
			yyVAL.lval = REG_R0 + yyDollar[3].lval
		}
	case 99:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:645
		{
			yyVAL.lval = REGSP
		}
	case 101:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:652
		{
			if yyDollar[3].lval < 0 || yyDollar[3].lval >= NREG {
				print("register value out of range\n")
			}
			yyVAL.lval = yyDollar[3].lval // TODO(rsc): REG_C0+$3
		}
	case 104:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:665
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REG
			yyVAL.addr.Reg = int16(yyDollar[1].lval)
		}
	case 105:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:671
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_REG
			yyVAL.addr.Reg = int16(REG_F0 + yyDollar[3].lval)
		}
	case 106:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:679
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Name = int8(yyDollar[3].lval)
			yyVAL.addr.Sym = nil
			yyVAL.addr.Offset = int64(yyDollar[1].lval)
		}
	case 107:
		yyDollar = yyS[yypt-5 : yypt+1]
		//line a.y:687
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Name = int8(yyDollar[4].lval)
			yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyDollar[1].sym.Name, 0)
			yyVAL.addr.Offset = int64(yyDollar[2].lval)
		}
	case 108:
		yyDollar = yyS[yypt-7 : yypt+1]
		//line a.y:695
		{
			yyVAL.addr = nullgen
			yyVAL.addr.Type = obj.TYPE_MEM
			yyVAL.addr.Name = obj.NAME_STATIC
			yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyDollar[1].sym.Name, 1)
			yyVAL.addr.Offset = int64(yyDollar[4].lval)
		}
	case 109:
		yyDollar = yyS[yypt-0 : yypt+1]
		//line a.y:704
		{
			yyVAL.lval = 0
		}
	case 110:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:708
		{
			yyVAL.lval = yyDollar[2].lval
		}
	case 111:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:712
		{
			yyVAL.lval = -yyDollar[2].lval
		}
	case 116:
		yyDollar = yyS[yypt-1 : yypt+1]
		//line a.y:724
		{
			yyVAL.lval = int32(yyDollar[1].sym.Value)
		}
	case 117:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:728
		{
			yyVAL.lval = -yyDollar[2].lval
		}
	case 118:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:732
		{
			yyVAL.lval = yyDollar[2].lval
		}
	case 119:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:736
		{
			yyVAL.lval = ^yyDollar[2].lval
		}
	case 120:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:740
		{
			yyVAL.lval = yyDollar[2].lval
		}
	case 121:
		yyDollar = yyS[yypt-0 : yypt+1]
		//line a.y:745
		{
			yyVAL.lval = 0
		}
	case 122:
		yyDollar = yyS[yypt-2 : yypt+1]
		//line a.y:749
		{
			yyVAL.lval = yyDollar[2].lval
		}
	case 124:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:756
		{
			yyVAL.lval = yyDollar[1].lval + yyDollar[3].lval
		}
	case 125:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:760
		{
			yyVAL.lval = yyDollar[1].lval - yyDollar[3].lval
		}
	case 126:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:764
		{
			yyVAL.lval = yyDollar[1].lval * yyDollar[3].lval
		}
	case 127:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:768
		{
			yyVAL.lval = yyDollar[1].lval / yyDollar[3].lval
		}
	case 128:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:772
		{
			yyVAL.lval = yyDollar[1].lval % yyDollar[3].lval
		}
	case 129:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:776
		{
			yyVAL.lval = yyDollar[1].lval << uint(yyDollar[4].lval)
		}
	case 130:
		yyDollar = yyS[yypt-4 : yypt+1]
		//line a.y:780
		{
			yyVAL.lval = yyDollar[1].lval >> uint(yyDollar[4].lval)
		}
	case 131:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:784
		{
			yyVAL.lval = yyDollar[1].lval & yyDollar[3].lval
		}
	case 132:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:788
		{
			yyVAL.lval = yyDollar[1].lval ^ yyDollar[3].lval
		}
	case 133:
		yyDollar = yyS[yypt-3 : yypt+1]
		//line a.y:792
		{
			yyVAL.lval = yyDollar[1].lval | yyDollar[3].lval
		}
	}
	goto yystack /* stack new state and value */
}
func dumptypestructs() {
	// copy types from externdcl list to signatlist
	for _, n := range externdcl {
		if n.Op != OTYPE {
			continue
		}
		signatlist = append(signatlist, n)
	}

	// Process signatlist. This can't use range, as entries are
	// added to the list while it is being processed.
	for i := 0; i < len(signatlist); i++ {
		n := signatlist[i]
		if n.Op != OTYPE {
			continue
		}
		t := n.Type
		dtypesym(t)
		if t.Sym != nil {
			dtypesym(ptrto(t))
		}
	}

	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   link   *itab
		//   bad    int32
		//   unused int32
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.sym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.sym, o, dtypesym(i.t), 0)
		o += Widthptr + 8                      // skip link/bad/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.sym, int32(o), int16(obj.DUPOK|obj.NOPTR|obj.LOCAL))

		ilink := Pkglookup(i.t.tconv(FmtLeft)+","+i.itype.tconv(FmtLeft), itablinkpkg)
		dsymptr(ilink, 0, i.sym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA|obj.LOCAL))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := obj.Linklookup(Ctxt, "go.plugin.tabs", 0)
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOffLSym(s, ot, nsym, 0)
			ot = dsymptrOffLSym(s, ot, Linksym(dtypesym(p.t)), 0)
		}
		ggloblLSym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = obj.Linklookup(Ctxt, "go.plugin.exports", 0)
		for _, p := range ptabs {
			ot = dsymptrLSym(s, ot, Linksym(p.s), 0)
		}
		ggloblLSym(s, int32(ot), int16(obj.RODATA))
	}

	// generate import strings for imported packages
	if forceObjFileStability {
		// Sorting the packages is not necessary but to compare binaries created
		// using textual and binary format we sort by path to reduce differences.
		sort.Sort(pkgByPath(pkgs))
	}
	for _, p := range pkgs {
		if p.Direct {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if myimportpath == "runtime" {
		for i := EType(1); i <= TBOOL; i++ {
			dtypesym(ptrto(Types[i]))
		}
		dtypesym(ptrto(Types[TSTRING]))
		dtypesym(ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(ptrto(errortype))

		dtypesym(functype(nil, []*Node{nod(ODCLFIELD, nil, typenod(errortype))}, []*Node{nod(ODCLFIELD, nil, typenod(Types[TSTRING]))}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(mkpkg("main"))
	}
}
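// A toy size calculation mirroring the itab layout dumped above, assuming
// 8-byte pointers (Widthptr == 8); exampleItabSize is illustrative only.
func exampleItabSize(nmethods int) int {
	o := 8            // inter *interfacetype
	o += 8            // _type *_type
	o += 8 + 8        // link *itab, plus the bad/unused int32 pair
	o += nmethods * 8 // fun [...]uintptr, one slot per method
	return o          // e.g. an interface with 2 methods: 48 bytes
}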
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	var bpsize int
	if p.Mode == 64 && obj.Framepointer_enabled != 0 && autoffset > 0 {
		// Make room to save a base pointer. If autoffset == 0,
		// this might do something special like a tail jump to
		// another function, so in that case we omit this.
		bpsize = ctxt.Arch.Ptrsize

		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
		for q := p; q != nil; q = q.Link {
			if q.As == obj.ACALL {
				goto noleaf
			}
			if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
				goto noleaf
			}
		}

		p.From3.Offset |= obj.NOSPLIT
	noleaf:
	}

	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = obj.Appendp(ctxt, p)

		p.As = obj.ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.Regsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	var a int
	var pcsize int
	for ; p != nil; p = p.Link {
		pcsize = int(p.Mode) / 8
		a = int(p.From.Name)
		if a == obj.NAME_AUTO {
			p.From.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.From3 != nil {
			a = int(p.From3.Name)
			if a == obj.NAME_AUTO {
				p.From3.Offset += int64(deltasp) - int64(bpsize)
			}
			if a == obj.NAME_PARAM {
				p.From3.Offset += int64(deltasp) + int64(pcsize)
			}
		}
		a = int(p.To.Name)
		if a == obj.NAME_AUTO {
			p.To.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, SI
		//	CMPQ	SI, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	SI, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))
		p = obj.Appendp(ctxt, p)

		p.As = mov
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = sub
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	jls := obj.Appendp(ctxt, p)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	call := obj.Appendp(ctxt, spfix)
	call.Lineno = ctxt.Cursym.Text.Lineno
	call.Mode = ctxt.Cursym.Text.Mode
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.Cfunc:
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	jmp := obj.Appendp(ctxt, call)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.Pcond = ctxt.Cursym.Text.Link
	jmp.Spadj = +framesize

	jls.Pcond = call
	if q1 != nil {
		q1.Pcond = call
	}

	return jls
}

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a obj.As) bool {
	switch a {
	case obj.AJMP, obj.ARET, AIRETL, AIRETQ, AIRETW, ARETFL, ARETFQ, ARETFW, obj.AUNDEF:
		return true
	}

	return false
}

func pushpop(a obj.As) bool {
	switch a {
	case APUSHL, APUSHFL, APUSHQ, APUSHFQ, APUSHW, APUSHFW, APOPL, APOPFL, APOPQ, APOPFQ, APOPW, APOPFW:
		return true
	}

	return false
}

func relinv(a obj.As) obj.As {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", obj.Aconv(a))
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a obj.As

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark |= DONE

			p = q
			if p.Mark&DONE == 0 {
				goto loop
			}
		}
	}

	if p.Mark&DONE != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark |= DONE
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark&DONE != 0 {
					return
				}
				goto loop
				/* */
			}
		}

		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark |= DONE

	(*last).Link = p
	*last = p
	a = p.As

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark&DONE != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark&DONE != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

var unaryDst = map[obj.As]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    8,
	Regsize:    8,
}

var Linkamd64p32 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64p32",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    8,
}

var Link386 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "386",
	Thechar:    '8',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    4,
}
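// A toy model of the three guard checks emitted by stacksplit above, with
// illustrative constants (the real StackSmall/StackBig/StackGuard values
// live in package obj and runtime/stack.go and may differ):
const (
	exStackSmall = 128
	exStackBig   = 4096
	exStackGuard = 880
)

func exampleNeedSplit(sp, stackguard uint64, framesize int64) bool {
	switch {
	case framesize <= exStackSmall:
		// small stack: SP <= stackguard
		return sp <= stackguard
	case framesize <= exStackBig:
		// large stack: SP-framesize <= stackguard-StackSmall
		return sp-uint64(framesize) <= stackguard-exStackSmall
	default:
		// huge stack: add StackGuard to both sides so the left side stays
		// positive even when SP is slightly below stackguard.
		return sp-stackguard+exStackGuard <= uint64(framesize)+(exStackGuard-exStackSmall)
	}
}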
func progedit(ctxt *obj.Link, p *obj.Prog) {
	// Maintain information about code generation mode.
	if ctxt.Mode == 0 {
		ctxt.Mode = ctxt.Arch.Regsize * 8
	}
	p.Mode = int8(ctxt.Mode)

	switch p.As {
	case AMODE:
		if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
			switch int(p.From.Offset) {
			case 16, 32, 64:
				ctxt.Mode = int(p.From.Offset)
			}
		}
		obj.Nopout(p)
	}

	// Thread-local storage references use the TLS pseudo-register.
	// As a register, TLS refers to the thread-local storage base, and it
	// can only be loaded into another register:
	//
	//	MOVQ TLS, AX
	//
	// An offset from the thread-local storage base is written off(reg)(TLS*1).
	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
	// indexing from the loaded TLS base. This emits a relocation so that
	// if the linker needs to adjust the offset, it can. For example:
	//
	//	MOVQ TLS, AX
	//	MOVQ 0(AX)(TLS*1), CX // load g into CX
	//
	// On systems that support direct access to the TLS memory, this
	// pair of instructions can be reduced to a direct TLS memory reference:
	//
	//	MOVQ 0(TLS), CX // load g into CX
	//
	// The 2-instruction and 1-instruction forms correspond to the two code
	// sequences for loading a TLS variable in the local exec model given in "ELF
	// Handling For Thread-Local Storage".
	//
	// We apply this rewrite on systems that support the 1-instruction form.
	// The decision is made using only the operating system and the -shared flag,
	// not the link mode. If some link modes on a particular operating system
	// require the 2-instruction form, then all builds for that operating system
	// will use the 2-instruction form, so that the link mode decision can be
	// delayed to link time.
	//
	// In this way, all supported systems use identical instructions to
	// access TLS, and they are rewritten appropriately first here in
	// liblink and then finally using relocations in the linker.
	//
	// When -shared is passed, we leave the code in the 2-instruction form but
	// assemble (and relocate) them in different ways to generate the initial
	// exec code sequence. It's a bit of a fluke that this is possible without
	// rewriting the instructions more comprehensively, and it only does because
	// we only support a single TLS variable (g).

	if CanUse1InsnTLS(ctxt) {
		// Reduce 2-instruction sequence to 1-instruction sequence.
		// Sequences like
		//	MOVQ TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		//
		// TODO(rsc): Remove the Hsolaris special case. It exists only to
		// guarantee we are producing byte-identical binaries as before this code.
		// But it should be unnecessary.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
			obj.Nopout(p)
		}
		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
			p.From.Reg = REG_TLS
			p.From.Scale = 0
			p.From.Index = REG_NONE
		}

		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			p.To.Reg = REG_TLS
			p.To.Scale = 0
			p.To.Index = REG_NONE
		}
	} else {
		// load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
		// as the 2-instruction sequence if necessary.
		//	MOVQ 0(TLS), BX
		// becomes
		//	MOVQ TLS, BX
		//	MOVQ 0(BX)(TLS*1), BX
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From = p.From
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Index = REG_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_TLS
			p.From.Index = REG_NONE
			p.From.Offset = 0
		}
	}

	// TODO: Remove.
	if ctxt.Headtype == obj.Hwindows && p.Mode == 64 || ctxt.Headtype == obj.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == REG_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == REG_TLS {
			p.To.Scale = 2
		}
	}

	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
	// That's what the tables expect.
	switch p.As {
	case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
		if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
			p.To.Type = obj.TYPE_CONST
		}
	}

	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
	switch p.As {
	case obj.ACALL, obj.AJMP, obj.ARET:
		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
	if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Thechar == '6' || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
		switch p.As {
		case AMOVL:
			p.As = ALEAL
			p.From.Type = obj.TYPE_MEM
		case AMOVQ:
			p.As = ALEAQ
			p.From.Type = obj.TYPE_MEM
		}
	}

	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		if p.From3 != nil {
			nacladdr(ctxt, p, p.From3)
		}
		nacladdr(ctxt, p, &p.From)
		nacladdr(ctxt, p, &p.To)
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
	case AMOVSS:
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVF, AFADDF, AFSUBF, AFSUBRF, AFMULF, AFDIVF, AFDIVRF, AFCOMF, AFCOMFP, AADDSS, ASUBSS, AMULSS, ADIVSS, ACOMISS, AUCOMISS:
		if p.From.Type == obj.TYPE_FCONST {
			f32 := float32(p.From.Val.(float64))
			i32 := math.Float32bits(f32)
			literal := fmt.Sprintf("$f32.%08x", i32)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Local = true
			p.From.Offset = 0
		}

	case AMOVSD:
		// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVD, AFADDD, AFSUBD, AFSUBRD, AFMULD, AFDIVD, AFDIVRD, AFCOMD, AFCOMDP, AADDSD, ASUBSD, AMULSD, ADIVSD, ACOMISD, AUCOMISD:
		if p.From.Type == obj.TYPE_FCONST {
			i64 := math.Float64bits(p.From.Val.(float64))
			literal := fmt.Sprintf("$f64.%016x", i64)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Local = true
			p.From.Offset = 0
		}
	}

	if ctxt.Flag_dynlink {
		rewriteToUseGot(ctxt, p)
	}

	if ctxt.Flag_shared != 0 && p.Mode == 32 {
		rewriteToPcrel(ctxt, p)
	}
}
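// A self-contained sketch of the constant-pool naming used above for float
// operands (stdlib only; float64PoolName is a hypothetical helper, not part
// of the compiler):
func float64PoolName(f float64) string {
	return fmt.Sprintf("$f64.%016x", math.Float64bits(f))
}

// float64PoolName(1.0) == "$f64.3ff0000000000000", so every use of the
// constant 1.0 resolves to the same RODATA symbol.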
// Rewrite p, if necessary, to access global data via the global offset table. func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) { var add, lea, mov obj.As var reg int16 if p.Mode == 64 { add = AADDQ lea = ALEAQ mov = AMOVQ reg = REG_R15 } else { add = AADDL lea = ALEAL mov = AMOVL reg = REG_CX } if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { // ADUFFxxx $offset // becomes // $MOV runtime.duffxxx@GOT, $reg // $ADD $offset, $reg // CALL $reg var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = obj.Linklookup(ctxt, "runtime.duffzero", 0) } else { sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0) } offset := p.To.Offset p.As = mov p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF p.From.Sym = sym p.To.Type = obj.TYPE_REG p.To.Reg = reg p.To.Offset = 0 p.To.Sym = nil p1 := obj.Appendp(ctxt, p) p1.As = add p1.From.Type = obj.TYPE_CONST p1.From.Offset = offset p1.To.Type = obj.TYPE_REG p1.To.Reg = reg p2 := obj.Appendp(ctxt, p1) p2.As = obj.ACALL p2.To.Type = obj.TYPE_REG p2.To.Reg = reg } // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local { // $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below p.As = mov p.From.Type = obj.TYPE_ADDR } if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local { // $MOV $sym, Rx becomes $MOV sym@GOT, Rx // $MOV $sym+<off>, Rx becomes $MOV sym@GOT, Rx; $LEA <off>(Rx), Rx // On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX cmplxdest := false pAs := p.As var dest obj.Addr if p.To.Type != obj.TYPE_REG || pAs != mov { if p.Mode == 64 { ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p) } cmplxdest = true dest = p.To p.As = mov p.To.Type = obj.TYPE_REG p.To.Reg = REG_CX p.To.Sym = nil p.To.Name = obj.NAME_NONE } p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF q := p if p.From.Offset != 0 { q = obj.Appendp(ctxt, p) q.As = lea q.From.Type = obj.TYPE_MEM q.From.Reg = p.To.Reg q.From.Offset = p.From.Offset q.To = p.To p.From.Offset = 0 } if cmplxdest { q = obj.Appendp(ctxt, q) q.As = pAs q.To = dest q.From.Type = obj.TYPE_REG q.From.Reg = REG_CX } } if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr // MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry // MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15) // An addition may be inserted between the two MOVs if there is an offset. if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local { ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local { source = &p.To } else { return } if p.As == obj.ACALL { // When dynlinking on 386, almost any call might end up being a call // to a PLT, so make sure the GOT pointer is loaded into BX. // RegTo2 is set on the replacement call insn to stop it being // processed when it is in turn passed to progedit. 
if p.Mode == 64 || (p.To.Sym != nil && p.To.Sym.Local) || p.RegTo2 != 0 { return } p1 := obj.Appendp(ctxt, p) p2 := obj.Appendp(ctxt, p1) p1.As = ALEAL p1.From.Type = obj.TYPE_MEM p1.From.Name = obj.NAME_STATIC p1.From.Sym = obj.Linklookup(ctxt, "_GLOBAL_OFFSET_TABLE_", 0) p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_BX p2.As = p.As p2.Scond = p.Scond p2.From = p.From p2.From3 = p.From3 p2.Reg = p.Reg p2.To = p.To // p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr // in ../pass.go complain, so set it back to TYPE_MEM here, until p2 // itself gets passed to progedit. p2.To.Type = obj.TYPE_MEM p2.RegTo2 = 1 obj.Nopout(p) return } if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP { return } if source.Type != obj.TYPE_MEM { ctxt.Diag("don't know how to handle %v with -dynlink", p) } p1 := obj.Appendp(ctxt, p) p2 := obj.Appendp(ctxt, p1) p1.As = mov p1.From.Type = obj.TYPE_MEM p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG p1.To.Reg = reg p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = reg p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { p2.To.Reg = reg p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } else { return } obj.Nopout(p) }
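// A toy model (standalone sketch; the instruction struct and the symbol name
// "main.counter" are invented here, not the obj package's types) of the core
// rewrite rewriteToUseGot performs: "$MOV $sym+<off>, Rx" becomes a load of
// sym's address from the GOT slot, plus an LEA to reapply any offset.
package main

import "fmt"

type ins struct {
	op, from, to string
}

// rewriteAddrOfGlobal turns a move of a global's address into a GOT load,
// appending an address fixup when the original operand carried an offset.
func rewriteAddrOfGlobal(sym string, off int, dst string) []ins {
	out := []ins{{"MOV", sym + "@GOT", dst}} // the address comes from the GOT slot
	if off != 0 {
		out = append(out, ins{"LEA", fmt.Sprintf("%d(%s)", off, dst), dst})
	}
	return out
}

func main() {
	for _, i := range rewriteAddrOfGlobal("main.counter", 4, "AX") {
		fmt.Println(i.op, i.from+",", i.to)
	}
	// MOV main.counter@GOT, AX
	// LEA 4(AX), AX
}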
/* // instruction scheduling if(debug['Q'] == 0) return; curtext = nil; q = nil; // p - 1 q1 = firstp; // top of block o = 0; // count of instructions for(p = firstp; p != nil; p = p1) { p1 = p->link; o++; if(p->mark & NOSCHED){ if(q1 != p){ sched(q1, q); } for(; p != nil; p = p->link){ if(!(p->mark & NOSCHED)) break; q = p; } p1 = p; q1 = p; o = 0; continue; } if(p->mark & (LABEL|SYNC)) { if(q1 != p) sched(q1, q); q1 = p; o = 1; } if(p->mark & (BRANCH|SYNC)) { sched(q1, p); q1 = p1; o = 0; } if(o >= NSCHED) { sched(q1, p); q1 = p1; o = 0; } q = p; } */ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog { // MOVD g_stackguard(g), R3 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0 if ctxt.Cursym.Cfunc != 0 { p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 var q *obj.Prog if framesize <= obj.StackSmall { // small stack: SP < stackguard // CMP stackguard, SP p = obj.Appendp(ctxt, p) p.As = ACMPU p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REGSP } else if framesize <= obj.StackBig { // large stack: SP-framesize < stackguard-StackSmall // ADD $-framesize, SP, R4 // CMP stackguard, R4 p = obj.Appendp(ctxt, p) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(-framesize) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(ctxt, p) p.As = ACMPU p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 } else { // Such a large stack we need to protect against wraparound. // If SP is close to zero: // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // // Preemption sets stackguard to StackPreempt, a very large value. // That breaks the math above, so we have to check for that explicitly. 
// // stackguard is R3 // CMP R3, $StackPreempt // BEQ label-of-call-to-morestack // ADD $StackGuard, SP, R4 // SUB R3, R4 // MOVD $(framesize+(StackGuard-StackSmall)), R31 // CMPU R31, R4 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_CONST p.To.Offset = obj.StackPreempt p = obj.Appendp(ctxt, p) q = p p.As = ABEQ p.To.Type = obj.TYPE_BRANCH p = obj.Appendp(ctxt, p) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackGuard p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(ctxt, p) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP p = obj.Appendp(ctxt, p) p.As = ACMPU p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 } // q1: BLT done p = obj.Appendp(ctxt, p) q1 := p p.As = ABLT p.To.Type = obj.TYPE_BRANCH // MOVD LR, R5 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_LR p.To.Type = obj.TYPE_REG p.To.Reg = REG_R5 if q != nil { q.Pcond = p } var morestacksym *obj.LSym if ctxt.Cursym.Cfunc != 0 { morestacksym = obj.Linklookup(ctxt, "runtime.morestackc", 0) } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 { morestacksym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0) } else { morestacksym = obj.Linklookup(ctxt, "runtime.morestack", 0) } if ctxt.Flag_dynlink { // Avoid calling morestack via a PLT when dynamically linking. The // PLT stubs generated by the system linker on ppc64le start with "std r2, // 24(r1)" to save the TOC pointer in their caller's stack // frame. Unfortunately (and necessarily) morestack is called before // the function that calls it sets up its frame, and so the PLT ends // up smashing the saved TOC pointer for its caller's caller. // // According to the ABI documentation there is a mechanism to avoid // the TOC save that the PLT stub does (put a R_PPC64_TOCSAVE // relocation on the nop after the call to morestack), but at the time // of writing it is not supported at all by gold, and my attempt to // use it with ld.bfd caused an internal linker error. So this hack // seems preferable. // MOVD $runtime.morestack(SB), R12 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Sym = morestacksym p.From.Name = obj.NAME_GOTREF p.To.Type = obj.TYPE_REG p.To.Reg = REG_R12 // MOVD R12, CTR p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REG_R12 p.To.Type = obj.TYPE_REG p.To.Reg = REG_CTR // BL CTR p = obj.Appendp(ctxt, p) p.As = obj.ACALL p.From.Type = obj.TYPE_REG p.From.Reg = REG_R12 p.To.Type = obj.TYPE_REG p.To.Reg = REG_CTR } else { // BL runtime.morestack(SB) p = obj.Appendp(ctxt, p) p.As = ABL p.To.Type = obj.TYPE_BRANCH p.To.Sym = morestacksym } // BR start p = obj.Appendp(ctxt, p) p.As = ABR p.To.Type = obj.TYPE_BRANCH p.Pcond = ctxt.Cursym.Text.Link // placeholder for q1's jump target p = obj.Appendp(ctxt, p) p.As = obj.ANOP // zero-width placeholder q1.Pcond = p return p }
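// A hedged, standalone sketch of the arithmetic behind the huge-frame check
// emitted above. The constant values here are only illustrative placeholders
// (the real ones live in cmd/internal/obj). It shows why a naive
// SP-framesize comparison can wrap around when the frame is larger than SP,
// while the guarded form used above does not.
package main

import "fmt"

const (
	stackSmall = 128
	stackGuard = 720
)

// naiveNeedsGrow wraps around when framesize > sp, silently passing.
func naiveNeedsGrow(sp, stackguard, framesize uint64) bool {
	return sp-framesize < stackguard-stackSmall
}

// guardedNeedsGrow is the form used above: adding StackGuard to both sides
// keeps the left side positive even when SP is slightly below stackguard.
func guardedNeedsGrow(sp, stackguard, framesize uint64) bool {
	return sp-stackguard+stackGuard <= framesize+(stackGuard-stackSmall)
}

func main() {
	// SP is near zero and the frame is 1MB: SP-framesize wraps around in
	// the naive form, so the overflow would go undetected.
	sp, guard, frame := uint64(1020), uint64(1000), uint64(1<<20)
	fmt.Println(naiveNeedsGrow(sp, guard, frame))   // false: wrapped, missed
	fmt.Println(guardedNeedsGrow(sp, guard, frame)) // true: grow the stack
}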
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog { // MOVW g_stackguard(g), R1 p = obj.Appendp(ctxt, p) p.As = AMOVW p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0 if ctxt.Cursym.Cfunc != 0 { p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 if framesize <= obj.StackSmall { // small stack: SP < stackguard // CMP stackguard, SP p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REGSP } else if framesize <= obj.StackBig { // large stack: SP-framesize < stackguard-StackSmall // MOVW $-framesize(SP), R2 // CMP stackguard, R2 p = obj.Appendp(ctxt, p) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Reg = REGSP p.From.Offset = int64(-framesize) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REG_R2 } else { // Such a large stack we need to protect against wraparound // if SP is close to zero. // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // CMP $StackPreempt, R1 // MOVW.NE $StackGuard(SP), R2 // SUB.NE R1, R2 // MOVW.NE $(framesize+(StackGuard-StackSmall)), R3 // CMP.NE R3, R2 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_CONST p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1))) p.Reg = REG_R1 p = obj.Appendp(ctxt, p) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Reg = REGSP p.From.Offset = obj.StackGuard p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p.Scond = C_SCOND_NE p = obj.Appendp(ctxt, p) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p.Scond = C_SCOND_NE p = obj.Appendp(ctxt, p) p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 p.Scond = C_SCOND_NE p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REG_R2 p.Scond = C_SCOND_NE } // BLS call-to-morestack bls := obj.Appendp(ctxt, p) bls.As = ABLS bls.To.Type = obj.TYPE_BRANCH var last *obj.Prog for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link { } // MOVW LR, R3 movw := obj.Appendp(ctxt, last) movw.As = AMOVW movw.From.Type = obj.TYPE_REG movw.From.Reg = REGLINK movw.To.Type = obj.TYPE_REG movw.To.Reg = REG_R3 bls.Pcond = movw // BL runtime.morestack call := obj.Appendp(ctxt, movw) call.As = obj.ACALL call.To.Type = obj.TYPE_BRANCH morestack := "runtime.morestack" switch { case ctxt.Cursym.Cfunc != 0: morestack = "runtime.morestackc" case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0: morestack = "runtime.morestack_noctxt" } call.To.Sym = obj.Linklookup(ctxt, morestack, 0) // B start b := obj.Appendp(ctxt, call) b.As = obj.AJMP b.To.Type = obj.TYPE_BRANCH b.Pcond = ctxt.Cursym.Text.Link return bls }
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog { // MOV g_stackguard(g), R1 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 if ctxt.Cursym.Cfunc { p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 q := (*obj.Prog)(nil) if framesize <= obj.StackSmall { // small stack: SP < stackguard // MOV SP, R2 // CMP stackguard, R2 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_REG p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REG_R2 } else if framesize <= obj.StackBig { // large stack: SP-framesize < stackguard-StackSmall // SUB $framesize, SP, R2 // CMP stackguard, R2 p = obj.Appendp(ctxt, p) p.As = ASUB p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REG_R2 } else { // Such a large stack we need to protect against wraparound // if SP is close to zero. // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // CMP $StackPreempt, R1 // BEQ label_of_call_to_morestack // ADD $StackGuard, SP, R2 // SUB R1, R2 // MOV $(framesize+(StackGuard-StackSmall)), R3 // CMP R3, R2 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackPreempt p.Reg = REG_R1 p = obj.Appendp(ctxt, p) q = p p.As = ABEQ p.To.Type = obj.TYPE_BRANCH p = obj.Appendp(ctxt, p) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackGuard p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = AMOVD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 p = obj.Appendp(ctxt, p) p.As = ACMP p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REG_R2 } // BLS do-morestack bls := obj.Appendp(ctxt, p) bls.As = ABLS bls.To.Type = obj.TYPE_BRANCH var last *obj.Prog for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link { } spfix := obj.Appendp(ctxt, last) spfix.As = obj.ANOP spfix.Spadj = -framesize // MOV LR, R3 movlr := obj.Appendp(ctxt, spfix) movlr.As = AMOVD movlr.From.Type = obj.TYPE_REG movlr.From.Reg = REGLINK movlr.To.Type = obj.TYPE_REG movlr.To.Reg = REG_R3 if q != nil { q.Pcond = movlr } bls.Pcond = movlr debug := movlr if false { debug = obj.Appendp(ctxt, debug) debug.As = AMOVD debug.From.Type = obj.TYPE_CONST debug.From.Offset = int64(framesize) debug.To.Type = obj.TYPE_REG debug.To.Reg = REGTMP } // BL runtime.morestack(SB) call := obj.Appendp(ctxt, debug) call.As = ABL call.To.Type = obj.TYPE_BRANCH morestack := "runtime.morestack" switch { case ctxt.Cursym.Cfunc: morestack = "runtime.morestackc" case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0: morestack = "runtime.morestack_noctxt" } call.To.Sym = obj.Linklookup(ctxt, morestack, 0) // B start jmp := obj.Appendp(ctxt, call) jmp.As = AB jmp.To.Type = obj.TYPE_BRANCH jmp.Pcond = ctxt.Cursym.Text.Link jmp.Spadj = +framesize // placeholder for bls's jump target // p = 
obj.Appendp(ctxt, p) // p.As = obj.ANOP return bls }
// Rewrite p, if necessary, to access global data via the global offset table. func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) { if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { // ADUFFxxx $offset // becomes // MOVD runtime.duffxxx@GOT, R12 // ADD $offset, R12 // MOVD R12, CTR // BL (CTR) var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = obj.Linklookup(ctxt, "runtime.duffzero", 0) } else { sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0) } offset := p.To.Offset p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF p.From.Sym = sym p.To.Type = obj.TYPE_REG p.To.Reg = REG_R12 p.To.Name = obj.NAME_NONE p.To.Offset = 0 p.To.Sym = nil p1 := obj.Appendp(ctxt, p) p1.As = AADD p1.From.Type = obj.TYPE_CONST p1.From.Offset = offset p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_R12 p2 := obj.Appendp(ctxt, p1) p2.As = AMOVD p2.From.Type = obj.TYPE_REG p2.From.Reg = REG_R12 p2.To.Type = obj.TYPE_REG p2.To.Reg = REG_CTR p3 := obj.Appendp(ctxt, p2) p3.As = obj.ACALL p3.From.Type = obj.TYPE_REG p3.From.Reg = REG_R12 p3.To.Type = obj.TYPE_REG p3.To.Reg = REG_CTR } // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local { // MOVD $sym, Rx becomes MOVD sym@GOT, Rx // MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx if p.As != AMOVD { ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p) } if p.To.Type != obj.TYPE_REG { ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p) } p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF if p.From.Offset != 0 { q := obj.Appendp(ctxt, p) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = p.From.Offset q.To = p.To p.From.Offset = 0 } } if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr // MOVx sym, Ry becomes MOVD sym@GOT, REGTMP; MOVx (REGTMP), Ry // MOVx Ry, sym becomes MOVD sym@GOT, REGTMP; MOVx Ry, (REGTMP) // An addition may be inserted between the two MOVs if there is an offset. if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local { ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local { source = &p.To } else { return } if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { return } if source.Sym.Type == obj.STLSBSS { return } if source.Type != obj.TYPE_MEM { ctxt.Diag("don't know how to handle %v with -dynlink", p) } p1 := obj.Appendp(ctxt, p) p2 := obj.Appendp(ctxt, p1) p1.As = AMOVD p1.From.Type = obj.TYPE_MEM p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG p1.To.Reg = REGTMP p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = REGTMP p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { p2.To.Reg = REGTMP p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } else { return } obj.Nopout(p) }
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { // TODO(minux): add morestack short-cuts with small fixed frame-size. ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset if textstksiz == -8 { // Compatibility hack. p.From3.Offset |= obj.NOFRAME textstksiz = 0 } if textstksiz%8 != 0 { ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From3.Offset&obj.NOFRAME != 0 { if textstksiz != 0 { ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime()) } ctxt.Bso.Flush() var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ALWAR, ASTWCCC, AECIWX, AECOWX, AEIEIO, AICBI, AISYNC, ATLBIE, ATLBIEL, ASLBIA, ASLBIE, ASLBMFEE, ASLBMFEV, ASLBMTE, ADCBF, ADCBI, ADCBST, ADCBT, ADCBTST, ADCBZ, ASYNC, ATLBSYNC, APTESYNC, ATW, AWORD, ARFI, ARFCI, ARFID, AHRFID: q = p p.Mark |= LABEL | SYNC continue case AMOVW, AMOVWZ, AMOVD: q = p if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } continue case AFABS, AFABSCC, AFADD, AFADDCC, AFCTIW, AFCTIWCC, AFCTIWZ, AFCTIWZCC, AFDIV, AFDIVCC, AFMADD, AFMADDCC, AFMOVD, AFMOVDU, /* case AFMOVDS: */ AFMOVS, AFMOVSU, /* case AFMOVSD: */ AFMSUB, AFMSUBCC, AFMUL, AFMULCC, AFNABS, AFNABSCC, AFNEG, AFNEGCC, AFNMADD, AFNMADDCC, AFNMSUB, AFNMSUBCC, AFRSP, AFRSPCC, AFSUB, AFSUBCC: q = p p.Mark |= FLOAT continue case ABL, ABCL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ABC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABNE, ABR, ABVC, ABVS: p.Mark |= BRANCH q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } else { p.Mark |= LABEL } q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case AFCMPO, AFCMPU: q = p p.Mark |= FCMP | FLOAT continue case obj.ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } autosize := int32(0) var aoffset int var mov int var o int var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: mov = AMOVD aoffset = 0 autosize = int32(textstksiz) if p.Mark&LEAF != 0 && autosize == 0 && p.From3.Offset&obj.NOFRAME == 0 { // A leaf function with no locals has no frame. p.From3.Offset |= obj.NOFRAME } if p.From3.Offset&obj.NOFRAME == 0 { // If there is a stack frame at all, it includes // space to save the LR. autosize += int32(ctxt.FixedFrameSize()) } p.To.Offset = int64(autosize) q = p if ctxt.Flag_shared != 0 && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" { // When compiling Go into PIC, all functions must start // with instructions to load the TOC pointer into r2: // // addis r2, r12, .TOC.-func@ha // addi r2, r2, .TOC.-func@l+4 // // We could probably skip this prologue in some situations // but it's a bit subtle. 
However, it is both safe and // necessary to leave the prologue off duffzero and // duffcopy as we rely on being able to jump to a specific // instruction offset for them. // // These are AWORDS because there is no (afaict) way to // generate the addis instruction except as part of the // load of a large constant, and in that case there is no // way to use r12 as the source. q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x3c4c0000 q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x38420000 rel := obj.Addrel(ctxt.Cursym) rel.Off = 0 rel.Siz = 8 rel.Sym = obj.Linklookup(ctxt, ".TOC.", 0) rel.Type = obj.R_ADDRPOWER_PCREL } if cursym.Text.From3.Offset&obj.NOSPLIT == 0 { q = stacksplit(ctxt, q, autosize) // emit split check } if autosize != 0 { /* use MOVDU to adjust R1 when saving R31, if autosize is small */ if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG { mov = AMOVDU aoffset = int(-autosize) } else { q = obj.Appendp(ctxt, q) q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } } else if cursym.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. cursym.Text.Mark |= LEAF } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 break } q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q = obj.Appendp(ctxt, q) q.As = int16(mov) q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_MEM q.To.Offset = int64(aoffset) q.To.Reg = REGSP if q.As == AMOVDU { q.Spadj = int32(-aoffset) } if ctxt.Flag_shared != 0 { q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REGSP q.To.Offset = 24 } if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 // CMP R0, R3 // BEQ end // MOVD panic_argp(R3), R4 // ADD $(autosize+8), R1, R5 // CMP R4, R5 // BNE end // ADD $8, R1, R6 // MOVD R6, panic_argp(R3) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes. 
q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R0 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case obj.ARET: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retTarget := p.To.Sym if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} if retTarget == nil { p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR } else { p.To.Type = obj.TYPE_BRANCH p.To.Sym = retTarget } p.Mark |= BRANCH break } p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = ABR q.Lineno = p.Lineno q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Link = p.Link p.Link = q p = q if false { // Debug bad returns q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_MEM q.From.Offset = 0 q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q.Link = p.Link p.Link = q p = q } if autosize != 0 { q = ctxt.NewProg() q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = ABR q1.Lineno = p.Lineno if retTarget == nil { q1.To.Type = obj.TYPE_REG q1.To.Reg = REG_LR } else { q1.To.Type = obj.TYPE_BRANCH q1.To.Sym = retTarget } q1.Mark |= BRANCH q1.Spadj = +autosize q1.Link = q.Link q.Link = q1 case AADD: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } }
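// A standalone sketch (the type and field names here are invented, not the
// runtime's) of the check that the WRAPPER prologue above compiles to: if the
// innermost panic's argp points at this wrapper's argument area, retarget it
// to the bottom of the current frame, keeping wrappers transparent to recover.
package main

import "fmt"

type _panic struct{ argp uintptr }
type g struct{ panicChain *_panic }

// adjustPanicArgp mirrors: if g->panic != nil && g->panic->argp == FP,
// then g->panic->argp = bottom-of-frame.
func adjustPanicArgp(gp *g, fp, bottomOfFrame uintptr) {
	if gp.panicChain != nil && gp.panicChain.argp == fp {
		gp.panicChain.argp = bottomOfFrame
	}
}

func main() {
	gp := &g{panicChain: &_panic{argp: 0x1000}}
	adjustPanicArgp(gp, 0x1000, 0x0ff8)
	fmt.Printf("%#x\n", gp.panicChain.argp) // 0xff8
}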
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { case ssa.OpInitMem: // memory arg needs no code case ssa.OpArg: // input args need no code case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg: if v.Type.IsMemory() { return } x := v.Args[0].Reg() y := v.Reg() if x == y { return } as := arm.AMOVW if v.Type.IsFloat() { switch v.Type.Size() { case 4: as = arm.AMOVF case 8: as = arm.AMOVD default: panic("bad float size") } } p := gc.Prog(as) p.From.Type = obj.TYPE_REG p.From.Reg = x p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpARMMOVWnop: if v.Reg() != v.Args[0].Reg() { v.Fatalf("input[0] and output not in same register %s", v.LongString()) } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { v.Fatalf("load flags not implemented: %v", v.LongString()) return } p := gc.Prog(loadByType(v.Type)) gc.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpPhi: gc.CheckLoweredPhi(v) case ssa.OpStoreReg: if v.Type.IsFlags() { v.Fatalf("store flags not implemented: %v", v.LongString()) return } p := gc.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() gc.AddrAuto(&p.To, v) case ssa.OpARMUDIVrtcall: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = obj.Linklookup(gc.Ctxt, "udiv", 0) case ssa.OpARMADD, ssa.OpARMADC, ssa.OpARMSUB, ssa.OpARMSBC, ssa.OpARMRSB, ssa.OpARMAND, ssa.OpARMOR, ssa.OpARMXOR, ssa.OpARMBIC, ssa.OpARMMUL, ssa.OpARMADDF, ssa.OpARMADDD, ssa.OpARMSUBF, ssa.OpARMSUBD, ssa.OpARMMULF, ssa.OpARMMULD, ssa.OpARMDIVF, ssa.OpARMDIVD: r := v.Reg() r1 := v.Args[0].Reg() r2 := v.Args[1].Reg() p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMADDS, ssa.OpARMSUBS: r := v.Reg0() r1 := v.Args[0].Reg() r2 := v.Args[1].Reg() p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_SBIT p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMSLL, ssa.OpARMSRL, ssa.OpARMSRA: r := v.Reg() r1 := v.Args[0].Reg() r2 := v.Args[1].Reg() p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMSRAcond: // ARM shift instructions uses only the low-order byte of the shift amount // generate conditional instructions to deal with large shifts // flag is already set // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit // SRA.LO Rarg1, Rarg0, Rdst r := v.Reg() r1 := v.Args[0].Reg() r2 := v.Args[1].Reg() p := gc.Prog(arm.ASRA) p.Scond = arm.C_SCOND_HS p.From.Type = obj.TYPE_CONST p.From.Offset = 31 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r p = gc.Prog(arm.ASRA) p.Scond = arm.C_SCOND_LO p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMADDconst, ssa.OpARMADCconst, ssa.OpARMSUBconst, ssa.OpARMSBCconst, ssa.OpARMRSBconst, ssa.OpARMRSCconst, ssa.OpARMANDconst, ssa.OpARMORconst, ssa.OpARMXORconst, ssa.OpARMBICconst, ssa.OpARMSLLconst, ssa.OpARMSRLconst, ssa.OpARMSRAconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMADDSconst, ssa.OpARMSUBSconst, ssa.OpARMRSBSconst: p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_SBIT p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() case 
ssa.OpARMSRRconst: genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) case ssa.OpARMADDshiftLL, ssa.OpARMADCshiftLL, ssa.OpARMSUBshiftLL, ssa.OpARMSBCshiftLL, ssa.OpARMRSBshiftLL, ssa.OpARMRSCshiftLL, ssa.OpARMANDshiftLL, ssa.OpARMORshiftLL, ssa.OpARMXORshiftLL, ssa.OpARMBICshiftLL: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) case ssa.OpARMADDSshiftLL, ssa.OpARMSUBSshiftLL, ssa.OpARMRSBSshiftLL: p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRL, ssa.OpARMADCshiftRL, ssa.OpARMSUBshiftRL, ssa.OpARMSBCshiftRL, ssa.OpARMRSBshiftRL, ssa.OpARMRSCshiftRL, ssa.OpARMANDshiftRL, ssa.OpARMORshiftRL, ssa.OpARMXORshiftRL, ssa.OpARMBICshiftRL: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) case ssa.OpARMADDSshiftRL, ssa.OpARMSUBSshiftRL, ssa.OpARMRSBSshiftRL: p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRA, ssa.OpARMADCshiftRA, ssa.OpARMSUBshiftRA, ssa.OpARMSBCshiftRA, ssa.OpARMRSBshiftRA, ssa.OpARMRSCshiftRA, ssa.OpARMANDshiftRA, ssa.OpARMORshiftRA, ssa.OpARMXORshiftRA, ssa.OpARMBICshiftRA: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) case ssa.OpARMADDSshiftRA, ssa.OpARMSUBSshiftRA, ssa.OpARMRSBSshiftRA: p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMXORshiftRR: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) case ssa.OpARMMVNshiftLL: genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) case ssa.OpARMMVNshiftRL: genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) case ssa.OpARMMVNshiftRA: genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) case ssa.OpARMMVNshiftLLreg: genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL) case ssa.OpARMMVNshiftRLreg: genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR) case ssa.OpARMMVNshiftRAreg: genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR) case ssa.OpARMADDshiftLLreg, ssa.OpARMADCshiftLLreg, ssa.OpARMSUBshiftLLreg, ssa.OpARMSBCshiftLLreg, ssa.OpARMRSBshiftLLreg, ssa.OpARMRSCshiftLLreg, ssa.OpARMANDshiftLLreg, ssa.OpARMORshiftLLreg, ssa.OpARMXORshiftLLreg, ssa.OpARMBICshiftLLreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL) case ssa.OpARMADDSshiftLLreg, ssa.OpARMSUBSshiftLLreg, ssa.OpARMRSBSshiftLLreg: p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRLreg, ssa.OpARMADCshiftRLreg, ssa.OpARMSUBshiftRLreg, ssa.OpARMSBCshiftRLreg, ssa.OpARMRSBshiftRLreg, ssa.OpARMRSCshiftRLreg, ssa.OpARMANDshiftRLreg, ssa.OpARMORshiftRLreg, ssa.OpARMXORshiftRLreg, ssa.OpARMBICshiftRLreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR) case ssa.OpARMADDSshiftRLreg, ssa.OpARMSUBSshiftRLreg, ssa.OpARMRSBSshiftRLreg: p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRAreg, ssa.OpARMADCshiftRAreg, ssa.OpARMSUBshiftRAreg, ssa.OpARMSBCshiftRAreg, ssa.OpARMRSBshiftRAreg, ssa.OpARMRSCshiftRAreg, 
ssa.OpARMANDshiftRAreg, ssa.OpARMORshiftRAreg, ssa.OpARMXORshiftRAreg, ssa.OpARMBICshiftRAreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR) case ssa.OpARMADDSshiftRAreg, ssa.OpARMSUBSshiftRAreg, ssa.OpARMRSBSshiftRAreg: p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR) p.Scond = arm.C_SBIT case ssa.OpARMHMUL, ssa.OpARMHMULU: // 32-bit high multiplication p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REGREG p.To.Reg = v.Reg() p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register case ssa.OpARMMULLU: // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1 p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REGREG p.To.Reg = v.Reg0() // high 32-bit p.To.Offset = int64(v.Reg1()) // low 32-bit case ssa.OpARMMULA: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REGREG2 p.To.Reg = v.Reg() // result p.To.Offset = int64(v.Args[2].Reg()) // addend case ssa.OpARMMOVWconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMMOVFconst, ssa.OpARMMOVDconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMCMP, ssa.OpARMCMN, ssa.OpARMTST, ssa.OpARMTEQ, ssa.OpARMCMPF, ssa.OpARMCMPD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Special layout in ARM assembly // Comparing to x86, the operands of ARM's CMP are reversed. 
p.From.Reg = v.Args[1].Reg() p.Reg = v.Args[0].Reg() case ssa.OpARMCMPconst, ssa.OpARMCMNconst, ssa.OpARMTSTconst, ssa.OpARMTEQconst: // Special layout in ARM assembly p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = v.Args[0].Reg() case ssa.OpARMCMPF0, ssa.OpARMCMPD0: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() case ssa.OpARMCMPshiftLL: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) case ssa.OpARMCMPshiftRL: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) case ssa.OpARMCMPshiftRA: genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) case ssa.OpARMCMPshiftLLreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) case ssa.OpARMCMPshiftRLreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) case ssa.OpARMCMPshiftRAreg: genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) case ssa.OpARMMOVWaddr: p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() var wantreg string // MOVW $sym+off(base), R // the assembler expands it as the following: // - base is SP: add constant offset to SP (R13) // when constant is large, tmp register (R11) may be used // - base is SB: load external address from constant pool (use relocation) switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) case *ssa.ExternSymbol: wantreg = "SB" gc.AddAux(&p.From, v) case *ssa.ArgSymbol, *ssa.AutoSymbol: wantreg = "SP" gc.AddAux(&p.From, v) case nil: // No sym, just MOVW $off(SP), R wantreg = "SP" p.From.Reg = arm.REGSP p.From.Offset = v.AuxInt } if reg := v.Args[0].RegName(); reg != wantreg { v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) } case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload, ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore, ssa.OpARMMOVFstore, ssa.OpARMMOVDstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) case ssa.OpARMMOVWloadidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWloadshiftLL: p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) p.From.Reg = v.Args[0].Reg() case ssa.OpARMMOVWloadshiftRL: p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) p.From.Reg = v.Args[0].Reg() case ssa.OpARMMOVWloadshiftRA: p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) p.From.Reg = v.Args[0].Reg() case ssa.OpARMMOVWstoreidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWstoreshiftLL: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_SHIFT p.To.Reg = v.Args[0].Reg() p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt)) case ssa.OpARMMOVWstoreshiftRL: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_SHIFT p.To.Reg = v.Args[0].Reg() p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt)) case ssa.OpARMMOVWstoreshiftRA: p := gc.Prog(v.Op.Asm()) 
p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_SHIFT p.To.Reg = v.Args[0].Reg() p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt)) case ssa.OpARMMOVBreg, ssa.OpARMMOVBUreg, ssa.OpARMMOVHreg, ssa.OpARMMOVHUreg: a := v.Args[0] for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop { a = a.Args[0] } if a.Op == ssa.OpLoadReg { t := a.Type switch { case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(), v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(), v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(), v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned(): // arg is a proper-typed load, already zero/sign-extended, don't extend again if v.Reg() == v.Args[0].Reg() { return } p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() return default: } } fallthrough case ssa.OpARMMVN, ssa.OpARMCLZ, ssa.OpARMSQRTD, ssa.OpARMNEGF, ssa.OpARMNEGD, ssa.OpARMMOVWF, ssa.OpARMMOVWD, ssa.OpARMMOVFW, ssa.OpARMMOVDW, ssa.OpARMMOVFD, ssa.OpARMMOVDF: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMMOVWUF, ssa.OpARMMOVWUD, ssa.OpARMMOVFWU, ssa.OpARMMOVDWU: p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_UBIT p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMCMOVWHSconst: p := gc.Prog(arm.AMOVW) p.Scond = arm.C_SCOND_HS p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMCMOVWLSconst: p := gc.Prog(arm.AMOVW) p.Scond = arm.C_SCOND_LS p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMCALLstatic: if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line // of the instruction byte before the return PC. // To avoid that being an unrelated instruction, // insert an actual hardware NOP that will have the right line number. // This is different from obj.ANOP, which is a virtual no-op // that doesn't make it into the instruction stream. 
ginsnop() } p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym)) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLclosure: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = v.Args[0].Reg() if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLdefer: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Deferproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLgo: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Newproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLinter: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = v.Args[0].Reg() if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMDUFFZERO: p := gc.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) p.To.Offset = v.AuxInt case ssa.OpARMDUFFCOPY: p := gc.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg)) p.To.Offset = v.AuxInt case ssa.OpARMLoweredNilCheck: // Issue a load which will fault if arg is nil. p := gc.Prog(arm.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = arm.REGTMP if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers gc.Warnl(v.Line, "generated nil check") } case ssa.OpARMLoweredZero: // MOVW.P Rarg2, 4(R1) // CMP Rarg1, R1 // BLE -2(PC) // arg1 is the address of the last element to zero // arg2 is known to be zero // auxint is alignment var sz int64 var mov obj.As switch { case v.AuxInt%4 == 0: sz = 4 mov = arm.AMOVW case v.AuxInt%2 == 0: sz = 2 mov = arm.AMOVH default: sz = 1 mov = arm.AMOVB } p := gc.Prog(mov) p.Scond = arm.C_PBIT p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_MEM p.To.Reg = arm.REG_R1 p.To.Offset = sz p2 := gc.Prog(arm.ACMP) p2.From.Type = obj.TYPE_REG p2.From.Reg = v.Args[1].Reg() p2.Reg = arm.REG_R1 p3 := gc.Prog(arm.ABLE) p3.To.Type = obj.TYPE_BRANCH gc.Patch(p3, p) case ssa.OpARMLoweredMove: // MOVW.P 4(R1), Rtmp // MOVW.P Rtmp, 4(R2) // CMP Rarg2, R1 // BLE -3(PC) // arg2 is the address of the last element of src // auxint is alignment var sz int64 var mov obj.As switch { case v.AuxInt%4 == 0: sz = 4 mov = arm.AMOVW case v.AuxInt%2 == 0: sz = 2 mov = arm.AMOVH default: sz = 1 mov = arm.AMOVB } p := gc.Prog(mov) p.Scond = arm.C_PBIT p.From.Type = obj.TYPE_MEM p.From.Reg = arm.REG_R1 p.From.Offset = sz p.To.Type = obj.TYPE_REG p.To.Reg = arm.REGTMP p2 := gc.Prog(mov) p2.Scond = arm.C_PBIT p2.From.Type = obj.TYPE_REG p2.From.Reg = arm.REGTMP p2.To.Type = obj.TYPE_MEM p2.To.Reg = arm.REG_R2 p2.To.Offset = sz p3 := gc.Prog(arm.ACMP) p3.From.Type = obj.TYPE_REG p3.From.Reg = v.Args[2].Reg() p3.Reg = arm.REG_R1 p4 := gc.Prog(arm.ABLE) p4.To.Type = obj.TYPE_BRANCH gc.Patch(p4, p) case ssa.OpVarDef: gc.Gvardef(v.Aux.(*gc.Node)) case ssa.OpVarKill: gc.Gvarkill(v.Aux.(*gc.Node)) case ssa.OpVarLive: gc.Gvarlive(v.Aux.(*gc.Node)) case ssa.OpKeepAlive: gc.KeepAlive(v) case ssa.OpARMEqual, ssa.OpARMNotEqual, ssa.OpARMLessThan, ssa.OpARMLessEqual, ssa.OpARMGreaterThan, ssa.OpARMGreaterEqual, ssa.OpARMLessThanU, ssa.OpARMLessEqualU, ssa.OpARMGreaterThanU, ssa.OpARMGreaterEqualU: // generate boolean values // use 
conditional move p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() p = gc.Prog(arm.AMOVW) p.Scond = condBits[v.Op] p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpSelect0, ssa.OpSelect1: // nothing to do case ssa.OpARMLoweredGetClosurePtr: // Closure pointer is R7 (arm.REGCTXT). gc.CheckLoweredGetClosurePtr(v) case ssa.OpARMFlagEQ, ssa.OpARMFlagLT_ULT, ssa.OpARMFlagLT_UGT, ssa.OpARMFlagGT_ULT, ssa.OpARMFlagGT_UGT: v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) case ssa.OpARMInvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) default: v.Fatalf("genValue not implemented: %s", v.LongString()) } }
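// A standalone sketch of the semantics behind the MULLU and HMUL cases in
// ssaGenValue above: a 32x32->64 multiply whose high and low halves land in
// two registers (HMUL keeps only the high half and discards the low half
// into a scratch register). math/bits gives the same split in pure Go.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, y := uint32(0xdeadbeef), uint32(0x12345678)
	hi, lo := bits.Mul32(x, y) // MULLU: hi -> out0, lo -> out1
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo)
}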
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog { // Leaf function with no frame is effectively NOSPLIT. if framesize == 0 { return p } var mov, add, sub obj.As if ctxt.Mode&Mips64 != 0 { add = AADDV mov = AMOVV sub = ASUBVU } else { add = AADDU mov = AMOVW sub = ASUBU } // MOV g_stackguard(g), R1 p = obj.Appendp(ctxt, p) p.As = mov p.From.Type = obj.TYPE_MEM p.From.Reg = REGG p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 if ctxt.Cursym.CFunc() { p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 var q *obj.Prog if framesize <= obj.StackSmall { // small stack: SP < stackguard // AGTU SP, stackguard, R1 p = obj.Appendp(ctxt, p) p.As = ASGTU p.From.Type = obj.TYPE_REG p.From.Reg = REGSP p.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 } else if framesize <= obj.StackBig { // large stack: SP-framesize < stackguard-StackSmall // ADD $-framesize, SP, R2 // SGTU R2, stackguard, R1 p = obj.Appendp(ctxt, p) p.As = add p.From.Type = obj.TYPE_CONST p.From.Offset = int64(-framesize) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = ASGTU p.From.Type = obj.TYPE_REG p.From.Reg = REG_R2 p.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 } else { // Such a large stack we need to protect against wraparound. // If SP is close to zero: // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) // The +StackGuard on both sides is required to keep the left side positive: // SP is allowed to be slightly below stackguard. See stack.h. // // Preemption sets stackguard to StackPreempt, a very large value. // That breaks the math above, so we have to check for that explicitly. // // stackguard is R1 // MOV $StackPreempt, R2 // BEQ R1, R2, label-of-call-to-morestack // ADD $StackGuard, SP, R2 // SUB R1, R2 // MOV $(framesize+(StackGuard-StackSmall)), R1 // SGTU R2, R1, R1 p = obj.Appendp(ctxt, p) p.As = mov p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackPreempt p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) q = p p.As = ABEQ p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.Reg = REG_R2 p.To.Type = obj.TYPE_BRANCH p.Mark |= BRANCH p = obj.Appendp(ctxt, p) p.As = add p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackGuard p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = sub p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(ctxt, p) p.As = mov p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 p = obj.Appendp(ctxt, p) p.As = ASGTU p.From.Type = obj.TYPE_REG p.From.Reg = REG_R2 p.Reg = REG_R1 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 } // q1: BNE R1, done p = obj.Appendp(ctxt, p) q1 := p p.As = ABNE p.From.Type = obj.TYPE_REG p.From.Reg = REG_R1 p.To.Type = obj.TYPE_BRANCH p.Mark |= BRANCH // MOV LINK, R3 p = obj.Appendp(ctxt, p) p.As = mov p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 if q != nil { q.Pcond = p p.Mark |= LABEL } // JAL runtime.morestack(SB) p = obj.Appendp(ctxt, p) p.As = AJAL p.To.Type = obj.TYPE_BRANCH if ctxt.Cursym.CFunc() { p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0) } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 { p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0) } else { p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0) } p.Mark |= BRANCH 
// JMP start p = obj.Appendp(ctxt, p) p.As = AJMP p.To.Type = obj.TYPE_BRANCH p.Pcond = ctxt.Cursym.Text.Link p.Mark |= BRANCH // placeholder for q1's jump target p = obj.Appendp(ctxt, p) p.As = obj.ANOP // zero-width place holder q1.Pcond = p return p }
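// MIPS has no condition flags, so the checks above materialize each
// comparison into a register with SGTU and then branch on that register.
// A standalone sketch, with SGTU written out in Go:
package main

import "fmt"

// sgtu mirrors SGTU: the result is 1 if a > b (unsigned), else 0.
func sgtu(a, b uint64) uint64 {
	if a > b {
		return 1
	}
	return 0
}

func main() {
	sp, stackguard := uint64(0x7000), uint64(0x8000)
	// Small-stack check: grow unless SP > stackguard ("BNE R1, done"
	// skips the morestack call when the materialized result is nonzero).
	needGrow := sgtu(sp, stackguard) == 0
	fmt.Println(needGrow) // true: SP is at or below the guard
}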
func progedit(ctxt *obj.Link, p *obj.Prog) { p.From.Class = 0 p.To.Class = 0 // $0 results in C_ZCON, which matches both C_REG and various // C_xCON; however, the C_REG cases in asmout don't expect a // constant, so they will use the register fields and assemble // an R0. To prevent that, rewrite $0 as ZR. if p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { p.From.Type = obj.TYPE_REG p.From.Reg = REGZERO } if p.To.Type == obj.TYPE_CONST && p.To.Offset == 0 { p.To.Type = obj.TYPE_REG p.To.Reg = REGZERO } // Rewrite BR/BL to symbol as TYPE_BRANCH. switch p.As { case AB, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } break } // Rewrite float constants to values stored in memory. switch p.As { case AFMOVS: if p.From.Type == obj.TYPE_FCONST { f32 := float32(p.From.Val.(float64)) i32 := math.Float32bits(f32) literal := fmt.Sprintf("$f32.%08x", i32) s := obj.Linklookup(ctxt, literal, 0) s.Size = 4 p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Sym.Local = true p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AFMOVD: if p.From.Type == obj.TYPE_FCONST { i64 := math.Float64bits(p.From.Val.(float64)) literal := fmt.Sprintf("$f64.%016x", i64) s := obj.Linklookup(ctxt, literal, 0) s.Size = 8 p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Sym.Local = true p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } break } // Rewrite negative immediates as positive immediates with the // complementary instruction. switch p.As { case AADD, ASUB, ACMP, ACMN: if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && p.From.Offset != -1<<63 { p.From.Offset = -p.From.Offset p.As = complements[p.As] } case AADDW, ASUBW, ACMPW, ACMNW: if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && int32(p.From.Offset) != -1<<31 { p.From.Offset = -p.From.Offset p.As = complements[p.As] } } // For 32-bit logical instructions with a constant, // rewrite the high 32 bits to be a repetition of // the low 32 bits, so that the BITCON test can be // shared for both 32-bit and 64-bit. 32-bit ops // will zero the high 32 bits of the destination // register anyway. switch p.As { case AANDW, AORRW, AEORW, AANDSW: if p.From.Type == obj.TYPE_CONST { v := p.From.Offset & 0xffffffff p.From.Offset = v | v<<32 } } if ctxt.Flag_dynlink { rewriteToUseGot(ctxt, p) } }
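// A standalone sketch of the 32-bit logical-immediate trick used just above:
// duplicating the low word into the high word lets one BITCON encoder serve
// both operand widths, since 32-bit ops zero the destination's high word.
package main

import "fmt"

func dupLow32(c int64) int64 {
	v := c & 0xffffffff
	return v | v<<32
}

func main() {
	fmt.Printf("%#x\n", dupLow32(0x0000ff00)) // 0xff000000ff00
}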
func progedit(ctxt *obj.Link, p *obj.Prog) { p.From.Class = 0 p.To.Class = 0 // Rewrite B/BL to symbol as TYPE_BRANCH. switch p.As { case AB, ABL, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } } // Replace TLS register fetches on older ARM processors. switch p.As { // Treat MRC 15, 0, <reg>, C13, C0, 3 specially. case AMRC: if p.To.Offset&0xffff0fff == 0xee1d0f70 { // Because the instruction might be rewritten to a BL that returns in R0, // the register must be zero. if p.To.Offset&0xf000 != 0 { ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line()) } if ctxt.Goarm < 7 { // Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the TLS extension. if progedit_tlsfallback == nil { progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0) } // MOVW LR, R11 p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP // BL runtime.read_tls_fallback(SB) p = obj.Appendp(ctxt, p) p.As = ABL p.To.Type = obj.TYPE_BRANCH p.To.Sym = progedit_tlsfallback p.To.Offset = 0 // MOVW R11, LR p = obj.Appendp(ctxt, p) p.As = AMOVW p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.To.Type = obj.TYPE_REG p.To.Reg = REGLINK break } } // Otherwise, MRC/MCR instructions need no further treatment. p.As = AWORD } // Rewrite float constants to values stored in memory. switch p.As { case AMOVF: if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) { f32 := float32(p.From.Val.(float64)) i32 := math.Float32bits(f32) literal := fmt.Sprintf("$f32.%08x", i32) s := obj.Linklookup(ctxt, literal, 0) p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AMOVD: if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) { i64 := math.Float64bits(p.From.Val.(float64)) literal := fmt.Sprintf("$f64.%016x", i64) s := obj.Linklookup(ctxt, literal, 0) p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } }
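// A standalone sketch of the mask test applied above: in the MRC
// 15, 0, <Rd>, C13, C0, 3 encoding every bit is fixed except bits 12-15
// (the destination register), so masking those out and comparing against
// 0xee1d0f70 both recognizes the TLS fetch and recovers its destination.
package main

import "fmt"

func tlsMRCDest(w uint32) (rd uint32, ok bool) {
	if w&0xffff0fff != 0xee1d0f70 {
		return 0, false
	}
	return (w >> 12) & 0xf, true
}

func main() {
	fmt.Println(tlsMRCDest(0xee1d0f70)) // 0 true: writes R0, the required form
	fmt.Println(tlsMRCDest(0xee1d3f70)) // 3 true: writes R3, which progedit rejects
}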
func progedit(ctxt *obj.Link, p *obj.Prog) { p.From.Class = 0 p.To.Class = 0 // Rewrite BR/BL to symbol as TYPE_BRANCH. switch p.As { case ABR, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } } // Rewrite float constants to values stored in memory. switch p.As { case AFMOVS: if p.From.Type == obj.TYPE_FCONST { f32 := float32(p.From.Val.(float64)) i32 := math.Float32bits(f32) literal := fmt.Sprintf("$f32.%08x", i32) s := obj.Linklookup(ctxt, literal, 0) s.Size = 4 p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Sym.Local = true p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AFMOVD: if p.From.Type == obj.TYPE_FCONST { i64 := math.Float64bits(p.From.Val.(float64)) literal := fmt.Sprintf("$f64.%016x", i64) s := obj.Linklookup(ctxt, literal, 0) s.Size = 8 p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Sym.Local = true p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } // Put >32-bit constants in memory and load them case AMOVD: if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset { literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset)) s := obj.Linklookup(ctxt, literal, 0) s.Size = 8 p.From.Type = obj.TYPE_MEM p.From.Sym = s p.From.Sym.Local = true p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } // Rewrite SUB constants into ADD. switch p.As { case ASUBC: if p.From.Type == obj.TYPE_CONST { p.From.Offset = -p.From.Offset p.As = AADDC } case ASUBCCC: if p.From.Type == obj.TYPE_CONST { p.From.Offset = -p.From.Offset p.As = AADDCCC } case ASUB: if p.From.Type == obj.TYPE_CONST { p.From.Offset = -p.From.Offset p.As = AADD } } if ctxt.Flag_dynlink { rewriteToUseGot(ctxt, p) } }
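// A standalone sketch of the constant-size test used above for AMOVD: a
// constant is moved to memory only when it does not survive a round trip
// through int32, i.e. it cannot be materialized as a sign-extended 32-bit
// immediate. The pool-name scheme mirrors the "$i64.%016x" literal above.
package main

import "fmt"

func fitsSigned32(c int64) bool { return int64(int32(c)) == c }

func poolName(c int64) string { return fmt.Sprintf("$i64.%016x", uint64(c)) }

func main() {
	fmt.Println(fitsSigned32(-1))      // true: stays an immediate
	fmt.Println(fitsSigned32(1 << 35)) // false: goes to memory
	fmt.Println(poolName(1 << 35))     // $i64.0000000800000000
}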
func typelinkLSym(t *Type) *obj.LSym { name := "go.typelink." + Tconv(t, FmtLeft) // complete, unambiguous type name return obj.Linklookup(Ctxt, name, 0) }
func softfloat(ctxt *obj.Link, cursym *obj.LSym) { if ctxt.Goarm > 5 { return } symsfloat := obj.Linklookup(ctxt, "_sfloat", 0) wasfloat := 0 for p := cursym.Text; p != nil; p = p.Link { if p.Pcond != nil { p.Pcond.Mark |= LABEL } } var next *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { case AMOVW: if isfloatreg(&p.To) || isfloatreg(&p.From) { goto soft } goto notsoft case AMOVWD, AMOVWF, AMOVDW, AMOVFW, AMOVFD, AMOVDF, AMOVF, AMOVD, ACMPF, ACMPD, AADDF, AADDD, ASUBF, ASUBD, AMULF, AMULD, ADIVF, ADIVD, ASQRTF, ASQRTD, AABSF, AABSD: goto soft default: goto notsoft } soft: if wasfloat == 0 || (p.Mark&LABEL != 0) { next = ctxt.NewProg() *next = *p // BL _sfloat(SB) *p = obj.Prog{} p.Ctxt = ctxt p.Link = next p.As = ABL p.To.Type = obj.TYPE_BRANCH p.To.Sym = symsfloat p.Lineno = next.Lineno p = next wasfloat = 1 } continue notsoft: wasfloat = 0 } }
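// A standalone toy (strings stand in for Progs; the run detection is the
// point, not the instruction classification) of the pass above: softfloat
// inserts one BL _sfloat(SB) before each run of floating-point instructions,
// restarting a run at branch targets, rather than one call per instruction.
package main

import (
	"fmt"
	"strings"
)

func insertSfloatCalls(ops []string, label map[int]bool) []string {
	var out []string
	wasfloat := false
	for i, op := range ops {
		isfloat := strings.HasPrefix(op, "F") // toy stand-in for the real op test
		if isfloat && (!wasfloat || label[i]) {
			out = append(out, "BL _sfloat(SB)")
		}
		out = append(out, op)
		wasfloat = isfloat
	}
	return out
}

func main() {
	ops := []string{"MOVW", "FADDD", "FMULD", "MOVW", "FSUBD"}
	fmt.Println(insertSfloatCalls(ops, map[int]bool{}))
	// [MOVW BL _sfloat(SB) FADDD FMULD MOVW BL _sfloat(SB) FSUBD]
}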