Source file src/cmd/compile/internal/ssa/regalloc_test.go

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ssa
     6  
     7  import (
     8  	"cmd/compile/internal/types"
     9  	"cmd/internal/obj/x86"
    10  	"fmt"
    11  	"testing"
    12  )
    13  
// TestLiveControlOps checks that flagalloc and regalloc can handle two
// flag-typed control values that are live simultaneously: "b" is computed
// in the entry block but not consumed until block "if", so it must survive
// the branch controlled by "a" (flags cannot be spilled like ordinary
// values, so flagalloc must recompute as needed). The test passes if the
// resulting function still satisfies the SSA invariant checker.
func TestLiveControlOps(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil),
			Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil),
			// Two flags values live at once: "a" controls the Eq below,
			// while "b" stays live into block "if".
			Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"),
			Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"),
			Eq("a", "if", "exit"),
		),
		Bloc("if",
			Eq("b", "plain", "exit"),
		),
		Bloc("plain",
			Goto("exit"),
		),
		Bloc("exit",
			Exit("mem"),
		),
	)
	flagalloc(f.f)
	regalloc(f.f)
	// checkFunc verifies SSA invariants still hold after allocation.
	checkFunc(f.f)
}
    39  
// Test to make sure G register is never reloaded from spill (spill of G is okay)
// See #25504
func TestNoGetgLoadReg(t *testing.T) {
	/*
		Original:
		func fff3(i int) *g {
			gee := getg()
			if i == 0 {
				fff()
			}
			return gee // here
		}
	*/
	c := testConfigARM64(t)
	f := c.Fun("b1",
		Bloc("b1",
			Valu("v1", OpInitMem, types.TypeMem, 0, nil),
			Valu("v6", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
			// v8 reads the g register; it must stay live across the call in b2.
			Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
			Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
			Eq("v11", "b2", "b4"),
		),
		Bloc("b4",
			Goto("b3"),
		),
		Bloc("b3",
			Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"),
			Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
			// Use of v8 after the call: regalloc must recover it without
			// issuing a load that targets the g register.
			Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"),
			Exit("v16"),
		),
		Bloc("b2",
			// The call clobbers all registers, forcing v8 to be spilled.
			Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"),
			Goto("b3"),
		),
	)
	regalloc(f.f)
	checkFunc(f.f)
	// Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway.
	r := f.f.RegAlloc
	for _, b := range f.blocks {
		for _, v := range b.Values {
			if v.Op == OpLoadReg && r[v.ID].String() == "g" {
				t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString())
			}
		}
	}
}
    88  
// Test to make sure we don't push spills into loops.
// See issue #19595.
func TestSpillWithLoop(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64)),
			Valu("cond", OpArg, c.config.Types.Bool, 0, c.Temp(c.config.Types.Bool)),
			Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
			Goto("loop"),
		),
		Bloc("loop",
			Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"),
			// The call clobbers registers each iteration; "ld" must be
			// spilled, but the spill should be placed before the loop, not
			// inside it where it would execute repeatedly.
			Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"),
			Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"),
			Eq("test", "next", "exit"),
		),
		Bloc("next",
			Goto("loop"),
		),
		Bloc("exit",
			// Use of "ld" after the loop keeps it live across the calls.
			Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"),
			Exit("store"),
		),
	)
	regalloc(f.f)
	checkFunc(f.f)
	// No OpStoreReg (spill) may appear inside the loop body.
	for _, v := range f.blocks["loop"].Values {
		if v.Op == OpStoreReg {
			t.Errorf("spill inside loop %s", v.LongString())
		}
	}
}
   123  
// TestSpillMove1 checks that when a value ("y") only needs its spill on one
// path (exit2, where a call precedes the use), regalloc sinks the spill to
// that path instead of leaving it in the loop or on the spill-free path.
func TestSpillMove1(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
			Goto("loop1"),
		),
		Bloc("loop1",
			// "y" is defined in the loop; whether it needs a spill depends
			// on which exit is taken.
			Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
			Eq("a", "loop2", "exit1"),
		),
		Bloc("loop2",
			Eq("a", "loop1", "exit2"),
		),
		Bloc("exit1",
			// store before call, y is available in a register
			Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"),
			Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"),
			Exit("mem3"),
		),
		Bloc("exit2",
			// store after call, y must be loaded from a spill location
			Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
			Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
			Exit("mem5"),
		),
	)
	flagalloc(f.f)
	regalloc(f.f)
	checkFunc(f.f)
	// Spill should be moved to exit2.
	if numSpills(f.blocks["loop1"]) != 0 {
		t.Errorf("spill present from loop1")
	}
	if numSpills(f.blocks["loop2"]) != 0 {
		t.Errorf("spill present in loop2")
	}
	if numSpills(f.blocks["exit1"]) != 0 {
		t.Errorf("spill present in exit1")
	}
	if numSpills(f.blocks["exit2"]) != 1 {
		t.Errorf("spill missing in exit2")
	}

}
   172  
// TestSpillMove2 is the counterpart of TestSpillMove1: here BOTH exits need
// the spilled value after a call, so the spill cannot be sunk to a single
// exit and is expected to remain at the definition in loop1.
func TestSpillMove2(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
			Goto("loop1"),
		),
		Bloc("loop1",
			Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
			Eq("a", "loop2", "exit1"),
		),
		Bloc("loop2",
			Eq("a", "loop1", "exit2"),
		),
		Bloc("exit1",
			// store after call, y must be loaded from a spill location
			Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
			Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"),
			Exit("mem3"),
		),
		Bloc("exit2",
			// store after call, y must be loaded from a spill location
			Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
			Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
			Exit("mem5"),
		),
	)
	flagalloc(f.f)
	regalloc(f.f)
	checkFunc(f.f)
	// There should be a spill in loop1, and nowhere else.
	// TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2.
	if numSpills(f.blocks["loop1"]) != 1 {
		t.Errorf("spill missing from loop1")
	}
	if numSpills(f.blocks["loop2"]) != 0 {
		t.Errorf("spill present in loop2")
	}
	if numSpills(f.blocks["exit1"]) != 0 {
		t.Errorf("spill present in exit1")
	}
	if numSpills(f.blocks["exit2"]) != 0 {
		t.Errorf("spill present in exit2")
	}

}
   222  
// TestClobbersArg0 checks that regalloc inserts a copy of an input that an
// op destroys: OpAMD64LoweredZeroLoop clobbers its pointer argument, and
// "ptr" is needed again by the store afterwards, so exactly one OpCopy must
// appear in the entry block to preserve it.
func TestClobbersArg0(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
			Valu("dst", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
			Valu("zero", OpAMD64LoweredZeroLoop, types.TypeMem, 256, nil, "ptr", "mem"),
			Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "dst", "ptr", "zero"),
			Exit("store")))
	flagalloc(f.f)
	regalloc(f.f)
	checkFunc(f.f)
	// LoweredZeroLoop clobbers its argument, so there must be a copy of "ptr" somewhere
	// so we still have that value available at "store".
	if n := numCopies(f.blocks["entry"]); n != 1 {
		// Dump the whole function to aid debugging.
		// NOTE(review): t.Logf would be the more idiomatic choice here — confirm.
		fmt.Printf("%s\n", f.f.String())
		t.Errorf("got %d copies, want 1", n)
	}
}
   243  
// TestClobbersArg1 extends TestClobbersArg0 to an op that clobbers two
// inputs: OpAMD64LoweredMoveLoop destroys both "dst" and "src", and both
// are used again by later stores, so two OpCopy values are required.
func TestClobbersArg1(t *testing.T) {
	c := testConfig(t)
	f := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("src", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
			Valu("dst", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
			Valu("use1", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
			Valu("use2", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
			Valu("move", OpAMD64LoweredMoveLoop, types.TypeMem, 256, nil, "dst", "src", "mem"),
			Valu("store1", OpAMD64MOVQstore, types.TypeMem, 0, nil, "use1", "src", "move"),
			Valu("store2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "use2", "dst", "store1"),
			Exit("store2")))
	flagalloc(f.f)
	regalloc(f.f)
	checkFunc(f.f)
	// LoweredMoveLoop clobbers its arguments, so there must be a copy of "src" and "dst" somewhere
	// so we still have that value available at the stores.
	if n := numCopies(f.blocks["entry"]); n != 2 {
		// Dump the whole function to aid debugging.
		fmt.Printf("%s\n", f.f.String())
		t.Errorf("got %d copies, want 2", n)
	}
}
   267  
   268  func TestNoRematerializeDeadConstant(t *testing.T) {
   269  	c := testConfigARM64(t)
   270  	f := c.Fun("b1",
   271  		Bloc("b1",
   272  			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
   273  			Valu("addr", OpArg, c.config.Types.Int32.PtrTo(), 0, c.Temp(c.config.Types.Int32.PtrTo())),
   274  			Valu("const", OpARM64MOVDconst, c.config.Types.Int32, -1, nil), // Original constant
   275  			Valu("cmp", OpARM64CMPconst, types.TypeFlags, 0, nil, "const"),
   276  			Goto("b2"),
   277  		),
   278  		Bloc("b2",
   279  			Valu("phi_mem", OpPhi, types.TypeMem, 0, nil, "mem", "callmem"),
   280  			Eq("cmp", "b6", "b3"),
   281  		),
   282  		Bloc("b3",
   283  			Valu("call", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "phi_mem"),
   284  			Valu("callmem", OpSelectN, types.TypeMem, 0, nil, "call"),
   285  			Eq("cmp", "b5", "b4"),
   286  		),
   287  		Bloc("b4", // A block where we don't really need to rematerialize the constant -1
   288  			Goto("b2"),
   289  		),
   290  		Bloc("b5",
   291  			Valu("user", OpAMD64MOVQstore, types.TypeMem, 0, nil, "addr", "const", "callmem"),
   292  			Exit("user"),
   293  		),
   294  		Bloc("b6",
   295  			Exit("phi_mem"),
   296  		),
   297  	)
   298  
   299  	regalloc(f.f)
   300  	checkFunc(f.f)
   301  
   302  	// Check that in block b4, there's no dead rematerialization of the constant -1
   303  	for _, v := range f.blocks["b4"].Values {
   304  		if v.Op == OpARM64MOVDconst && v.AuxInt == -1 {
   305  			t.Errorf("constant -1 rematerialized in loop block b4: %s", v.LongString())
   306  		}
   307  	}
   308  }
   309  
// numSpills reports the number of spill ops (OpStoreReg) in b.
func numSpills(b *Block) int {
	return numOps(b, OpStoreReg)
}
// numCopies reports the number of register-copy ops (OpCopy) in b.
func numCopies(b *Block) int {
	return numOps(b, OpCopy)
}
   316  func numOps(b *Block, op Op) int {
   317  	n := 0
   318  	for _, v := range b.Values {
   319  		if v.Op == op {
   320  			n++
   321  		}
   322  	}
   323  	return n
   324  }
   325  
   326  func TestRematerializeableRegCompatible(t *testing.T) {
   327  	c := testConfig(t)
   328  	f := c.Fun("entry",
   329  		Bloc("entry",
   330  			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
   331  			Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil),
   332  			Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"),
   333  			Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"),
   334  			Ret("res"),
   335  		),
   336  	)
   337  	regalloc(f.f)
   338  	checkFunc(f.f)
   339  	moveFound := false
   340  	for _, v := range f.f.Blocks[0].Values {
   341  		if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 {
   342  			moveFound = true
   343  		}
   344  	}
   345  	if !moveFound {
   346  		t.Errorf("Expects an Copy to be issued, but got: %+v", f.f)
   347  	}
   348  }
   349  

View as plain text