[GR-52445] AMD64MultiStackMove is now aware of intermediate killed stack slots.

PullRequest: graal/18751
mur47x111 committed Sep 16, 2024
2 parents a7cae01 + 90c8243 commit 7a2b91d
Showing 2 changed files with 56 additions and 34 deletions.
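
In a merged multi stack move, a later sub-move may read a slot that an earlier sub-move of the same instruction has already overwritten (an "intermediate killed" slot). Previously the optimization phase refused to merge such traces; with this change the killed slot travels in a new @Temp array (tmps) instead of the @Use inputs, so the register allocator no longer assumes the slot's incoming value is read. The following stand-alone sketch (plain Java, not Graal API; slot names and values are invented for illustration) shows why the incoming value of such a slot is dead:

```java
// Model stack slots as an int[] and replay two merged scratch-based moves:
//   slot1 <- slot0;  slot2 <- slot1;
// The second move's input (slot1) is the first move's result, i.e. an
// intermediate killed slot.
public class KilledSlotDemo {
    public static void main(String[] args) {
        int[] slots = {10, 20, 30}; // slot0, slot1, slot2
        int scratch;

        scratch = slots[0];
        slots[1] = scratch;  // slot1 is killed here: its old value (20) is dead
        scratch = slots[1];  // the next sub-move reads the NEW slot1 value ...
        slots[2] = scratch;  // ... so slot2 becomes 10, not 20

        // Declaring slot1 only as a @Use would claim the instruction reads its
        // incoming value; in fact it reads the value written within the same
        // instruction, which is what the new @Temp array records.
        System.out.println(java.util.Arrays.toString(slots)); // [10, 10, 10]
    }
}
```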
First changed file:
@@ -28,6 +28,12 @@
import static java.lang.Float.floatToRawIntBits;
import static jdk.graal.compiler.asm.amd64.AMD64Assembler.ConditionFlag.Equal;
import static jdk.graal.compiler.asm.amd64.AMD64Assembler.ConditionFlag.NotEqual;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.HINT;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.REG;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.STACK;
+ import static jdk.graal.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static jdk.vm.ci.code.ValueUtil.isStackSlot;
@@ -83,8 +89,8 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class MoveToRegOp extends AbstractMoveOp {
public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

- @Def({OperandFlag.REG, OperandFlag.STACK, OperandFlag.HINT}) protected AllocatableValue result;
- @Use({OperandFlag.REG, OperandFlag.STACK}) protected AllocatableValue input;
+ @Def({REG, STACK, HINT}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue input;

public MoveToRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
super(TYPE, moveKind);
@@ -107,8 +113,8 @@ public AllocatableValue getResult() {
public static final class MoveFromRegOp extends AbstractMoveOp {
public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

- @Def({OperandFlag.REG, OperandFlag.STACK}) protected AllocatableValue result;
- @Use({OperandFlag.REG, OperandFlag.HINT}) protected AllocatableValue input;
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG, HINT}) protected AllocatableValue input;

public MoveFromRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
super(TYPE, moveKind);
@@ -131,7 +137,7 @@ public AllocatableValue getResult() {
public static class MoveFromConstOp extends AMD64LIRInstruction implements StandardOp.LoadConstantOp {
public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);

- @Def({OperandFlag.REG, OperandFlag.STACK}) protected AllocatableValue result;
+ @Def({REG, STACK}) protected AllocatableValue result;
private final JavaConstant input;

public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
@@ -165,9 +171,9 @@ public AllocatableValue getResult() {
public static final class AMD64StackMove extends AMD64LIRInstruction implements StandardOp.ValueMoveOp {
public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);

- @Def({OperandFlag.STACK}) protected AllocatableValue result;
- @Use({OperandFlag.STACK, OperandFlag.HINT}) protected AllocatableValue input;
- @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
+ @Def({STACK}) protected AllocatableValue result;
+ @Use({STACK, HINT}) protected AllocatableValue input;
+ @Alive({STACK, UNINITIALIZED}) private AllocatableValue backupSlot;

private Register scratch;

@@ -219,16 +225,18 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);

- @Def({OperandFlag.STACK}) protected AllocatableValue[] results;
- @Use({OperandFlag.STACK}) protected Value[] inputs;
- @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
+ @Def({STACK}) protected AllocatableValue[] results;
+ @Use({STACK, ILLEGAL}) protected Value[] inputs;
+ @Temp({STACK, ILLEGAL}) protected Value[] tmps;
+ @Alive({STACK, UNINITIALIZED}) private AllocatableValue backupSlot;

private Register scratch;

- public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, AllocatableValue backupSlot) {
+ public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Value[] tmps, Register scratch, AllocatableValue backupSlot) {
super(TYPE);
this.results = results;
this.inputs = inputs;
+ this.tmps = tmps;
this.backupSlot = backupSlot;
this.scratch = scratch;
}
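
Design note: the results, inputs, and tmps arrays stay index-aligned. For a regular sub-move the source sits in inputs[i] and tmps[i] is Value.ILLEGAL; for a sub-move whose source slot was overwritten earlier in the same merged instruction, the source sits in tmps[i] and inputs[i] is Value.ILLEGAL. Listing the killed slot under @Temp rather than @Use tells the register allocator that the value is produced and consumed within this instruction rather than flowing into it.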
@@ -245,6 +253,12 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
move(backupKind, crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
for (int i = 0; i < results.length; i++) {
Value input = inputs[i];
+ if (Value.ILLEGAL.equals(input)) {
+     input = tmps[i];
+     GraalError.guarantee(!Value.ILLEGAL.equals(input), "Unstructured multi stack move: %s", this);
+ } else {
+     GraalError.guarantee(Value.ILLEGAL.equals(tmps[i]), "Unstructured multi stack move: %s", this);
+ }
AllocatableValue result = results[i];
// move stack slot
move((AMD64Kind) input.getPlatformKind(), crb, masm, scratch.asValue(input.getValueKind()), input);
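
The guarantees in the loop above enforce that exactly one of inputs[i] and tmps[i] is ILLEGAL at every index. A plain-Java restatement of that selection logic (illustration only; null stands in for Value.ILLEGAL, and all names are invented):

```java
public class EffectiveInputDemo {
    // Pick the value a sub-move actually reads: the regular input if present,
    // otherwise the temp entry for a slot killed earlier in the same op.
    static String effectiveInput(String[] inputs, String[] tmps, int i) {
        if (inputs[i] == null) {
            if (tmps[i] == null) {
                throw new AssertionError("unstructured multi stack move at index " + i);
            }
            return tmps[i];
        }
        if (tmps[i] != null) {
            throw new AssertionError("unstructured multi stack move at index " + i);
        }
        return inputs[i];
    }

    public static void main(String[] args) {
        String[] inputs = {"slot0", null};
        String[] tmps = {null, "slot1"};
        System.out.println(effectiveInput(inputs, tmps, 0)); // slot0
        System.out.println(effectiveInput(inputs, tmps, 1)); // slot1
    }
}
```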
@@ -258,8 +272,8 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class LeaOp extends AMD64LIRInstruction {
public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

- @Def({OperandFlag.REG}) protected AllocatableValue result;
- @Use({OperandFlag.COMPOSITE, OperandFlag.UNINITIALIZED}) protected AMD64AddressValue address;
+ @Def({REG}) protected AllocatableValue result;
+ @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;
private final OperandSize size;

public LeaOp(AllocatableValue result, AMD64AddressValue address, OperandSize size) {
@@ -283,7 +297,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class LeaDataOp extends AMD64LIRInstruction {
public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

- @Def({OperandFlag.REG}) protected AllocatableValue result;
+ @Def({REG}) protected AllocatableValue result;
private final DataPointerConstant data;

public LeaDataOp(AllocatableValue result, DataPointerConstant data) {
@@ -301,8 +315,8 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class StackLeaOp extends AMD64LIRInstruction {
public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

- @Def({OperandFlag.REG}) protected AllocatableValue result;
- @Use({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) protected AllocatableValue slot;
+ @Def({REG}) protected AllocatableValue result;
+ @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;

public StackLeaOp(AllocatableValue result, AllocatableValue slot) {
super(TYPE);
@@ -336,7 +350,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
public static final class NullCheckOp extends AMD64LIRInstruction implements StandardOp.NullCheck {
public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

- @Use({OperandFlag.COMPOSITE}) protected AMD64AddressValue address;
+ @Use({COMPOSITE}) protected AMD64AddressValue address;
@State protected LIRFrameState state;

public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
@@ -369,7 +383,7 @@ public static final class CompareAndSwapOp extends AMD64LIRInstruction {
private final AMD64Kind accessKind;

@Def protected AllocatableValue result;
- @Use({OperandFlag.COMPOSITE}) protected AMD64AddressValue address;
+ @Use({COMPOSITE}) protected AMD64AddressValue address;
@Use protected AllocatableValue cmpValue;
@Use protected AllocatableValue newValue;

@@ -416,7 +430,7 @@ public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
private final AMD64Kind accessKind;

@Def protected AllocatableValue result;
- @Alive({OperandFlag.COMPOSITE}) protected AMD64AddressValue address;
+ @Alive({COMPOSITE}) protected AMD64AddressValue address;
@Use protected AllocatableValue delta;

public AtomicReadAndAddOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
@@ -459,7 +473,7 @@ public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
private final AMD64Kind accessKind;

@Def protected AllocatableValue result;
- @Alive({OperandFlag.COMPOSITE}) protected AMD64AddressValue address;
+ @Alive({COMPOSITE}) protected AMD64AddressValue address;
@Use protected AllocatableValue newValue;

public AtomicReadAndWriteOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
@@ -843,9 +857,9 @@ public abstract static class PointerCompressionOp extends AMD64LIRInstruction {
protected final CompressEncoding encoding;
protected final boolean nonNull;

- @Def({OperandFlag.REG, OperandFlag.HINT}) private AllocatableValue result;
- @Use({OperandFlag.REG, OperandFlag.CONST}) private Value input;
- @Alive({OperandFlag.REG, OperandFlag.ILLEGAL, OperandFlag.UNINITIALIZED}) private AllocatableValue baseRegister;
+ @Def({REG, HINT}) private AllocatableValue result;
+ @Use({REG, OperandFlag.CONST}) private Value input;
+ @Alive({REG, ILLEGAL, UNINITIALIZED}) private AllocatableValue baseRegister;

protected PointerCompressionOp(LIRInstructionClass<? extends PointerCompressionOp> type,
AllocatableValue result,
@@ -1017,8 +1031,8 @@ public static void emitUncompressCode(AMD64MacroAssembler masm, Register resReg,
}

private abstract static class ZeroNullConversionOp extends AMD64LIRInstruction {
- @Def({OperandFlag.REG, OperandFlag.HINT}) protected AllocatableValue result;
- @Use({OperandFlag.REG}) protected AllocatableValue input;
+ @Def({REG, HINT}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue input;

protected ZeroNullConversionOp(LIRInstructionClass<? extends ZeroNullConversionOp> type, AllocatableValue result, AllocatableValue input) {
super(type);
Second changed file:
@@ -31,6 +31,7 @@
import jdk.graal.compiler.core.common.cfg.BasicBlock;
import jdk.graal.compiler.debug.CounterKey;
import jdk.graal.compiler.debug.DebugContext;
+ import jdk.graal.compiler.debug.GraalError;
import jdk.graal.compiler.lir.LIR;
import jdk.graal.compiler.lir.LIRInstruction;
import jdk.graal.compiler.lir.RedundantMoveElimination;
@@ -82,6 +83,7 @@ private static class Closure {
private Register reg = null;
private List<AllocatableValue> dst;
private List<Value> src;
+ private List<Value> tmp;
private AllocatableValue slot;
private boolean removed = false;

@@ -92,30 +94,35 @@ public void process(DebugContext debug, List<LIRInstruction> instructions) {
if (isStackMove(inst)) {
AMD64Move.AMD64StackMove move = asStackMove(inst);

- if (reg != null && !reg.equals(move.getScratchRegister()) ||
-                 // can't use own output as an input (GR-52445)
-                 dst != null && dst.contains(move.getInput())) {
+ if (reg != null && !reg.equals(move.getScratchRegister())) {
// end of trace & start of new
replaceStackMoves(debug, instructions);
}

// lazy initialize
if (dst == null) {
- assert src == null;
+ GraalError.guarantee(src == null && tmp == null, "dst, src, tmp should be initialized simultaneously.");
dst = new ArrayList<>();
src = new ArrayList<>();
+ tmp = new ArrayList<>();
}

dst.add(move.getResult());
- src.add(move.getInput());
+ Value in = move.getInput();
+ if (dst.contains(in)) {
+     tmp.add(in);
+     src.add(Value.ILLEGAL);
+ } else {
+     tmp.add(Value.ILLEGAL);
+     src.add(in);
+ }

if (begin == NONE) {
// trace begin
begin = i;
reg = move.getScratchRegister();
slot = move.getBackupSlot();
}

} else if (begin != NONE) {
// end of trace
replaceStackMoves(debug, instructions);
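
The routing above replaces the earlier workaround, which simply ended the trace whenever a move's input matched an already-recorded output (the old GR-52445 guard removed higher up in this hunk). Now such an input is recorded in tmp with an ILLEGAL placeholder in src, keeping the three lists index-aligned. A stand-alone replay of that bookkeeping (plain Java; strings stand in for stack slots and null for Value.ILLEGAL):

```java
import java.util.ArrayList;
import java.util.List;

public class RoutingDemo {
    public static void main(String[] args) {
        // Each pair is {result, input}; the second move reads the first's result.
        String[][] moves = {{"s1", "s0"}, {"s2", "s1"}, {"s3", "s0"}};
        List<String> dst = new ArrayList<>();
        List<String> src = new ArrayList<>();
        List<String> tmp = new ArrayList<>();
        for (String[] m : moves) {
            dst.add(m[0]);
            if (dst.contains(m[1])) { // input slot already killed in this trace
                tmp.add(m[1]);
                src.add(null);
            } else {
                tmp.add(null);
                src.add(m[1]);
            }
        }
        System.out.println("dst=" + dst); // dst=[s1, s2, s3]
        System.out.println("src=" + src); // src=[s0, null, s0]
        System.out.println("tmp=" + tmp); // tmp=[null, s1, null]
    }
}
```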
@@ -125,13 +132,13 @@ public void process(DebugContext debug, List<LIRInstruction> instructions) {
if (removed) {
instructions.removeAll(Collections.singleton(null));
}

}

private void replaceStackMoves(DebugContext debug, List<LIRInstruction> instructions) {
int size = dst.size();
if (size > 1) {
- AMD64Move.AMD64MultiStackMove multiMove = new AMD64Move.AMD64MultiStackMove(dst.toArray(new AllocatableValue[size]), src.toArray(new AllocatableValue[size]), reg, slot);
+ AMD64Move.AMD64MultiStackMove multiMove = new AMD64Move.AMD64MultiStackMove(dst.toArray(new AllocatableValue[size]), src.toArray(new AllocatableValue[size]),
+                 tmp.toArray(new AllocatableValue[size]), reg, slot);
// replace first instruction
instructions.set(begin, multiMove);
// and null out others
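
With tmp threaded through, replaceStackMoves still collapses a trace of two or more stack moves into a single AMD64MultiStackMove, replacing the first instruction and nulling out the rest (which the removeAll pass above sweeps up), but it can now also merge traces containing intermediate killed slots instead of splitting at them.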
@@ -143,6 +150,7 @@ private void replaceStackMoves(DebugContext debug, List<LIRInstruction> instructions) {
// reset
dst.clear();
src.clear();
+ tmp.clear();
begin = NONE;
reg = null;
slot = null;
