Compare commits: split...globalisel (1 commit, SHA1 0943946b0b)
@@ -673,6 +673,14 @@ public:
  bool matchSDivByConst(MachineInstr &MI);
  void applySDivByConst(MachineInstr &MI);

  /// Given a G_SDIV \p MI expressing a signed division by a pow2 constant,
  /// return expressions that implement it by shifting.
  bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
  void applySDivByPow2(MachineInstr &MI);

  /// Given a G_UDIV \p MI expressing an unsigned division by a pow2 constant,
  /// return expressions that implement it by shifting.
  void applyUDivByPow2(MachineInstr &MI);

  // (G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
  bool matchUMulHToLShr(MachineInstr &MI);
  void applyUMulHToLShr(MachineInstr &MI);
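
For context (an editorial note, not part of the diff): the shift-based lowering these helpers implement rests on the standard identity for truncating signed division by a power of two. With bit width B, divisor 2^k, arithmetic shift >>s and logical shift >>u:

    x sdiv 2^k == (x + ((x >>s (B - 1)) >>u (B - k))) >>s k

The inner term is 0 for non-negative x and 2^k - 1 for negative x; adding that bias before the final arithmetic shift turns the shift's floor rounding into the round-toward-zero behaviour sdiv requires. A negated power-of-two divisor, -2^k, is the same sequence followed by a negation.
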
@@ -179,6 +179,7 @@ def FmArcp : MIFlagEnum<"FmArcp">;
def FmContract : MIFlagEnum<"FmContract">;
def FmAfn : MIFlagEnum<"FmAfn">;
def FmReassoc : MIFlagEnum<"FmReassoc">;
def IsExact : MIFlagEnum<"IsExact">;

def MIFlags;
// def not; -> Already defined as an SDNode

@@ -1036,7 +1037,20 @@ def sdiv_by_const : GICombineRule<
   [{ return Helper.matchSDivByConst(*${root}); }]),
  (apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;

def sdiv_by_pow2 : GICombineRule<
  (defs root:$root),
  (match (G_SDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
   [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/true); }]),
  (apply [{ Helper.applySDivByPow2(*${root}); }])>;

def udiv_by_pow2 : GICombineRule<
  (defs root:$root),
  (match (G_UDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
   [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/false); }]),
  (apply [{ Helper.applyUDivByPow2(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const,
                                      sdiv_by_pow2, udiv_by_pow2]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
@@ -5270,6 +5270,144 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
  return MIB.buildMul(Ty, Res, Factor);
}

bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
  assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
          MI.getOpcode() == TargetOpcode::G_UDIV) &&
         "Expected SDIV or UDIV");
  auto &Div = cast<GenericMachineInstr>(MI);
  Register RHS = Div.getReg(2);
  auto MatchPow2 = [&](const Constant *C) {
    auto *CI = dyn_cast<ConstantInt>(C);
    return CI && (CI->getValue().isPowerOf2() ||
                  (IsSigned && CI->getValue().isNegatedPowerOf2()));
  };
  return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
}
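
Editorial aside: the predicate accepts 2^k always, and -2^k only for signed division. A minimal standalone C++ sketch of the same check, using plain integers instead of APInt (the helper name is mine):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Mirrors MatchPow2 above: powers of two always match; for signed
    // division a negated power of two (-2^k) is accepted too, since it
    // only adds a final negation to the shift sequence.
    bool isPow2Divisor(int32_t C, bool IsSigned) {
      uint32_t U = uint32_t(C);
      // 0u - U avoids the UB of negating INT_MIN as a signed value.
      return std::has_single_bit(U) ||
             (IsSigned && C < 0 && std::has_single_bit(0u - U));
    }

    int main() {
      assert(isPow2Divisor(8, false) && isPow2Divisor(8, true));
      assert(!isPow2Divisor(-8, false) && isPow2Divisor(-8, true));
      assert(!isPow2Divisor(12, true) && !isPow2Divisor(0, true));
    }
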

void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  auto &SDiv = cast<GenericMachineInstr>(MI);
  Register Dst = SDiv.getReg(0);
  Register LHS = SDiv.getReg(1);
  Register RHS = SDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);

  Builder.setInstrAndDebugLoc(MI);

  // Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of
  // 2, to the following version:
  //
  //   %c1 = G_CTTZ %rhs
  //   %inexact = G_SUB $bitwidth, %c1
  //   %sign = G_ASHR %lhs, $(bitwidth - 1)
  //   %lshr = G_LSHR %sign, %inexact
  //   %add = G_ADD %lhs, %lshr
  //   %sra = G_ASHR %add, %c1
  //   %sra = G_SELECT %isoneorallones, %lhs, %sra
  //   %zero = G_CONSTANT $0
  //   %neg = G_NEG %sra
  //   %isneg = G_ICMP SLT %rhs, %zero
  //   %res = G_SELECT %isneg, %neg, %sra
  //
  // When %rhs is a constant integer or a splat vector, we can check its value
  // at compile time, so the two G_ICMP checks, as well as the corresponding
  // non-taken select arms, can be eliminated. This generates compact code even
  // without any constant folding afterwards. When %rhs is not a splat vector,
  // we have to generate those checks via instructions.

  unsigned Bitwidth = Ty.getScalarSizeInBits();
  auto Zero = Builder.buildConstant(Ty, 0);

  // TODO: It is not necessary to have this specialized version. We need it
  // *for now* because the folding/combine can't handle it. Remove this large
  // conditional statement once we can properly fold the two G_ICMPs.
  if (auto RHSC = getConstantOrConstantSplatVector(RHS)) {
    // Special case: (sdiv X, 1) -> X
    if (RHSC->isOne()) {
      replaceSingleDefInstWithReg(MI, LHS);
      return;
    }
    // Special case: (sdiv X, -1) -> 0 - X
    if (RHSC->isAllOnes()) {
      auto Neg = Builder.buildNeg(Ty, LHS);
      replaceSingleDefInstWithReg(MI, Neg->getOperand(0).getReg());
      return;
    }

    unsigned TrailingZeros = RHSC->countTrailingZeros();
    auto C1 = Builder.buildConstant(ShiftAmtTy, TrailingZeros);
    auto Inexact = Builder.buildConstant(ShiftAmtTy, Bitwidth - TrailingZeros);
    auto Sign = Builder.buildAShr(
        Ty, LHS, Builder.buildConstant(ShiftAmtTy, Bitwidth - 1));
    // Add (LHS < 0) ? abs(RHS) - 1 : 0;
    auto Lshr = Builder.buildLShr(Ty, Sign, Inexact);
    auto Add = Builder.buildAdd(Ty, LHS, Lshr);
    auto Shr = Builder.buildAShr(Ty, Add, C1);

    // If dividing by a positive value, we're done. Otherwise, the result must
    // be negated.
    auto Res = RHSC->isNegative() ? Builder.buildNeg(Ty, Shr) : Shr;
    replaceSingleDefInstWithReg(MI, Res->getOperand(0).getReg());
    return;
  }

  // RHS is not a splat vector. Build the above version with instructions.
  auto Bits = Builder.buildConstant(ShiftAmtTy, Bitwidth);
  auto C1 = Builder.buildCTTZ(Ty, RHS);
  C1 = Builder.buildZExtOrTrunc(ShiftAmtTy, C1);
  auto Inexact = Builder.buildSub(ShiftAmtTy, Bits, C1);
  auto Sign = Builder.buildAShr(
      Ty, LHS, Builder.buildConstant(ShiftAmtTy, Bitwidth - 1));

  // Add (LHS < 0) ? abs(RHS) - 1 : 0; the bias is a *logical* shift right of
  // the sign mask, as in the splat path above.
  auto Lshr = Builder.buildLShr(Ty, Sign, Inexact);
  auto Add = Builder.buildAdd(Ty, LHS, Lshr);
  auto Shr = Builder.buildAShr(Ty, Add, C1);

  // Use a scalar i1 condition type when Ty is scalar.
  LLT CCVT =
      Ty.isVector() ? LLT::vector(Ty.getElementCount(), 1) : LLT::scalar(1);

  auto One = Builder.buildConstant(Ty, 1);
  auto AllOnes =
      Builder.buildConstant(Ty, APInt::getAllOnes(Ty.getScalarSizeInBits()));
  auto IsOne = Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, One);
  auto IsAllOnes =
      Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, AllOnes);
  auto IsOneOrAllOnes = Builder.buildOr(CCVT, IsOne, IsAllOnes);
  Shr = Builder.buildSelect(Ty, IsOneOrAllOnes, LHS, Shr);

  // If dividing by a positive value, we're done. Otherwise, the result must
  // be negated. Note the condition tests the divisor, not the dividend.
  auto Neg = Builder.buildNeg(Ty, Shr);
  auto IsNeg = Builder.buildICmp(CmpInst::Predicate::ICMP_SLT, CCVT, RHS, Zero);
  Builder.buildSelect(MI.getOperand(0).getReg(), IsNeg, Neg, Shr);
}
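
The comment block above is easiest to sanity-check outside the compiler. A self-contained C++ model of the splat-constant path (function name is mine; divisor magnitude 1 is special-cased in the patch, so TrailingZeros is assumed in [1, 31]; relies on arithmetic right shift for signed values, guaranteed since C++20):

    #include <cassert>
    #include <cstdint>

    // Scalar model of the shift sequence applySDivByPow2 emits for a splat
    // power-of-two divisor whose trailing-zero count is TrailingZeros.
    int32_t sdivByPow2(int32_t LHS, unsigned TrailingZeros,
                       bool NegativeDivisor) {
      const unsigned Bitwidth = 32;
      int32_t Sign = LHS >> (Bitwidth - 1);           // 0 or -1 (all ones)
      // Bias: abs(divisor) - 1 when LHS is negative, 0 otherwise.
      uint32_t Srl = uint32_t(Sign) >> (Bitwidth - TrailingZeros);
      // Adding the bias makes the arithmetic shift round toward zero.
      int32_t Sra = (LHS + int32_t(Srl)) >> TrailingZeros;
      return NegativeDivisor ? -Sra : Sra; // negative divisor flips the sign
    }

    int main() {
      for (int32_t N : {7, -7, 4096, -4097, 0, 1 << 20}) {
        assert(sdivByPow2(N, 3, /*NegativeDivisor=*/false) == N / 8);
        assert(sdivByPow2(N, 3, /*NegativeDivisor=*/true) == N / -8);
      }
    }
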

void CombinerHelper::applyUDivByPow2(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
  auto &UDiv = cast<GenericMachineInstr>(MI);
  Register Dst = UDiv.getReg(0);
  Register LHS = UDiv.getReg(1);
  Register RHS = UDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);

  Builder.setInstrAndDebugLoc(MI);

  // TODO: It is not necessary to have this specialized version. We need it
  // *for now* because the folding/combine can't handle CTTZ.
  if (auto RHSC = getConstantOrConstantSplatVector(RHS)) {
    auto C1 = Builder.buildConstant(ShiftAmtTy, RHSC->countTrailingZeros());
    auto Res = Builder.buildLShr(Ty, LHS, C1);
    replaceSingleDefInstWithReg(MI, Res->getOperand(0).getReg());
    return;
  }

  auto C1 = Builder.buildCTTZ(Ty, RHS);
  C1 = Builder.buildZExtOrTrunc(ShiftAmtTy, C1);
  Builder.buildLShr(MI.getOperand(0).getReg(), LHS, C1);
}
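
The unsigned case reduces to a single logical shift by the divisor's trailing-zero count. A quick standalone check (C++20 for std::countr_zero; the helper name is mine):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // udiv by a power of two is a logical right shift by cttz(divisor).
    uint32_t udivByPow2(uint32_t LHS, uint32_t Pow2Divisor) {
      return LHS >> std::countr_zero(Pow2Divisor);
    }

    int main() {
      for (uint32_t N : {0u, 7u, 4096u, 0xffffffffu})
        assert(udivByPow2(N, 8) == N / 8);
    }
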

bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UMULH);
  Register RHS = MI.getOperand(2).getReg();
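
(The rest of matchUMulHToLShr is truncated in this excerpt.) For reference, the identity it exploits, umulh(x, 2^c) == x >> (bitwidth - c) for 0 < c < bitwidth, is easy to verify standalone (helper names are mine):

    #include <cassert>
    #include <cstdint>

    // High 32 bits of the full 64-bit product.
    uint32_t umulh(uint32_t A, uint32_t B) {
      return uint32_t((uint64_t(A) * uint64_t(B)) >> 32);
    }

    int main() {
      for (uint32_t X : {0u, 1u, 0xdeadbeefu, 0xffffffffu})
        for (unsigned C : {1u, 12u, 31u})
          assert(umulh(X, 1u << C) == X >> (32 - C));
    }
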
@@ -670,36 +670,19 @@ define amdgpu_kernel void @bfe_sext_in_reg_i24(ptr addrspace(1) %out, ptr addrsp
define amdgpu_kernel void @simplify_demanded_bfe_sdiv(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; GFX6-LABEL: simplify_demanded_bfe_sdiv:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, 2.0
; GFX6-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_load_dword s0, s[6:7], 0x0
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: v_mul_lo_u32 v1, v0, -2
; GFX6-NEXT: s_load_dword s3, s[2:3], 0x0
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_bfe_i32 s0, s0, 0x100001
; GFX6-NEXT: s_ashr_i32 s2, s0, 31
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: s_add_i32 s0, s0, s2
; GFX6-NEXT: s_xor_b32 s0, s0, s2
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT: v_subrev_i32_e64 v2, s[0:1], 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT: v_xor_b32_e32 v0, s2, v0
; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_bfe_i32 s3, s3, 0x100001
; GFX6-NEXT: s_ashr_i32 s4, s3, 31
; GFX6-NEXT: s_lshr_b32 s4, s4, 31
; GFX6-NEXT: s_add_i32 s3, s3, s4
; GFX6-NEXT: s_ashr_i32 s3, s3, 1
; GFX6-NEXT: v_mov_b32_e32 v0, s3
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
%src = load i32, ptr addrspace(1) %in, align 4
%bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)
@@ -279,125 +279,27 @@ define i32 @v_sdiv_i32_pow2k_denom(i32 %num) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, 0x45800000
; CHECK-NEXT: v_mov_b32_e32 v3, 0xfffff000
; CHECK-NEXT: v_mov_b32_e32 v4, 0x1000
; CHECK-NEXT: v_lshrrev_b32_e32 v1, 20, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; CHECK-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v2
; CHECK-NEXT: v_mul_lo_u32 v3, v2, v3
; CHECK-NEXT: v_mul_hi_u32 v3, v2, v3
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2
; CHECK-NEXT: v_lshlrev_b32_e32 v3, 12, v2
; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v2
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
; CHECK-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[4:5]
; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, 0x1000, v0
; CHECK-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[4:5]
; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v2
; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i32 %num, 4096
ret i32 %result
}

define <2 x i32> @v_sdiv_v2i32_pow2k_denom(<2 x i32> %num) {
; GISEL-LABEL: v_sdiv_v2i32_pow2k_denom:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v0
; GISEL-NEXT: v_mov_b32_e32 v3, 0x1000
; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
; GISEL-NEXT: v_mov_b32_e32 v5, 0xfffff000
; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v1
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v6
; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
; GISEL-NEXT: v_mul_f32_e32 v4, 0x4f7ffffe, v4
; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
; GISEL-NEXT: v_cvt_u32_f32_e32 v4, v4
; GISEL-NEXT: v_mul_lo_u32 v5, v4, v5
; GISEL-NEXT: v_mul_hi_u32 v5, v4, v5
; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
; GISEL-NEXT: v_mul_hi_u32 v5, v0, v4
; GISEL-NEXT: v_mul_hi_u32 v4, v1, v4
; GISEL-NEXT: v_lshlrev_b32_e32 v7, 12, v5
; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v5
; GISEL-NEXT: v_lshlrev_b32_e32 v9, 12, v4
; GISEL-NEXT: v_add_i32_e32 v10, vcc, 1, v4
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
; GISEL-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[4:5]
; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v0, v3
; GISEL-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v3
; GISEL-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7]
; GISEL-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
; GISEL-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v5
; GISEL-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v4
; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
; GISEL-NEXT: v_cndmask_b32_e32 v0, v5, v7, vcc
; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
; GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc
; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
; GISEL-NEXT: s_setpc_b64 s[30:31]
;
; CGP-LABEL: v_sdiv_v2i32_pow2k_denom:
; CGP: ; %bb.0:
; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CGP-NEXT: v_ashrrev_i32_e32 v2, 31, v0
; CGP-NEXT: v_rcp_iflag_f32_e32 v3, 0x45800000
; CGP-NEXT: v_mov_b32_e32 v4, 0xfffff000
; CGP-NEXT: v_mov_b32_e32 v5, 0x1000
; CGP-NEXT: v_ashrrev_i32_e32 v6, 31, v1
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; CGP-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v6
; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
; CGP-NEXT: v_cvt_u32_f32_e32 v3, v3
; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
; CGP-NEXT: v_mul_lo_u32 v4, v3, v4
; CGP-NEXT: v_mul_hi_u32 v4, v3, v4
; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v4
; CGP-NEXT: v_mul_hi_u32 v4, v0, v3
; CGP-NEXT: v_mul_hi_u32 v3, v1, v3
; CGP-NEXT: v_lshlrev_b32_e32 v7, 12, v4
; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v4
; CGP-NEXT: v_lshlrev_b32_e32 v9, 12, v3
; CGP-NEXT: v_add_i32_e32 v10, vcc, 1, v3
; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
; CGP-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[4:5]
; CGP-NEXT: v_sub_i32_e32 v7, vcc, v0, v5
; CGP-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v5
; CGP-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[6:7]
; CGP-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
; CGP-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
; CGP-NEXT: v_add_i32_e32 v7, vcc, 1, v4
; CGP-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v3
; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
; CGP-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc
; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5
; CGP-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
; CGP-NEXT: s_setpc_b64 s[30:31]
; CHECK-LABEL: v_sdiv_v2i32_pow2k_denom:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v0
; CHECK-NEXT: v_ashrrev_i32_e32 v3, 31, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
; CHECK-NEXT: v_lshrrev_b32_e32 v3, 20, v3
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v3
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 12, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i32> %num, <i32 4096, i32 4096>
ret <2 x i32> %result
}
@@ -884,3 +786,13 @@ define <2 x i32> @v_sdiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
%result = sdiv <2 x i32> %num.mask, %den.mask
ret <2 x i32> %result
}

define i32 @v_sdiv_i32_exact(i32 %num) {
; CHECK-LABEL: v_sdiv_i32_exact:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv exact i32 %num, 4096
ret i32 %result
}
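
Editorial note: 'sdiv exact' asserts the division has no remainder, so the round-toward-zero bias is always zero and a single arithmetic shift is already correct; this is why the combine rules above guard on (MIFlags (not IsExact)) and leave exact divisions to a simpler lowering. A quick standalone check:

    #include <cassert>
    #include <cstdint>

    int main() {
      // When N is a multiple of 4096, the plain arithmetic shift agrees
      // with truncating division for both signs.
      for (int32_t N : {0, 4096, -4096, 40960, -409600})
        assert((N >> 12) == N / 4096);
    }
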
@@ -999,126 +999,11 @@ define i64 @v_sdiv_i64_pow2k_denom(i64 %num) {
; CHECK-LABEL: v_sdiv_i64_pow2k_denom:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_cvt_f32_u32_e32 v2, 0x1000
; CHECK-NEXT: v_cvt_f32_ubyte0_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v6, 0xfffff000
; CHECK-NEXT: v_mac_f32_e32 v2, 0x4f800000, v3
; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, v2
; CHECK-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
; CHECK-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
; CHECK-NEXT: v_trunc_f32_e32 v4, v3
; CHECK-NEXT: v_mac_f32_e32 v2, 0xcf800000, v4
; CHECK-NEXT: v_cvt_u32_f32_e32 v5, v2
; CHECK-NEXT: v_cvt_u32_f32_e32 v7, v4
; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
; CHECK-NEXT: v_mul_hi_u32 v8, v5, v2
; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
; CHECK-NEXT: v_mul_lo_u32 v4, v7, v2
; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
; CHECK-NEXT: v_mul_lo_u32 v9, v5, v3
; CHECK-NEXT: v_mul_lo_u32 v10, v7, v3
; CHECK-NEXT: v_mul_hi_u32 v11, v5, v3
; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v9
; CHECK-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v10, v2
; CHECK-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v8
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v4, vcc, v9, v4
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v11
; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v8, vcc, v10, v8
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v4, vcc, v8, v4
; CHECK-NEXT: v_add_i32_e32 v3, vcc, v3, v4
; CHECK-NEXT: v_add_i32_e32 v5, vcc, v5, v2
; CHECK-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
; CHECK-NEXT: v_ashrrev_i32_e32 v6, 31, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v6
; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v1, v6, vcc
; CHECK-NEXT: v_xor_b32_e32 v4, v0, v6
; CHECK-NEXT: v_mul_lo_u32 v0, v7, v2
; CHECK-NEXT: v_mul_lo_u32 v8, v5, v3
; CHECK-NEXT: v_xor_b32_e32 v9, v1, v6
; CHECK-NEXT: v_mul_hi_u32 v1, v5, v2
; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v8
; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; CHECK-NEXT: v_mul_lo_u32 v1, v7, v3
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v8, v0
; CHECK-NEXT: v_mul_hi_u32 v8, v5, v3
; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v2
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v8
; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v8
; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v1, vcc, v2, v1
; CHECK-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v7, v1, vcc
; CHECK-NEXT: v_mul_lo_u32 v2, v9, v0
; CHECK-NEXT: v_mul_lo_u32 v3, v4, v1
; CHECK-NEXT: v_mul_hi_u32 v7, v4, v0
; CHECK-NEXT: v_mul_hi_u32 v0, v9, v0
; CHECK-NEXT: v_mov_b32_e32 v5, 0x1000
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v7
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_mul_lo_u32 v7, v9, v1
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CHECK-NEXT: v_mul_hi_u32 v3, v4, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v7, v0
; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v3, vcc, v7, v3
; CHECK-NEXT: v_add_i32_e32 v7, vcc, v0, v2
; CHECK-NEXT: v_mul_hi_u32 v8, v9, v1
; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v7, 0
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CHECK-NEXT: v_add_i32_e32 v3, vcc, v8, v2
; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v3, v[1:2]
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0
; CHECK-NEXT: v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
; CHECK-NEXT: v_sub_i32_e64 v1, s[4:5], v9, v1
; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[4:5]
; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cndmask_b32_e64 v2, -1, v4, s[4:5]
; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v7
; CHECK-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; CHECK-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; CHECK-NEXT: v_add_i32_e32 v1, vcc, 1, v4
; CHECK-NEXT: v_addc_u32_e32 v5, vcc, 0, v8, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
; CHECK-NEXT: v_cndmask_b32_e32 v1, v8, v5, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; CHECK-NEXT: v_cndmask_b32_e32 v0, v7, v0, vcc
; CHECK-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; CHECK-NEXT: v_xor_b32_e32 v0, v0, v6
; CHECK-NEXT: v_xor_b32_e32 v1, v1, v6
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; CHECK-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %num, 4096
ret i64 %result
@@ -1128,473 +1013,31 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
; GISEL-LABEL: v_sdiv_v2i64_pow2k_denom:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
; GISEL-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
; GISEL-NEXT: s_subb_u32 s7, 0, 0
; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
; GISEL-NEXT: v_trunc_f32_e32 v7, v5
; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
; GISEL-NEXT: v_cvt_u32_f32_e32 v6, v4
; GISEL-NEXT: v_cvt_u32_f32_e32 v7, v7
; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v6, 0
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[5:6]
; GISEL-NEXT: v_mul_lo_u32 v5, v7, v4
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
; GISEL-NEXT: v_mul_hi_u32 v9, v6, v4
; GISEL-NEXT: v_mul_hi_u32 v4, v7, v4
; GISEL-NEXT: v_mul_lo_u32 v10, v6, v8
; GISEL-NEXT: v_mul_lo_u32 v11, v7, v8
; GISEL-NEXT: v_mul_hi_u32 v12, v6, v8
; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v10, v5
; GISEL-NEXT: v_add_i32_e32 v4, vcc, v11, v4
; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v12
; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v10
; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
; GISEL-NEXT: v_add_i32_e32 v11, vcc, v6, v4
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v11, 0
; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc
; GISEL-NEXT: v_mov_b32_e32 v4, v9
; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s6, v5, v[4:5]
; GISEL-NEXT: v_ashrrev_i32_e32 v4, 31, v1
; GISEL-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; GISEL-NEXT: v_ashrrev_i32_e32 v5, 31, v3
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s7, v11, v[9:10]
; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
; GISEL-NEXT: v_xor_b32_e32 v10, v0, v4
; GISEL-NEXT: v_mul_lo_u32 v0, v5, v8
; GISEL-NEXT: v_mul_lo_u32 v12, v11, v9
; GISEL-NEXT: v_xor_b32_e32 v13, v1, v4
; GISEL-NEXT: v_mul_hi_u32 v1, v11, v8
; GISEL-NEXT: v_mul_hi_u32 v8, v5, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v12
; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_mul_lo_u32 v1, v5, v9
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v12, v0
; GISEL-NEXT: v_mul_hi_u32 v12, v11, v9
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v8
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v12
; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
; GISEL-NEXT: v_mul_hi_u32 v9, v5, v9
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v9, v1
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; GISEL-NEXT: v_mul_lo_u32 v8, v13, v0
; GISEL-NEXT: v_mul_lo_u32 v9, v10, v1
; GISEL-NEXT: v_mul_hi_u32 v11, v10, v0
; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
; GISEL-NEXT: v_mov_b32_e32 v5, 0x1000
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v11
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_mul_lo_u32 v11, v13, v1
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
; GISEL-NEXT: v_mul_hi_u32 v9, v10, v1
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v9
; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v9, vcc, v11, v9
; GISEL-NEXT: v_add_i32_e32 v11, vcc, v0, v8
; GISEL-NEXT: v_mul_hi_u32 v12, v13, v1
; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v11, 0
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
; GISEL-NEXT: v_add_i32_e32 v12, vcc, v12, v8
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v5, v12, v[1:2]
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v10, v0
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], 0, v11, v[8:9]
; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
; GISEL-NEXT: s_subb_u32 s7, 0, 0
; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], v13, v8, vcc
; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], v13, v8
; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v0, v5
; GISEL-NEXT: v_cndmask_b32_e64 v10, -1, v9, s[4:5]
; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v1, vcc
; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v11
; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v6, 0
; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v12, vcc
; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v5
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
; GISEL-NEXT: v_cndmask_b32_e32 v15, -1, v8, vcc
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[1:2]
; GISEL-NEXT: v_add_i32_e32 v1, vcc, 1, v13
; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
; GISEL-NEXT: v_addc_u32_e32 v16, vcc, 0, v14, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
; GISEL-NEXT: v_cndmask_b32_e32 v9, v13, v1, vcc
; GISEL-NEXT: v_mul_lo_u32 v1, v7, v0
; GISEL-NEXT: v_mul_lo_u32 v13, v6, v8
; GISEL-NEXT: v_mul_hi_u32 v15, v6, v0
; GISEL-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc
; GISEL-NEXT: v_mul_hi_u32 v0, v7, v0
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v13
; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v15
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GISEL-NEXT: v_mul_lo_u32 v15, v7, v8
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
; GISEL-NEXT: v_mul_hi_u32 v13, v6, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v15, v0
; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v13
; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
; GISEL-NEXT: v_add_i32_e32 v8, vcc, v6, v0
; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v7, v1, vcc
; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GISEL-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
; GISEL-NEXT: v_xor_b32_e32 v1, v9, v4
; GISEL-NEXT: v_ashrrev_i32_e32 v9, 31, v3
; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s7, v8, v[6:7]
; GISEL-NEXT: v_cndmask_b32_e32 v10, v12, v14, vcc
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v9
; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v9, vcc
; GISEL-NEXT: v_xor_b32_e32 v11, v2, v9
; GISEL-NEXT: v_mul_lo_u32 v2, v13, v0
; GISEL-NEXT: v_mul_lo_u32 v7, v8, v6
; GISEL-NEXT: v_xor_b32_e32 v12, v3, v9
; GISEL-NEXT: v_mul_hi_u32 v3, v8, v0
; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_mul_lo_u32 v3, v13, v6
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v7, v2
; GISEL-NEXT: v_mul_hi_u32 v7, v8, v6
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v3, v0
; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
; GISEL-NEXT: v_mul_hi_u32 v6, v13, v6
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v6, v2
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
; GISEL-NEXT: v_addc_u32_e32 v2, vcc, v13, v2, vcc
; GISEL-NEXT: v_mul_lo_u32 v3, v12, v0
; GISEL-NEXT: v_mul_lo_u32 v6, v11, v2
; GISEL-NEXT: v_mul_hi_u32 v7, v11, v0
; GISEL-NEXT: v_mul_hi_u32 v0, v12, v0
; GISEL-NEXT: v_xor_b32_e32 v8, v10, v4
; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v6
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GISEL-NEXT: v_mul_lo_u32 v7, v12, v2
; GISEL-NEXT: v_add_i32_e32 v3, vcc, v6, v3
; GISEL-NEXT: v_mul_hi_u32 v6, v11, v2
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v6
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v3
; GISEL-NEXT: v_mul_hi_u32 v7, v12, v2
; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v10, 0
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
; GISEL-NEXT: v_add_i32_e32 v13, vcc, v7, v0
; GISEL-NEXT: v_mov_b32_e32 v0, v3
; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v13, v[0:1]
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v1, v4
; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v8, v4, vcc
; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[4:5], 0, v10, v[6:7]
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v11, v2
; GISEL-NEXT: v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
; GISEL-NEXT: v_sub_i32_e64 v3, s[4:5], v12, v3
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v5
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4
; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[4:5]
; GISEL-NEXT: v_add_i32_e32 v6, vcc, 1, v10
; GISEL-NEXT: v_addc_u32_e32 v7, vcc, 0, v13, vcc
; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v6
; GISEL-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GISEL-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
; GISEL-NEXT: v_xor_b32_e32 v2, v2, v9
; GISEL-NEXT: v_xor_b32_e32 v3, v3, v9
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
; GISEL-NEXT: v_lshrrev_b32_e32 v5, 20, v5
; GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v5
; GISEL-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; GISEL-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; GISEL-NEXT: s_setpc_b64 s[30:31]
;
; CGP-LABEL: v_sdiv_v2i64_pow2k_denom:
; CGP: ; %bb.0:
; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CGP-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
; CGP-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
; CGP-NEXT: v_mov_b32_e32 v6, 0xfffff000
; CGP-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
; CGP-NEXT: v_rcp_iflag_f32_e32 v4, v4
; CGP-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
; CGP-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
; CGP-NEXT: v_trunc_f32_e32 v7, v5
; CGP-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
; CGP-NEXT: v_cvt_u32_f32_e32 v8, v4
; CGP-NEXT: v_cvt_u32_f32_e32 v9, v7
; CGP-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v6, v8, 0
; CGP-NEXT: v_mov_b32_e32 v7, v5
; CGP-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v6, v9, v[7:8]
; CGP-NEXT: v_mul_hi_u32 v12, v9, v4
; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], -1, v8, v[10:11]
; CGP-NEXT: v_mul_lo_u32 v10, v9, v4
; CGP-NEXT: v_mul_hi_u32 v11, v8, v4
; CGP-NEXT: v_mul_lo_u32 v4, v8, v13
; CGP-NEXT: v_mul_lo_u32 v7, v9, v13
; CGP-NEXT: v_mul_hi_u32 v14, v8, v13
; CGP-NEXT: v_mul_hi_u32 v13, v9, v13
; CGP-NEXT: v_add_i32_e32 v4, vcc, v10, v4
; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v4, vcc, v4, v11
; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v4, vcc, v15, v4
; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v12
; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v14
; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v14, vcc, v15, v14
; CGP-NEXT: v_add_i32_e32 v4, vcc, v7, v4
; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v7, vcc, v14, v7
; CGP-NEXT: v_add_i32_e32 v7, vcc, v13, v7
; CGP-NEXT: v_add_i32_e32 v16, vcc, v8, v4
; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v6, v16, 0
; CGP-NEXT: v_addc_u32_e32 v17, vcc, v9, v7, vcc
; CGP-NEXT: v_mov_b32_e32 v4, v14
; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v6, v17, v[4:5]
; CGP-NEXT: v_ashrrev_i32_e32 v7, 31, v1
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v7
; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], -1, v16, v[14:15]
; CGP-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
; CGP-NEXT: v_xor_b32_e32 v15, v0, v7
; CGP-NEXT: v_mul_lo_u32 v0, v17, v13
; CGP-NEXT: v_mul_lo_u32 v4, v16, v14
; CGP-NEXT: v_xor_b32_e32 v18, v1, v7
; CGP-NEXT: v_mul_hi_u32 v1, v16, v13
; CGP-NEXT: v_mul_hi_u32 v13, v17, v13
; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v1
; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; CGP-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; CGP-NEXT: v_mul_lo_u32 v1, v17, v14
; CGP-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CGP-NEXT: v_mul_hi_u32 v4, v16, v14
; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v13
; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v4
; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v4, vcc, v13, v4
; CGP-NEXT: v_mul_hi_u32 v13, v17, v14
; CGP-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v1, vcc, v4, v1
; CGP-NEXT: v_add_i32_e32 v1, vcc, v13, v1
; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
; CGP-NEXT: v_addc_u32_e32 v1, vcc, v17, v1, vcc
; CGP-NEXT: v_mul_lo_u32 v13, v18, v0
; CGP-NEXT: v_mul_lo_u32 v14, v15, v1
; CGP-NEXT: v_mul_hi_u32 v16, v15, v0
; CGP-NEXT: v_mul_hi_u32 v0, v18, v0
; CGP-NEXT: v_mov_b32_e32 v4, 0x1000
; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v14
; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v16
; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; CGP-NEXT: v_mul_lo_u32 v16, v18, v1
; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
; CGP-NEXT: v_mul_hi_u32 v14, v15, v1
; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
; CGP-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v14
; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v14, vcc, v16, v14
; CGP-NEXT: v_add_i32_e32 v16, vcc, v0, v13
; CGP-NEXT: v_mul_hi_u32 v17, v18, v1
; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v16, 0
; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
; CGP-NEXT: v_add_i32_e32 v17, vcc, v17, v13
; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v4, v17, v[1:2]
; CGP-NEXT: v_sub_i32_e32 v0, vcc, v15, v0
; CGP-NEXT: v_subb_u32_e64 v1, s[4:5], v18, v13, vcc
; CGP-NEXT: v_sub_i32_e64 v13, s[4:5], v18, v13
; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
; CGP-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[4:5]
; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
; CGP-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v13, vcc
; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
; CGP-NEXT: v_subbrev_u32_e32 v13, vcc, 0, v1, vcc
; CGP-NEXT: v_add_i32_e32 v15, vcc, 1, v16
; CGP-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
; CGP-NEXT: v_mov_b32_e32 v0, v5
; CGP-NEXT: v_cndmask_b32_e64 v14, -1, v14, s[4:5]
; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v9, v[0:1]
; CGP-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v13
; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], -1, v8, v[0:1]
; CGP-NEXT: v_cndmask_b32_e32 v5, -1, v19, vcc
; CGP-NEXT: v_add_i32_e32 v1, vcc, 1, v15
; CGP-NEXT: v_mul_lo_u32 v19, v8, v0
; CGP-NEXT: v_addc_u32_e32 v13, vcc, 0, v18, vcc
; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
; CGP-NEXT: v_cndmask_b32_e32 v5, v15, v1, vcc
; CGP-NEXT: v_cndmask_b32_e32 v13, v18, v13, vcc
; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v19
; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v11
; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; CGP-NEXT: v_mul_lo_u32 v11, v9, v0
; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
; CGP-NEXT: v_mul_hi_u32 v10, v8, v0
; CGP-NEXT: v_add_i32_e32 v11, vcc, v11, v12
; CGP-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
; CGP-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v11, vcc, v12, v11
; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v10
; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v1
; CGP-NEXT: v_addc_u32_e32 v9, vcc, v9, v0, vcc
; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v8, 0
; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; CGP-NEXT: v_cndmask_b32_e32 v5, v16, v5, vcc
; CGP-NEXT: v_xor_b32_e32 v11, v5, v7
; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v6, v9, v[1:2]
; CGP-NEXT: v_cndmask_b32_e32 v10, v17, v13, vcc
; CGP-NEXT: v_xor_b32_e32 v1, v10, v7
; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], -1, v8, v[5:6]
; CGP-NEXT: v_ashrrev_i32_e32 v10, 31, v3
; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v10
; CGP-NEXT: v_addc_u32_e32 v3, vcc, v3, v10, vcc
; CGP-NEXT: v_xor_b32_e32 v12, v2, v10
; CGP-NEXT: v_mul_lo_u32 v2, v9, v0
; CGP-NEXT: v_mul_lo_u32 v6, v8, v5
; CGP-NEXT: v_xor_b32_e32 v13, v3, v10
; CGP-NEXT: v_mul_hi_u32 v3, v8, v0
; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v6
; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CGP-NEXT: v_mul_lo_u32 v3, v9, v5
; CGP-NEXT: v_add_i32_e32 v2, vcc, v6, v2
; CGP-NEXT: v_mul_hi_u32 v6, v8, v5
; CGP-NEXT: v_add_i32_e32 v0, vcc, v3, v0
; CGP-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v6
; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
; CGP-NEXT: v_mul_hi_u32 v5, v9, v5
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CGP-NEXT: v_add_i32_e32 v2, vcc, v5, v2
; CGP-NEXT: v_add_i32_e32 v3, vcc, v8, v0
; CGP-NEXT: v_addc_u32_e32 v2, vcc, v9, v2, vcc
; CGP-NEXT: v_mul_lo_u32 v5, v13, v3
; CGP-NEXT: v_mul_lo_u32 v6, v12, v2
; CGP-NEXT: v_sub_i32_e32 v0, vcc, v11, v7
; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v7, vcc
; CGP-NEXT: v_mul_hi_u32 v7, v12, v3
; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v6
; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v7
; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; CGP-NEXT: v_mul_lo_u32 v7, v13, v2
; CGP-NEXT: v_mul_hi_u32 v3, v13, v3
; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
; CGP-NEXT: v_mul_hi_u32 v6, v12, v2
; CGP-NEXT: v_add_i32_e32 v3, vcc, v7, v3
; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v6, vcc, v7, v6
; CGP-NEXT: v_add_i32_e32 v7, vcc, v3, v5
; CGP-NEXT: v_mul_hi_u32 v8, v13, v2
; CGP-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, v7, 0
; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v5
; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, v[3:4]
; CGP-NEXT: v_sub_i32_e32 v2, vcc, v12, v2
; CGP-NEXT: v_subb_u32_e64 v3, s[4:5], v13, v5, vcc
; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v13, v5
; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v4
; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
; CGP-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
; CGP-NEXT: v_cndmask_b32_e64 v3, -1, v6, s[4:5]
; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v7
; CGP-NEXT: v_addc_u32_e32 v9, vcc, 0, v8, vcc
; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4
; CGP-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; CGP-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v6
; CGP-NEXT: v_addc_u32_e32 v5, vcc, 0, v9, vcc
; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; CGP-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc
; CGP-NEXT: v_cndmask_b32_e32 v4, v9, v5, vcc
; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; CGP-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
; CGP-NEXT: v_cndmask_b32_e32 v3, v8, v4, vcc
; CGP-NEXT: v_xor_b32_e32 v2, v2, v10
; CGP-NEXT: v_xor_b32_e32 v3, v3, v10
; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v10
; CGP-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; CGP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; CGP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; CGP-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CGP-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; CGP-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i64> %num, <i64 4096, i64 4096>
ret <2 x i64> %result
@@ -3398,3 +2841,13 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
%result = sdiv <2 x i64> %num.mask, %den.mask
ret <2 x i64> %result
}

define i64 @v_sdiv_i64_exact(i64 %num) {
; CHECK-LABEL: v_sdiv_i64_exact:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv exact i64 %num, 4096
ret i64 %result
}
(Remaining file diff suppressed because it is too large.)