Skip to content

Commit e618a79

Browse files
committed
[RISCV][test] Add (add x, C) -> (sub x, -C) multi-use and vector tests
1 parent 416c782 commit e618a79

File tree

4 files changed

+185
-1
lines changed

4 files changed

+185
-1
lines changed

llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,22 @@ define i64 @add_multiuse(i64 %x) {
5656
; CHECK-NEXT: and a0, a0, a1
5757
; CHECK-NEXT: ret
5858
%add = add i64 %x, -1099511627775
59-
%xor = and i64 %add, -1099511627775
59+
%and = and i64 %add, -1099511627775
60+
ret i64 %and
61+
}
62+
63+
define i64 @add_multiuse_const(i64 %x, i64 %y) {
64+
; CHECK-LABEL: add_multiuse_const:
65+
; CHECK: # %bb.0:
66+
; CHECK-NEXT: li a2, -1
67+
; CHECK-NEXT: slli a2, a2, 40
68+
; CHECK-NEXT: addi a2, a2, 1
69+
; CHECK-NEXT: add a0, a0, a2
70+
; CHECK-NEXT: add a1, a1, a2
71+
; CHECK-NEXT: xor a0, a0, a1
72+
; CHECK-NEXT: ret
73+
%a = add i64 %x, -1099511627775
74+
%b = add i64 %y, -1099511627775
75+
%xor = xor i64 %a, %b
6076
ret i64 %xor
6177
}

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -431,3 +431,61 @@ define void @vadd_vx_v16i64(ptr %a, i64 %b, ptr %c) {
431431
store <16 x i64> %vc, ptr %c
432432
ret void
433433
}
434+
435+
define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va) {
436+
; RV32-LABEL: vadd_vx_v2i64_to_sub:
437+
; RV32: # %bb.0:
438+
; RV32-NEXT: addi sp, sp, -16
439+
; RV32-NEXT: .cfi_def_cfa_offset 16
440+
; RV32-NEXT: li a0, -256
441+
; RV32-NEXT: li a1, 1
442+
; RV32-NEXT: sw a1, 8(sp)
443+
; RV32-NEXT: sw a0, 12(sp)
444+
; RV32-NEXT: addi a0, sp, 8
445+
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
446+
; RV32-NEXT: vlse64.v v9, (a0), zero
447+
; RV32-NEXT: vadd.vv v8, v8, v9
448+
; RV32-NEXT: addi sp, sp, 16
449+
; RV32-NEXT: .cfi_def_cfa_offset 0
450+
; RV32-NEXT: ret
451+
;
452+
; RV64-LABEL: vadd_vx_v2i64_to_sub:
453+
; RV64: # %bb.0:
454+
; RV64-NEXT: li a0, -1
455+
; RV64-NEXT: slli a0, a0, 40
456+
; RV64-NEXT: addi a0, a0, 1
457+
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
458+
; RV64-NEXT: vadd.vx v8, v8, a0
459+
; RV64-NEXT: ret
460+
%v = add <2 x i64> splat (i64 -1099511627775), %va
461+
ret <2 x i64> %v
462+
}
463+
464+
define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va) {
465+
; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
466+
; RV32: # %bb.0:
467+
; RV32-NEXT: addi sp, sp, -16
468+
; RV32-NEXT: .cfi_def_cfa_offset 16
469+
; RV32-NEXT: li a0, -256
470+
; RV32-NEXT: li a1, 1
471+
; RV32-NEXT: sw a1, 8(sp)
472+
; RV32-NEXT: sw a0, 12(sp)
473+
; RV32-NEXT: addi a0, sp, 8
474+
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
475+
; RV32-NEXT: vlse64.v v9, (a0), zero
476+
; RV32-NEXT: vadd.vv v8, v8, v9
477+
; RV32-NEXT: addi sp, sp, 16
478+
; RV32-NEXT: .cfi_def_cfa_offset 0
479+
; RV32-NEXT: ret
480+
;
481+
; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
482+
; RV64: # %bb.0:
483+
; RV64-NEXT: li a0, -1
484+
; RV64-NEXT: slli a0, a0, 40
485+
; RV64-NEXT: addi a0, a0, 1
486+
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
487+
; RV64-NEXT: vadd.vx v8, v8, a0
488+
; RV64-NEXT: ret
489+
%v = add <2 x i64> %va, splat (i64 -1099511627775)
490+
ret <2 x i64> %v
491+
}

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1425,3 +1425,59 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
14251425
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
14261426
ret <32 x i64> %v
14271427
}
1428+
1429+
define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
1430+
; RV32-LABEL: vadd_vx_v2i64_to_sub:
1431+
; RV32: # %bb.0:
1432+
; RV32-NEXT: addi sp, sp, -16
1433+
; RV32-NEXT: li a1, -256
1434+
; RV32-NEXT: li a2, 1
1435+
; RV32-NEXT: sw a2, 8(sp)
1436+
; RV32-NEXT: sw a1, 12(sp)
1437+
; RV32-NEXT: addi a1, sp, 8
1438+
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1439+
; RV32-NEXT: vlse64.v v9, (a1), zero
1440+
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1441+
; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
1442+
; RV32-NEXT: addi sp, sp, 16
1443+
; RV32-NEXT: ret
1444+
;
1445+
; RV64-LABEL: vadd_vx_v2i64_to_sub:
1446+
; RV64: # %bb.0:
1447+
; RV64-NEXT: li a1, -1
1448+
; RV64-NEXT: slli a1, a1, 40
1449+
; RV64-NEXT: addi a1, a1, 1
1450+
; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1451+
; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
1452+
; RV64-NEXT: ret
1453+
%v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> splat (i64 -1099511627775), <2 x i64> %va, <2 x i1> %m, i32 %evl)
1454+
ret <2 x i64> %v
1455+
}
1456+
1457+
define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
1458+
; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
1459+
; RV32: # %bb.0:
1460+
; RV32-NEXT: addi sp, sp, -16
1461+
; RV32-NEXT: li a1, -256
1462+
; RV32-NEXT: li a2, 1
1463+
; RV32-NEXT: sw a2, 8(sp)
1464+
; RV32-NEXT: sw a1, 12(sp)
1465+
; RV32-NEXT: addi a1, sp, 8
1466+
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1467+
; RV32-NEXT: vlse64.v v9, (a1), zero
1468+
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1469+
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
1470+
; RV32-NEXT: addi sp, sp, 16
1471+
; RV32-NEXT: ret
1472+
;
1473+
; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
1474+
; RV64: # %bb.0:
1475+
; RV64-NEXT: li a1, -1
1476+
; RV64-NEXT: slli a1, a1, 40
1477+
; RV64-NEXT: addi a1, a1, 1
1478+
; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1479+
; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
1480+
; RV64-NEXT: ret
1481+
%v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1099511627775), <2 x i1> %m, i32 %evl)
1482+
ret <2 x i64> %v
1483+
}

llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -865,3 +865,57 @@ define <vscale x 8 x i32> @vadd_vv_mask_negative1_nxv8i32(<vscale x 8 x i32> %va
865865
%vd = add <vscale x 8 x i32> %vc, %vs
866866
ret <vscale x 8 x i32> %vd
867867
}
868+
869+
define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind {
870+
; RV32-LABEL: vadd_vx_imm64_to_sub:
871+
; RV32: # %bb.0:
872+
; RV32-NEXT: addi sp, sp, -16
873+
; RV32-NEXT: li a0, -256
874+
; RV32-NEXT: li a1, 1
875+
; RV32-NEXT: sw a1, 8(sp)
876+
; RV32-NEXT: sw a0, 12(sp)
877+
; RV32-NEXT: addi a0, sp, 8
878+
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
879+
; RV32-NEXT: vlse64.v v9, (a0), zero
880+
; RV32-NEXT: vadd.vv v8, v8, v9
881+
; RV32-NEXT: addi sp, sp, 16
882+
; RV32-NEXT: ret
883+
;
884+
; RV64-LABEL: vadd_vx_imm64_to_sub:
885+
; RV64: # %bb.0:
886+
; RV64-NEXT: li a0, -1
887+
; RV64-NEXT: slli a0, a0, 40
888+
; RV64-NEXT: addi a0, a0, 1
889+
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
890+
; RV64-NEXT: vadd.vx v8, v8, a0
891+
; RV64-NEXT: ret
892+
%vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
893+
ret <vscale x 1 x i64> %vc
894+
}
895+
896+
define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va) nounwind {
897+
; RV32-LABEL: vadd_vx_imm64_to_sub_swapped:
898+
; RV32: # %bb.0:
899+
; RV32-NEXT: addi sp, sp, -16
900+
; RV32-NEXT: li a0, -256
901+
; RV32-NEXT: li a1, 1
902+
; RV32-NEXT: sw a1, 8(sp)
903+
; RV32-NEXT: sw a0, 12(sp)
904+
; RV32-NEXT: addi a0, sp, 8
905+
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
906+
; RV32-NEXT: vlse64.v v9, (a0), zero
907+
; RV32-NEXT: vadd.vv v8, v8, v9
908+
; RV32-NEXT: addi sp, sp, 16
909+
; RV32-NEXT: ret
910+
;
911+
; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
912+
; RV64: # %bb.0:
913+
; RV64-NEXT: li a0, -1
914+
; RV64-NEXT: slli a0, a0, 40
915+
; RV64-NEXT: addi a0, a0, 1
916+
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
917+
; RV64-NEXT: vadd.vx v8, v8, a0
918+
; RV64-NEXT: ret
919+
%vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
920+
ret <vscale x 1 x i64> %vc
921+
}

0 commit comments

Comments (0)