@@ -1795,10 +1795,13 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     return size; // Self copy, no move.
 
   if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
+    int src_offset = ra_->reg2offset(src_lo);
+    int dst_offset = ra_->reg2offset(dst_lo);
+    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ideal_reg()), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
+    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
+    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
     // Memory->Memory Spill.
     if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
-      int src_offset = ra_->reg2offset(src_lo);
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (masm) {
         __ ld(R0, src_offset, R1_SP);
         __ std(R0, dst_offset, R1_SP);
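Note: the new DEBUG_ONLY/assert lines above enforce that VecX spill slots are 16-byte aligned on the stack. A minimal standalone sketch of the arithmetic follows; the constants are assumptions for PPC64 (4-byte stack slots, a 16-byte vector covering 4 slots, 16-byte stack alignment) rather than values read from the HotSpot sources, and std::min stands in for HotSpot's MIN2.

#include <algorithm>
#include <cassert>
#include <cstdio>

int main() {
  // Assumed PPC64 values, mirroring VMRegImpl::stack_slot_size,
  // RegMask::num_registers(Op_VecX), and Matcher::stack_alignment_in_slots().
  const int stack_slot_size          = 4; // bytes per stack slot
  const int num_registers_vecx       = 4; // slots covered by a 16-byte vector
  const int stack_alignment_in_slots = 4; // 16-byte frame alignment / 4-byte slots

  // Same arithmetic as the DEBUG_ONLY line: the required alignment is the
  // smaller of the value's size and the alignment the stack already guarantees.
  int algm = std::min(num_registers_vecx, stack_alignment_in_slots) * stack_slot_size;
  printf("required spill alignment: %d bytes\n", algm); // prints 16

  // The asserts then reject any stack offset that is not a multiple of algm.
  int dst_offset = 32; // a well-aligned spill offset
  assert(dst_offset % algm == 0 && "unaligned vector spill sp offset");
  return 0;
}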
@@ -1810,22 +1813,11 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     // VectorRegister->Memory Spill.
     else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) {
       VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr();
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(dst_offset, 16)) {
-          if (masm) {
-            __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
-          }
-          size += 4;
-        } else {
-          // Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
-          // observed with VectorRearrangeTest.java on Power9).
-          if (masm) {
-            __ addi(R0, R1_SP, dst_offset);
-            __ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
-          }
-          size += 8;
+        if (masm) {
+          __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
         }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, dst_offset);
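Note: this simplification leans on the asserts added above. As I understand the Power ISA 3.0 encoding, lxv/stxv are DQ-form instructions: the displacement is stored divided by 16 in a 12-bit DQ field, so only 16-byte-aligned offsets are encodable. Once spill offsets are guaranteed aligned, the 8-byte addi + stxvx fallback is dead code and the single 4-byte stxv can be emitted unconditionally on Power9 and newer. A hedged sketch of that encoding constraint (not code from the HotSpot assembler):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Sketch: the DQ field a DQ-form instruction such as stxv would encode.
// Offsets with nonzero low 4 bits cannot be represented, which is why the
// removed code needed the addi + stxvx (indexed, arbitrary address) fallback.
uint16_t dq_field(int32_t offset) {
  assert((offset & 0xF) == 0 && "DQ-form needs a 16-byte aligned offset");
  return (uint16_t)((offset >> 4) & 0xFFF); // 12-bit field holds offset / 16
}

int main() {
  printf("DQ field for offset 48: %u\n", dq_field(48)); // prints 3
  // dq_field(52) would trip the assert: not encodable, needs the indexed form.
  return 0;
}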
@@ -1837,20 +1829,11 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     // Memory->VectorRegister Spill.
     else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) {
       VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr();
-      int src_offset = ra_->reg2offset(src_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(src_offset, 16)) {
-          if (masm) {
-            __ lxv(Rdst, src_offset, R1_SP);
-          }
-          size += 4;
-        } else {
-          if (masm) {
-            __ addi(R0, R1_SP, src_offset);
-            __ lxvx(Rdst, R0);
-          }
-          size += 8;
+        if (masm) {
+          __ lxv(Rdst, src_offset, R1_SP);
         }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, src_offset);