https://github.com/JoshdRod updated 
https://github.com/llvm/llvm-project/pull/171448

>From 7854c9af0229e0da243ae75cc08aa3d65c1bdc8c Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Tue, 9 Dec 2025 14:27:56 +0000
Subject: [PATCH 1/5] [GlobalISel][AArch64] Added support for sli intrinsic

sli intrinsic now lowers correctly for all vector types.
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td           |  7 +++++++
 llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp |  9 ++++++++-
 .../Target/AArch64/GISel/AArch64RegisterBankInfo.cpp   |  2 ++
 llvm/test/CodeGen/AArch64/arm64-vshift.ll              | 10 ----------
 4 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 75354e4098fb4..3002547eb2d79 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -270,6 +270,12 @@ def G_URSHR: AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
+def G_VSLI: AArch64GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3);
+  let hasSideEffects = 0;
+}
+
 // Generic instruction for the BSP pseudo. It is expanded into BSP, which
 // expands into BSL/BIT/BIF after register allocation.
 def G_BSP : AArch64GenericInstruction {
@@ -321,6 +327,7 @@ def : GINodeEquiv<G_USDOT, AArch64usdot>;
 def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
 def : GINodeEquiv<G_SRSHR, AArch64srshri>;
 def : GINodeEquiv<G_URSHR, AArch64urshri>;
+def : GINodeEquiv<G_VSLI, AArch64vsli>;
 
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 8951ccfbd3352..642ddf4bc92c4 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1949,6 +1949,13 @@ bool 
AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
       return false;
     }
   }
+  case Intrinsic::aarch64_neon_vsli: {
+    MIB.buildInstr(
+        AArch64::G_VSLI, {MI.getOperand(0)},
+        {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4).getImm()});
+    MI.eraseFromParent();
+    break;
+  }
   case Intrinsic::aarch64_neon_abs: {
     // Lower the intrinsic to G_ABS.
     MIB.buildInstr(TargetOpcode::G_ABS, {MI.getOperand(0)}, 
{MI.getOperand(2)});
@@ -2598,4 +2605,4 @@ bool AArch64LegalizerInfo::legalizeFptrunc(MachineInstr 
&MI,
   MRI.replaceRegWith(Dst, Fin);
   MI.eraseFromParent();
   return true;
-}
\ No newline at end of file
+}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 84bc3f1e14a7a..8cd7c73f157e3 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -575,6 +575,7 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr 
&MI,
   case TargetOpcode::G_LROUND:
   case TargetOpcode::G_LLROUND:
   case AArch64::G_PMULL:
+  case AArch64::G_VSLI:
     return true;
   case TargetOpcode::G_INTRINSIC:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
@@ -613,6 +614,7 @@ bool AArch64RegisterBankInfo::onlyDefinesFP(const 
MachineInstr &MI,
   case TargetOpcode::G_INSERT_VECTOR_ELT:
   case TargetOpcode::G_BUILD_VECTOR:
   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
+  case AArch64::G_VSLI:
     return true;
   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index a316a4bc543b5..05ddb4b5a7c64 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,16 +2,6 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s 
--check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | 
FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for sli8b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli4h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli2s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli1d
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sli1d_imm0
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for 
sli16b
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli8h
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli4s
-; CHECK-GI NEXT:    warning: Instruction selection used fallback path for sli2d
-
 define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqshl8b:
 ; CHECK:       // %bb.0:

>From 87f41ca0e8af61c7c51b7fd821fc4b662210a072 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 10 Dec 2025 09:59:24 +0000
Subject: [PATCH 2/5] [AArch64][GlobalISel] Changed G_VSLI input operand list
 to correctly reflect operand types

---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 3002547eb2d79..0c853ac573e1a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -272,7 +272,7 @@ def G_URSHR: AArch64GenericInstruction {
 
 def G_VSLI: AArch64GenericInstruction {
   let OutOperandList = (outs type0:$dst);
-  let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3);
+  let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3);
   let hasSideEffects = 0;
 }
 

>From e4d67f3344579d5be59b461413dcecff5b000267 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 10 Dec 2025 10:09:48 +0000
Subject: [PATCH 3/5] [AArch64][GlobalISel] Renamed G_VSLI to G_SLI

The name now better reflects the machine code instruction, rather than the IR 
intrinsic.
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td              | 4 ++--
 llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp    | 2 +-
 llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 0c853ac573e1a..eed1ec67d1e86 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -270,7 +270,7 @@ def G_URSHR: AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
-def G_VSLI: AArch64GenericInstruction {
+def G_SLI: AArch64GenericInstruction {
   let OutOperandList = (outs type0:$dst);
   let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3);
   let hasSideEffects = 0;
@@ -327,7 +327,7 @@ def : GINodeEquiv<G_USDOT, AArch64usdot>;
 def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
 def : GINodeEquiv<G_SRSHR, AArch64srshri>;
 def : GINodeEquiv<G_URSHR, AArch64urshri>;
-def : GINodeEquiv<G_VSLI, AArch64vsli>;
+def : GINodeEquiv<G_SLI, AArch64vsli>;
 
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 642ddf4bc92c4..0327d6d331303 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1951,7 +1951,7 @@ bool 
AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
   }
   case Intrinsic::aarch64_neon_vsli: {
     MIB.buildInstr(
-        AArch64::G_VSLI, {MI.getOperand(0)},
+        AArch64::G_SLI, {MI.getOperand(0)},
         {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4).getImm()});
     MI.eraseFromParent();
     break;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 8cd7c73f157e3..1511d038e9d2f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -575,7 +575,7 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr 
&MI,
   case TargetOpcode::G_LROUND:
   case TargetOpcode::G_LLROUND:
   case AArch64::G_PMULL:
-  case AArch64::G_VSLI:
+  case AArch64::G_SLI:
     return true;
   case TargetOpcode::G_INTRINSIC:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
@@ -614,7 +614,7 @@ bool AArch64RegisterBankInfo::onlyDefinesFP(const 
MachineInstr &MI,
   case TargetOpcode::G_INSERT_VECTOR_ELT:
   case TargetOpcode::G_BUILD_VECTOR:
   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
-  case AArch64::G_VSLI:
+  case AArch64::G_SLI:
     return true;
   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {

>From 8ab42c471ee74e7264cca307430d979df9aef100 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 10 Dec 2025 10:22:44 +0000
Subject: [PATCH 4/5] [AArch64][GlobalISel] Added support for sri intrinsic

---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td              | 8 ++++++++
 llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp    | 7 +++++++
 llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp | 2 ++
 3 files changed, 17 insertions(+)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td 
b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index eed1ec67d1e86..48aee9ce7344b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -276,6 +276,12 @@ def G_SLI: AArch64GenericInstruction {
   let hasSideEffects = 0;
 }
 
+def G_SRI: AArch64GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3);
+  let hasSideEffects = 0;
+}
+
 // Generic instruction for the BSP pseudo. It is expanded into BSP, which
 // expands into BSL/BIT/BIF after register allocation.
 def G_BSP : AArch64GenericInstruction {
@@ -327,7 +333,9 @@ def : GINodeEquiv<G_USDOT, AArch64usdot>;
 def : GINodeEquiv<G_SQSHLU, AArch64sqshlui>;
 def : GINodeEquiv<G_SRSHR, AArch64srshri>;
 def : GINodeEquiv<G_URSHR, AArch64urshri>;
+
 def : GINodeEquiv<G_SLI, AArch64vsli>;
+def : GINodeEquiv<G_SRI, AArch64vsri>;
 
 def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 0327d6d331303..a430c81134560 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1956,6 +1956,13 @@ bool 
AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     MI.eraseFromParent();
     break;
   }
+  case Intrinsic::aarch64_neon_vsri: {
+    MIB.buildInstr(
+        AArch64::G_SRI, {MI.getOperand(0)},
+        {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4).getImm()});
+    MI.eraseFromParent();
+    break;
+  }
   case Intrinsic::aarch64_neon_abs: {
     // Lower the intrinsic to G_ABS.
     MIB.buildInstr(TargetOpcode::G_ABS, {MI.getOperand(0)}, 
{MI.getOperand(2)});
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp 
b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 1511d038e9d2f..44f8fd8ad6d35 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -576,6 +576,7 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr 
&MI,
   case TargetOpcode::G_LLROUND:
   case AArch64::G_PMULL:
   case AArch64::G_SLI:
+  case AArch64::G_SRI:
     return true;
   case TargetOpcode::G_INTRINSIC:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
@@ -615,6 +616,7 @@ bool AArch64RegisterBankInfo::onlyDefinesFP(const 
MachineInstr &MI,
   case TargetOpcode::G_BUILD_VECTOR:
   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
   case AArch64::G_SLI:
+  case AArch64::G_SRI:
     return true;
   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
     switch (cast<GIntrinsic>(MI).getIntrinsicID()) {

>From 7e964b702c55cb4b37302886de98b87ecdfc50fb Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <[email protected]>
Date: Wed, 10 Dec 2025 11:47:19 +0000
Subject: [PATCH 5/5] [AArch64][GlobalISel] Added test coverage for sri
 intrinsic

Previously, generation of sri intrinsics was tested during the ACLE -> IR 
stage, but not in the IR -> MIR stage. Now, correct generation of sri 
intrinsics is tested in both stages.
---
 llvm/test/CodeGen/AArch64/arm64-vshift.ll | 116 ++++++++++++++++++++--
 1 file changed, 110 insertions(+), 6 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll 
b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 05ddb4b5a7c64..29c06b8fa228c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -4278,6 +4278,110 @@ declare <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x 
i16>, <8 x i16>, i32) nounw
 declare <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32>, <4 x i32>, i32) 
nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32) 
nounwind readnone
 
+define <8 x i8> @sri8b(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sri v0.8b, v1.8b, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %tmp1, <8 x i8> 
%tmp2, i32 1)
+  ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sri4h(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sri v0.4h, v1.4h, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> %tmp1, <4 x 
i16> %tmp2, i32 1)
+  ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sri2s(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sri v0.2s, v1.2s, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32> %tmp1, <2 x 
i32> %tmp2, i32 1)
+  ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @sri1d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sri d0, d1, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <1 x i64>, ptr %A
+  %tmp2 = load <1 x i64>, ptr %B
+  %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %tmp1, <1 x 
i64> %tmp2, i32 1)
+  ret <1 x i64> %tmp3
+}
+
+define <16 x i8> @sri16b(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sri v0.16b, v1.16b, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
+  %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %tmp1, <16 x 
i8> %tmp2, i32 1)
+  ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sri8h(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sri v0.8h, v1.8h, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> %tmp1, <8 x 
i16> %tmp2, i32 1)
+  ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sri4s(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sri v0.4s, v1.4s, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32> %tmp1, <4 x 
i32> %tmp2, i32 1)
+  ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sri2d(ptr %A, ptr %B) nounwind {
+; CHECK-LABEL: sri2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sri v0.2d, v1.2d, #1
+; CHECK-NEXT:    ret
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64> %tmp1, <2 x 
i64> %tmp2, i32 1)
+  ret <2 x i64> %tmp3
+}
+
 define <1 x i64> @ashr_v1i64(<1 x i64> %a, <1 x i64> %b) {
 ; CHECK-SD-LABEL: ashr_v1i64:
 ; CHECK-SD:       // %bb.0:
@@ -4522,9 +4626,9 @@ define <4 x i16> @lshr_trunc_v4i64_v4i16(<4 x i64> %a) {
 ;
 ; CHECK-GI-LABEL: lshr_trunc_v4i64_v4i16:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI270_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI278_0
 ; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI270_0]
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI278_0]
 ; CHECK-GI-NEXT:    uzp1 v2.4s, v2.4s, v2.4s
 ; CHECK-GI-NEXT:    neg v1.4s, v2.4s
 ; CHECK-GI-NEXT:    ushl v0.4s, v0.4s, v1.4s
@@ -4563,9 +4667,9 @@ define <4 x i16> @ashr_trunc_v4i64_v4i16(<4 x i64> %a) {
 ;
 ; CHECK-GI-LABEL: ashr_trunc_v4i64_v4i16:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI272_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI280_0
 ; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI272_0]
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI280_0]
 ; CHECK-GI-NEXT:    uzp1 v2.4s, v2.4s, v2.4s
 ; CHECK-GI-NEXT:    neg v1.4s, v2.4s
 ; CHECK-GI-NEXT:    sshl v0.4s, v0.4s, v1.4s
@@ -4603,9 +4707,9 @@ define <4 x i16> @shl_trunc_v4i64_v4i16(<4 x i64> %a) {
 ;
 ; CHECK-GI-LABEL: shl_trunc_v4i64_v4i16:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI274_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI282_0
 ; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI274_0]
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI282_0]
 ; CHECK-GI-NEXT:    uzp1 v1.4s, v2.4s, v2.4s
 ; CHECK-GI-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-GI-NEXT:    xtn v1.4h, v1.4s

_______________________________________________
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits

Reply via email to