craig.topper created this revision.
craig.topper added reviewers: asb, VincentWu, kito-cheng, reames.
Herald added subscribers: jobnoorman, luke, vkmr, frasercrmck, luismarques, 
apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, PkmX, the_o, 
brucehoult, MartinMosbeck, rogfer01, edward-jones, zzheng, jrtc27, shiva0217, 
niosHD, sabuasal, simoncook, johnrusso, rbar, hiraditya, arichardson.
Herald added a project: All.
craig.topper requested review of this revision.
Herald added subscribers: wangpc, eopXD, MaskRay.
Herald added projects: clang, LLVM.

Previously these intrinsics returned i32 on RV32 and i64 on RV64, but the
underlying instructions only consume and produce 32 bits. On RV64 the
result is sign-extended to 64 bits, like the *W instructions.
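
For example, on RV64 the updated tests express this contract with an i32
intrinsic whose argument and result are sign-extended in 64-bit registers
(a sketch based on the rv64zksh test below):

  declare i32 @llvm.riscv.sm3p0(i32)

  define signext i32 @sm3p0_i32(i32 signext %a) nounwind {
    %val = call i32 @llvm.riscv.sm3p0(i32 %a)
    ret i32 %val
  }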

This patch removes this XLen dependence from the interface to improve
portability and consistency. It matches the proposal for the scalar
intrinsics in https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44
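
Concretely, the per-XLEN type overloads collapse to a single i32 form; a
sketch of the declarations before and after:

  ; Before:
  declare i32 @llvm.riscv.sha256sig0.i32(i32)   ; RV32
  declare i64 @llvm.riscv.sha256sig0.i64(i64)   ; RV64
  ; After, on both targets:
  declare i32 @llvm.riscv.sha256sig0(i32)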

I've included IR autoupgrade support as well.
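
The upgrade rewrites an old RV64-style call by truncating the operand and
sign-extending the result, mirroring the AutoUpgrade.cpp changes below
(a sketch; %x stands in for an arbitrary i64 value):

  ; Old bitcode:
  ;   %r = call i64 @llvm.riscv.sm3p0.i64(i64 %x)
  ; is upgraded to:
  %t = trunc i64 %x to i32
  %v = call i32 @llvm.riscv.sm3p0(i32 %t)
  %r = sext i32 %v to i64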

Follow-up patches will make the same change for other builtins/intrinsics
that currently use 'long'.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D154647

Files:
  clang/include/clang/Basic/BuiltinsRISCV.def
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c
  llvm/include/llvm/IR/IntrinsicsRISCV.td
  llvm/lib/IR/AutoUpgrade.cpp
  llvm/lib/Target/RISCV/RISCVISelLowering.cpp
  llvm/lib/Target/RISCV/RISCVISelLowering.h
  llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
  llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
  llvm/test/CodeGen/RISCV/sextw-removal.ll

Index: llvm/test/CodeGen/RISCV/sextw-removal.ll
===================================================================
--- llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1319,13 +1319,11 @@
 ; NOREMOVAL-NEXT:    addi sp, sp, 32
 ; NOREMOVAL-NEXT:    ret
 bb:
-  %sext = sext i32 %arg1 to i64
-  %i = call i64 @llvm.riscv.sha256sig0.i64(i64 %sext)
-  %trunc = trunc i64 %i to i32
+  %i = call i32 @llvm.riscv.sha256sig0(i32 %arg1)
   br label %bb2
 
 bb2:                                              ; preds = %bb2, %bb
-  %i3 = phi i32 [ %trunc, %bb ], [ %i5, %bb2 ]
+  %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ]
   %i4 = tail call signext i32 @bar(i32 signext %i3)
   %i5 = shl i32 %i3, %arg1
   %i6 = icmp eq i32 %i4, 0
@@ -1334,7 +1332,7 @@
 bb7:                                              ; preds = %bb2
   ret void
 }
-declare i64 @llvm.riscv.sha256sig0.i64(i64)
+declare i32 @llvm.riscv.sha256sig0(i32)
 
 ; The type promotion of %7 forms a sext_inreg, but %7 and %6 are combined to
 ; form a sh2add. This leaves behind a sext.w that isn't needed.
Index: llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZKSH
 
-declare i64 @llvm.riscv.sm3p0.i64(i64);
+declare i32 @llvm.riscv.sm3p0(i32);
 
-define i64 @sm3p0_i64(i64 %a) nounwind {
-; RV64ZKSH-LABEL: sm3p0_i64:
+define signext i32 @sm3p0_i32(i32 signext %a) nounwind {
+; RV64ZKSH-LABEL: sm3p0_i32:
 ; RV64ZKSH:       # %bb.0:
 ; RV64ZKSH-NEXT:    sm3p0 a0, a0
 ; RV64ZKSH-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm3p0.i64(i64 %a)
-  ret i64 %val
+  %val = call i32 @llvm.riscv.sm3p0(i32 signext %a)
+  ret i32 %val
 }
 
-declare i64 @llvm.riscv.sm3p1.i64(i64);
+declare i32 @llvm.riscv.sm3p1(i32);
 
-define i64 @sm3p1_i64(i64 %a) nounwind {
-; RV64ZKSH-LABEL: sm3p1_i64:
+define signext i32 @sm3p1_i32(i32 signext %a) nounwind {
+; RV64ZKSH-LABEL: sm3p1_i32:
 ; RV64ZKSH:       # %bb.0:
 ; RV64ZKSH-NEXT:    sm3p1 a0, a0
 ; RV64ZKSH-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm3p1.i64(i64 %a)
-  ret i64 %val
+  %val = call i32 @llvm.riscv.sm3p1(i32 signext %a)
+  ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZKSED
 
-declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i32);
+declare i32 @llvm.riscv.sm4ks(i32, i32, i32);
 
-define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind {
-; RV64ZKSED-LABEL: sm4ks_i64:
+define signext i32 @sm4ks_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZKSED-LABEL: sm4ks_i32:
 ; RV64ZKSED:       # %bb.0:
-; RV64ZKSED-NEXT:    sm4ks a0, a0, a1, 0
+; RV64ZKSED-NEXT:    sm4ks a0, a0, a1, 2
 ; RV64ZKSED-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i32 0)
-  ret i64 %val
+  %val = call i32 @llvm.riscv.sm4ks(i32 %a, i32 %b, i32 2)
+  ret i32 %val
 }
 
-declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i32);
+declare i32 @llvm.riscv.sm4ed(i32, i32, i32);
 
-define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind {
-; RV64ZKSED-LABEL: sm4ed_i64:
+define signext i32 @sm4ed_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZKSED-LABEL: sm4ed_i32:
 ; RV64ZKSED:       # %bb.0:
-; RV64ZKSED-NEXT:    sm4ed a0, a0, a1, 1
+; RV64ZKSED-NEXT:    sm4ed a0, a0, a1, 3
 ; RV64ZKSED-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %b, i32 1)
-  ret i64 %val
+  %val = call i32 @llvm.riscv.sm4ed(i32 %a, i32 %b, i32 3)
+  ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
@@ -3,48 +3,48 @@
 ; RUN:   | FileCheck %s -check-prefix=RV64ZKNH
 
 
-declare i64 @llvm.riscv.sha256sig0.i64(i64);
+declare i32 @llvm.riscv.sha256sig0(i32);
 
-define i64 @sha256sig0_i64(i64 %a) nounwind {
-; RV64ZKNH-LABEL: sha256sig0_i64:
+define signext i32 @sha256sig0_i32(i32 signext %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig0_i32:
 ; RV64ZKNH:       # %bb.0:
 ; RV64ZKNH-NEXT:    sha256sig0 a0, a0
 ; RV64ZKNH-NEXT:    ret
-    %val = call i64 @llvm.riscv.sha256sig0.i64(i64 %a)
-    ret i64 %val
+    %val = call i32 @llvm.riscv.sha256sig0(i32 signext %a)
+    ret i32 %val
 }
 
-declare i64 @llvm.riscv.sha256sig1.i64(i64);
+declare i32 @llvm.riscv.sha256sig1(i32);
 
-define i64 @sha256sig1_i64(i64 %a) nounwind {
-; RV64ZKNH-LABEL: sha256sig1_i64:
+define signext i32 @sha256sig1_i32(i32 signext %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig1_i32:
 ; RV64ZKNH:       # %bb.0:
 ; RV64ZKNH-NEXT:    sha256sig1 a0, a0
 ; RV64ZKNH-NEXT:    ret
-    %val = call i64 @llvm.riscv.sha256sig1.i64(i64 %a)
-    ret i64 %val
+    %val = call i32 @llvm.riscv.sha256sig1(i32 signext %a)
+    ret i32 %val
 }
 
-declare i64 @llvm.riscv.sha256sum0.i64(i64);
+declare i32 @llvm.riscv.sha256sum0(i32);
 
-define i64 @sha256sum0_i64(i64 %a) nounwind {
-; RV64ZKNH-LABEL: sha256sum0_i64:
+define signext i32 @sha256sum0_i32(i32 signext %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum0_i32:
 ; RV64ZKNH:       # %bb.0:
 ; RV64ZKNH-NEXT:    sha256sum0 a0, a0
 ; RV64ZKNH-NEXT:    ret
-    %val = call i64 @llvm.riscv.sha256sum0.i64(i64 %a)
-    ret i64 %val
+    %val = call i32 @llvm.riscv.sha256sum0(i32 signext %a)
+    ret i32 %val
 }
 
-declare i64 @llvm.riscv.sha256sum1.i64(i64);
+declare i32 @llvm.riscv.sha256sum1(i32);
 
-define i64 @sha256sum1_i64(i64 %a) nounwind {
-; RV64ZKNH-LABEL: sha256sum1_i64:
+define signext i32 @sha256sum1_i32(i32 signext %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum1_i32:
 ; RV64ZKNH:       # %bb.0:
 ; RV64ZKNH-NEXT:    sha256sum1 a0, a0
 ; RV64ZKNH-NEXT:    ret
-    %val = call i64 @llvm.riscv.sha256sum1.i64(i64 %a)
-    ret i64 %val
+    %val = call i32 @llvm.riscv.sha256sum1(i32 signext %a)
+    ret i32 %val
 }
 
 declare i64 @llvm.riscv.sha512sig0(i64);
Index: llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zksh -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKSH
 
-declare i32 @llvm.riscv.sm3p0.i32(i32);
+declare i32 @llvm.riscv.sm3p0(i32);
 
 define i32 @sm3p0_i32(i32 %a) nounwind {
 ; RV32ZKSH-LABEL: sm3p0_i32:
 ; RV32ZKSH:       # %bb.0:
 ; RV32ZKSH-NEXT:    sm3p0 a0, a0
 ; RV32ZKSH-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm3p0.i32(i32 %a)
+  %val = call i32 @llvm.riscv.sm3p0(i32 %a)
   ret i32 %val
 }
 
-declare i32 @llvm.riscv.sm3p1.i32(i32);
+declare i32 @llvm.riscv.sm3p1(i32);
 
 define i32 @sm3p1_i32(i32 %a) nounwind {
 ; RV32ZKSH-LABEL: sm3p1_i32:
 ; RV32ZKSH:       # %bb.0:
 ; RV32ZKSH-NEXT:    sm3p1 a0, a0
 ; RV32ZKSH-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm3p1.i32(i32 %a)
+  %val = call i32 @llvm.riscv.sm3p1(i32 %a)
   ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKSED
 
-declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i32);
+declare i32 @llvm.riscv.sm4ks(i32, i32, i32);
 
 define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind {
 ; RV32ZKSED-LABEL: sm4ks_i32:
 ; RV32ZKSED:       # %bb.0:
 ; RV32ZKSED-NEXT:    sm4ks a0, a0, a1, 2
 ; RV32ZKSED-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i32 2)
+  %val = call i32 @llvm.riscv.sm4ks(i32 %a, i32 %b, i32 2)
   ret i32 %val
 }
 
-declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i32);
+declare i32 @llvm.riscv.sm4ed(i32, i32, i32);
 
 define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind {
 ; RV32ZKSED-LABEL: sm4ed_i32:
 ; RV32ZKSED:       # %bb.0:
 ; RV32ZKSED-NEXT:    sm4ed a0, a0, a1, 3
 ; RV32ZKSED-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i32 3)
+  %val = call i32 @llvm.riscv.sm4ed(i32 %a, i32 %b, i32 3)
   ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
@@ -3,47 +3,47 @@
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKNH
 
 
-declare i32 @llvm.riscv.sha256sig0.i32(i32);
+declare i32 @llvm.riscv.sha256sig0(i32);
 
 define i32 @sha256sig0_i32(i32 %a) nounwind {
 ; RV32ZKNH-LABEL: sha256sig0_i32:
 ; RV32ZKNH:       # %bb.0:
 ; RV32ZKNH-NEXT:    sha256sig0 a0, a0
 ; RV32ZKNH-NEXT:    ret
-    %val = call i32 @llvm.riscv.sha256sig0.i32(i32 %a)
+    %val = call i32 @llvm.riscv.sha256sig0(i32 %a)
     ret i32 %val
 }
 
-declare i32 @llvm.riscv.sha256sig1.i32(i32);
+declare i32 @llvm.riscv.sha256sig1(i32);
 
 define i32 @sha256sig1_i32(i32 %a) nounwind {
 ; RV32ZKNH-LABEL: sha256sig1_i32:
 ; RV32ZKNH:       # %bb.0:
 ; RV32ZKNH-NEXT:    sha256sig1 a0, a0
 ; RV32ZKNH-NEXT:    ret
-    %val = call i32 @llvm.riscv.sha256sig1.i32(i32 %a)
+    %val = call i32 @llvm.riscv.sha256sig1(i32 %a)
     ret i32 %val
 }
 
-declare i32 @llvm.riscv.sha256sum0.i32(i32);
+declare i32 @llvm.riscv.sha256sum0(i32);
 
 define i32 @sha256sum0_i32(i32 %a) nounwind {
 ; RV32ZKNH-LABEL: sha256sum0_i32:
 ; RV32ZKNH:       # %bb.0:
 ; RV32ZKNH-NEXT:    sha256sum0 a0, a0
 ; RV32ZKNH-NEXT:    ret
-    %val = call i32 @llvm.riscv.sha256sum0.i32(i32 %a)
+    %val = call i32 @llvm.riscv.sha256sum0(i32 %a)
     ret i32 %val
 }
 
-declare i32 @llvm.riscv.sha256sum1.i32(i32);
+declare i32 @llvm.riscv.sha256sum1(i32);
 
 define i32 @sha256sum1_i32(i32 %a) nounwind {
 ; RV32ZKNH-LABEL: sha256sum1_i32:
 ; RV32ZKNH:       # %bb.0:
 ; RV32ZKNH-NEXT:    sha256sum1 a0, a0
 ; RV32ZKNH-NEXT:    ret
-    %val = call i32 @llvm.riscv.sha256sum1.i32(i32 %a)
+    %val = call i32 @llvm.riscv.sha256sum1(i32 %a)
     ret i32 %val
 }
 
Index: llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -15,6 +15,21 @@
 // Operand and SDNode transformation definitions.
 //===----------------------------------------------------------------------===//
 
+def riscv_sha256sig0 : SDNode<"RISCVISD::SHA256SIG0", SDTIntUnaryOp>;
+def riscv_sha256sig1 : SDNode<"RISCVISD::SHA256SIG1", SDTIntUnaryOp>;
+def riscv_sha256sum0 : SDNode<"RISCVISD::SHA256SUM0", SDTIntUnaryOp>;
+def riscv_sha256sum1 : SDNode<"RISCVISD::SHA256SUM1", SDTIntUnaryOp>;
+
+def SDT_RISCVZkByteSelect : SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
+                                                 SDTCisVT<1, XLenVT>,
+                                                 SDTCisVT<2, XLenVT>,
+                                                 SDTCisVT<3, i32>]>;
+def riscv_sm4ks : SDNode<"RISCVISD::SM4KS", SDT_RISCVZkByteSelect>;
+def riscv_sm4ed : SDNode<"RISCVISD::SM4ED", SDT_RISCVZkByteSelect>;
+
+def riscv_sm3p0 : SDNode<"RISCVISD::SM3P0", SDTIntUnaryOp>;
+def riscv_sm3p1 : SDNode<"RISCVISD::SM3P1", SDTIntUnaryOp>;
+
 def RnumArg : AsmOperandClass {
   let Name = "RnumArg";
   let RenderMethod = "addImmOperands";
@@ -119,12 +134,12 @@
 def SHA512SUM1 : RVKUnary<0b000100000101, 0b001, "sha512sum1">;
 } // Predicates = [HasStdExtZknh, IsRV64]
 
-let Predicates = [HasStdExtZksed] in {
+let Predicates = [HasStdExtZksed], IsSignExtendingOpW = 1 in {
 def SM4ED : RVKByteSelect<0b11000, "sm4ed">;
 def SM4KS : RVKByteSelect<0b11010, "sm4ks">;
 } // Predicates = [HasStdExtZksed]
 
-let Predicates = [HasStdExtZksh] in {
+let Predicates = [HasStdExtZksh], IsSignExtendingOpW = 1 in {
 def SM3P0 : RVKUnary<0b000100001000, 0b001, "sm3p0">;
 def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
 } // Predicates = [HasStdExtZksh]
@@ -168,10 +183,10 @@
 
 // Zknh
 let Predicates = [HasStdExtZknh] in {
-def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
-def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
-def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
-def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+def : PatGpr<riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<riscv_sha256sum1, SHA256SUM1>;
 } // Predicates = [HasStdExtZknh]
 
 let Predicates = [HasStdExtZknh, IsRV32] in {
@@ -192,12 +207,12 @@
 
 // Zksed
 let Predicates = [HasStdExtZksed] in {
-def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
-def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+def : PatGprGprByteSelect<riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<riscv_sm4ed, SM4ED>;
 } // Predicates = [HasStdExtZksed]
 
 // Zksh
 let Predicates = [HasStdExtZksh] in {
-def : PatGpr<int_riscv_sm3p0, SM3P0>;
-def : PatGpr<int_riscv_sm3p1, SM3P1>;
+def : PatGpr<riscv_sm3p0, SM3P0>;
+def : PatGpr<riscv_sm3p1, SM3P1>;
 } // Predicates = [HasStdExtZksh]
Index: llvm/lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -128,6 +128,12 @@
   ORC_B,
   ZIP,
   UNZIP,
+
+  // Scalar crypto.
+  SHA256SIG0, SHA256SIG1, SHA256SUM0, SHA256SUM1,
+  SM4KS, SM4ED,
+  SM3P0, SM3P1,
+
   // Vector Extension
   // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
   // for the VL value to be used for the operation. The first operand is
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6877,11 +6877,34 @@
     return DAG.getRegister(RISCV::X4, PtrVT);
   }
   case Intrinsic::riscv_orc_b:
-  case Intrinsic::riscv_brev8: {
-    unsigned Opc =
-        IntNo == Intrinsic::riscv_brev8 ? RISCVISD::BREV8 : RISCVISD::ORC_B;
+  case Intrinsic::riscv_brev8:
+  case Intrinsic::riscv_sha256sig0:
+  case Intrinsic::riscv_sha256sig1:
+  case Intrinsic::riscv_sha256sum0:
+  case Intrinsic::riscv_sha256sum1:
+  case Intrinsic::riscv_sm3p0:
+  case Intrinsic::riscv_sm3p1: {
+    unsigned Opc;
+    switch (IntNo) {
+    case Intrinsic::riscv_orc_b:      Opc = RISCVISD::ORC_B;      break;
+    case Intrinsic::riscv_brev8:      Opc = RISCVISD::BREV8;      break;
+    case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
+    case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
+    case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
+    case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
+    case Intrinsic::riscv_sm3p0:      Opc = RISCVISD::SM3P0;      break;
+    case Intrinsic::riscv_sm3p1:      Opc = RISCVISD::SM3P1;      break;
+    }
+
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
+  case Intrinsic::riscv_sm4ks:
+  case Intrinsic::riscv_sm4ed: {
+    unsigned Opc =
+        IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
+    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
+                       Op.getOperand(3));
+  }
   case Intrinsic::riscv_zip:
   case Intrinsic::riscv_unzip: {
     unsigned Opc =
@@ -9749,10 +9772,40 @@
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
-    case Intrinsic::riscv_orc_b: {
+    case Intrinsic::riscv_orc_b:
+    case Intrinsic::riscv_sha256sig0:
+    case Intrinsic::riscv_sha256sig1:
+    case Intrinsic::riscv_sha256sum0:
+    case Intrinsic::riscv_sha256sum1:
+    case Intrinsic::riscv_sm3p0:
+    case Intrinsic::riscv_sm3p1: {
+      unsigned Opc;
+      switch (IntNo) {
+      case Intrinsic::riscv_orc_b:      Opc = RISCVISD::ORC_B;      break;
+      case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
+      case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
+      case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
+      case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
+      case Intrinsic::riscv_sm3p0:      Opc = RISCVISD::SM3P0;      break;
+      case Intrinsic::riscv_sm3p1:      Opc = RISCVISD::SM3P1;      break;
+      }
+
       SDValue NewOp =
           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
-      SDValue Res = DAG.getNode(RISCVISD::ORC_B, DL, MVT::i64, NewOp);
+      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp);
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      return;
+    }
+    case Intrinsic::riscv_sm4ks:
+    case Intrinsic::riscv_sm4ed: {
+      unsigned Opc =
+          IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+      SDValue Res =
+          DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, N->getOperand(3));
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
@@ -15645,6 +15698,14 @@
   NODE_NAME_CASE(ORC_B)
   NODE_NAME_CASE(ZIP)
   NODE_NAME_CASE(UNZIP)
+  NODE_NAME_CASE(SHA256SIG0)
+  NODE_NAME_CASE(SHA256SIG1)
+  NODE_NAME_CASE(SHA256SUM0)
+  NODE_NAME_CASE(SHA256SUM1)
+  NODE_NAME_CASE(SM4KS)
+  NODE_NAME_CASE(SM4ED)
+  NODE_NAME_CASE(SM3P0)
+  NODE_NAME_CASE(SM3P1)
   NODE_NAME_CASE(TH_LWD)
   NODE_NAME_CASE(TH_LWUD)
   NODE_NAME_CASE(TH_LDD)
Index: llvm/lib/IR/AutoUpgrade.cpp
===================================================================
--- llvm/lib/IR/AutoUpgrade.cpp
+++ llvm/lib/IR/AutoUpgrade.cpp
@@ -1235,17 +1235,57 @@
       return true;
     }
     if (Name.startswith("riscv.sm4ks") &&
-        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+        (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
+         F->getFunctionType()->getReturnType()->isIntegerTy(64))) {
       rename(F);
-      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ks,
-                                        F->getReturnType());
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ks);
       return true;
     }
     if (Name.startswith("riscv.sm4ed") &&
-        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+        (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
+         F->getFunctionType()->getReturnType()->isIntegerTy(64))) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ed);
+      return true;
+    }
+    if (Name.startswith("riscv.sha256sig0") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(),
+                                        Intrinsic::riscv_sha256sig0);
+      return true;
+    }
+    if (Name.startswith("riscv.sha256sig1") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(),
+                                        Intrinsic::riscv_sha256sig1);
+      return true;
+    }
+    if (Name.startswith("riscv.sha256sum0") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(),
+                                        Intrinsic::riscv_sha256sum0);
+      return true;
+    }
+    if (Name.startswith("riscv.sha256sum1") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(),
+                                        Intrinsic::riscv_sha256sum1);
+      return true;
+    }
+    if (Name.startswith("riscv.sm3p0") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
       rename(F);
-      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ed,
-                                        F->getReturnType());
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm3p0);
+      return true;
+    }
+    if (Name.startswith("riscv.sm3p1") &&
+        F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm3p1);
       return true;
     }
     break;
@@ -4426,15 +4466,51 @@
   case Intrinsic::riscv_sm4ks:
   case Intrinsic::riscv_sm4ed: {
     // The last argument to these intrinsics used to be i8 and changed to i32.
+    // The type overload for sm4ks and sm4ed was removed.
     Value *Arg2 = CI->getArgOperand(2);
-    if (Arg2->getType()->isIntegerTy(32))
+    if (Arg2->getType()->isIntegerTy(32) && !CI->getType()->isIntegerTy(64))
       return;
 
-    Arg2 = ConstantInt::get(Type::getInt32Ty(C), cast<ConstantInt>(Arg2)->getZExtValue());
+    Value *Arg0 = CI->getArgOperand(0);
+    Value *Arg1 = CI->getArgOperand(1);
+    if (CI->getType()->isIntegerTy(64)) {
+      Arg0 = Builder.CreateTrunc(Arg0, Builder.getInt32Ty());
+      Arg1 = Builder.CreateTrunc(Arg1, Builder.getInt32Ty());
+    }
 
-    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0),
-                                 CI->getArgOperand(1), Arg2});
-    break;
+    Arg2 = ConstantInt::get(Type::getInt32Ty(C),
+                            cast<ConstantInt>(Arg2)->getZExtValue());
+
+    NewCall = Builder.CreateCall(NewFn, {Arg0, Arg1, Arg2});
+    Value *Res = NewCall;
+    if (Res->getType() != CI->getType())
+      Res = Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
+    NewCall->takeName(CI);
+    CI->replaceAllUsesWith(Res);
+    CI->eraseFromParent();
+    return;
+  }
+  case Intrinsic::riscv_sha256sig0:
+  case Intrinsic::riscv_sha256sig1:
+  case Intrinsic::riscv_sha256sum0:
+  case Intrinsic::riscv_sha256sum1:
+  case Intrinsic::riscv_sm3p0:
+  case Intrinsic::riscv_sm3p1: {
+    // These intrinsics used to be polymorphic on XLen. The type overload
+    // was removed and they now always use i32.
+    if (!CI->getType()->isIntegerTy(64))
+      return;
+
+    Value *Arg =
+        Builder.CreateTrunc(CI->getArgOperand(0), Builder.getInt32Ty());
+
+    NewCall = Builder.CreateCall(NewFn, Arg);
+    Value *Res =
+        Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
+    NewCall->takeName(CI);
+    CI->replaceAllUsesWith(Res);
+    CI->eraseFromParent();
+    return;
   }
 
   case Intrinsic::x86_xop_vfrcz_ss:
Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1581,11 +1581,6 @@
 
 let TargetPrefix = "riscv" in {
 
-class ScalarCryptoGprIntrinsicAny
-    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
-                [LLVMMatchType<0>],
-                [IntrNoMem, IntrSpeculatable]>;
-
 class ScalarCryptoByteSelect32
     : DefaultAttrsIntrinsic<[llvm_i32_ty],
                             [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
@@ -1602,16 +1597,16 @@
                             [llvm_i64_ty, llvm_i64_ty],
                             [IntrNoMem, IntrSpeculatable]>;
 
+class ScalarCryptoGprIntrinsic32
+    : DefaultAttrsIntrinsic<[llvm_i32_ty],
+                            [llvm_i32_ty],
+                            [IntrNoMem, IntrSpeculatable]>;
+
 class ScalarCryptoGprIntrinsic64
     : DefaultAttrsIntrinsic<[llvm_i64_ty],
                             [llvm_i64_ty],
                             [IntrNoMem, IntrSpeculatable]>;
 
-class ScalarCryptoByteSelectAny
-    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
-                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
-                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
-
 // Zknd
 def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                           ClangBuiltin<"__builtin_riscv_aes32dsi_32">;
@@ -1647,10 +1642,10 @@
                           ClangBuiltin<"__builtin_riscv_aes64ks1i_64">;
 
 // Zknh
-def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
-def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
-def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
-def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;
 
 def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                             ClangBuiltin<"__builtin_riscv_sha512sig0l_32">;
@@ -1675,12 +1670,12 @@
                            ClangBuiltin<"__builtin_riscv_sha512sum1_64">;
 
 // Zksed
-def int_riscv_sm4ks      : ScalarCryptoByteSelectAny;
-def int_riscv_sm4ed      : ScalarCryptoByteSelectAny;
+def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
+def int_riscv_sm4ed      : ScalarCryptoByteSelect32;
 
 // Zksh
-def int_riscv_sm3p0      : ScalarCryptoGprIntrinsicAny;
-def int_riscv_sm3p1      : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
+def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c
@@ -4,25 +4,25 @@
 
 // RV64ZKSH-LABEL: @sm3p0(
 // RV64ZKSH-NEXT:  entry:
-// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sm3p0.i64(i64 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
 //
-long sm3p0(long rs1) {
+int sm3p0(int rs1) {
   return __builtin_riscv_sm3p0(rs1);
 }
 
 
 // RV64ZKSH-LABEL: @sm3p1(
 // RV64ZKSH-NEXT:  entry:
-// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sm3p1.i64(i64 [[TMP0]])
-// RV64ZKSH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
+// RV64ZKSH-NEXT:    ret i32 [[TMP1]]
 //
-long sm3p1(long rs1) {
+int sm3p1(int rs1) {
   return __builtin_riscv_sm3p1(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
@@ -4,30 +4,30 @@
 
 // RV64ZKSED-LABEL: @sm4ks(
 // RV64ZKSED-NEXT:  entry:
-// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSED-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKSED-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ks.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT:    ret i64 [[TMP2]]
+// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
+// RV64ZKSED-NEXT:    ret i32 [[TMP2]]
 //
-long sm4ks(long rs1, long rs2) {
+int sm4ks(int rs1, int rs2) {
   return __builtin_riscv_sm4ks(rs1, rs2, 0);
 }
 
 // RV64ZKSED-LABEL: @sm4ed(
 // RV64ZKSED-NEXT:  entry:
-// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKSED-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKSED-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ed.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT:    ret i64 [[TMP2]]
+// RV64ZKSED-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSED-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKSED-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
+// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
+// RV64ZKSED-NEXT:    ret i32 [[TMP2]]
 //
-long sm4ed(long rs1, long rs2) {
+int sm4ed(int rs1, int rs2) {
   return __builtin_riscv_sm4ed(rs1, rs2, 0);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
@@ -57,49 +57,49 @@
 
 // RV64ZKNH-LABEL: @sha256sig0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sig0.i64(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sig0(long rs1) {
+int sha256sig0(int rs1) {
   return __builtin_riscv_sha256sig0(rs1);
 }
 
 // RV64ZKNH-LABEL: @sha256sig1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sig1.i64(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sig1(long rs1) {
+int sha256sig1(int rs1) {
   return __builtin_riscv_sha256sig1(rs1);
 }
 
 
 // RV64ZKNH-LABEL: @sha256sum0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sum0.i64(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sum0(long rs1) {
+int sha256sum0(int rs1) {
   return __builtin_riscv_sha256sum0(rs1);
 }
 
 // RV64ZKNH-LABEL: @sha256sum1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sum1.i64(i64 [[TMP0]])
-// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
+// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sum1(long rs1) {
+int sha256sum1(int rs1) {
   return __builtin_riscv_sha256sum1(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c
@@ -7,10 +7,10 @@
 // RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0.i32(i32 [[TMP0]])
+// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
 // RV32ZKSH-NEXT:    ret i32 [[TMP1]]
 //
-long sm3p0(long rs1)
+int sm3p0(int rs1)
 {
   return __builtin_riscv_sm3p0(rs1);
 }
@@ -20,9 +20,9 @@
 // RV32ZKSH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKSH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKSH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1.i32(i32 [[TMP0]])
+// RV32ZKSH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
 // RV32ZKSH-NEXT:    ret i32 [[TMP1]]
 //
-long sm3p1(long rs1) {
+int sm3p1(int rs1) {
   return __builtin_riscv_sm3p1(rs1);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
@@ -10,10 +10,10 @@
 // RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0)
+// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
 // RV32ZKSED-NEXT:    ret i32 [[TMP2]]
 //
-long sm4ks(long rs1, long rs2) {
+int sm4ks(int rs1, int rs2) {
   return __builtin_riscv_sm4ks(rs1, rs2, 0);
 }
 
@@ -26,9 +26,9 @@
 // RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0)
+// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
 // RV32ZKSED-NEXT:    ret i32 [[TMP2]]
 //
-long sm4ed(long rs1, long rs2) {
+int sm4ed(int rs1, int rs2) {
   return __builtin_riscv_sm4ed(rs1, rs2, 0);
 }
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c
@@ -7,10 +7,10 @@
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0.i32(i32 [[TMP0]])
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
 // RV32ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sig0(long rs1) {
+int sha256sig0(int rs1) {
   return __builtin_riscv_sha256sig0(rs1);
 }
 
@@ -19,10 +19,10 @@
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1.i32(i32 [[TMP0]])
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
 // RV32ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sig1(long rs1) {
+int sha256sig1(int rs1) {
   return __builtin_riscv_sha256sig1(rs1);
 }
 
@@ -31,10 +31,10 @@
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0.i32(i32 [[TMP0]])
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
 // RV32ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sum0(long rs1) {
+int sha256sum0(int rs1) {
   return __builtin_riscv_sha256sum0(rs1);
 }
 
@@ -43,10 +43,10 @@
 // RV32ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
 // RV32ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
 // RV32ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1.i32(i32 [[TMP0]])
+// RV32ZKNH-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
 // RV32ZKNH-NEXT:    ret i32 [[TMP1]]
 //
-long sha256sum1(long rs1) {
+int sha256sum1(int rs1) {
   return __builtin_riscv_sha256sum1(rs1);
 }
 
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -20233,39 +20233,31 @@
   // Zknh
   case RISCV::BI__builtin_riscv_sha256sig0:
     ID = Intrinsic::riscv_sha256sig0;
-    IntrinsicTypes = {ResultType};
     break;
   case RISCV::BI__builtin_riscv_sha256sig1:
     ID = Intrinsic::riscv_sha256sig1;
-    IntrinsicTypes = {ResultType};
     break;
   case RISCV::BI__builtin_riscv_sha256sum0:
     ID = Intrinsic::riscv_sha256sum0;
-    IntrinsicTypes = {ResultType};
     break;
   case RISCV::BI__builtin_riscv_sha256sum1:
     ID = Intrinsic::riscv_sha256sum1;
-    IntrinsicTypes = {ResultType};
     break;
 
   // Zksed
   case RISCV::BI__builtin_riscv_sm4ks:
     ID = Intrinsic::riscv_sm4ks;
-    IntrinsicTypes = {ResultType};
     break;
   case RISCV::BI__builtin_riscv_sm4ed:
     ID = Intrinsic::riscv_sm4ed;
-    IntrinsicTypes = {ResultType};
     break;
 
   // Zksh
   case RISCV::BI__builtin_riscv_sm3p0:
     ID = Intrinsic::riscv_sm3p0;
-    IntrinsicTypes = {ResultType};
     break;
   case RISCV::BI__builtin_riscv_sm3p1:
     ID = Intrinsic::riscv_sm3p1;
-    IntrinsicTypes = {ResultType};
     break;
 
   // Zihintntl
Index: clang/include/clang/Basic/BuiltinsRISCV.def
===================================================================
--- clang/include/clang/Basic/BuiltinsRISCV.def
+++ clang/include/clang/Basic/BuiltinsRISCV.def
@@ -55,10 +55,10 @@
 TARGET_BUILTIN(__builtin_riscv_aes64esm_64, "WiWiWi", "nc", "zkne,64bit")
 
 // Zknh extension
-TARGET_BUILTIN(__builtin_riscv_sha256sig0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sig1, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum1, "LiLi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig0, "ii", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig1, "ii", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum0, "ii", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum1, "ii", "nc", "zknh")
 
 TARGET_BUILTIN(__builtin_riscv_sha512sig0h_32, "ZiZiZi", "nc", "zknh,32bit")
 TARGET_BUILTIN(__builtin_riscv_sha512sig0l_32, "ZiZiZi", "nc", "zknh,32bit")
@@ -72,12 +72,12 @@
 TARGET_BUILTIN(__builtin_riscv_sha512sum1_64, "WiWi", "nc", "zknh,64bit")
 
 // Zksed extension
-TARGET_BUILTIN(__builtin_riscv_sm4ed, "LiLiLiIUi", "nc", "zksed")
-TARGET_BUILTIN(__builtin_riscv_sm4ks, "LiLiLiIUi", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ed, "iiiIUi", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ks, "iiiIUi", "nc", "zksed")
 
 // Zksh extension
-TARGET_BUILTIN(__builtin_riscv_sm3p0, "LiLi", "nc", "zksh")
-TARGET_BUILTIN(__builtin_riscv_sm3p1, "LiLi", "nc", "zksh")
+TARGET_BUILTIN(__builtin_riscv_sm3p0, "ii", "nc", "zksh")
+TARGET_BUILTIN(__builtin_riscv_sm3p1, "ii", "nc", "zksh")
 
 // Zihintntl extension
 TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "experimental-zihintntl")