huntergr created this revision.

Initial changes for clang to use the scalable type size queries (https://reviews.llvm.org/D53137).

This isn't ready for inclusion yet; more discussion is required on the mailing list.
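
For context, these changes depend on the scalable size query interface proposed in D53137. A minimal sketch of the assumed result type, inferred only from how it is used in the hunks below (the real definition lives in D53137):

  // Illustrative approximation of the value returned by
  // llvm::Type::getScalableSizeInBits(); not the actual D53137 code.
  struct ScalableSize {
    uint64_t Unscaled; // bits known at compile time
    uint64_t Scaled;   // bits multiplied by the runtime vector scale (vscale)

    bool operator==(const ScalableSize &Other) const {
      return Unscaled == Other.Unscaled && Scaled == Other.Scaled;
    }
  };

For fixed-width vectors the Scaled component is zero, so the existing
getPrimitiveSizeInBits() comparisons map onto the Unscaled component
(e.g. Ty->getScalableSizeInBits().Unscaled == 64).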


https://reviews.llvm.org/D53138

Files:
  lib/CodeGen/CGBuiltin.cpp
  lib/CodeGen/CGExprScalar.cpp
  lib/CodeGen/CGStmt.cpp
  lib/CodeGen/TargetInfo.cpp

Index: lib/CodeGen/TargetInfo.cpp
===================================================================
--- lib/CodeGen/TargetInfo.cpp
+++ lib/CodeGen/TargetInfo.cpp
@@ -890,9 +890,10 @@
 /// IsX86_MMXType - Return true if this is an MMX type.
 bool IsX86_MMXType(llvm::Type *IRType) {
   // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
-  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
-    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
-    IRType->getScalarSizeInBits() != 64;
+  auto Size = IRType->getScalableSizeInBits();
+  return IRType->isVectorTy() && Size.Scaled == 0 &&
+    Size.Unscaled == 64 && IRType->getScalarSizeInBits() != 64 &&
+    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy();
 }
 
 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
Index: lib/CodeGen/CGStmt.cpp
===================================================================
--- lib/CodeGen/CGStmt.cpp
+++ lib/CodeGen/CGStmt.cpp
@@ -1985,7 +1985,7 @@
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
         LargestVectorWidth = std::max(LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits());
+                                      VT->getScalableSizeInBits().Unscaled);
     } else {
       ArgTypes.push_back(Dest.getAddress().getType());
       Args.push_back(Dest.getPointer());
@@ -2010,7 +2010,7 @@
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
         LargestVectorWidth = std::max(LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits());
+                                      VT->getScalableSizeInBits().Unscaled);
       if (Info.allowsRegister())
         InOutConstraints += llvm::utostr(i);
       else
@@ -2094,7 +2094,7 @@
     // Update largest vector width for any vector types.
     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
       LargestVectorWidth = std::max(LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits());
+                                    VT->getScalableSizeInBits().Unscaled);
 
     ArgTypes.push_back(Arg->getType());
     Args.push_back(Arg);
Index: lib/CodeGen/CGExprScalar.cpp
===================================================================
--- lib/CodeGen/CGExprScalar.cpp
+++ lib/CodeGen/CGExprScalar.cpp
@@ -1079,8 +1079,8 @@
 
   if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
     // Allow bitcast from vector to integer/fp of the same size.
-    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
-    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
+    auto SrcSize = SrcTy->getScalableSizeInBits();
+    auto DstSize = DstTy->getScalableSizeInBits();
     if (SrcSize == DstSize)
       return Builder.CreateBitCast(Src, DstTy, "conv");
 
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -4790,8 +4790,8 @@
   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
        ai != ae; ++ai, ++j) {
     llvm::Type *ArgTy = ai->getType();
-    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
-             ArgTy->getPrimitiveSizeInBits())
+    if (Ops[j]->getType()->getScalableSizeInBits() ==
+             ArgTy->getScalableSizeInBits())
       continue;
 
     assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
@@ -4805,8 +4805,8 @@
 
   Value *Result = CGF.EmitNeonCall(F, Ops, s);
   llvm::Type *ResultType = CGF.ConvertType(E->getType());
-  if (ResultType->getPrimitiveSizeInBits() <
-      Result->getType()->getPrimitiveSizeInBits())
+  if (ResultType->getScalableSizeInBits() <
+      Result->getType()->getScalableSizeInBits())
     return CGF.Builder.CreateExtractElement(Result, C0);
 
   return CGF.Builder.CreateBitCast(Result, ResultType, s);
@@ -5336,7 +5336,7 @@
   case NEON::BI__builtin_neon_vdot_v:
   case NEON::BI__builtin_neon_vdotq_v: {
     llvm::Type *InputTy =
-        llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+        llvm::VectorType::get(Int8Ty, Ty->getScalableSizeInBits().Unscaled / 8);
     llvm::Type *Tys[2] = { Ty, InputTy };
     Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
@@ -6735,7 +6735,7 @@
   case NEON::BI__builtin_neon_vcvts_s32_f32:
   case NEON::BI__builtin_neon_vcvtd_s64_f64: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+    bool Is64 = Ops[0]->getType()->getScalableSizeInBits().Unscaled == 64;
     llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
     llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
     Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
@@ -6750,7 +6750,7 @@
   case NEON::BI__builtin_neon_vcvts_f32_s32:
   case NEON::BI__builtin_neon_vcvtd_f64_s64: {
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
+    bool Is64 = Ops[0]->getType()->getScalableSizeInBits().Unscaled == 64;
     llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
     llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
     Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
@@ -6769,9 +6769,9 @@
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
     llvm::Type *FTy = HalfTy;
     llvm::Type *InTy;
-    if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
+    if (Ops[0]->getType()->getScalableSizeInBits().Unscaled == 64)
       InTy = Int64Ty;
-    else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
+    else if (Ops[0]->getType()->getScalableSizeInBits().Unscaled == 32)
       InTy = Int32Ty;
     else
       InTy = Int16Ty;
@@ -8879,7 +8879,7 @@
   Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
   Value *Res;
   if (Rnd != 4) {
-    Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
+    Intrinsic::ID IID = Ops[0]->getType()->getScalableSizeInBits().Unscaled == 32 ?
                         Intrinsic::x86_avx512_vfmadd_f32 :
                         Intrinsic::x86_avx512_vfmadd_f64;
     Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
@@ -8908,8 +8908,9 @@
                            ArrayRef<Value *> Ops) {
   llvm::Type *Ty = Ops[0]->getType();
   // Arguments have a vXi32 type so cast to vXi64.
+  auto Size = Ty->getScalableSizeInBits();
   Ty = llvm::VectorType::get(CGF.Int64Ty,
-                             Ty->getPrimitiveSizeInBits() / 64);
+                             Size.Unscaled / 64);
   Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
   Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
 
@@ -8937,7 +8938,7 @@
                              ArrayRef<Value *> Ops) {
   llvm::Type *Ty = Ops[0]->getType();
 
-  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
+  unsigned VecWidth = Ty->getScalableSizeInBits().Unscaled;
   unsigned EltWidth = Ty->getScalarSizeInBits();
   Intrinsic::ID IID;
   if (VecWidth == 128 && EltWidth == 32)
@@ -9729,7 +9730,8 @@
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
     llvm::Type *Ty = Ops[0]->getType();
     unsigned NumElts = Ty->getVectorNumElements();
-    unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
+    auto Size = Ty->getScalableSizeInBits();
+    unsigned NumLanes = Size.Unscaled / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -9756,7 +9758,8 @@
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
     llvm::Type *Ty = Ops[0]->getType();
     unsigned NumElts = Ty->getVectorNumElements();
-    unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
+    auto Size = Ty->getScalableSizeInBits();
+    unsigned NumLanes = Size.Unscaled / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -9862,7 +9865,7 @@
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
     llvm::Type *Ty = Ops[0]->getType();
     unsigned NumElts = Ty->getVectorNumElements();
-    unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
+    unsigned NumLanes = Ty->getScalableSizeInBits().Unscaled == 512 ? 4 : 2;
     unsigned NumLaneElts = NumElts / NumLanes;
 
     uint32_t Indices[16];