https://github.com/BeMg updated https://github.com/llvm/llvm-project/pull/85786
>From 48eea6eda33c4e73316fe938a15d8e361039072e Mon Sep 17 00:00:00 2001 From: Piyou Chen <piyou.c...@sifive.com> Date: Wed, 5 Jun 2024 01:17:03 -0700 Subject: [PATCH 1/2] [RISCV] Add groupid/bitmask for RISC-V extension Based on https://github.com/riscv-non-isa/riscv-c-api-doc/pull/74. This patch defines the groupid/bitmask in RISCVFeatures.td and generates the corresponding table in RISCVTargetParserDef.inc. The groupid/bitmask of extensions provides an abstraction layer between the compiler and runtime functions. --- .../llvm/TargetParser/RISCVTargetParser.h | 8 + llvm/lib/Target/RISCV/RISCVFeatures.td | 301 ++++++++++++------ llvm/lib/TargetParser/RISCVTargetParser.cpp | 32 ++ llvm/test/TableGen/riscv-target-def.td | 26 +- llvm/utils/TableGen/RISCVTargetDefEmitter.cpp | 52 +++ 5 files changed, 318 insertions(+), 101 deletions(-) diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h index 5b1494efe7bdc..8444935bd666d 100644 --- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h +++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h @@ -24,6 +24,14 @@ class Triple; namespace RISCV { +namespace RISCVExtensionBitmaskTable { +struct RISCVExtensionBitmask { + const char *Name; + unsigned GroupID; + uint64_t Bitmask; +}; +} // namespace RISCVExtensionBitmaskTable + // We use 64 bits as the known part in the scalable vector types. static constexpr unsigned RVVBitsPerBlock = 64; diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 011edca019fd6..624bd3f408858 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -37,6 +37,15 @@ class RISCVExtension<string name, int major, int minor, string desc, bit Experimental = false; } +// The groupID/bitmask of RISCVExtension is used to retrieve a specific bit value +// from __riscv_feature_bits based on the groupID and bitmask. +// groupID - groupID of extension +// bitmaskShift - bitmask shift of extension +class RISCVExtensionBitmask<bits<3> groupID, int bitmaskShift> { + bits<3> GroupID = groupID; + bits<64> Bitmask = !shl(1, bitmaskShift); +} + // Version of RISCVExtension to be used for Experimental extensions. This // sets the Experimental flag and prepends experimental- to the -mattr name. 
class RISCVExperimentalExtension<string name, int major, int minor, string desc, @@ -52,56 +61,67 @@ class RISCVExperimentalExtension<string name, int major, int minor, string desc, def FeatureStdExtI : RISCVExtension<"i", 2, 1, - "'I' (Base Integer Instruction Set)">; + "'I' (Base Integer Instruction Set)">, + RISCVExtensionBitmask<0, 0>; def FeatureStdExtE : RISCVExtension<"e", 2, 0, - "Implements RV{32,64}E (provides 16 rather than 32 GPRs)">; + "Implements RV{32,64}E (provides 16 rather than 32 GPRs)">, + RISCVExtensionBitmask<0, 1>; def FeatureStdExtZic64b : RISCVExtension<"zic64b", 1, 0, - "'Zic64b' (Cache Block Size Is 64 Bytes)">; + "'Zic64b' (Cache Block Size Is 64 Bytes)">, + RISCVExtensionBitmask<0, 2>; def FeatureStdExtZicbom : RISCVExtension<"zicbom", 1, 0, - "'Zicbom' (Cache-Block Management Instructions)">; + "'Zicbom' (Cache-Block Management Instructions)">, + RISCVExtensionBitmask<0, 3>; def HasStdExtZicbom : Predicate<"Subtarget->hasStdExtZicbom()">, AssemblerPredicate<(all_of FeatureStdExtZicbom), "'Zicbom' (Cache-Block Management Instructions)">; def FeatureStdExtZicbop : RISCVExtension<"zicbop", 1, 0, - "'Zicbop' (Cache-Block Prefetch Instructions)">; + "'Zicbop' (Cache-Block Prefetch Instructions)">, + RISCVExtensionBitmask<0, 4>; def HasStdExtZicbop : Predicate<"Subtarget->hasStdExtZicbop()">, AssemblerPredicate<(all_of FeatureStdExtZicbop), "'Zicbop' (Cache-Block Prefetch Instructions)">; def FeatureStdExtZicboz : RISCVExtension<"zicboz", 1, 0, - "'Zicboz' (Cache-Block Zero Instructions)">; + "'Zicboz' (Cache-Block Zero Instructions)">, + RISCVExtensionBitmask<0, 5>; def HasStdExtZicboz : Predicate<"Subtarget->hasStdExtZicboz()">, AssemblerPredicate<(all_of FeatureStdExtZicboz), "'Zicboz' (Cache-Block Zero Instructions)">; def FeatureStdExtZiccamoa : RISCVExtension<"ziccamoa", 1, 0, - "'Ziccamoa' (Main Memory Supports All Atomics in A)">; + "'Ziccamoa' (Main Memory Supports All Atomics in A)">, + RISCVExtensionBitmask<0, 6>; def FeatureStdExtZiccif : RISCVExtension<"ziccif", 1, 0, - "'Ziccif' (Main Memory Supports Instruction Fetch with Atomicity Requirement)">; + "'Ziccif' (Main Memory Supports Instruction Fetch with Atomicity Requirement)">, + RISCVExtensionBitmask<0, 7>; def FeatureStdExtZicclsm : RISCVExtension<"zicclsm", 1, 0, - "'Zicclsm' (Main Memory Supports Misaligned Loads/Stores)">; + "'Zicclsm' (Main Memory Supports Misaligned Loads/Stores)">, + RISCVExtensionBitmask<0, 8>; def FeatureStdExtZiccrse : RISCVExtension<"ziccrse", 1, 0, - "'Ziccrse' (Main Memory Supports Forward Progress on LR/SC Sequences)">; + "'Ziccrse' (Main Memory Supports Forward Progress on LR/SC Sequences)">, + RISCVExtensionBitmask<0, 9>; def FeatureStdExtZicsr : RISCVExtension<"zicsr", 2, 0, - "'zicsr' (CSRs)">; + "'zicsr' (CSRs)">, + RISCVExtensionBitmask<0, 10>; def HasStdExtZicsr : Predicate<"Subtarget->hasStdExtZicsr()">, AssemblerPredicate<(all_of FeatureStdExtZicsr), "'Zicsr' (CSRs)">; @@ -109,32 +129,37 @@ def HasStdExtZicsr : Predicate<"Subtarget->hasStdExtZicsr()">, def FeatureStdExtZicntr : RISCVExtension<"zicntr", 2, 0, "'Zicntr' (Base Counters and Timers)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 11>; def FeatureStdExtZicond : RISCVExtension<"zicond", 1, 0, - "'Zicond' (Integer Conditional Operations)">; + "'Zicond' (Integer Conditional Operations)">, + RISCVExtensionBitmask<0, 12>; def HasStdExtZicond : Predicate<"Subtarget->hasStdExtZicond()">, AssemblerPredicate<(all_of FeatureStdExtZicond), "'Zicond' (Integer Conditional 
Operations)">; def FeatureStdExtZifencei : RISCVExtension<"zifencei", 2, 0, - "'Zifencei' (fence.i)">; + "'Zifencei' (fence.i)">, + RISCVExtensionBitmask<0, 13>; def HasStdExtZifencei : Predicate<"Subtarget->hasStdExtZifencei()">, AssemblerPredicate<(all_of FeatureStdExtZifencei), "'Zifencei' (fence.i)">; def FeatureStdExtZihintpause : RISCVExtension<"zihintpause", 2, 0, - "'Zihintpause' (Pause Hint)">; + "'Zihintpause' (Pause Hint)">, + RISCVExtensionBitmask<0, 14>; def HasStdExtZihintpause : Predicate<"Subtarget->hasStdExtZihintpause()">, AssemblerPredicate<(all_of FeatureStdExtZihintpause), "'Zihintpause' (Pause Hint)">; def FeatureStdExtZihintntl : RISCVExtension<"zihintntl", 1, 0, - "'Zihintntl' (Non-Temporal Locality Hints)">; + "'Zihintntl' (Non-Temporal Locality Hints)">, + RISCVExtensionBitmask<0, 15>; def HasStdExtZihintntl : Predicate<"Subtarget->hasStdExtZihintntl()">, AssemblerPredicate<(all_of FeatureStdExtZihintntl), "'Zihintntl' (Non-Temporal Locality Hints)">; @@ -142,10 +167,12 @@ def HasStdExtZihintntl : Predicate<"Subtarget->hasStdExtZihintntl()">, def FeatureStdExtZihpm : RISCVExtension<"zihpm", 2, 0, "'Zihpm' (Hardware Performance Counters)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 16>; def FeatureStdExtZimop : RISCVExtension<"zimop", 1, 0, - "'Zimop' (May-Be-Operations)">; + "'Zimop' (May-Be-Operations)">, + RISCVExtensionBitmask<0, 17>; def HasStdExtZimop : Predicate<"Subtarget->hasStdExtZimop()">, AssemblerPredicate<(all_of FeatureStdExtZimop), "'Zimop' (May-Be-Operations)">; @@ -153,7 +180,8 @@ def HasStdExtZimop : Predicate<"Subtarget->hasStdExtZimop()">, def FeatureStdExtZicfilp : RISCVExperimentalExtension<"zicfilp", 0, 4, "'Zicfilp' (Landing pad)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 18>; def HasStdExtZicfilp : Predicate<"Subtarget->hasStdExtZicfilp()">, AssemblerPredicate<(all_of FeatureStdExtZicfilp), "'Zicfilp' (Landing pad)">; @@ -163,7 +191,8 @@ def NoStdExtZicfilp : Predicate<"!Subtarget->hasStdExtZicfilp()">, def FeatureStdExtZicfiss : RISCVExperimentalExtension<"zicfiss", 0, 4, "'Zicfiss' (Shadow stack)", - [FeatureStdExtZicsr, FeatureStdExtZimop]>; + [FeatureStdExtZicsr, FeatureStdExtZimop]>, + RISCVExtensionBitmask<0, 19>; def HasStdExtZicfiss : Predicate<"Subtarget->hasStdExtZicfiss()">, AssemblerPredicate<(all_of FeatureStdExtZicfiss), "'Zicfiss' (Shadow stack)">; @@ -173,14 +202,16 @@ def NoHasStdExtZicfiss : Predicate<"!Subtarget->hasStdExtZicfiss()">; def FeatureStdExtM : RISCVExtension<"m", 2, 0, - "'M' (Integer Multiplication and Division)">; + "'M' (Integer Multiplication and Division)">, + RISCVExtensionBitmask<0, 20>; def HasStdExtM : Predicate<"Subtarget->hasStdExtM()">, AssemblerPredicate<(all_of FeatureStdExtM), "'M' (Integer Multiplication and Division)">; def FeatureStdExtZmmul : RISCVExtension<"zmmul", 1, 0, - "'Zmmul' (Integer Multiplication)">; + "'Zmmul' (Integer Multiplication)">, + RISCVExtensionBitmask<0, 21>; def HasStdExtMOrZmmul : Predicate<"Subtarget->hasStdExtM() || Subtarget->hasStdExtZmmul()">, @@ -192,28 +223,33 @@ def HasStdExtMOrZmmul def FeatureStdExtA : RISCVExtension<"a", 2, 1, - "'A' (Atomic Instructions)">; + "'A' (Atomic Instructions)">, + RISCVExtensionBitmask<0, 22>; def HasStdExtA : Predicate<"Subtarget->hasStdExtA()">, AssemblerPredicate<(all_of FeatureStdExtA), "'A' (Atomic Instructions)">; def FeatureStdExtZtso : RISCVExperimentalExtension<"ztso", 0, 1, - "'Ztso' (Memory Model - Total Store Order)">; + "'Ztso' (Memory Model - 
Total Store Order)">, + RISCVExtensionBitmask<0, 23>; def HasStdExtZtso : Predicate<"Subtarget->hasStdExtZtso()">, AssemblerPredicate<(all_of FeatureStdExtZtso), "'Ztso' (Memory Model - Total Store Order)">; def NotHasStdExtZtso : Predicate<"!Subtarget->hasStdExtZtso()">; def FeatureStdExtZa64rs : RISCVExtension<"za64rs", 1, 0, - "'Za64rs' (Reservation Set Size of at Most 64 Bytes)">; + "'Za64rs' (Reservation Set Size of at Most 64 Bytes)">, + RISCVExtensionBitmask<0, 24>; def FeatureStdExtZa128rs : RISCVExtension<"za128rs", 1, 0, - "'Za128rs' (Reservation Set Size of at Most 128 Bytes)">; + "'Za128rs' (Reservation Set Size of at Most 128 Bytes)">, + RISCVExtensionBitmask<0, 25>; def FeatureStdExtZaamo : RISCVExtension<"zaamo", 1, 0, - "'Zaamo' (Atomic Memory Operations)">; + "'Zaamo' (Atomic Memory Operations)">, + RISCVExtensionBitmask<0, 26>; def HasStdExtAOrZaamo : Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZaamo()">, AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZaamo), @@ -222,14 +258,16 @@ def HasStdExtAOrZaamo def FeatureStdExtZabha : RISCVExtension<"zabha", 1, 0, - "'Zabha' (Byte and Halfword Atomic Memory Operations)">; + "'Zabha' (Byte and Halfword Atomic Memory Operations)">, + RISCVExtensionBitmask<0, 27>; def HasStdExtZabha : Predicate<"Subtarget->hasStdExtZabha()">, AssemblerPredicate<(all_of FeatureStdExtZabha), "'Zabha' (Byte and Halfword Atomic Memory Operations)">; def FeatureStdExtZacas : RISCVExtension<"zacas", 1, 0, - "'Zacas' (Atomic Compare-And-Swap Instructions)">; + "'Zacas' (Atomic Compare-And-Swap Instructions)">, + RISCVExtensionBitmask<0, 28>; def HasStdExtZacas : Predicate<"Subtarget->hasStdExtZacas()">, AssemblerPredicate<(all_of FeatureStdExtZacas), "'Zacas' (Atomic Compare-And-Swap Instructions)">; @@ -237,14 +275,16 @@ def NoStdExtZacas : Predicate<"!Subtarget->hasStdExtZacas()">; def FeatureStdExtZalasr : RISCVExperimentalExtension<"zalasr", 0, 1, - "'Zalasr' (Load-Acquire and Store-Release Instructions)">; + "'Zalasr' (Load-Acquire and Store-Release Instructions)">, + RISCVExtensionBitmask<0, 29>; def HasStdExtZalasr : Predicate<"Subtarget->hasStdExtZalasr()">, AssemblerPredicate<(all_of FeatureStdExtZalasr), "'Zalasr' (Load-Acquire and Store-Release Instructions)">; def FeatureStdExtZalrsc : RISCVExtension<"zalrsc", 1, 0, - "'Zalrsc' (Load-Reserved/Store-Conditional)">; + "'Zalrsc' (Load-Reserved/Store-Conditional)">, + RISCVExtensionBitmask<0, 30>; def HasStdExtAOrZalrsc : Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZalrsc()">, AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZalrsc), @@ -253,10 +293,12 @@ def HasStdExtAOrZalrsc def FeatureStdExtZama16b : RISCVExtension<"zama16b", 1, 0, - "'Zama16b' (Atomic 16-byte misaligned loads, stores and AMOs)">; + "'Zama16b' (Atomic 16-byte misaligned loads, stores and AMOs)">, + RISCVExtensionBitmask<0, 31>; def FeatureStdExtZawrs : RISCVExtension<"zawrs", 1, 0, - "'Zawrs' (Wait on Reservation Set)">; + "'Zawrs' (Wait on Reservation Set)">, + RISCVExtensionBitmask<0, 32>; def HasStdExtZawrs : Predicate<"Subtarget->hasStdExtZawrs()">, AssemblerPredicate<(all_of FeatureStdExtZawrs), "'Zawrs' (Wait on Reservation Set)">; @@ -266,7 +308,8 @@ def HasStdExtZawrs : Predicate<"Subtarget->hasStdExtZawrs()">, def FeatureStdExtF : RISCVExtension<"f", 2, 2, "'F' (Single-Precision Floating-Point)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 33>; def HasStdExtF : Predicate<"Subtarget->hasStdExtF()">, AssemblerPredicate<(all_of FeatureStdExtF), 
"'F' (Single-Precision Floating-Point)">; @@ -274,7 +317,8 @@ def HasStdExtF : Predicate<"Subtarget->hasStdExtF()">, def FeatureStdExtD : RISCVExtension<"d", 2, 2, "'D' (Double-Precision Floating-Point)", - [FeatureStdExtF]>; + [FeatureStdExtF]>, + RISCVExtensionBitmask<0, 34>; def HasStdExtD : Predicate<"Subtarget->hasStdExtD()">, AssemblerPredicate<(all_of FeatureStdExtD), "'D' (Double-Precision Floating-Point)">; @@ -282,7 +326,8 @@ def HasStdExtD : Predicate<"Subtarget->hasStdExtD()">, def FeatureStdExtZfhmin : RISCVExtension<"zfhmin", 1, 0, "'Zfhmin' (Half-Precision Floating-Point Minimal)", - [FeatureStdExtF]>; + [FeatureStdExtF]>, + RISCVExtensionBitmask<0, 35>; def HasStdExtZfhmin : Predicate<"Subtarget->hasStdExtZfhmin()">, AssemblerPredicate<(all_of FeatureStdExtZfhmin), "'Zfh' (Half-Precision Floating-Point) or " @@ -291,7 +336,8 @@ def HasStdExtZfhmin : Predicate<"Subtarget->hasStdExtZfhmin()">, def FeatureStdExtZfh : RISCVExtension<"zfh", 1, 0, "'Zfh' (Half-Precision Floating-Point)", - [FeatureStdExtZfhmin]>; + [FeatureStdExtZfhmin]>, + RISCVExtensionBitmask<0, 36>; def HasStdExtZfh : Predicate<"Subtarget->hasStdExtZfh()">, AssemblerPredicate<(all_of FeatureStdExtZfh), "'Zfh' (Half-Precision Floating-Point)">; @@ -300,7 +346,8 @@ def NoStdExtZfh : Predicate<"!Subtarget->hasStdExtZfh()">; def FeatureStdExtZfbfmin : RISCVExperimentalExtension<"zfbfmin", 1, 0, "'Zfbfmin' (Scalar BF16 Converts)", - [FeatureStdExtF]>; + [FeatureStdExtF]>, + RISCVExtensionBitmask<0, 37>; def HasStdExtZfbfmin : Predicate<"Subtarget->hasStdExtZfbfmin()">, AssemblerPredicate<(all_of FeatureStdExtZfbfmin), "'Zfbfmin' (Scalar BF16 Converts)">; @@ -316,7 +363,8 @@ def HasHalfFPLoadStoreMove def FeatureStdExtZfa : RISCVExtension<"zfa", 1, 0, "'Zfa' (Additional Floating-Point)", - [FeatureStdExtF]>; + [FeatureStdExtF]>, + RISCVExtensionBitmask<0, 38>; def HasStdExtZfa : Predicate<"Subtarget->hasStdExtZfa()">, AssemblerPredicate<(all_of FeatureStdExtZfa), "'Zfa' (Additional Floating-Point)">; @@ -324,7 +372,8 @@ def HasStdExtZfa : Predicate<"Subtarget->hasStdExtZfa()">, def FeatureStdExtZfinx : RISCVExtension<"zfinx", 1, 0, "'Zfinx' (Float in Integer)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 39>; def HasStdExtZfinx : Predicate<"Subtarget->hasStdExtZfinx()">, AssemblerPredicate<(all_of FeatureStdExtZfinx), "'Zfinx' (Float in Integer)">; @@ -332,7 +381,8 @@ def HasStdExtZfinx : Predicate<"Subtarget->hasStdExtZfinx()">, def FeatureStdExtZdinx : RISCVExtension<"zdinx", 1, 0, "'Zdinx' (Double in Integer)", - [FeatureStdExtZfinx]>; + [FeatureStdExtZfinx]>, + RISCVExtensionBitmask<0, 40>; def HasStdExtZdinx : Predicate<"Subtarget->hasStdExtZdinx()">, AssemblerPredicate<(all_of FeatureStdExtZdinx), "'Zdinx' (Double in Integer)">; @@ -340,7 +390,8 @@ def HasStdExtZdinx : Predicate<"Subtarget->hasStdExtZdinx()">, def FeatureStdExtZhinxmin : RISCVExtension<"zhinxmin", 1, 0, "'Zhinxmin' (Half Float in Integer Minimal)", - [FeatureStdExtZfinx]>; + [FeatureStdExtZfinx]>, + RISCVExtensionBitmask<0, 41>; def HasStdExtZhinxmin : Predicate<"Subtarget->hasStdExtZhinxmin()">, AssemblerPredicate<(all_of FeatureStdExtZhinxmin), "'Zhinx' (Half Float in Integer) or " @@ -349,7 +400,8 @@ def HasStdExtZhinxmin : Predicate<"Subtarget->hasStdExtZhinxmin()">, def FeatureStdExtZhinx : RISCVExtension<"zhinx", 1, 0, "'Zhinx' (Half Float in Integer)", - [FeatureStdExtZhinxmin]>; + [FeatureStdExtZhinxmin]>, + RISCVExtensionBitmask<0, 42>; def HasStdExtZhinx : Predicate<"Subtarget->hasStdExtZhinx()">, 
AssemblerPredicate<(all_of FeatureStdExtZhinx), "'Zhinx' (Half Float in Integer)">; @@ -359,7 +411,8 @@ def NoStdExtZhinx : Predicate<"!Subtarget->hasStdExtZhinx()">; def FeatureStdExtC : RISCVExtension<"c", 2, 0, - "'C' (Compressed Instructions)">; + "'C' (Compressed Instructions)">, + RISCVExtensionBitmask<0, 43>; def HasStdExtC : Predicate<"Subtarget->hasStdExtC()">, AssemblerPredicate<(all_of FeatureStdExtC), "'C' (Compressed Instructions)">; @@ -374,7 +427,8 @@ def HasRVCHints : Predicate<"Subtarget->enableRVCHintInstrs()">, def FeatureStdExtZca : RISCVExtension<"zca", 1, 0, "'Zca' (part of the C extension, excluding compressed " - "floating point loads/stores)">; + "floating point loads/stores)">, + RISCVExtensionBitmask<0, 44>; def HasStdExtCOrZca : Predicate<"Subtarget->hasStdExtCOrZca()">, @@ -386,7 +440,8 @@ def HasStdExtCOrZca def FeatureStdExtZcb : RISCVExtension<"zcb", 1, 0, "'Zcb' (Compressed basic bit manipulation instructions)", - [FeatureStdExtZca]>; + [FeatureStdExtZca]>, + RISCVExtensionBitmask<0, 45>; def HasStdExtZcb : Predicate<"Subtarget->hasStdExtZcb()">, AssemblerPredicate<(all_of FeatureStdExtZcb), "'Zcb' (Compressed basic bit manipulation instructions)">; @@ -394,7 +449,8 @@ def HasStdExtZcb : Predicate<"Subtarget->hasStdExtZcb()">, def FeatureStdExtZcd : RISCVExtension<"zcd", 1, 0, "'Zcd' (Compressed Double-Precision Floating-Point Instructions)", - [FeatureStdExtD, FeatureStdExtZca]>; + [FeatureStdExtD, FeatureStdExtZca]>, + RISCVExtensionBitmask<0, 46>; def HasStdExtCOrZcd : Predicate<"Subtarget->hasStdExtCOrZcd()">, @@ -405,12 +461,14 @@ def HasStdExtCOrZcd def FeatureStdExtZcf : RISCVExtension<"zcf", 1, 0, "'Zcf' (Compressed Single-Precision Floating-Point Instructions)", - [FeatureStdExtF, FeatureStdExtZca]>; + [FeatureStdExtF, FeatureStdExtZca]>, + RISCVExtensionBitmask<0, 47>; def FeatureStdExtZcmp : RISCVExtension<"zcmp", 1, 0, "'Zcmp' (sequenced instuctions for code-size reduction)", - [FeatureStdExtZca]>; + [FeatureStdExtZca]>, + RISCVExtensionBitmask<0, 48>; def HasStdExtZcmp : Predicate<"Subtarget->hasStdExtZcmp() && !Subtarget->hasStdExtC()">, AssemblerPredicate<(all_of FeatureStdExtZcmp), "'Zcmp' (sequenced instuctions for code-size reduction)">; @@ -418,7 +476,8 @@ def HasStdExtZcmp : Predicate<"Subtarget->hasStdExtZcmp() && !Subtarget->hasStdE def FeatureStdExtZcmt : RISCVExtension<"zcmt", 1, 0, "'Zcmt' (table jump instuctions for code-size reduction)", - [FeatureStdExtZca, FeatureStdExtZicsr]>; + [FeatureStdExtZca, FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 49>; def HasStdExtZcmt : Predicate<"Subtarget->hasStdExtZcmt()">, AssemblerPredicate<(all_of FeatureStdExtZcmt), "'Zcmt' (table jump instuctions for code-size reduction)">; @@ -426,7 +485,8 @@ def HasStdExtZcmt : Predicate<"Subtarget->hasStdExtZcmt()">, def FeatureStdExtZce : RISCVExtension<"zce", 1, 0, "'Zce' (Compressed extensions for microcontrollers)", - [FeatureStdExtZcb, FeatureStdExtZcmp, FeatureStdExtZcmt]>; + [FeatureStdExtZcb, FeatureStdExtZcmp, FeatureStdExtZcmt]>, + RISCVExtensionBitmask<0, 50>; def HasStdExtCOrZcfOrZce : Predicate<"Subtarget->hasStdExtC() || Subtarget->hasStdExtZcf() " @@ -439,7 +499,8 @@ def HasStdExtCOrZcfOrZce def FeatureStdExtZcmop : RISCVExtension<"zcmop", 1, 0, "'Zcmop' (Compressed May-Be-Operations)", - [FeatureStdExtZca]>; + [FeatureStdExtZca]>, + RISCVExtensionBitmask<0, 51>; def HasStdExtZcmop : Predicate<"Subtarget->hasStdExtZcmop()">, AssemblerPredicate<(all_of FeatureStdExtZcmop), "'Zcmop' (Compressed May-Be-Operations)">; @@ -448,7 +509,8 @@ 
def HasStdExtZcmop : Predicate<"Subtarget->hasStdExtZcmop()">, def FeatureStdExtZba : RISCVExtension<"zba", 1, 0, - "'Zba' (Address Generation Instructions)">; + "'Zba' (Address Generation Instructions)">, + RISCVExtensionBitmask<0, 52>; def HasStdExtZba : Predicate<"Subtarget->hasStdExtZba()">, AssemblerPredicate<(all_of FeatureStdExtZba), "'Zba' (Address Generation Instructions)">; @@ -456,21 +518,24 @@ def NotHasStdExtZba : Predicate<"!Subtarget->hasStdExtZba()">; def FeatureStdExtZbb : RISCVExtension<"zbb", 1, 0, - "'Zbb' (Basic Bit-Manipulation)">; + "'Zbb' (Basic Bit-Manipulation)">, + RISCVExtensionBitmask<0, 53>; def HasStdExtZbb : Predicate<"Subtarget->hasStdExtZbb()">, AssemblerPredicate<(all_of FeatureStdExtZbb), "'Zbb' (Basic Bit-Manipulation)">; def FeatureStdExtZbc : RISCVExtension<"zbc", 1, 0, - "'Zbc' (Carry-Less Multiplication)">; + "'Zbc' (Carry-Less Multiplication)">, + RISCVExtensionBitmask<0, 54>; def HasStdExtZbc : Predicate<"Subtarget->hasStdExtZbc()">, AssemblerPredicate<(all_of FeatureStdExtZbc), "'Zbc' (Carry-Less Multiplication)">; def FeatureStdExtZbs : RISCVExtension<"zbs", 1, 0, - "'Zbs' (Single-Bit Instructions)">; + "'Zbs' (Single-Bit Instructions)">, + RISCVExtensionBitmask<0, 55>; def HasStdExtZbs : Predicate<"Subtarget->hasStdExtZbs()">, AssemblerPredicate<(all_of FeatureStdExtZbs), "'Zbs' (Single-Bit Instructions)">; @@ -487,14 +552,16 @@ def HasStdExtB : Predicate<"Subtarget->hasStdExtB()">, def FeatureStdExtZbkb : RISCVExtension<"zbkb", 1, 0, - "'Zbkb' (Bitmanip instructions for Cryptography)">; + "'Zbkb' (Bitmanip instructions for Cryptography)">, + RISCVExtensionBitmask<0, 56>; def HasStdExtZbkb : Predicate<"Subtarget->hasStdExtZbkb()">, AssemblerPredicate<(all_of FeatureStdExtZbkb), "'Zbkb' (Bitmanip instructions for Cryptography)">; def FeatureStdExtZbkx : RISCVExtension<"zbkx", 1, 0, - "'Zbkx' (Crossbar permutation instructions)">; + "'Zbkx' (Crossbar permutation instructions)">, + RISCVExtensionBitmask<0, 57>; def HasStdExtZbkx : Predicate<"Subtarget->hasStdExtZbkx()">, AssemblerPredicate<(all_of FeatureStdExtZbkx), "'Zbkx' (Crossbar permutation instructions)">; @@ -511,7 +578,8 @@ def HasStdExtZbbOrZbkb def FeatureStdExtZbkc : RISCVExtension<"zbkc", 1, 0, "'Zbkc' (Carry-less multiply instructions for " - "Cryptography)">; + "Cryptography)">, + RISCVExtensionBitmask<0, 58>; def HasStdExtZbkc : Predicate<"Subtarget->hasStdExtZbkc()">, AssemblerPredicate<(all_of FeatureStdExtZbkc), @@ -528,14 +596,16 @@ def HasStdExtZbcOrZbkc def FeatureStdExtZknd : RISCVExtension<"zknd", 1, 0, - "'Zknd' (NIST Suite: AES Decryption)">; + "'Zknd' (NIST Suite: AES Decryption)">, + RISCVExtensionBitmask<0, 59>; def HasStdExtZknd : Predicate<"Subtarget->hasStdExtZknd()">, AssemblerPredicate<(all_of FeatureStdExtZknd), "'Zknd' (NIST Suite: AES Decryption)">; def FeatureStdExtZkne : RISCVExtension<"zkne", 1, 0, - "'Zkne' (NIST Suite: AES Encryption)">; + "'Zkne' (NIST Suite: AES Encryption)">, + RISCVExtensionBitmask<0, 60>; def HasStdExtZkne : Predicate<"Subtarget->hasStdExtZkne()">, AssemblerPredicate<(all_of FeatureStdExtZkne), "'Zkne' (NIST Suite: AES Encryption)">; @@ -550,28 +620,32 @@ def HasStdExtZkndOrZkne def FeatureStdExtZknh : RISCVExtension<"zknh", 1, 0, - "'Zknh' (NIST Suite: Hash Function Instructions)">; + "'Zknh' (NIST Suite: Hash Function Instructions)">, + RISCVExtensionBitmask<0, 61>; def HasStdExtZknh : Predicate<"Subtarget->hasStdExtZknh()">, AssemblerPredicate<(all_of FeatureStdExtZknh), "'Zknh' (NIST Suite: Hash Function Instructions)">; def 
FeatureStdExtZksed : RISCVExtension<"zksed", 1, 0, - "'Zksed' (ShangMi Suite: SM4 Block Cipher Instructions)">; + "'Zksed' (ShangMi Suite: SM4 Block Cipher Instructions)">, + RISCVExtensionBitmask<0, 62>; def HasStdExtZksed : Predicate<"Subtarget->hasStdExtZksed()">, AssemblerPredicate<(all_of FeatureStdExtZksed), "'Zksed' (ShangMi Suite: SM4 Block Cipher Instructions)">; def FeatureStdExtZksh : RISCVExtension<"zksh", 1, 0, - "'Zksh' (ShangMi Suite: SM3 Hash Function Instructions)">; + "'Zksh' (ShangMi Suite: SM3 Hash Function Instructions)">, + RISCVExtensionBitmask<0, 63>; def HasStdExtZksh : Predicate<"Subtarget->hasStdExtZksh()">, AssemblerPredicate<(all_of FeatureStdExtZksh), "'Zksh' (ShangMi Suite: SM3 Hash Function Instructions)">; def FeatureStdExtZkr : RISCVExtension<"zkr", 1, 0, - "'Zkr' (Entropy Source Extension)">; + "'Zkr' (Entropy Source Extension)">, + RISCVExtensionBitmask<1, 0>; def HasStdExtZkr : Predicate<"Subtarget->hasStdExtZkr()">, AssemblerPredicate<(all_of FeatureStdExtZkr), "'Zkr' (Entropy Source Extension)">; @@ -584,7 +658,8 @@ def FeatureStdExtZkn FeatureStdExtZbkx, FeatureStdExtZkne, FeatureStdExtZknd, - FeatureStdExtZknh]>; + FeatureStdExtZknh]>, + RISCVExtensionBitmask<1, 1>; def FeatureStdExtZks : RISCVExtension<"zks", 1, 0, @@ -593,24 +668,28 @@ def FeatureStdExtZks FeatureStdExtZbkc, FeatureStdExtZbkx, FeatureStdExtZksed, - FeatureStdExtZksh]>; + FeatureStdExtZksh]>, + RISCVExtensionBitmask<1, 2>; def FeatureStdExtZkt : RISCVExtension<"zkt", 1, 0, - "'Zkt' (Data Independent Execution Latency)">; + "'Zkt' (Data Independent Execution Latency)">, + RISCVExtensionBitmask<1, 3>; def FeatureStdExtZk : RISCVExtension<"zk", 1, 0, "'Zk' (Standard scalar cryptography extension)", [FeatureStdExtZkn, FeatureStdExtZkr, - FeatureStdExtZkt]>; + FeatureStdExtZkt]>, + RISCVExtensionBitmask<1, 4>; // Vector Extensions def FeatureStdExtZvl32b : RISCVExtension<"zvl32b", 1, 0, "'Zvl' (Minimum Vector Length) 32", [], - "ZvlLen", "32">; + "ZvlLen", "32">, + RISCVExtensionBitmask<1, 5>; foreach i = { 6-16 } in { defvar I = !shl(1, i); @@ -618,48 +697,57 @@ foreach i = { 6-16 } in { RISCVExtension<"zvl"#I#"b", 1, 0, "'Zvl' (Minimum Vector Length) "#I, [!cast<RISCVExtension>("FeatureStdExtZvl"#!srl(I, 1)#"b")], - "ZvlLen", !cast<string>(I)>; + "ZvlLen", !cast<string>(I)>, + RISCVExtensionBitmask<1, !add(6, !sub(i, 6))>; } def FeatureStdExtZve32x : RISCVExtension<"zve32x", 1, 0, "'Zve32x' (Vector Extensions for Embedded Processors " "with maximal 32 EEW)", - [FeatureStdExtZicsr, FeatureStdExtZvl32b]>; + [FeatureStdExtZicsr, FeatureStdExtZvl32b]>, + RISCVExtensionBitmask<1, 17>; + def FeatureStdExtZve32f : RISCVExtension<"zve32f", 1, 0, "'Zve32f' (Vector Extensions for Embedded Processors " "with maximal 32 EEW and F extension)", - [FeatureStdExtZve32x, FeatureStdExtF]>; + [FeatureStdExtZve32x, FeatureStdExtF]>, + RISCVExtensionBitmask<1, 18>; def FeatureStdExtZve64x : RISCVExtension<"zve64x", 1, 0, "'Zve64x' (Vector Extensions for Embedded Processors " "with maximal 64 EEW)", - [FeatureStdExtZve32x, FeatureStdExtZvl64b]>; + [FeatureStdExtZve32x, FeatureStdExtZvl64b]>, + RISCVExtensionBitmask<1, 19>; def FeatureStdExtZve64f : RISCVExtension<"zve64f", 1, 0, "'Zve64f' (Vector Extensions for Embedded Processors " "with maximal 64 EEW and F extension)", - [FeatureStdExtZve32f, FeatureStdExtZve64x]>; + [FeatureStdExtZve32f, FeatureStdExtZve64x]>, + RISCVExtensionBitmask<1, 20>; def FeatureStdExtZve64d : RISCVExtension<"zve64d", 1, 0, "'Zve64d' (Vector Extensions for Embedded Processors " 
"with maximal 64 EEW, F and D extension)", - [FeatureStdExtZve64f, FeatureStdExtD]>; + [FeatureStdExtZve64f, FeatureStdExtD]>, + RISCVExtensionBitmask<1, 21>; def FeatureStdExtV : RISCVExtension<"v", 1, 0, "'V' (Vector Extension for Application Processors)", - [FeatureStdExtZvl128b, FeatureStdExtZve64d]>; + [FeatureStdExtZvl128b, FeatureStdExtZve64d]>, + RISCVExtensionBitmask<1, 22>; def FeatureStdExtZvfbfmin : RISCVExperimentalExtension<"zvfbfmin", 1, 0, "'Zvbfmin' (Vector BF16 Converts)", - [FeatureStdExtZve32f]>; + [FeatureStdExtZve32f]>, + RISCVExtensionBitmask<1, 23>; def HasStdExtZvfbfmin : Predicate<"Subtarget->hasStdExtZvfbfmin()">, AssemblerPredicate<(all_of FeatureStdExtZvfbfmin), "'Zvfbfmin' (Vector BF16 Converts)">; @@ -667,7 +755,8 @@ def HasStdExtZvfbfmin : Predicate<"Subtarget->hasStdExtZvfbfmin()">, def FeatureStdExtZvfbfwma : RISCVExperimentalExtension<"zvfbfwma", 1, 0, "'Zvfbfwma' (Vector BF16 widening mul-add)", - [FeatureStdExtZvfbfmin, FeatureStdExtZfbfmin]>; + [FeatureStdExtZvfbfmin, FeatureStdExtZfbfmin]>, + RISCVExtensionBitmask<1, 24>; def HasStdExtZvfbfwma : Predicate<"Subtarget->hasStdExtZvfbfwma()">, AssemblerPredicate<(all_of FeatureStdExtZvfbfwma), "'Zvfbfwma' (Vector BF16 widening mul-add)">; @@ -675,12 +764,14 @@ def HasStdExtZvfbfwma : Predicate<"Subtarget->hasStdExtZvfbfwma()">, def FeatureStdExtZvfhmin : RISCVExtension<"zvfhmin", 1, 0, "'Zvfhmin' (Vector Half-Precision Floating-Point Minimal)", - [FeatureStdExtZve32f]>; + [FeatureStdExtZve32f]>, + RISCVExtensionBitmask<1, 25>; def FeatureStdExtZvfh : RISCVExtension<"zvfh", 1, 0, "'Zvfh' (Vector Half-Precision Floating-Point)", - [FeatureStdExtZvfhmin, FeatureStdExtZfhmin]>; + [FeatureStdExtZvfhmin, FeatureStdExtZfhmin]>, + RISCVExtensionBitmask<1, 26>; def HasStdExtZfhOrZvfh : Predicate<"Subtarget->hasStdExtZfh() || Subtarget->hasStdExtZvfh()">, @@ -692,7 +783,8 @@ def HasStdExtZfhOrZvfh def FeatureStdExtZvkb : RISCVExtension<"zvkb", 1, 0, - "'Zvkb' (Vector Bit-manipulation used in Cryptography)">; + "'Zvkb' (Vector Bit-manipulation used in Cryptography)">, + RISCVExtensionBitmask<1, 27>; def HasStdExtZvkb : Predicate<"Subtarget->hasStdExtZvkb()">, AssemblerPredicate<(all_of FeatureStdExtZvkb), "'Zvkb' (Vector Bit-manipulation used in Cryptography)">; @@ -700,35 +792,40 @@ def HasStdExtZvkb : Predicate<"Subtarget->hasStdExtZvkb()">, def FeatureStdExtZvbb : RISCVExtension<"zvbb", 1, 0, "'Zvbb' (Vector basic bit-manipulation instructions)", - [FeatureStdExtZvkb]>; + [FeatureStdExtZvkb]>, + RISCVExtensionBitmask<1, 28>; def HasStdExtZvbb : Predicate<"Subtarget->hasStdExtZvbb()">, AssemblerPredicate<(all_of FeatureStdExtZvbb), "'Zvbb' (Vector basic bit-manipulation instructions)">; def FeatureStdExtZvbc : RISCVExtension<"zvbc", 1, 0, - "'Zvbc' (Vector Carryless Multiplication)">; + "'Zvbc' (Vector Carryless Multiplication)">, + RISCVExtensionBitmask<1, 29>; def HasStdExtZvbc : Predicate<"Subtarget->hasStdExtZvbc()">, AssemblerPredicate<(all_of FeatureStdExtZvbc), "'Zvbc' (Vector Carryless Multiplication)">; def FeatureStdExtZvkg : RISCVExtension<"zvkg", 1, 0, - "'Zvkg' (Vector GCM instructions for Cryptography)">; + "'Zvkg' (Vector GCM instructions for Cryptography)">, + RISCVExtensionBitmask<1, 30>; def HasStdExtZvkg : Predicate<"Subtarget->hasStdExtZvkg()">, AssemblerPredicate<(all_of FeatureStdExtZvkg), "'Zvkg' (Vector GCM instructions for Cryptography)">; def FeatureStdExtZvkned : RISCVExtension<"zvkned", 1, 0, - "'Zvkned' (Vector AES Encryption & Decryption (Single Round))">; + "'Zvkned' (Vector AES 
Encryption & Decryption (Single Round))">, + RISCVExtensionBitmask<1, 31>; def HasStdExtZvkned : Predicate<"Subtarget->hasStdExtZvkned()">, AssemblerPredicate<(all_of FeatureStdExtZvkned), "'Zvkned' (Vector AES Encryption & Decryption (Single Round))">; def FeatureStdExtZvknha : RISCVExtension<"zvknha", 1, 0, - "'Zvknha' (Vector SHA-2 (SHA-256 only))">; + "'Zvknha' (Vector SHA-2 (SHA-256 only))">, + RISCVExtensionBitmask<1, 32>; def HasStdExtZvknha : Predicate<"Subtarget->hasStdExtZvknha()">, AssemblerPredicate<(all_of FeatureStdExtZvknha), "'Zvknha' (Vector SHA-2 (SHA-256 only))">; @@ -736,7 +833,8 @@ def HasStdExtZvknha : Predicate<"Subtarget->hasStdExtZvknha()">, def FeatureStdExtZvknhb : RISCVExtension<"zvknhb", 1, 0, "'Zvknhb' (Vector SHA-2 (SHA-256 and SHA-512))", - [FeatureStdExtZve64x]>; + [FeatureStdExtZve64x]>, + RISCVExtensionBitmask<1, 33>; def HasStdExtZvknhb : Predicate<"Subtarget->hasStdExtZvknhb()">, AssemblerPredicate<(all_of FeatureStdExtZvknhb), "'Zvknhb' (Vector SHA-2 (SHA-256 and SHA-512))">; @@ -747,21 +845,24 @@ def HasStdExtZvknhaOrZvknhb : Predicate<"Subtarget->hasStdExtZvknha() || Subtarg def FeatureStdExtZvksed : RISCVExtension<"zvksed", 1, 0, - "'Zvksed' (SM4 Block Cipher Instructions)">; + "'Zvksed' (SM4 Block Cipher Instructions)">, + RISCVExtensionBitmask<1, 34>; def HasStdExtZvksed : Predicate<"Subtarget->hasStdExtZvksed()">, AssemblerPredicate<(all_of FeatureStdExtZvksed), "'Zvksed' (SM4 Block Cipher Instructions)">; def FeatureStdExtZvksh : RISCVExtension<"zvksh", 1, 0, - "'Zvksh' (SM3 Hash Function Instructions)">; + "'Zvksh' (SM3 Hash Function Instructions)">, + RISCVExtensionBitmask<1, 35>; def HasStdExtZvksh : Predicate<"Subtarget->hasStdExtZvksh()">, AssemblerPredicate<(all_of FeatureStdExtZvksh), "'Zvksh' (SM3 Hash Function Instructions)">; def FeatureStdExtZvkt : RISCVExtension<"zvkt", 1, 0, - "'Zvkt' (Vector Data-Independent Execution Latency)">; + "'Zvkt' (Vector Data-Independent Execution Latency)">, + RISCVExtensionBitmask<1, 36>; // Zvk short-hand extensions @@ -770,34 +871,40 @@ def FeatureStdExtZvkn "'Zvkn' (shorthand for 'Zvkned', 'Zvknhb', 'Zvkb', and " "'Zvkt')", [FeatureStdExtZvkned, FeatureStdExtZvknhb, - FeatureStdExtZvkb, FeatureStdExtZvkt]>; + FeatureStdExtZvkb, FeatureStdExtZvkt]>, + RISCVExtensionBitmask<1, 37>; def FeatureStdExtZvknc : RISCVExtension<"zvknc", 1, 0, "'Zvknc' (shorthand for 'Zvknc' and 'Zvbc')", - [FeatureStdExtZvkn, FeatureStdExtZvbc]>; + [FeatureStdExtZvkn, FeatureStdExtZvbc]>, + RISCVExtensionBitmask<1, 38>; def FeatureStdExtZvkng : RISCVExtension<"zvkng", 1, 0, "'zvkng' (shorthand for 'Zvkn' and 'Zvkg')", - [FeatureStdExtZvkn, FeatureStdExtZvkg]>; + [FeatureStdExtZvkn, FeatureStdExtZvkg]>, + RISCVExtensionBitmask<1, 39>; def FeatureStdExtZvks : RISCVExtension<"zvks", 1, 0, "'Zvks' (shorthand for 'Zvksed', 'Zvksh', 'Zvkb', and " "'Zvkt')", [FeatureStdExtZvksed, FeatureStdExtZvksh, - FeatureStdExtZvkb, FeatureStdExtZvkt]>; + FeatureStdExtZvkb, FeatureStdExtZvkt]>, + RISCVExtensionBitmask<1, 40>; def FeatureStdExtZvksc : RISCVExtension<"zvksc", 1, 0, "'Zvksc' (shorthand for 'Zvks' and 'Zvbc')", - [FeatureStdExtZvks, FeatureStdExtZvbc]>; + [FeatureStdExtZvks, FeatureStdExtZvbc]>, + RISCVExtensionBitmask<1, 41>; def FeatureStdExtZvksg : RISCVExtension<"zvksg", 1, 0, "'Zvksg' (shorthand for 'Zvks' and 'Zvkg')", - [FeatureStdExtZvks, FeatureStdExtZvkg]>; + [FeatureStdExtZvks, FeatureStdExtZvkg]>, + RISCVExtensionBitmask<1, 42>; // Vector instruction predicates diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp 
b/llvm/lib/TargetParser/RISCVTargetParser.cpp index 9003f9beffa7e..ec6447a44f90d 100644 --- a/llvm/lib/TargetParser/RISCVTargetParser.cpp +++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp @@ -119,6 +119,38 @@ void getFeaturesForCPU(StringRef CPU, else EnabledFeatures.push_back(F.substr(1)); } + +namespace RISCVExtensionBitmaskTable { +#define GET_RISCVExtensionBitmaskTable_IMPL +#include "llvm/TargetParser/RISCVTargetParserDef.inc" + +} // namespace RISCVExtensionBitmaskTable + +namespace { +struct LessExtName { + bool operator()(const RISCVExtensionBitmaskTable::RISCVExtensionBitmask &LHS, + StringRef RHS) { + return StringRef(LHS.Name) < RHS; + } + bool + operator()(StringRef LHS, + const RISCVExtensionBitmaskTable::RISCVExtensionBitmask &RHS) { + return LHS < StringRef(RHS.Name); + } +}; +} // namespace + +static RISCVExtensionBitmaskTable::RISCVExtensionBitmask +getExtensionBitmask(StringRef ExtName) { + ArrayRef<RISCVExtensionBitmaskTable::RISCVExtensionBitmask> ExtBitmasks = + ArrayRef(RISCVExtensionBitmaskTable::ExtensionBitmask); + auto *I = llvm::lower_bound(ExtBitmasks, ExtName, LessExtName()); + + if (I != ExtBitmasks.end()) + return *I; + + return RISCVExtensionBitmaskTable::RISCVExtensionBitmask(); +} } // namespace RISCV namespace RISCVVType { diff --git a/llvm/test/TableGen/riscv-target-def.td b/llvm/test/TableGen/riscv-target-def.td index fb58448d7ce88..2303c1f4bf37f 100644 --- a/llvm/test/TableGen/riscv-target-def.td +++ b/llvm/test/TableGen/riscv-target-def.td @@ -12,6 +12,11 @@ class RISCVExtension<string name, int major, int minor, string desc, bit Experimental = false; } +class RISCVExtensionBitmask<bits<3> groupID, int bitmaskShift> { + bits<3> GroupID = groupID; + bits<64> Bitmask = !shl(1, bitmaskShift); +} + class RISCVExperimentalExtension<string name, int major, int minor, string desc, list<RISCVExtension> implies = [], string fieldname = !subst("Feature", "Has", NAME), @@ -23,20 +28,24 @@ class RISCVExperimentalExtension<string name, int major, int minor, string desc, def FeatureStdExtI : RISCVExtension<"i", 2, 1, - "'I' (Base Integer Instruction Set)">; + "'I' (Base Integer Instruction Set)">, + RISCVExtensionBitmask<0, 0>; def FeatureStdExtZicsr : RISCVExtension<"zicsr", 2, 0, - "'zicsr' (CSRs)">; + "'zicsr' (CSRs)">, + RISCVExtensionBitmask<0, 10>; def FeatureStdExtZifencei : RISCVExtension<"zifencei", 2, 0, - "'Zifencei' (fence.i)">; + "'Zifencei' (fence.i)">, + RISCVExtensionBitmask<0, 13>; def FeatureStdExtF : RISCVExtension<"f", 2, 2, "'F' (Single-Precision Floating-Point)", - [FeatureStdExtZicsr]>; + [FeatureStdExtZicsr]>, + RISCVExtensionBitmask<0, 33>; def FeatureStdExtZidummy : RISCVExperimentalExtension<"zidummy", 0, 1, @@ -171,3 +180,12 @@ def ROCKET : RISCVTuneProcessorModel<"rocket", // CHECK-NEXT: TUNE_PROC(ROCKET, "rocket") // CHECK: #undef TUNE_PROC + +// CHECK: #ifdef GET_RISCVExtensionBitmaskTable_IMPL +// CHECK-NEXT: static const RISCVExtensionBitmask ExtensionBitmask[]={ +// CHECK-NEXT: {"f", 0, 8589934592ULL}, +// CHECK-NEXT: {"i", 0, 1ULL}, +// CHECK-NEXT: {"zicsr", 0, 1024ULL}, +// CHECK-NEXT: {"zifencei", 0, 8192ULL}, +// CHECK-NEXT: }; +// CHECK-NEXT: #endif diff --git a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp index b76ba05954aa5..fa1540d7db7a3 100644 --- a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp +++ b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// +#include 
"llvm/ADT/DenseMap.h" #include "llvm/Support/RISCVISAUtils.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" @@ -210,10 +211,61 @@ static void emitRISCVProcs(RecordKeeper &RK, raw_ostream &OS) { OS << "\n#undef TUNE_PROC\n"; } +static inline uint64_t getValueFromBitsInit(const BitsInit *B, + const Record &R) { + assert(B->getNumBits() <= 64 && "BitInits' too long!"); + + uint64_t Value = 0; + for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) { + const auto *Bit = cast<BitInit>(B->getBit(i)); + if (Bit) + Value |= uint64_t(Bit->getValue()) << i; + } + return Value; +} + +static void emitRISCVExtensionBitmask(RecordKeeper &RK, raw_ostream &OS) { + + std::vector<Record *> Extensions = + RK.getAllDerivedDefinitionsIfDefined("RISCVExtensionBitmask"); + llvm::sort(Extensions, [](const Record *Rec1, const Record *Rec2) { + return getExtensionName(Rec1) < getExtensionName(Rec2); + }); + +#ifndef NDEBUG + llvm::DenseSet<std::pair<uint64_t, uint64_t>> Seen; +#endif + + OS << "#ifdef GET_RISCVExtensionBitmaskTable_IMPL\n"; + OS << "static const RISCVExtensionBitmask ExtensionBitmask[]={\n"; + for (const Record *Rec : Extensions) { + BitsInit *GroupIDBits = Rec->getValueAsBitsInit("GroupID"); + BitsInit *BitmaskBits = Rec->getValueAsBitsInit("Bitmask"); + + StringRef ExtName = Rec->getValueAsString("Name"); + ExtName.consume_front("experimental-"); + uint64_t GroupIDVal = getValueFromBitsInit(GroupIDBits, *Rec); + uint64_t BitmaskVal = getValueFromBitsInit(BitmaskBits, *Rec); + +#ifndef NDEBUG + assert(Seen.insert(std::make_pair(GroupIDVal, BitmaskVal)).second && + "duplicated bitmask"); +#endif + + OS << " {" + << "\"" << ExtName << "\"" + << ", " << GroupIDVal << ", " << BitmaskVal << "ULL" + << "},\n"; + } + OS << "};\n"; + OS << "#endif\n"; +} + static void EmitRISCVTargetDef(RecordKeeper &RK, raw_ostream &OS) { emitRISCVExtensions(RK, OS); emitRISCVProfiles(RK, OS); emitRISCVProcs(RK, OS); + emitRISCVExtensionBitmask(RK, OS); } static TableGen::Emitter::Opt X("gen-riscv-target-def", EmitRISCVTargetDef, >From ad2c9f3fdf20069850ef6e1d4195f9e310a59cee Mon Sep 17 00:00:00 2001 From: Piyou Chen <piyou.c...@sifive.com> Date: Mon, 10 Jun 2024 22:22:42 -0700 Subject: [PATCH 2/2] [RISCV][FMV] Support target_clones --- .../clang/Basic/DiagnosticFrontendKinds.td | 4 + clang/include/clang/Basic/TargetInfo.h | 3 +- clang/lib/AST/ASTContext.cpp | 10 + clang/lib/Basic/Targets/RISCV.cpp | 10 +- clang/lib/Basic/Targets/RISCV.h | 2 + clang/lib/CodeGen/CGBuiltin.cpp | 55 ++++++ clang/lib/CodeGen/CodeGenFunction.cpp | 113 ++++++++++- clang/lib/CodeGen/CodeGenFunction.h | 5 + clang/lib/CodeGen/CodeGenModule.cpp | 5 +- clang/lib/CodeGen/Targets/RISCV.cpp | 23 +++ clang/lib/Sema/SemaDeclAttr.cpp | 25 +++ .../attr-target-clones-riscv-invaild.c | 8 + clang/test/CodeGen/attr-target-clones-riscv.c | 186 ++++++++++++++++++ .../CodeGenCXX/attr-target-clones-riscv.cpp | 186 ++++++++++++++++++ .../test/SemaCXX/attr-target-clones-riscv.cpp | 28 +++ .../llvm/TargetParser/RISCVTargetParser.h | 2 + llvm/lib/TargetParser/RISCVTargetParser.cpp | 14 ++ 17 files changed, 674 insertions(+), 5 deletions(-) create mode 100644 clang/test/CodeGen/attr-target-clones-riscv-invaild.c create mode 100644 clang/test/CodeGen/attr-target-clones-riscv.c create mode 100644 clang/test/CodeGenCXX/attr-target-clones-riscv.cpp create mode 100644 clang/test/SemaCXX/attr-target-clones-riscv.cpp diff --git a/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/clang/include/clang/Basic/DiagnosticFrontendKinds.td index 
85c32e55bdab3..49a019a08da72 100644 --- a/clang/include/clang/Basic/DiagnosticFrontendKinds.td +++ b/clang/include/clang/Basic/DiagnosticFrontendKinds.td @@ -374,4 +374,8 @@ def warn_missing_symbol_graph_dir : Warning< def err_ast_action_on_llvm_ir : Error< "cannot apply AST actions to LLVM IR file '%0'">, DefaultFatal; + +def err_os_unsupport_riscv_target_clones : Error< + "target_clones is currently only supported on Linux">; + } diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index 8a6511b9ced83..9a993d75fd517 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -1481,7 +1481,8 @@ class TargetInfo : public TransferrableTargetInfo, /// Identify whether this target supports multiversioning of functions, /// which requires support for cpu_supports and cpu_is functionality. bool supportsMultiVersioning() const { - return getTriple().isX86() || getTriple().isAArch64(); + return getTriple().isX86() || getTriple().isAArch64() || + getTriple().isRISCV(); } /// Identify whether this target supports IFuncs. diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index bf74e56a14799..7cd505dbbf639 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -13744,6 +13744,16 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, Features.insert(Features.begin(), Target->getTargetOpts().FeaturesAsWritten.begin(), Target->getTargetOpts().FeaturesAsWritten.end()); + } else if (Target->getTriple().isRISCV()) { + StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); + if (VersionStr != "default") { + ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(VersionStr); + Features.insert(Features.begin(), ParsedAttr.Features.begin(), + ParsedAttr.Features.end()); + } + Features.insert(Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); } else { StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); if (VersionStr.starts_with("arch=")) diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index a7ce9dda34bdd..2bb8ecd6f0f82 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -257,7 +257,7 @@ bool RISCVTargetInfo::initFeatureMap( // If a target attribute specified a full arch string, override all the ISA // extension target features. 
- const auto I = llvm::find(FeaturesVec, "__RISCV_TargetAttrNeedOverride"); + const auto I = llvm::find(FeaturesVec, "+__RISCV_TargetAttrNeedOverride"); if (I != FeaturesVec.end()) { std::vector<std::string> OverrideFeatures(std::next(I), FeaturesVec.end()); @@ -367,6 +367,12 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features, return true; } +bool RISCVTargetInfo::isValidFeatureName(StringRef Feature) const { + if (Feature.starts_with("__RISCV_TargetAttrNeedOverride")) + return true; + return llvm::RISCVISAInfo::isSupportedExtensionFeature(Feature); +} + bool RISCVTargetInfo::isValidCPUName(StringRef Name) const { bool Is64Bit = getTriple().isArch64Bit(); return llvm::RISCV::parseCPU(Name, Is64Bit); @@ -391,7 +397,7 @@ void RISCVTargetInfo::fillValidTuneCPUList( static void handleFullArchString(StringRef FullArchStr, std::vector<std::string> &Features) { - Features.push_back("__RISCV_TargetAttrNeedOverride"); + Features.push_back("+__RISCV_TargetAttrNeedOverride"); auto RII = llvm::RISCVISAInfo::parseArchString( FullArchStr, /* EnableExperimentalExtension */ true); if (llvm::errorToBool(RII.takeError())) { diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h index d0e9cdc6da07b..bdb3188291c1e 100644 --- a/clang/lib/Basic/Targets/RISCV.h +++ b/clang/lib/Basic/Targets/RISCV.h @@ -106,6 +106,8 @@ class RISCVTargetInfo : public TargetInfo { bool handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) override; + bool isValidFeatureName(StringRef Feature) const override; + bool hasBitIntType() const override { return true; } bool hasBFloat16Type() const override { return true; } diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 06e201fa71e6f..d7f3af8893db5 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -62,6 +62,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/ScopedPrinter.h" #include "llvm/TargetParser/AArch64TargetParser.h" +#include "llvm/TargetParser/RISCVTargetParser.h" #include "llvm/TargetParser/X86TargetParser.h" #include <optional> #include <sstream> @@ -14174,6 +14175,16 @@ Value *CodeGenFunction::EmitAArch64CpuInit() { return Builder.CreateCall(Func); } +Value *CodeGenFunction::EmitRISCVCpuInit() { + llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); + llvm::FunctionCallee Func = + CGM.CreateRuntimeFunction(FTy, "__init_riscv_features_bit"); + cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); + cast<llvm::GlobalValue>(Func.getCallee()) + ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); + return Builder.CreateCall(Func); +} + Value *CodeGenFunction::EmitX86CpuInit() { llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, /*Variadic*/ false); @@ -14226,6 +14237,50 @@ CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) { return Result; } +llvm::Value * +CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) { + + const unsigned FeatureBitSize = 2; + llvm::ArrayType *ArrayOfInt64Ty = + llvm::ArrayType::get(Int64Ty, FeatureBitSize); + llvm::Type *StructTy = llvm::StructType::get(Int32Ty, ArrayOfInt64Ty); + llvm::Constant *RISCVFeaturesBits = + CGM.CreateRuntimeVariable(StructTy, "__riscv_feature_bits"); + cast<llvm::GlobalValue>(RISCVFeaturesBits)->setDSOLocal(true); + + auto LoadFeatureBit = [&](unsigned Index) { + // Create GEP then load. 
+ llvm::Value *IndexVal = llvm::ConstantInt::get(Int32Ty, Index); + std::vector<llvm::Value *> GEPIndices = {llvm::ConstantInt::get(Int32Ty, 0), + llvm::ConstantInt::get(Int32Ty, 1), + IndexVal}; + llvm::Value *Ptr = + Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices); + Value *FeaturesBit = + Builder.CreateAlignedLoad(Int64Ty, Ptr, CharUnits::fromQuantity(8)); + return FeaturesBit; + }; + + SmallVector<llvm::Value *> FeatureBits; + FeatureBits.push_back(LoadFeatureBit(0)); + FeatureBits.push_back(LoadFeatureBit(1)); + + SmallVector<unsigned long long> RequireFeatureBits = + llvm::RISCV::getRequireFeatureBitMask(FeaturesStrs); + + Value *Result = Builder.getTrue(); + for (unsigned i = 0; i < FeatureBits.size(); i++) { + if (!RequireFeatureBits[i]) + continue; + Value *Mask = Builder.getInt64(RequireFeatureBits[i]); + Value *Bitset = Builder.CreateAnd(FeatureBits[i], Mask); + Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); + Result = Builder.CreateAnd(Result, Cmp); + } + + return Result; +} + Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) { if (BuiltinID == Builtin::BI__builtin_cpu_is) diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index cea0d84c64bc4..a773fd0ed9262 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -2850,10 +2850,121 @@ void CodeGenFunction::EmitMultiVersionResolver( case llvm::Triple::aarch64: EmitAArch64MultiVersionResolver(Resolver, Options); return; + case llvm::Triple::riscv32: + case llvm::Triple::riscv64: + EmitRISCVMultiVersionResolver(Resolver, Options); + return; default: - assert(false && "Only implemented for x86 and AArch64 targets"); + assert(false && "Only implemented for x86, AArch64 and RISC-V targets"); + } +} + +void CodeGenFunction::EmitRISCVMultiVersionResolver( + llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) { + + if (getContext().getTargetInfo().getTriple().getOS() != + llvm::Triple::OSType::Linux) { + CGM.getDiags().Report(diag::err_os_unsupport_riscv_target_clones); + return; + } + + llvm::BasicBlock *EntryBlock = createBasicBlock("resolver_entry", Resolver); + Builder.SetInsertPoint(EntryBlock); + EmitRISCVCpuInit(); + + llvm::BasicBlock *CurBlock = createBasicBlock("resolver_cond", Resolver); + llvm::BasicBlock *FirstCond = CurBlock; + + bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc(); + bool HasDefault = false; + int DefaultIndex = 0; + // Check each candidate function. + for (unsigned Index = 0; Index < Options.size(); Index++) { + + if (Options[Index].Conditions.Features[0].starts_with("default")) { + HasDefault = true; + DefaultIndex = Index; + continue; + } + + Builder.SetInsertPoint(CurBlock); + + std::vector<std::string> TargetAttrFeats = + getContext() + .getTargetInfo() + .parseTargetAttr(Options[Index].Conditions.Features[0]) + .Features; + + if (!TargetAttrFeats.empty()) { + // If this function doesn't need override, then merge with module level + // target features. Otherwise, retain the current target features. + auto I = llvm::find(TargetAttrFeats, "+__RISCV_TargetAttrNeedOverride"); + if (I == TargetAttrFeats.end()) + TargetAttrFeats.insert(TargetAttrFeats.begin(), + Target.getTargetOpts().FeaturesAsWritten.begin(), + Target.getTargetOpts().FeaturesAsWritten.end()); + else + TargetAttrFeats.erase(I); + + // Only consider +<extension-feature>. 
+ llvm::SmallVector<StringRef, 8> PlusTargetAttrFeats; + for (StringRef Feat : TargetAttrFeats) { + if (!getContext().getTargetInfo().isValidFeatureName( + Feat.substr(1).str())) + continue; + if (Feat.starts_with("+")) + PlusTargetAttrFeats.push_back(Feat.substr(1)); + } + + llvm::Value *Condition = EmitRISCVCpuSupports(PlusTargetAttrFeats); + llvm::BasicBlock *RetBlock = + createBasicBlock("resolver_return", Resolver); + CGBuilderTy RetBuilder(*this, RetBlock); + CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, + Options[Index].Function, SupportsIFunc); + CurBlock = createBasicBlock("resolver_else", Resolver); + Builder.CreateCondBr(Condition, RetBlock, CurBlock); + } + } + + // Finally, emit the default one. + if (HasDefault) { + Builder.SetInsertPoint(CurBlock); + CreateMultiVersionResolverReturn( + CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc); + + Builder.SetInsertPoint(EntryBlock); + const unsigned FeatureBitSize = 2; + llvm::ArrayType *ArrayOfInt64Ty = + llvm::ArrayType::get(Int64Ty, FeatureBitSize); + llvm::Type *StructTy = llvm::StructType::get(Int32Ty, ArrayOfInt64Ty); + llvm::Constant *RISCVFeaturesBits = + CGM.CreateRuntimeVariable(StructTy, "__riscv_feature_bits"); + cast<llvm::GlobalValue>(RISCVFeaturesBits)->setDSOLocal(true); + std::vector<llvm::Value *> GEPIndices = { + llvm::ConstantInt::get(Int32Ty, 0), llvm::ConstantInt::get(Int32Ty, 0)}; + llvm::Value *Ptr = + Builder.CreateInBoundsGEP(StructTy, RISCVFeaturesBits, GEPIndices); + llvm::Value *Length = + Builder.CreateAlignedLoad(Int32Ty, Ptr, CharUnits::fromQuantity(8)); + + llvm::Value *FeatureBitSizeVal = + llvm::ConstantInt::get(Int32Ty, FeatureBitSize); + llvm::Value *Result = Builder.CreateICmpULE(Length, FeatureBitSizeVal); + + Builder.CreateCondBr(Result, FirstCond, CurBlock); + + return; } + + // If no generic/default, emit an unreachable. 
+ Builder.SetInsertPoint(CurBlock); + llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap); + TrapCall->setDoesNotReturn(); + TrapCall->setDoesNotThrow(); + Builder.CreateUnreachable(); + Builder.ClearInsertionPoint(); } void CodeGenFunction::EmitAArch64MultiVersionResolver( diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 5739fbaaa9194..e8d5d5690e1f2 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -5254,6 +5254,9 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options); + void + EmitRISCVMultiVersionResolver(llvm::Function *Resolver, + ArrayRef<MultiVersionResolverOption> Options); private: QualType getVarArgType(const Expr *Arg); @@ -5278,6 +5281,8 @@ class CodeGenFunction : public CodeGenTypeCache { FormAArch64ResolverCondition(const MultiVersionResolverOption &RO); llvm::Value *EmitAArch64CpuSupports(const CallExpr *E); llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs); + llvm::Value *EmitRISCVCpuInit(); + llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeatureStrs); }; inline DominatingLLVMValue::saved_type diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index dd4a665ebc78b..66485d63a3d73 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -4233,7 +4233,10 @@ void CodeGenModule::emitMultiVersionFunctions() { Feats.clear(); if (getTarget().getTriple().isAArch64()) TC->getFeatures(Feats, I); - else { + else if (getTarget().getTriple().isRISCV()) { + StringRef Version = TC->getFeatureStr(I); + Feats.push_back(Version); + } else { StringRef Version = TC->getFeatureStr(I); if (Version.starts_with("arch=")) Architecture = Version.drop_front(sizeof("arch=") - 1); diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp index 7b32c79723562..2ef90878929ee 100644 --- a/clang/lib/CodeGen/Targets/RISCV.cpp +++ b/clang/lib/CodeGen/Targets/RISCV.cpp @@ -63,9 +63,32 @@ class RISCVABIInfo : public DefaultABIInfo { CharUnits Field2Off) const; ABIArgInfo coerceVLSVector(QualType Ty) const; + + using ABIInfo::appendAttributeMangling; + void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index, + raw_ostream &Out) const override; + void appendAttributeMangling(StringRef AttrStr, + raw_ostream &Out) const override; }; } // end anonymous namespace +void RISCVABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, + unsigned Index, + raw_ostream &Out) const { + appendAttributeMangling(Attr->getFeatureStr(Index), Out); +} + +void RISCVABIInfo::appendAttributeMangling(StringRef AttrStr, + raw_ostream &Out) const { + if (AttrStr == "default") { + Out << ".default"; + return; + } + + Out << '.'; + Out << AttrStr; +} + void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const { QualType RetTy = FI.getReturnType(); if (!getCXXABI().classifyReturnType(FI)) diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index ce6b5b1ff6f93..b15bda0a2b655 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -3152,6 +3152,31 @@ bool Sema::checkTargetClonesAttrString( /*IncludeLocallyStreaming=*/false)) return Diag(LiteralLoc, diag::err_sme_streaming_cannot_be_multiversioned); + } else if (TInfo.getTriple().isRISCV()) { + // Suppress warn_target_clone_mixed_values + HasCommas = false; + + if (Str.starts_with("arch=")) { + // 
parseTargetAttr will parse full version string, + // the following split Cur string is no longer interesting. + if ((!Cur.starts_with("arch="))) + continue; + + ParsedTargetAttr TargetAttr = + Context.getTargetInfo().parseTargetAttr(Str); + if (TargetAttr.Features.empty()) + return Diag(CurLoc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Str << TargetClones; + } else if (Str == "default") { + DefaultIsDupe = HasDefault; + HasDefault = true; + } else { + return Diag(CurLoc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Str << TargetClones; + } + if (llvm::is_contained(StringsBuffer, Str) || DefaultIsDupe) + Diag(CurLoc, diag::warn_target_clone_duplicate_options); + StringsBuffer.push_back(Str); } else { // Other targets ( currently X86 ) if (Cur.starts_with("arch=")) { diff --git a/clang/test/CodeGen/attr-target-clones-riscv-invaild.c b/clang/test/CodeGen/attr-target-clones-riscv-invaild.c new file mode 100644 index 0000000000000..f5e8f40d7a8f3 --- /dev/null +++ b/clang/test/CodeGen/attr-target-clones-riscv-invaild.c @@ -0,0 +1,8 @@ +// RUN: not %clang_cc1 -triple riscv64 -target-feature +i -emit-llvm -o - %s 2>&1 | FileCheck %s --check-prefix=CHECK-UNSUPPORT-OS + +// CHECK-UNSUPPORT-OS: error: target_clones is currently only supported on Linux +__attribute__((target_clones("default", "arch=+c"))) int foo2(void) { + return 2; +} + +int bar() { return foo1()+foo2(); } diff --git a/clang/test/CodeGen/attr-target-clones-riscv.c b/clang/test/CodeGen/attr-target-clones-riscv.c new file mode 100644 index 0000000000000..2bd2ad78c6baf --- /dev/null +++ b/clang/test/CodeGen/attr-target-clones-riscv.c @@ -0,0 +1,186 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --include-generated-funcs --version 4 +// RUN: %clang_cc1 -triple riscv64-linux-gnu -target-feature +i -emit-llvm -o - %s | FileCheck %s + +__attribute__((target_clones("default", "arch=rv64im"))) int foo1(void) { + return 1; +} +__attribute__((target_clones("default", "arch=+zbb"))) int foo2(void) { return 2; } +__attribute__((target_clones("default", "arch=+zbb,+c"))) int foo3(void) { return 3; } +__attribute__((target_clones("default", "arch=rv64ima", "arch=+zbb,+v"))) int +foo4(void) { + return 4; +} +__attribute__((target_clones("default"))) int foo5(void) { return 5; } + +int bar() { return foo1() + foo2() + foo3() + foo4() + foo5(); } + +//. +// CHECK: @__riscv_feature_bits = external dso_local global { i32, [2 x i64] } +// CHECK: @foo1.ifunc = weak_odr alias i32 (), ptr @foo1 +// CHECK: @foo2.ifunc = weak_odr alias i32 (), ptr @foo2 +// CHECK: @foo3.ifunc = weak_odr alias i32 (), ptr @foo3 +// CHECK: @foo4.ifunc = weak_odr alias i32 (), ptr @foo4 +// CHECK: @foo5.ifunc = weak_odr alias i32 (), ptr @foo5 +// CHECK: @foo1 = weak_odr ifunc i32 (), ptr @foo1.resolver +// CHECK: @foo2 = weak_odr ifunc i32 (), ptr @foo2.resolver +// CHECK: @foo3 = weak_odr ifunc i32 (), ptr @foo3.resolver +// CHECK: @foo4 = weak_odr ifunc i32 (), ptr @foo4.resolver +// CHECK: @foo5 = weak_odr ifunc i32 (), ptr @foo5.resolver +//. 
+// CHECK-LABEL: define dso_local signext i32 @foo1.default( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 1 +// +// +// CHECK-LABEL: define weak_odr ptr @foo1.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 1048576 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 1048576 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"foo1.arch=rv64im" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @foo1.default +// +// +// CHECK-LABEL: define dso_local signext i32 @foo2.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 2 +// +// +// CHECK-LABEL: define weak_odr ptr @foo2.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 9007199254740993 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 9007199254740993 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"foo2.arch=+zbb" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @foo2.default +// +// +// CHECK-LABEL: define dso_local signext i32 @foo3.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 3 +// +// +// CHECK-LABEL: define weak_odr ptr @foo3.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 9015995347763201 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 9015995347763201 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 
[[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"foo3.arch=+zbb,+c" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @foo3.default +// +// +// CHECK-LABEL: define dso_local signext i32 @foo4.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 4 +// +// +// CHECK-LABEL: define weak_odr ptr @foo4.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE2:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 5242880 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 5242880 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"foo4.arch=rv64ima" +// CHECK: resolver_else: +// CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP7]], 9007199254740993 +// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 9007199254740993 +// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]] +// CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP8]], 4194304 +// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[TMP12]], 4194304 +// CHECK-NEXT: [[TMP14:%.*]] = and i1 [[TMP11]], [[TMP13]] +// CHECK-NEXT: br i1 [[TMP14]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2]] +// CHECK: resolver_return1: +// CHECK-NEXT: ret ptr @"foo4.arch=+zbb,+v" +// CHECK: resolver_else2: +// CHECK-NEXT: ret ptr @foo4.default +// +// +// CHECK-LABEL: define dso_local signext i32 @foo5.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 5 +// +// +// CHECK-LABEL: define weak_odr ptr @foo5.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_COND]] +// CHECK: resolver_cond: +// CHECK-NEXT: ret ptr @foo5.default +// +// +// CHECK-LABEL: define dso_local signext i32 @bar( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[CALL:%.*]] = call signext i32 @foo1() +// CHECK-NEXT: [[CALL1:%.*]] = call signext i32 @foo2() +// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[CALL1]] +// CHECK-NEXT: [[CALL2:%.*]] = call signext i32 @foo3() +// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[CALL2]] +// CHECK-NEXT: [[CALL4:%.*]] = call signext i32 @foo4() +// CHECK-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD3]], [[CALL4]] +// CHECK-NEXT: [[CALL6:%.*]] = call signext i32 @foo5() +// CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 [[ADD5]], [[CALL6]] +// 
CHECK-NEXT: ret i32 [[ADD7]] +// +//. +// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+i" } +// CHECK: attributes #[[ATTR1:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+m,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" } +// CHECK: attributes #[[ATTR2:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+i,+zbb" } +// CHECK: attributes #[[ATTR3:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+c,+i,+zbb" } +// CHECK: attributes #[[ATTR4:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+a,+m,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" } +// CHECK: attributes #[[ATTR5:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+d,+f,+i,+v,+zbb,+zicsr,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b" } +//. +// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4} +// CHECK: [[META1:![0-9]+]] = !{i32 1, !"target-abi", !"lp64"} +// CHECK: [[META2:![0-9]+]] = !{i32 6, !"riscv-isa", [[META3:![0-9]+]]} +// CHECK: [[META3]] = !{!"rv64i2p1"} +// CHECK: [[META4:![0-9]+]] = !{i32 8, !"SmallDataLimit", i32 0} +// CHECK: [[META5:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"} +//. diff --git a/clang/test/CodeGenCXX/attr-target-clones-riscv.cpp b/clang/test/CodeGenCXX/attr-target-clones-riscv.cpp new file mode 100644 index 0000000000000..5ae2727496b2f --- /dev/null +++ b/clang/test/CodeGenCXX/attr-target-clones-riscv.cpp @@ -0,0 +1,186 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --include-generated-funcs --version 4 +// RUN: %clang_cc1 -std=c++11 -triple riscv64-linux-gnu -target-feature +i -target-feature +m -emit-llvm %s -o - | FileCheck %s + +__attribute__((target_clones("default", "arch=rv64im"))) int foo1(void) { + return 1; +} +__attribute__((target_clones("default", "arch=+zbb"))) int foo2(void) { return 2; } +__attribute__((target_clones("default", "arch=+zbb,+c"))) int foo3(void) { return 3; } +__attribute__((target_clones("default", "arch=rv64ima", "arch=+zbb,+v"))) int +foo4(void) { + return 4; +} +__attribute__((target_clones("default"))) int foo5(void) { return 5; } + +int bar() { return foo1() + foo2() + foo3() + foo4() + foo5(); } + +//. +// CHECK: @__riscv_feature_bits = external dso_local global { i32, [2 x i64] } +// CHECK: @_Z4foo1v.ifunc = weak_odr alias i32 (), ptr @_Z4foo1v +// CHECK: @_Z4foo2v.ifunc = weak_odr alias i32 (), ptr @_Z4foo2v +// CHECK: @_Z4foo3v.ifunc = weak_odr alias i32 (), ptr @_Z4foo3v +// CHECK: @_Z4foo4v.ifunc = weak_odr alias i32 (), ptr @_Z4foo4v +// CHECK: @_Z4foo5v.ifunc = weak_odr alias i32 (), ptr @_Z4foo5v +// CHECK: @_Z4foo1v = weak_odr ifunc i32 (), ptr @_Z4foo1v.resolver +// CHECK: @_Z4foo2v = weak_odr ifunc i32 (), ptr @_Z4foo2v.resolver +// CHECK: @_Z4foo3v = weak_odr ifunc i32 (), ptr @_Z4foo3v.resolver +// CHECK: @_Z4foo4v = weak_odr ifunc i32 (), ptr @_Z4foo4v.resolver +// CHECK: @_Z4foo5v = weak_odr ifunc i32 (), ptr @_Z4foo5v.resolver +//. 
+// CHECK-LABEL: define dso_local noundef signext i32 @_Z4foo1v.default( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 1 +// +// +// CHECK-LABEL: define weak_odr ptr @_Z4foo1v.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 1048576 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 1048576 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"_Z4foo1v.arch=rv64im" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @_Z4foo1v.default +// +// +// CHECK-LABEL: define dso_local noundef signext i32 @_Z4foo2v.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 2 +// +// +// CHECK-LABEL: define weak_odr ptr @_Z4foo2v.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 9007199255789569 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 9007199255789569 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"_Z4foo2v.arch=+zbb" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @_Z4foo2v.default +// +// +// CHECK-LABEL: define dso_local noundef signext i32 @_Z4foo3v.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 3 +// +// +// CHECK-LABEL: define weak_odr ptr @_Z4foo3v.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 9015995348811777 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 9015995348811777 +// 
CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"_Z4foo3v.arch=+zbb,+c" +// CHECK: resolver_else: +// CHECK-NEXT: ret ptr @_Z4foo3v.default +// +// +// CHECK-LABEL: define dso_local noundef signext i32 @_Z4foo4v.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 4 +// +// +// CHECK-LABEL: define weak_odr ptr @_Z4foo4v.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_ELSE2:%.*]] +// CHECK: resolver_cond: +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], 5242880 +// CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP4]], 5242880 +// CHECK-NEXT: [[TMP6:%.*]] = and i1 true, [[TMP5]] +// CHECK-NEXT: br i1 [[TMP6]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]] +// CHECK: resolver_return: +// CHECK-NEXT: ret ptr @"_Z4foo4v.arch=rv64ima" +// CHECK: resolver_else: +// CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 0), align 8 +// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr inbounds ({ i32, [2 x i64] }, ptr @__riscv_feature_bits, i32 0, i32 1, i32 1), align 8 +// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP7]], 9007199255789569 +// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 9007199255789569 +// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]] +// CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP8]], 4194304 +// CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[TMP12]], 4194304 +// CHECK-NEXT: [[TMP14:%.*]] = and i1 [[TMP11]], [[TMP13]] +// CHECK-NEXT: br i1 [[TMP14]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2]] +// CHECK: resolver_return1: +// CHECK-NEXT: ret ptr @"_Z4foo4v.arch=+zbb,+v" +// CHECK: resolver_else2: +// CHECK-NEXT: ret ptr @_Z4foo4v.default +// +// +// CHECK-LABEL: define dso_local noundef signext i32 @_Z4foo5v.default( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: ret i32 5 +// +// +// CHECK-LABEL: define weak_odr ptr @_Z4foo5v.resolver() comdat { +// CHECK-NEXT: resolver_entry: +// CHECK-NEXT: call void @__init_riscv_features_bit() +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__riscv_feature_bits, align 8 +// CHECK-NEXT: [[TMP1:%.*]] = icmp ule i32 [[TMP0]], 2 +// CHECK-NEXT: br i1 [[TMP1]], label [[RESOLVER_COND:%.*]], label [[RESOLVER_COND]] +// CHECK: resolver_cond: +// CHECK-NEXT: ret ptr @_Z4foo5v.default +// +// +// CHECK-LABEL: define dso_local noundef signext i32 @_Z3barv( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z4foo1v() +// CHECK-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_Z4foo2v() +// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[CALL1]] +// CHECK-NEXT: [[CALL2:%.*]] = call noundef signext i32 @_Z4foo3v() +// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[CALL2]] +// CHECK-NEXT: [[CALL4:%.*]] = call noundef signext i32 @_Z4foo4v() +// 
CHECK-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD3]], [[CALL4]] +// CHECK-NEXT: [[CALL6:%.*]] = call noundef signext i32 @_Z4foo5v() +// CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 [[ADD5]], [[CALL6]] +// CHECK-NEXT: ret i32 [[ADD7]] +// +//. +// CHECK: attributes #[[ATTR0]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+i,+m" } +// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+m,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" } +// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+i,+m,+zbb" } +// CHECK: attributes #[[ATTR3:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+c,+i,+m,+zbb" } +// CHECK: attributes #[[ATTR4:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+a,+m,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" } +// CHECK: attributes #[[ATTR5:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+64bit,+d,+f,+i,+m,+v,+zbb,+zicsr,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b" } +//. +// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4} +// CHECK: [[META1:![0-9]+]] = !{i32 1, !"target-abi", !"lp64"} +// CHECK: [[META2:![0-9]+]] = !{i32 6, !"riscv-isa", [[META3:![0-9]+]]} +// CHECK: [[META3]] = !{!"rv64i2p1_m2p0"} +// CHECK: [[META4:![0-9]+]] = !{i32 8, !"SmallDataLimit", i32 0} +// CHECK: [[META5:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"} +//. 
diff --git a/clang/test/SemaCXX/attr-target-clones-riscv.cpp b/clang/test/SemaCXX/attr-target-clones-riscv.cpp new file mode 100644 index 0000000000000..4e5e8546e21f3 --- /dev/null +++ b/clang/test/SemaCXX/attr-target-clones-riscv.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple riscv64-linux-gnu -fsyntax-only -verify -fexceptions -fcxx-exceptions %s -std=c++14 + +// expected-warning@+1 {{unsupported 'mcpu=sifive-u74' in the 'target_clones' attribute string; 'target_clones' attribute ignored}} +void __attribute__((target_clones("default", "mcpu=sifive-u74"))) mcpu() {} + +// expected-warning@+1 {{unsupported 'mtune=sifive-u74' in the 'target_clones' attribute string; 'target_clones' attribute ignored}} +void __attribute__((target_clones("default", "mtune=sifive-u74"))) mtune() {} + +// expected-warning@+1 {{version list contains duplicate entries}} +void __attribute__((target_clones("default", "arch=+c", "arch=+c"))) dupVersion() {} + +// expected-warning@+1 {{unsupported '' in the 'target_clones' attribute string; 'target_clones' attribute ignored}} +void __attribute__((target_clones("default", ""))) emptyVersion() {} + +// expected-error@+1 {{'target_clones' multiversioning requires a default target}} +void __attribute__((target_clones("arch=+c"))) withoutDefault() {} + +// expected-warning@+1 {{unsupported '+c' in the 'target_clones' attribute string; 'target_clones' attribute ignored}} +void __attribute__((target_clones("default", "+c"))) invalidVersion() {} + +void lambda() { + // expected-error@+1 {{attribute 'target_clones' multiversioned functions do not yet support lambdas}} + auto x = []() __attribute__((target_clones("default"))){}; + x(); + // expected-error@+1 {{attribute 'target_clones' multiversioned functions do not yet support lambdas}} + auto y = []() __attribute__((target_clones("arch=rv64gc", "default"))){}; + y(); +} diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h index 8444935bd666d..a9d80b553a980 100644 --- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h +++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h @@ -44,6 +44,8 @@ StringRef getMArchFromMcpu(StringRef CPU); void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64); void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64); bool hasFastUnalignedAccess(StringRef CPU); +llvm::SmallVector<unsigned long long> + getRequireFeatureBitMask(ArrayRef<StringRef>); } // namespace RISCV diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp index ec6447a44f90d..dfaec9bdcd115 100644 --- a/llvm/lib/TargetParser/RISCVTargetParser.cpp +++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp @@ -151,6 +151,20 @@ getExtensionBitmask(StringRef ExtName) { return RISCVExtensionBitmaskTable::RISCVExtensionBitmask(); } + +llvm::SmallVector<unsigned long long> +getRequireFeatureBitMask(ArrayRef<StringRef> Exts) { + llvm::SmallVector<unsigned long long> BitMasks = {0, 0}; + + for (auto Ext : Exts) { + RISCVExtensionBitmaskTable::RISCVExtensionBitmask ExtBitmask = + getExtensionBitmask(Ext); + assert(ExtBitmask.Bitmask != 0 && "This extension doesn't have a bitmask."); + BitMasks[ExtBitmask.GroupID] |= ExtBitmask.Bitmask; + } + + return BitMasks; +} } // namespace RISCV namespace RISCVVType {
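For anyone tracing the generated resolvers in the tests above: each resolver calls the init hook, guards on the length field of __riscv_feature_bits, and then requires every bit of the clone's per-group bitmask (the masks that getRequireFeatureBitMask assembles from the TableGen-generated table) to be set in the corresponding feature word. The C sketch below is an illustration only, not part of the patch: the helper name clone_bits_present is invented here, the loop stands in for the unrolled and/icmp pairs in the emitted IR, and the struct mirrors the { i32, [2 x i64] } global used by these tests.

struct riscv_feature_bits_layout {
  unsigned length;                /* number of valid entries in features[] */
  unsigned long long features[2]; /* one bitmask word per extension group  */
};
extern struct riscv_feature_bits_layout __riscv_feature_bits;
extern void __init_riscv_features_bit(void);

/* required[g] holds the bits a given clone needs in extension group g. */
static int clone_bits_present(const unsigned long long required[2]) {
  __init_riscv_features_bit();
  int ok = 1;
  for (int g = 0; g < 2; ++g)
    ok = ok && ((__riscv_feature_bits.features[g] & required[g]) == required[g]);
  return ok;
}

For example, the foo1 resolver above effectively checks required = {1048576, 0}, i.e. only bit 20 of group 0, which is exactly the "and i64 ..., 1048576" / "icmp eq" pair in its CHECK lines.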