joechrisellis created this revision.
joechrisellis added reviewers: peterwaller-arm, DavidTruby.
Herald added subscribers: psnobl, kristof.beyls, tschuett.
Herald added a reviewer: efriedma.
joechrisellis requested review of this revision.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.
VLST arguments are coerced to VLATs at the function boundary for consistency with the VLAT ABI. They are then bitcast back to VLSTs in the function prolog. Previously, this conversion was done through memory. With the introduction of the llvm.experimental.vector.{insert,extract} intrinsics, we can avoid going through memory here.

Depends on D92761 <https://reviews.llvm.org/D92761>

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D92762

Files:
  clang/lib/CodeGen/CGCall.cpp
  clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
  clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
  clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
  clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
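For reference, this is the kind of function affected, a minimal sketch mirroring to_svint32_t from the updated test below (the 512-bit width is only illustrative; it matches the <16 x i32> fixed type that appears in the checks):

  #include <arm_sve.h>

  /* fixed-length (VLST) view of an SVE vector, assuming -msve-vector-bits=512 */
  typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

  svint32_t to_svint32_t(fixed_int32_t type) {
    /* the VLST argument arrives coerced to a VLAT (<vscale x 4 x i32>);
       the prolog now recovers the fixed-length value without a stack slot */
    return type;
  }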
Index: clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
===================================================================
--- clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -13,11 +13,8 @@
// CHECK-LABEL: @to_svint32_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE1]], i64 0)
+// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
svint32_t to_svint32_t(fixed_int32_t type) {
@@ -39,11 +36,8 @@
// CHECK-LABEL: @to_svfloat64_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <8 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[TYPE]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[TYPE_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE1]], i64 0)
+// CHECK-NEXT: [[TYPE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
svfloat64_t to_svfloat64_t(fixed_float64_t type) {
@@ -65,15 +59,12 @@
// CHECK-LABEL: @to_svbool_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
+// CHECK-NEXT: [[TYPE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv16i1(<vscale x 16 x i1> [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <8 x i8> [[TYPE]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
svbool_t to_svbool_t(fixed_bool_t type) {
  return type;
@@ -130,11 +121,8 @@
// CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
// CHECK-NEXT: ret void
//
gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
Index: clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
===================================================================
--- clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -24,17 +24,14 @@
// CHECK-LABEL: @fixed_caller(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X1]], i64 0)
+// CHECK-NEXT: [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
fixed_int32_t fixed_caller(fixed_int32_t x) {
  return sizeless_callee(x);
@@ -42,15 +39,12 @@
// CHECK-LABEL: @fixed_callee(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
-// CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+// CHECK-NEXT: store <16 x i32> [[X]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
fixed_int32_t fixed_callee(fixed_int32_t x) {
  return x;
@@ -67,7 +61,7 @@
// CHECK-NEXT: [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP0]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6:!tbaa !.*]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
@@ -81,24 +75,18 @@
// CHECK-LABEL: @call_int32_ff(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
-// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP22]], i64 0)
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP2]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE3]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP3]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[OP2:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP2_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP2]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE2]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
  return svsel(pg, op1, op2);
@@ -106,24 +94,18 @@
// CHECK-LABEL: @call_float64_ff(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
-// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP22]], i64 0)
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP2]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE3]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP3]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[OP2:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP2_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP2]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE2]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
//
fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
  return svsel(pg, op1, op2);
@@ -131,32 +113,26 @@
// CHECK-LABEL: @call_bool_ff(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP2_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv16i1(<vscale x 16 x i1> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[OP2:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv16i1(<vscale x 16 x i1> [[OP2_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <8 x i8> [[OP1]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP2]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP1]], <vscale x 16 x i1> [[TMP3]])
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT: [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
+// CHECK-NEXT: store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP6]]
//
fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
  return svsel(pg, op1, op2);
@@ -168,19 +144,16 @@
// CHECK-LABEL: @call_int32_fs(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
  return svsel(pg, op1, op2);
@@ -188,19 +161,16 @@
// CHECK-LABEL: @call_float64_fs(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
//
fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
  return svsel(pg, op1, op2);
@@ -208,24 +178,21 @@
// CHECK-LABEL: @call_bool_fs(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv16i1(<vscale x 16 x i1> [[OP1_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <8 x i8> [[OP1]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP1]], <vscale x 16 x i1> [[OP2:%.*]])
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP5]]
+// CHECK-NEXT: store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
//
fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
  return svsel(pg, op1, op2);
Index: clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
===================================================================
--- clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -48,20 +48,14 @@
// CHECK-SAME: [[#VBITS]]
// CHECK-SAME: EES_(<vscale x 4 x i32> %x.coerce, <vscale x 4 x i32> %y.coerce)
// CHECK-NEXT: entry:
-// CHECK-NEXT: %x = alloca <[[#div(VBITS,32)]] x i32>, align 16
-// CHECK-NEXT: %y = alloca <[[#div(VBITS,32)]] x i32>, align 16
-// CHECK-NEXT: %retval.coerce = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: %0 = bitcast <[[#div(VBITS,32)]] x i32>* %x to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> %x.coerce, <vscale x 4 x i32>* %0, align 16
-// CHECK-NEXT: %x1 = load <[[#div(VBITS,32)]] x i32>, <[[#div(VBITS,32)]] x i32>* %x, align 16
-// CHECK-NEXT: %1 = bitcast <[[#div(VBITS,32)]] x i32>* %y to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> %y.coerce, <vscale x 4 x i32>* %1, align 16
-// CHECK-NEXT: %y2 = load <[[#div(VBITS,32)]] x i32>, <[[#div(VBITS,32)]] x i32>* %y, align 16
-// CHECK-NEXT: %add = add <[[#div(VBITS,32)]] x i32> %y2, %x1
-// CHECK-NEXT: %retval.0..sroa_cast = bitcast <vscale x 4 x i32>* %retval.coerce to <[[#div(VBITS,32)]] x i32>*
-// CHECK-NEXT: store <[[#div(VBITS,32)]] x i32> %add, <[[#div(VBITS,32)]] x i32>* %retval.0..sroa_cast, align 16
-// CHECK-NEXT: %2 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %retval.coerce, align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> %2
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS,32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS,32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS,32)]]i32.nxv4i32(<vscale x 4 x i32> [[Y_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS,32)]] x i32> [[Y]], [[X]]
+// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <[[#div(VBITS,32)]] x i32>*
+// CHECK-NEXT: store <[[#div(VBITS,32)]] x i32> [[ADD]], <[[#div(VBITS,32)]] x i32>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
typedef svint32_t vec __attribute__((arm_sve_vector_bits(N)));
auto f(vec x, vec y) { return x + y; } // Returns a vec.
#endif
@@ -76,19 +70,13 @@
// CHECK-SAME: [[#VBITS]]
// CHECK-SAME: EE(<vscale x 8 x i16> %x.coerce)
// CHECK-NEXT: entry:
-// CHECK128-NEXT: %x = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECK128-NEXT: %0 = bitcast <[[#div(VBITS,16)]] x i16>* %x to <vscale x 8 x i16>*
-// CHECK128-NEXT: store <vscale x 8 x i16> %x.coerce, <vscale x 8 x i16>* %0, align 16
-// CHECK128-NEXT: %x1 = load <[[#div(VBITS,16)]] x i16>, <[[#div(VBITS,16)]] x i16>* %x, align 16
-// CHECK128-NEXT: call void @_Z1fDv[[#div(VBITS,16)]]_s(<[[#div(VBITS,16)]] x i16> %x1)
+// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> [[X]]) [[ATTR5:#.*]]
// CHECK128-NEXT: ret void
-// CHECKWIDE-NEXT: %x = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECKWIDE-NEXT: %indirect-arg-temp = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECKWIDE-NEXT: %0 = bitcast <[[#div(VBITS,16)]] x i16>* %x to <vscale x 8 x i16>*
-// CHECKWIDE-NEXT: store <vscale x 8 x i16> %x.coerce, <vscale x 8 x i16>* %0, align 16
-// CHECKWIDE-NEXT: %x1 = load <[[#div(VBITS,16)]] x i16>, <[[#div(VBITS,16)]] x i16>* %x, align 16
-// CHECKWIDE-NEXT: store <[[#div(VBITS,16)]] x i16> %x1, <[[#div(VBITS,16)]] x i16>* %indirect-arg-temp, align 16
-// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS,16)]]_s(<[[#div(VBITS,16)]] x i16>* nonnull %indirect-arg-temp)
+// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,16)]] x i16>, align 16
+// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS,16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS,16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECKWIDE-NEXT: store <[[#div(VBITS,16)]] x i16> [[X]], <[[#div(VBITS,16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS,16)]]_s(<[[#div(VBITS,16)]] x i16>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#endif
Index: clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -79,21 +79,15 @@
// CHECK128-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
// CHECK128-NEXT: entry:
-// CHECK128-NEXT: [[X:%.*]] = alloca <16 x i8>, align 16
-// CHECK128-NEXT: [[TMP0:%.*]] = bitcast <16 x i8>* [[X]] to <vscale x 16 x i8>*
-// CHECK128-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
-// CHECK128-NEXT: [[X1:%.*]] = load <16 x i8>, <16 x i8>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK128-NEXT: call void @f3(<16 x i8> [[X1]]) [[ATTR5:#.*]]
+// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK128-NEXT: call void @f3(<16 x i8> [[X]]) [[ATTR5:#.*]]
// CHECK128-NEXT: ret void
// CHECK-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <[[#div(VBITS,8)]] x i8>* [[X]] to <vscale x 16 x i8>*
-// CHECK-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[X]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X1]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
// CHECK-NEXT: ret void
Index: clang/lib/CodeGen/CGCall.cpp
===================================================================
--- clang/lib/CodeGen/CGCall.cpp
+++ clang/lib/CodeGen/CGCall.cpp
@@ -2668,6 +2668,24 @@
        break;
      }
+      if (auto *VecTy = Ty->getAs<VectorType>()) {
+        // If this is a VLST coerced to a VLAT at the function boundary, use
+        // llvm.experimental.vector.insert to perform the conversion.
+        if (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+            VecTy->getVectorKind() ==
+                VectorType::SveFixedLengthPredicateVector) {
+          auto *LTy = ConvertType(Ty);
+          llvm::Value *Zero = llvm::Constant::getNullValue(this->CGM.Int64Ty);
+
+          assert(NumIRArgs == 1);
+          auto AI = Fn->getArg(FirstIRArg);
+          AI->setName(Arg->getName() + ".coerce");
+          ArgVals.push_back(ParamValue::forDirect(
+              Builder.CreateExtractVector(LTy, AI, Zero, "castScalableSve")));
+          break;
+        }
+      }
+
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), Arg->getName());
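For readers skimming the diff, the net effect on a typical prolog can be read off the to_svint32_t checks above. What was previously a round trip through a stack slot, roughly (concrete value names are only illustrative; the tests match them with FileCheck placeholders):

  %type = alloca <16 x i32>, align 16
  %0 = bitcast <16 x i32>* %type to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %type.coerce, <vscale x 4 x i32>* %0, align 16
  %type1 = load <16 x i32>, <16 x i32>* %type, align 16

now becomes a single intrinsic call:

  %type = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %type.coerce, i64 0)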
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits