Author: Jay Foad
Date: 2025-01-29T17:49:54Z
New Revision: aa2952165cd1808dab2bb49b97becc097f4c9cac

URL: 
https://github.com/llvm/llvm-project/commit/aa2952165cd1808dab2bb49b97becc097f4c9cac
DIFF: 
https://github.com/llvm/llvm-project/commit/aa2952165cd1808dab2bb49b97becc097f4c9cac.diff

LOG: Fix typo "tranpose" (#124929)

Added: 
    

Modified: 
    clang/lib/Headers/amxtf32transposeintrin.h
    llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
    mlir/docs/Canonicalization.md
    mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
    mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
    mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
    mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
    mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
    mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
    mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
    mlir/lib/Dialect/Vector/IR/VectorOps.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
    mlir/test/Dialect/Vector/vector-unroll-options.mlir

Removed: 
    


################################################################################
diff --git a/clang/lib/Headers/amxtf32transposeintrin.h 
b/clang/lib/Headers/amxtf32transposeintrin.h
index 60336f953ecb7a..e1b90c1adfb22a 100644
--- a/clang/lib/Headers/amxtf32transposeintrin.h
+++ b/clang/lib/Headers/amxtf32transposeintrin.h
@@ -8,7 +8,7 @@
  */
 #ifndef __IMMINTRIN_H
 #error                                                                         
\
-    "Never use <amxtf32tranposeintrin.h> directly; include <immintrin.h> 
instead."
+    "Never use <amxtf32transposeintrin.h> directly; include <immintrin.h> 
instead."
 #endif // __IMMINTRIN_H
 
 #ifndef __AMX_TF32TRANSPOSEINTRIN_H

diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp 
b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
index db9aa7e18f5e7a..cfb552c65e0c6f 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
@@ -2269,7 +2269,7 @@ OpRef HvxSelector::perfect(ShuffleMask SM, OpRef Va, 
ResultStack &Results) {
   // For example, with the inputs as above, the result will be:
   //   0 8  2 A  4 C  6 E
   //   1 9  3 B  5 D  7 F
-  // Now, this result can be tranposed again, but with the group size of 2:
+  // Now, this result can be transposed again, but with the group size of 2:
   //   08 19  4C 5D
   //   2A 3B  6E 7F
   // If we then transpose that result, but with the group size of 4, we get:

diff --git a/mlir/docs/Canonicalization.md b/mlir/docs/Canonicalization.md
index 03fd174229afe9..6e59a4128093a2 100644
--- a/mlir/docs/Canonicalization.md
+++ b/mlir/docs/Canonicalization.md
@@ -71,7 +71,7 @@ For example, a pattern that transform
       outs(%init1 : tensor<2x1x3xf32>)
       dimensions = [1, 0, 2]
   %out = linalg.transpose
-      ins(%tranpose: tensor<2x1x3xf32>)
+      ins(%transpose: tensor<2x1x3xf32>)
       outs(%init2 : tensor<3x1x2xf32>)
       permutation = [2, 1, 0]
 ```

diff --git 
a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td 
b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 081bf9b6d3b239..e86d1754897759 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1007,7 +1007,7 @@ def PackTransposeOp : Op<Transform_Dialect, 
"structured.pack_transpose", [
 
     This operation may produce a silenceableFailure if the transpose spec is
     ill-formed (i.e. `outer_perm` or `inner_perm` are not permutations of the
-    proper rank) or if the tranposition of all involved operations fails for 
any
+    proper rank) or if the transposition of all involved operations fails for 
any
     reason.
 
     This operation returns 3 handles, one to the transformed LinalgOp, one to

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td 
b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 3b027dcfdfc70a..835c006356342e 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -2779,7 +2779,7 @@ def Vector_MatmulOp : Vector_Op<"matrix_multiply", [Pure,
     "`:` `(` type($lhs) `,` type($rhs) `)` `->` type($res)";
 }
 
-/// Vector dialect matrix tranposition op that operates on flattened 1-D
+/// Vector dialect matrix transposition op that operates on flattened 1-D
 /// MLIR vectors. This is the counterpart of llvm.matrix.transpose in MLIR.
 /// This may seem redundant with vector.transpose but it serves the purposes of
 /// more progressive lowering and localized type conversion on the path:
@@ -2799,7 +2799,7 @@ def Vector_FlatTransposeOp : Vector_Op<"flat_transpose", 
[Pure,
   let description = [{
     This is the counterpart of llvm.matrix.transpose in MLIR. It serves
     the purposes of more progressive lowering and localized type conversion.
-    Higher levels typically lower matrix tranpositions into 'vector.transpose'
+    Higher levels typically lower matrix transpositions into 'vector.transpose'
     operations. Subsequent rewriting rule progressively lower these operations
     into 'vector.flat_transpose' operations to bring the operations closer
     to the hardware ISA.

diff --git a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp 
b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
index 55965d9c2a531d..4be0fffe8b7285 100644
--- a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
+++ b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
@@ -332,7 +332,7 @@ struct SplatOpToArmSMELowering : public 
OpRewritePattern<vector::SplatOp> {
 ///   %transposed_src = arm_sme.tile_load %alloca[%c0, %c0]
 ///     layout<vertical> : memref<?x?xi32>, vector<[4]x[4]xi32>
 ///
-/// NOTE: Tranposing via memory is obviously expensive, the current intention
+/// NOTE: Transposing via memory is obviously expensive, the current intention
 /// is to avoid the transpose if possible, this is therefore intended as a
 /// fallback and to provide base support for Vector ops. If it turns out
 /// transposes can't be avoided then this should be replaced with a more 
optimal

diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp 
b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
index dc4ee4e926bb46..2d915b83f9a77a 100644
--- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
+++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
@@ -179,7 +179,7 @@ struct TransferReadLowering : public 
OpRewritePattern<vector::TransferReadOp> {
     if (isTransposeLoad &&
         elementType.getIntOrFloatBitWidth() < minTransposeBitWidth)
       return rewriter.notifyMatchFailure(
-          readOp, "Unsupported data type for tranposition");
+          readOp, "Unsupported data type for transposition");
 
     // If load is transposed, get the base shape for the tensor descriptor.
     SmallVector<int64_t> descShape(vecTy.getShape());

diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp 
b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
index 12c65a72babcb8..dec3dca988ae91 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -304,7 +304,7 @@ struct LegalizeTransferReadOpsByDecomposition
                                          kMatchFailureNonPermutationMap);
 
     // Note: For 2D vector types the only non-identity permutation is a simple
-    // tranpose [1, 0].
+    // transpose [1, 0].
     bool transposed = !permutationMap.isIdentity();
 
     auto loc = readOp.getLoc();
@@ -352,7 +352,7 @@ struct LegalizeTransferWriteOpsByDecomposition
                                          kMatchFailureNonPermutationMap);
 
     // Note: For 2D vector types the only non-identity permutation is a simple
-    // tranpose [1, 0].
+    // transpose [1, 0].
     bool transposed = !permutationMap.isIdentity();
 
     auto loc = writeOp.getLoc();

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp 
b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index dc7e724379ed05..3e0a6987bd85b0 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -935,7 +935,7 @@ LogicalResult NVVM::WgmmaMmaAsyncOp::verify() {
   // Check transpose (only available for f16/bf16)
   // Matrices A should be stored in row-major and B in column-major.
   // Only f16/bf16 matrices can be stored in either column-major or row-major
-  // by setting the tranpose value(imm-trans-a,imm-trans-b) in PTX code.
+  // by setting the transpose value(imm-trans-a,imm-trans-b) in PTX code.
   if ((typeA != WGMMATypes::f16 && typeA != WGMMATypes::bf16) &&
       (getLayoutA() == mlir::NVVM::MMALayout::col ||
        getLayoutB() == mlir::NVVM::MMALayout::row)) {

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp 
b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index caf9cdb3a3eb4f..4185fcce393d5b 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -147,7 +147,7 @@ linalg::isaBroadcastOpInterface(GenericOp op) {
 }
 
 
//===----------------------------------------------------------------------===//
-// TranposeOpInterface implementation
+// TransposeOpInterface implementation
 
//===----------------------------------------------------------------------===//
 std::optional<SmallVector<int64_t>>
 linalg::isaTransposeOpInterface(GenericOp op) {

diff --git a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp 
b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
index bdaf1f8666b92e..436d485ab9368c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
@@ -138,7 +138,7 @@ FailureOr<Operation *> transposeConv2D(RewriterBase 
&rewriter,
                                linalg::Conv2DNhwcHwcfQOp>(rewriter, op);
 }
 
-void populateTranposeConv2DPatterns(RewritePatternSet &patterns) {
+void populateTransposeConv2DPatterns(RewritePatternSet &patterns) {
   MLIRContext *context = patterns.getContext();
   patterns.insert<
       ConvConverter<linalg::Conv2DNhwcFhwcOp, linalg::Conv2DNhwcHwcfOp>,

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp 
b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index b7fac163ba5fe3..a988b2f4f1f4ab 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -1269,7 +1269,7 @@ struct LinalgOpRewriter : public 
OpRewritePattern<linalg::GenericOp> {
     AffineExpr i, j, k;
     bindDims(getContext(), i, j, k);
 
-    // TODO: more robust patterns, tranposed versions, more kernels,
+    // TODO: more robust patterns, transposed versions, more kernels,
     //       identify alpha and beta and pass them to the CUDA calls.
 
     // Recognize a SpMV kernel.

diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp 
b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index b35422f4ca3a9f..6a329499c71109 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -1488,7 +1488,7 @@ class ExtractFromInsertTransposeChainState {
 
   /// Try to fold in place to extract(source, extractPosition) and return the
   /// folded result. Return null if folding is not possible (e.g. due to an
-  /// internal tranposition in the result).
+  /// internal transposition in the result).
   Value tryToFoldExtractOpInPlace(Value source);
 
   ExtractOp extractOp;
@@ -1582,7 +1582,7 @@ 
ExtractFromInsertTransposeChainState::handleInsertOpWithPrefixPos(Value &res) {
 
 /// Try to fold in place to extract(source, extractPosition) and return the
 /// folded result. Return null if folding is not possible (e.g. due to an
-/// internal tranposition in the result).
+/// internal transposition in the result).
 Value ExtractFromInsertTransposeChainState::tryToFoldExtractOpInPlace(
     Value source) {
   // TODO: Canonicalization for dynamic position not implemented yet.

diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp 
b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 3035c419a1b565..b53aa997c90144 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -377,18 +377,18 @@ 
mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
     int64_t orginalZeroDim = it.value().getDimPosition(0);
     if (orginalZeroDim != dimToDrop) {
       // There are two reasons to be in this path, 1. We need to
-      // tranpose the operand to make the dim to be dropped
+      // transpose the operand to make the dim to be dropped
       // leading. 2. The dim to be dropped does not exist and in
-      // that case we dont want to add a unit tranpose but we must
+      // that case we dont want to add a unit transpose but we must
       // check all the indices to make sure this is the case.
-      bool tranposeNeeded = false;
+      bool transposeNeeded = false;
       SmallVector<int64_t> perm;
       SmallVector<AffineExpr> transposeResults;
 
       for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
         int64_t currDim = map.getDimPosition(i);
         if (currDim == dimToDrop) {
-          tranposeNeeded = true;
+          transposeNeeded = true;
           perm.insert(perm.begin(), i);
           auto targetExpr = rewriter.getAffineDimExpr(currDim);
           transposeResults.insert(transposeResults.begin(), targetExpr);
@@ -413,9 +413,9 @@ 
mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
         }
       }
 
-      // Do the tranpose now if needed so that we can drop the
+      // Do the transpose now if needed so that we can drop the
       // correct dim using extract later.
-      if (tranposeNeeded) {
+      if (transposeNeeded) {
         map = AffineMap::get(map.getNumDims(), 0, transposeResults,
                              contractOp.getContext());
         if (transposeNonOuterUnitDims) {
@@ -474,7 +474,7 @@ namespace {
 
 /// Turns vector.contract on vector with leading 1 dimensions into
 /// vector.extract followed by vector.contract on vector without leading
-/// 1 dimensions. Also performs tranpose of lhs and rhs operands if required
+/// 1 dimensions. Also performs transpose of lhs and rhs operands if required
 /// prior to extract.
 struct CastAwayContractionLeadingOneDim
     : public MaskableOpRewritePattern<vector::ContractionOp> {

diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp 
b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 275f11160487aa..47fca8e72b5739 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1792,11 +1792,11 @@ struct DropUnitDimsFromTransposeOp final
     auto dropDimsShapeCast = rewriter.create<vector::ShapeCastOp>(
         loc, sourceTypeWithoutUnitDims, op.getVector());
     // Create the new transpose.
-    auto tranposeWithoutUnitDims =
+    auto transposeWithoutUnitDims =
         rewriter.create<vector::TransposeOp>(loc, dropDimsShapeCast, newPerm);
     // Restore the unit dims via shape cast.
     rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(
-        op, op.getResultVectorType(), tranposeWithoutUnitDims);
+        op, op.getResultVectorType(), transposeWithoutUnitDims);
 
     return success();
   }

diff --git a/mlir/test/Dialect/Vector/vector-unroll-options.mlir 
b/mlir/test/Dialect/Vector/vector-unroll-options.mlir
index c51fc755dffa86..7e3fe56f6b1242 100644
--- a/mlir/test/Dialect/Vector/vector-unroll-options.mlir
+++ b/mlir/test/Dialect/Vector/vector-unroll-options.mlir
@@ -232,11 +232,11 @@ func.func @vector_reduction(%v : vector<8xf32>) -> f32 {
 //       CHECK:   %[[add3:.*]] = arith.addf %[[add2]], %[[r3]]
 //       CHECK:   return %[[add3]]
 
-func.func @vector_tranpose(%v : vector<2x4x3x8xf32>) -> vector<2x3x8x4xf32> {
+func.func @vector_transpose(%v : vector<2x4x3x8xf32>) -> vector<2x3x8x4xf32> {
   %t = vector.transpose %v, [0, 2, 3, 1] : vector<2x4x3x8xf32> to 
vector<2x3x8x4xf32>
   return %t : vector<2x3x8x4xf32>
 }
-// CHECK-LABEL: func @vector_tranpose
+// CHECK-LABEL: func @vector_transpose
 //       CHECK:   %[[VI:.*]] = arith.constant dense<0.000000e+00> : 
vector<2x3x8x4xf32>
 //       CHECK:   %[[E0:.*]] = vector.extract_strided_slice %{{.*}} {offsets = 
[0, 0, 0, 0], sizes = [1, 2, 3, 4], strides = [1, 1, 1, 1]} : 
vector<2x4x3x8xf32> to vector<1x2x3x4xf32>
 //       CHECK:   %[[T0:.*]] = vector.transpose %[[E0]], [0, 2, 3, 1] : 
vector<1x2x3x4xf32> to vector<1x3x4x2xf32>


        
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to