================
@@ -172,6 +172,27 @@ Decl *SemaHLSL::ActOnStartBuffer(Scope *BufferScope, bool CBuffer,
   return Result;
 }
 
+static unsigned calculateLegacyCbufferFieldAlign(const ASTContext &Context,
+                                                 QualType T) {
+  // Aggregate types are always aligned to new buffer rows
+  if (T->isAggregateType())
+    return 16;
+
+  assert(Context.getTypeSize(T) <= 64 &&
+         "Scalar bit widths larger than 64 not supported");
+
+  // 64 bit types such as double and uint64_t align to 8 bytes
+  if (Context.getTypeSize(T) == 64)
+    return 8;
+
+  // Half types align to 2 bytes only if native half is available
+  if (T->isHalfType() && Context.getLangOpts().NativeHalfType)
----------------
V-FEXrt wrote:
True, I can probably make this function a lot simpler by just looking at the bit width. Are there any types where the bit width is 32 but the alignment is not 4 bytes? I think I read somewhere that "everything is 4-byte aligned except these X exceptions", which is a weird way to phrase it unless some type that is not 32 bits wide is also 4-byte aligned. Otherwise it's clearer to say "everything is aligned to its byte width except these X exceptions".

https://github.com/llvm/llvm-project/pull/128086
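
For reference, a rough sketch of what that "align to byte width, with exceptions" rewrite could look like (hypothetical, not the code in this PR, and only valid if the answer to the question above is that every sub-64-bit scalar really does align to its byte width; the Simplified name is made up for illustration):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include <cassert>

using namespace clang;

// Hypothetical simplification of calculateLegacyCbufferFieldAlign: align
// every scalar to its byte width and keep only the aggregate exception.
static unsigned calculateLegacyCbufferFieldAlignSimplified(
    const ASTContext &Context, QualType T) {
  // Exception: aggregate types always start a new 16-byte row.
  if (T->isAggregateType())
    return 16;

  assert(Context.getTypeSize(T) <= 64 &&
         "Scalar bit widths larger than 64 not supported");

  // Otherwise align to byte width: 2 for native half, 4 for 32-bit scalars,
  // 8 for double/uint64_t. Whether half still needs a special case when
  // native 16-bit types are disabled is exactly the open question above.
  return Context.getTypeSize(T) / 8;
}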