================
@@ -230,22 +230,24 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
   case MCFragment::FT_Align: {
     unsigned Offset = F.Offset + F.getFixedSize();
     unsigned Size = offsetToAlignment(Offset, F.getAlignment());
-
-    // Insert extra Nops for code alignment if the target define
-    // shouldInsertExtraNopBytesForCodeAlign target hook.
-    if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
-        getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
-      return F.getFixedSize() + Size;
-
-    // If we are padding with nops, force the padding to be larger than the
-    // minimum nop size.
-    if (Size > 0 && F.hasAlignEmitNops()) {
-      while (Size % getBackend().getMinimumNopSize())
-        Size += F.getAlignment().value();
+    auto &Frag = const_cast<MCFragment &>(F);
+    // In the nops mode, RISC-V style linker relaxation might adjust the size
+    // and add a fixup, even if `Size` is originally 0.
+    bool AlignFixup = false;
+    if (F.hasAlignEmitNops()) {
+      AlignFixup = getBackend().relaxAlign(Frag, Size);
----------------
MaskRay wrote:

Thanks for the feedback! There is currently an oddity in the layout algorithm:

* https://github.com/llvm/llvm-project/blob/a9147e64aa751caaa106953fded2d0f7223bb167/llvm/lib/MC/MCAssembler.cpp#L676
  This `layoutSection` assigns offsets to fragments, including FT_Align (using `computeFragmentSize`).
* `relaxOnce` checks FT_Relaxable (which should probably be renamed to FT_Insn) but not FT_Align.

Subsequent `computeFragmentSize` calls should not modify the fragment's member variables. I should perhaps move this FT_Align handling into `layoutSection` so that `computeFragmentSize` can simply return `getSize()`.
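To make that shape concrete, here is a minimal, self-contained sketch. The types below (`Fragment`, `Backend`, `layoutSection`, `offsetToAlignment`) are simplified stand-ins for the real MC classes, not the actual LLVM API; `relaxAlign` mirrors the hook named in the diff but is stubbed out. The point is only to show the structure: FT_Align relaxation runs once per layout pass inside `layoutSection`, which caches the resulting variable size on the fragment, so `computeFragmentSize` stays a pure accessor.

```cpp
#include <cstdint>
#include <vector>

// Simplified stand-in for MCFragment; not the real LLVM class.
struct Fragment {
  enum FragKind { Data, Align };
  FragKind Kind = Data;
  uint64_t Offset = 0;     // assigned by layoutSection
  uint64_t FixedSize = 0;  // size of the fixed-contents part
  uint64_t VarSize = 0;    // cached variable size (alignment padding)
  uint64_t Alignment = 1;
  bool EmitNops = false;

  uint64_t getSize() const { return FixedSize + VarSize; }
};

// Mimics offsetToAlignment from llvm/Support/Alignment.h.
static uint64_t offsetToAlignment(uint64_t Offset, uint64_t Align) {
  return (Align - (Offset % Align)) % Align;
}

// Stand-in for MCAsmBackend; relaxAlign may grow Size (e.g. RISC-V style
// linker relaxation) and would also attach a fixup in the real code.
struct Backend {
  bool relaxAlign(Fragment &F, uint64_t &Size) const {
    (void)F;
    (void)Size;
    return false; // no target-specific adjustment in this sketch
  }
};

// Layout pass: assigns offsets and folds the FT_Align handling in here,
// caching the resulting variable size on the fragment.
void layoutSection(std::vector<Fragment> &Frags, const Backend &B) {
  uint64_t Offset = 0;
  for (Fragment &F : Frags) {
    F.Offset = Offset;
    if (F.Kind == Fragment::Align) {
      uint64_t Size = offsetToAlignment(Offset + F.FixedSize, F.Alignment);
      if (F.EmitNops)
        B.relaxAlign(F, Size); // may adjust Size for nop-based padding
      F.VarSize = Size;        // cache it; later queries do not recompute
    }
    Offset += F.getSize();
  }
}

// With the caching above, computeFragmentSize is side-effect free and no
// longer needs the const_cast seen in the diff.
uint64_t computeFragmentSize(const Fragment &F) { return F.getSize(); }
```

Under those assumptions, repeated `computeFragmentSize` queries between layout passes return a stable value without mutating the fragment, which is the property the current const_cast works around.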

https://github.com/llvm/llvm-project/pull/149465