Similar to the arm64 case, 64-bit x86 can benefit from using 32-bit
relative references rather than 64-bit absolute ones when emitting
struct jump_entry instances. Not only does this reduce the memory
footprint of the entries themselves by 50%, it also removes the need
for carrying relocation metadata on relocatable builds (i.e., for KASLR),
which saves a fair chunk of .init space as well (although the savings
are not as dramatic as on arm64).
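
For illustration, here is a minimal sketch (not part of the patch, and
written with C99 types so it stands alone) of how such self-relative
entries decode back to absolute addresses; the field names and the
jump_entry_code()/jump_entry_target() accessors are assumed to match the
generic HAVE_ARCH_JUMP_LABEL_RELATIVE definitions this patch relies on:

    #include <stdint.h>

    /* Three 32-bit fields, hence the 12-byte entry size objtool expects. */
    struct jump_entry {
            int32_t code;   /* branch site, relative to &code */
            int32_t target; /* jump target, relative to &target */
            int32_t key;    /* struct static_key, relative to &key;
                               bit 0 carries the branch default */
    };

    /* Absolute address = address of the field + the offset stored in it. */
    static inline unsigned long jump_entry_code(const struct jump_entry *entry)
    {
            return (unsigned long)&entry->code + entry->code;
    }

    static inline unsigned long jump_entry_target(const struct jump_entry *entry)
    {
            return (unsigned long)&entry->target + entry->target;
    }

Since each offset is measured from the entry itself, the table contents
are position independent, which is why no relocations need to be emitted
for relocatable builds; it also accounts for the objtool changes below
(JUMP_ENTRY_SIZE drops from 24 to 12 bytes, JUMP_NEW_OFFSET from 8 to 4).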

Signed-off-by: Ard Biesheuvel <[email protected]>
---
 arch/x86/Kconfig                  |  1 +
 arch/x86/include/asm/jump_label.h | 28 ++++++--------------
 tools/objtool/special.c           |  4 +--
 3 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e10a3542db7e..dd71258ec1cc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -118,6 +118,7 @@ config X86
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN                  if X86_64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS          if MMU
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..d0f1f25b41d5 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -36,8 +36,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
        asm_volatile_goto("1:"
                ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                ".pushsection __jump_table,  \"aw\" \n\t"
-               _ASM_ALIGN "\n\t"
-               _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+               ".balign 4\n\t"
+               ".long 1b - ., %l[l_yes] - ., %c0 + %c1 - .\n\t"
                ".popsection \n\t"
                : :  "i" (key), "i" (branch) : : l_yes);
 
@@ -52,8 +52,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
                ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
                "2:\n\t"
                ".pushsection __jump_table,  \"aw\" \n\t"
-               _ASM_ALIGN "\n\t"
-               _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+               ".balign 4\n\t"
+               ".long 1b - ., %l[l_yes] - ., %c0 + %c1 - .\n\t"
                ".popsection \n\t"
                : :  "i" (key), "i" (branch) : : l_yes);
 
@@ -62,18 +62,6 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
        return true;
 }
 
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
-       jump_label_t code;
-       jump_label_t target;
-       jump_label_t key;
-};
-
 #else  /* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
@@ -87,8 +75,8 @@ struct jump_entry {
        .byte           STATIC_KEY_INIT_NOP
        .endif
        .pushsection __jump_table, "aw"
-       _ASM_ALIGN
-       _ASM_PTR        .Lstatic_jump_\@, \target, \key
+       .balign         4
+       .long           .Lstatic_jump_\@ - ., \target - ., \key - .
        .popsection
 .endm
 
@@ -103,8 +91,8 @@ struct jump_entry {
 .Lstatic_jump_after_\@:
        .endif
        .pushsection __jump_table, "aw"
-       _ASM_ALIGN
-       _ASM_PTR        .Lstatic_jump_\@, \target, \key + 1
+       .balign         4
+       .long           .Lstatic_jump_\@ - ., \target - ., \key + 1 - .
        .popsection
 .endm
 
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 84f001d52322..98ae55b39037 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -30,9 +30,9 @@
 #define EX_ORIG_OFFSET         0
 #define EX_NEW_OFFSET          4
 
-#define JUMP_ENTRY_SIZE                24
+#define JUMP_ENTRY_SIZE                12
 #define JUMP_ORIG_OFFSET       0
-#define JUMP_NEW_OFFSET                8
+#define JUMP_NEW_OFFSET                4
 
 #define ALT_ENTRY_SIZE         13
 #define ALT_ORIG_OFFSET                0
-- 
2.11.0
