we enable the WP protection).
On ARM[32,64] we do not have the luxury of a global register that can
be changed after boot. In lieu of that we use vmap to create
a temporary virtual address which we can use instead.
To do this we need to stash, for the duration of the livepatch operation:
the vmap of the hypervisor code, the vmap of the .livepatch.funcs section
(vmap hands back page-aligned virtual addresses), the offset into that
vmap (in case the section is not page-aligned), and the original address
of the first livepatch_func so we can figure out the index.
Equipped with that we can patch the livepatch functions even though
.livepatch.funcs resides in a read-only (rodata) section.
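For illustration, a minimal sketch (not part of the patch itself) of how the
stashed values are combined to reach a writable alias of a given
livepatch_func; 'writable_func' is a hypothetical helper name, and the
fields mirror the livepatch_vmap stash introduced in the header hunk below:

    /* Sketch only: resolve the writable vmap alias of a read-only func.
     * Assumes arch_livepatch_quiesce() has already filled in livepatch_vmap. */
    static struct livepatch_func *writable_func(struct livepatch_func *func)
    {
        /* Index of 'func' within the original (read-only) funcs array. */
        unsigned int i = func - livepatch_vmap.va;

        /* Start from the page-aligned vmap alias, add the sub-page offset of
         * the first entry, then index to the same element as the original. */
        return (struct livepatch_func *)(livepatch_vmap.funcs +
                                         livepatch_vmap.offset) + i;
    }

arch_livepatch_apply() would then write (e.g. the saved opaque bytes)
through the returned pointer instead of through 'func' itself.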
An alternative is to add the 'W' flag when loading the
.livepatch.funcs section, which would result in the section being placed
in a writable region from the get-go.
Note that this vmap solution could be extended to x86 as well.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
xen/arch/arm/arm32/livepatch.c | 11 ++++++---
xen/arch/arm/arm64/livepatch.c | 11 ++++++---
xen/arch/arm/livepatch.c | 52 ++++++++++++++++++++++++++++++++---------
xen/arch/x86/livepatch.c | 2 +-
xen/common/livepatch.c | 5 ++--
xen/include/asm-arm/livepatch.h | 13 ++++++++---
xen/include/xen/livepatch.h | 2 +-
7 files changed, 71 insertions(+), 25 deletions(-)
diff --git a/xen/arch/arm/arm32/livepatch.c b/xen/arch/arm/arm32/livepatch.c
index 10887ace81..d793ebcaad 100644
--- a/xen/arch/arm/arm32/livepatch.c
+++ b/xen/arch/arm/arm32/livepatch.c
@@ -16,18 +16,23 @@ void arch_livepatch_apply(struct livepatch_func *func)
uint32_t insn;
uint32_t *new_ptr;
unsigned int i, len;
+ struct livepatch_func *f;
BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE > sizeof(func->opaque));
BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE != sizeof(insn));
- ASSERT(vmap_of_xen_text);
+ ASSERT(livepatch_vmap.text);
len = livepatch_insn_len(func);
if ( !len )
return;
+ /* Index in the vmap region. */
+ i = func - livepatch_vmap.va;
+ f = (struct livepatch_func *)(livepatch_vmap.funcs +
+ livepatch_vmap.offset) + i;
+
/* Save old ones. */
- memcpy(func->opaque, func->old_addr, len);
+ memcpy(f->opaque, func->old_addr, len);
if ( func->new_addr )
{
@@ -56,7 +61,7 @@ void arch_livepatch_apply(struct livepatch_func *func)
else
insn = 0xe1a00000; /* mov r0, r0 */
- new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+ new_ptr = func->old_addr - (void *)_start + livepatch_vmap.text;
len = len / sizeof(uint32_t);
/* PATCH! */
diff --git a/xen/arch/arm/arm64/livepatch.c b/xen/arch/arm/arm64/livepatch.c
index 2728e2a125..662bedabc3 100644
--- a/xen/arch/arm/arm64/livepatch.c
+++ b/xen/arch/arm/arm64/livepatch.c
@@ -20,18 +20,23 @@ void arch_livepatch_apply(struct livepatch_func *func)
uint32_t insn;
uint32_t *new_ptr;
unsigned int i, len;
+ struct livepatch_func *f;
BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE > sizeof(func->opaque));
BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE != sizeof(insn));
- ASSERT(vmap_of_xen_text);
+ ASSERT(livepatch_vmap.text);
len = livepatch_insn_len(func);
if ( !len )
return;
+ /* Index in the vmap region. */
+ i = func - livepatch_vmap.va;
+ f = (struct livepatch_func *)(livepatch_vmap.funcs +
+ livepatch_vmap.offset) + i;
+
/* Save old ones. */
- memcpy(func->opaque, func->old_addr, len);
+ memcpy(f->opaque, func->old_addr, len);
if ( func->new_addr )
insn = aarch64_insn_gen_branch_imm((unsigned long)func->old_addr,
@@ -43,7 +48,7 @@ void arch_livepatch_apply(struct livepatch_func *func)
/* Verified in livepatch_verify_distance. */
ASSERT(insn != AARCH64_BREAK_FAULT);
- new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+ new_ptr = func->old_addr - (void *)_start + livepatch_vmap.text;
len = len / sizeof(uint32_t);
/* PATCH! */
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 3e53524365..2f9ae8e61e 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -6,6 +6,7 @@
#include <xen/lib.h>
#include <xen/livepatch_elf.h>
#include <xen/livepatch.h>
+#include <xen/pfn.h>
#include <xen/vmap.h>
#include <asm/cpufeature.h>
@@ -16,14 +17,18 @@
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-void *vmap_of_xen_text;
+struct livepatch_vmap_stash livepatch_vmap;
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *funcs, unsigned int nfuncs)
{
- mfn_t text_mfn;
+ mfn_t text_mfn, rodata_mfn;
+ void *vmap_addr;
unsigned int text_order;
+ unsigned long va = (unsigned long)(funcs);
+ unsigned int offs = va & (PAGE_SIZE - 1);
+ unsigned int size = PFN_UP(offs + nfuncs * sizeof(*funcs));
- if ( vmap_of_xen_text )
+ if ( livepatch_vmap.text || livepatch_vmap.funcs )
return -EINVAL;
text_mfn = virt_to_mfn(_start);
@@ -33,16 +38,33 @@ int arch_livepatch_quiesce(void)
* The text section is read-only. So re-map Xen to be able to patch
* the code.
*/
- vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR,
- VMAP_DEFAULT);
+ vmap_addr = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
- if ( !vmap_of_xen_text )
+ if ( !vmap_addr )
{
printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n",
text_order);
return -ENOMEM;
}
+ livepatch_vmap.text = vmap_addr;
+ livepatch_vmap.offset = offs;
+
+ rodata_mfn = virt_to_mfn(va & PAGE_MASK);
+ vmap_addr = __vmap(&rodata_mfn, size, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
+ if ( !vmap_addr )
+ {
+ printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of livepatch_funcs! (mfn=%"PRI_mfn", size=%u)\n",
+ mfn_x(rodata_mfn), size);
+ vunmap(livepatch_vmap.text);
+ livepatch_vmap.text = NULL;
+ return -ENOMEM;
+ }
+
+ livepatch_vmap.funcs = vmap_addr;
+ livepatch_vmap.va = funcs;
+
return 0;
}
@@ -54,10 +76,18 @@ void arch_livepatch_revive(void)
*/
invalidate_icache();
- if ( vmap_of_xen_text )
- vunmap(vmap_of_xen_text);
+ if ( livepatch_vmap.text )
+ vunmap(livepatch_vmap.text);
+
+ livepatch_vmap.text = NULL;
+
+ if ( livepatch_vmap.funcs )
+ vunmap(livepatch_vmap.funcs);
+
+ livepatch_vmap.funcs = NULL;
- vmap_of_xen_text = NULL;
+ livepatch_vmap.va = NULL;
+ livepatch_vmap.offset = 0;
}
int arch_livepatch_verify_func(const struct livepatch_func *func)
@@ -78,7 +108,7 @@ void arch_livepatch_revert(const struct livepatch_func *func)
uint32_t *new_ptr;
unsigned int len;
- new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+ new_ptr = func->old_addr - (void *)_start + livepatch_vmap.text;
len = livepatch_insn_len(func);
memcpy(new_ptr, func->opaque, len);
diff --git a/xen/arch/x86/livepatch.c b/xen/arch/x86/livepatch.c
index 48d20fdacd..8522fcbd36 100644
--- a/xen/arch/x86/livepatch.c
+++ b/xen/arch/x86/livepatch.c
@@ -14,7 +14,7 @@
#include <asm/nmi.h>
#include <asm/livepatch.h>
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *func, unsigned int nfuncs)
{
/* Disable WP to allow changes to read-only pages. */
write_cr0(read_cr0() & ~X86_CR0_WP);
diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index dbab8a3f6f..e707802279 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -571,7 +571,6 @@ static int prepare_payload(struct payload *payload,
if ( rc )
return rc;
}
-
sec = livepatch_elf_sec_by_name(elf, ".livepatch.hooks.load");
if ( sec )
{
@@ -1070,7 +1069,7 @@ static int apply_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
data->name, data->nfuncs);
- rc = arch_livepatch_quiesce();
+ rc = arch_livepatch_quiesce(data->funcs, data->nfuncs);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
@@ -1111,7 +1110,7 @@ static int revert_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);
- rc = arch_livepatch_quiesce();
+ rc = arch_livepatch_quiesce(data->funcs, data->nfuncs);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
diff --git a/xen/include/asm-arm/livepatch.h b/xen/include/asm-arm/livepatch.h
index 6bca79deb9..e030aedced 100644
--- a/xen/include/asm-arm/livepatch.h
+++ b/xen/include/asm-arm/livepatch.h
@@ -12,10 +12,17 @@
#define ARCH_PATCH_INSN_SIZE 4
/*
- * The va of the hypervisor .text region. We need this as the
- * normal va are write protected.
+ * The va of the hypervisor .text region and the livepatch_funcs.
+ * We need this as the normal va are write protected.
*/
-extern void *vmap_of_xen_text;
+struct livepatch_vmap_stash {
+ void *text; /* vmap of hypervisor code. */
+ void *funcs; /* vmap of the .livepatch.funcs. */
+ unsigned int offset; /* Offset in 'funcs'. */
+ struct livepatch_func *va; /* The original va. */
+};
+
+extern struct livepatch_vmap_stash livepatch_vmap;
/* These ranges are only for unconditional branches. */
#ifdef CONFIG_ARM_32
diff --git a/xen/include/xen/livepatch.h b/xen/include/xen/livepatch.h
index e9bab87f28..a97afb92f9 100644
--- a/xen/include/xen/livepatch.h
+++ b/xen/include/xen/livepatch.h
@@ -104,7 +104,7 @@ static inline int livepatch_verify_distance(const struct livepatch_func *func)
* These functions are called around the critical region patching live code,
* for an architecture to make appropriate global state adjustments.
*/
-int arch_livepatch_quiesce(void);
+int arch_livepatch_quiesce(struct livepatch_func *func, unsigned int nfuncs);
void arch_livepatch_revive(void);
void arch_livepatch_apply(struct livepatch_func *func);