Introduce a common xive_queue_page_alloc() routine to allocate and zero
the event queue pages. It will be used by the sPAPR backend. Also
introduce a short xive_alloc_order() helper to compute the allocation
order from a queue shift.
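
For illustration, a minimal sketch of how a backend could use the two
helpers as a pair (the surrounding caller is hypothetical; only
xive_queue_page_alloc() and xive_alloc_order() come from this patch):

	/* allocate and zero a queue page on the CPU's node */
	__be32 *qpage = xive_queue_page_alloc(cpu, xive_queue_shift);

	if (IS_ERR(qpage))
		return PTR_ERR(qpage);
	...
	/* free with the matching order: 0 when the queue fits in one page */
	free_pages((unsigned long)qpage, xive_alloc_order(xive_queue_shift));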

Signed-off-by: Cédric Le Goater <c...@kaod.org>
Reviewed-by: David Gibson <da...@gibson.dropbear.id.au>
---
 arch/powerpc/sysdev/xive/common.c        | 16 ++++++++++++++++
 arch/powerpc/sysdev/xive/native.c        | 16 +++++-----------
 arch/powerpc/sysdev/xive/xive-internal.h |  6 ++++++
 3 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6e0c9dee724f..26999ceae20e 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1424,6 +1424,22 @@ bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
        return true;
 }
 
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
+{
+       unsigned int alloc_order;
+       struct page *pages;
+       __be32 *qpage;
+
+       alloc_order = xive_alloc_order(queue_shift);
+       pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+       qpage = (__be32 *)page_address(pages);
+       memset(qpage, 0, 1 << queue_shift);
+
+       return qpage;
+}
+
 static int __init xive_off(char *arg)
 {
        xive_cmdline_disabled = true;
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0f95476b01f6..ef92a83090e1 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -202,17 +202,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue);
 static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
 {
        struct xive_q *q = &xc->queue[prio];
-       unsigned int alloc_order;
-       struct page *pages;
        __be32 *qpage;
 
-       alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
-               (xive_queue_shift - PAGE_SHIFT) : 0;
-       pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
-       if (!pages)
-               return -ENOMEM;
-       qpage = (__be32 *)page_address(pages);
-       memset(qpage, 0, 1 << xive_queue_shift);
+       qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
+       if (IS_ERR(qpage))
+               return PTR_ERR(qpage);
+
        return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
                                           q, prio, qpage, xive_queue_shift, false);
 }
@@ -227,8 +222,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8
         * from an IPI and iounmap isn't safe
         */
        __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
-       alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
-               (xive_queue_shift - PAGE_SHIFT) : 0;
+       alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
 }
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index d07ef2d29caf..dd1e2022cce4 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -56,6 +56,12 @@ struct xive_ops {
 
 bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
                    u8 max_prio);
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
+
+static inline u32 xive_alloc_order(u32 queue_shift)
+{
+       return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
+}
 
 extern bool xive_cmdline_disabled;
 
-- 
2.13.5
