Hi Jens,

> On 26 Apr 2024, at 14:11, Jens Wiklander <jens.wiklan...@linaro.org> wrote:
> 
> Hi Bertrand,
> 
> On Fri, Apr 26, 2024 at 11:20 AM Bertrand Marquis
> <bertrand.marq...@arm.com> wrote:
>> 
>> Hi Jens,
>> 
>>> On 26 Apr 2024, at 10:47, Jens Wiklander <jens.wiklan...@linaro.org> wrote:
>>> 
>>> Add support for FF-A notifications, currently limited to an SP (Secure
>>> Partition) sending an asynchronous notification to a guest.
>>> 
>>> Guests and Xen itself are made aware of pending notifications with an
>>> interrupt. The interrupt handler retrieves the notifications using the
>>> FF-A ABI and delivers them to their destinations.
>>> 
>>> Update ffa_partinfo_domain_init() to return error code like
>>> ffa_notif_domain_init().
>>> 
>>> Signed-off-by: Jens Wiklander <jens.wiklan...@linaro.org>
>>> ---
>>> v2->v3:
>>> - Add a GUEST_ prefix and move FFA_NOTIF_PEND_INTR_ID and
>>> FFA_SCHEDULE_RECV_INTR_ID to public/arch-arm.h
>>> - Register the Xen SRI handler on each CPU using on_selected_cpus() and
>>> setup_irq()
>>> - Check that the SGI ID retrieved with FFA_FEATURE_SCHEDULE_RECV_INTR
>>> doesn't conflict with static SGI handlers
>>> 
>>> v1->v2:
>>> - Addressing review comments
>>> - Change ffa_handle_notification_{bind,unbind,set}() to take struct
>>> cpu_user_regs *regs as argument.
>>> - Update ffa_partinfo_domain_init() and ffa_notif_domain_init() to return
>>> an error code.
>>> - Fixing a bug in handle_features() for FFA_FEATURE_SCHEDULE_RECV_INTR.
>>> ---
>>> xen/arch/arm/irq.c              |   2 +-
>>> xen/arch/arm/tee/Makefile       |   1 +
>>> xen/arch/arm/tee/ffa.c          |  55 ++++-
>>> xen/arch/arm/tee/ffa_notif.c    | 378 ++++++++++++++++++++++++++++++++
>>> xen/arch/arm/tee/ffa_partinfo.c |   9 +-
>>> xen/arch/arm/tee/ffa_private.h  |  56 ++++-
>>> xen/include/public/arch-arm.h   |  14 ++
>>> 7 files changed, 507 insertions(+), 8 deletions(-)
>>> create mode 100644 xen/arch/arm/tee/ffa_notif.c
>>> 
>>> diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
>>> index d7306aa64d50..5224898265a5 100644
>>> --- a/xen/arch/arm/irq.c
>>> +++ b/xen/arch/arm/irq.c
>>> @@ -155,7 +155,7 @@ void __init init_IRQ(void)
>>>    {
>>>        /* SGIs are always edge-triggered */
>>>        if ( irq < NR_GIC_SGI )
>>> -            local_irqs_type[irq] = DT_IRQ_TYPE_EDGE_RISING;
>>> +            local_irqs_type[irq] = IRQ_TYPE_EDGE_RISING;
>>>        else
>>>            local_irqs_type[irq] = IRQ_TYPE_INVALID;
>>>    }
>>> diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
>>> index f0112a2f922d..7c0f46f7f446 100644
>>> --- a/xen/arch/arm/tee/Makefile
>>> +++ b/xen/arch/arm/tee/Makefile
>>> @@ -2,5 +2,6 @@ obj-$(CONFIG_FFA) += ffa.o
>>> obj-$(CONFIG_FFA) += ffa_shm.o
>>> obj-$(CONFIG_FFA) += ffa_partinfo.o
>>> obj-$(CONFIG_FFA) += ffa_rxtx.o
>>> +obj-$(CONFIG_FFA) += ffa_notif.o
>>> obj-y += tee.o
>>> obj-$(CONFIG_OPTEE) += optee.o
>>> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
>>> index 5209612963e1..912e905a27bd 100644
>>> --- a/xen/arch/arm/tee/ffa.c
>>> +++ b/xen/arch/arm/tee/ffa.c
>>> @@ -39,6 +39,9 @@
>>> *   - at most 32 shared memory regions per guest
>>> * o FFA_MSG_SEND_DIRECT_REQ:
>>> *   - only supported from a VM to an SP
>>> + * o FFA_NOTIFICATION_*:
>>> + *   - only supports global notifications, that is, per vCPU notifications
>>> + *     are not supported
>>> *
>>> * There are some large locked sections with ffa_tx_buffer_lock and
>>> * ffa_rx_buffer_lock. Especially the ffa_tx_buffer_lock spinlock used
>>> @@ -194,6 +197,8 @@ out:
>>> 
>>> static void handle_features(struct cpu_user_regs *regs)
>>> {
>>> +    struct domain *d = current->domain;
>>> +    struct ffa_ctx *ctx = d->arch.tee;
>>>    uint32_t a1 = get_user_reg(regs, 1);
>>>    unsigned int n;
>>> 
>>> @@ -240,6 +245,30 @@ static void handle_features(struct cpu_user_regs *regs)
>>>        BUILD_BUG_ON(PAGE_SIZE != FFA_PAGE_SIZE);
>>>        ffa_set_regs_success(regs, 0, 0);
>>>        break;
>>> +    case FFA_FEATURE_NOTIF_PEND_INTR:
>>> +        if ( ctx->notif.enabled )
>>> +            ffa_set_regs_success(regs, GUEST_FFA_NOTIF_PEND_INTR_ID, 0);
>>> +        else
>>> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>> +        break;
>>> +    case FFA_FEATURE_SCHEDULE_RECV_INTR:
>>> +        if ( ctx->notif.enabled )
>>> +            ffa_set_regs_success(regs, GUEST_FFA_SCHEDULE_RECV_INTR_ID, 0);
>>> +        else
>>> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>> +        break;
>>> +
>>> +    case FFA_NOTIFICATION_BIND:
>>> +    case FFA_NOTIFICATION_UNBIND:
>>> +    case FFA_NOTIFICATION_GET:
>>> +    case FFA_NOTIFICATION_SET:
>>> +    case FFA_NOTIFICATION_INFO_GET_32:
>>> +    case FFA_NOTIFICATION_INFO_GET_64:
>>> +        if ( ctx->notif.enabled )
>>> +            ffa_set_regs_success(regs, 0, 0);
>>> +        else
>>> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>> +        break;
>>>    default:
>>>        ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>>        break;
>>> @@ -305,6 +334,22 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>>>                                                     get_user_reg(regs, 1)),
>>>                                   get_user_reg(regs, 3));
>>>        break;
>>> +    case FFA_NOTIFICATION_BIND:
>>> +        e = ffa_handle_notification_bind(regs);
>>> +        break;
>>> +    case FFA_NOTIFICATION_UNBIND:
>>> +        e = ffa_handle_notification_unbind(regs);
>>> +        break;
>>> +    case FFA_NOTIFICATION_INFO_GET_32:
>>> +    case FFA_NOTIFICATION_INFO_GET_64:
>>> +        ffa_handle_notification_info_get(regs);
>>> +        return true;
>>> +    case FFA_NOTIFICATION_GET:
>>> +        ffa_handle_notification_get(regs);
>>> +        return true;
>>> +    case FFA_NOTIFICATION_SET:
>>> +        e = ffa_handle_notification_set(regs);
>>> +        break;
>>> 
>>>    default:
>>>        gprintk(XENLOG_ERR, "ffa: unhandled fid 0x%x\n", fid);
>>> @@ -322,6 +367,7 @@ static bool ffa_handle_call(struct cpu_user_regs *regs)
>>> static int ffa_domain_init(struct domain *d)
>>> {
>>>    struct ffa_ctx *ctx;
>>> +    int ret;
>>> 
>>>    if ( !ffa_version )
>>>        return -ENODEV;
>>> @@ -345,10 +391,11 @@ static int ffa_domain_init(struct domain *d)
>>>     * error, so no need for cleanup in this function.
>>>     */
>>> 
>>> -    if ( !ffa_partinfo_domain_init(d) )
>>> -        return -EIO;
>>> +    ret = ffa_partinfo_domain_init(d);
>>> +    if ( ret )
>>> +        return ret;
>>> 
>>> -    return 0;
>>> +    return ffa_notif_domain_init(d);
>>> }
>>> 
>>> static void ffa_domain_teardown_continue(struct ffa_ctx *ctx, bool 
>>> first_time)
>>> @@ -423,6 +470,7 @@ static int ffa_domain_teardown(struct domain *d)
>>>        return 0;
>>> 
>>>    ffa_rxtx_domain_destroy(d);
>>> +    ffa_notif_domain_destroy(d);
>>> 
>>>    ffa_domain_teardown_continue(ctx, true /* first_time */);
>>> 
>>> @@ -502,6 +550,7 @@ static bool ffa_probe(void)
>>>    if ( !ffa_partinfo_init() )
>>>        goto err_rxtx_destroy;
>>> 
>>> +    ffa_notif_init();
>>>    INIT_LIST_HEAD(&ffa_teardown_head);
>>>    init_timer(&ffa_teardown_timer, ffa_teardown_timer_callback, NULL, 0);
>>> 
>>> diff --git a/xen/arch/arm/tee/ffa_notif.c b/xen/arch/arm/tee/ffa_notif.c
>>> new file mode 100644
>>> index 000000000000..6bb0804ee2f8
>>> --- /dev/null
>>> +++ b/xen/arch/arm/tee/ffa_notif.c
>>> @@ -0,0 +1,378 @@
>>> +/* SPDX-License-Identifier: GPL-2.0-only */
>>> +/*
>>> + * Copyright (C) 2024  Linaro Limited
>>> + */
>>> +
>>> +#include <xen/const.h>
>>> +#include <xen/list.h>
>>> +#include <xen/spinlock.h>
>>> +#include <xen/types.h>
>>> +
>>> +#include <asm/smccc.h>
>>> +#include <asm/regs.h>
>>> +
>>> +#include "ffa_private.h"
>>> +
>>> +static bool __ro_after_init notif_enabled;
>>> +
>>> +int ffa_handle_notification_bind(struct cpu_user_regs *regs)
>>> +{
>>> +    struct domain *d = current->domain;
>>> +    uint32_t src_dst = get_user_reg(regs, 1);
>>> +    uint32_t flags = get_user_reg(regs, 2);
>>> +    uint32_t bitmap_lo = get_user_reg(regs, 3);
>>> +    uint32_t bitmap_hi = get_user_reg(regs, 4);
>>> +
>>> +    if ( !notif_enabled )
>>> +        return FFA_RET_NOT_SUPPORTED;
>>> +
>>> +    if ( (src_dst & 0xFFFFU) != ffa_get_vm_id(d) )
>>> +        return FFA_RET_INVALID_PARAMETERS;
>>> +
>>> +    if ( flags )    /* Only global notifications are supported */
>>> +        return FFA_RET_DENIED;
>>> +
>>> +    /*
>>> +     * We only support notifications from SP so no need to check the sender
>>> +     * endpoint ID, the SPMC will take care of that for us.
>>> +     */
>>> +    return ffa_simple_call(FFA_NOTIFICATION_BIND, src_dst, flags, 
>>> bitmap_hi,
>>> +                           bitmap_lo);
>>> +}
>>> +
>>> +int ffa_handle_notification_unbind(struct cpu_user_regs *regs)
>>> +{
>>> +    struct domain *d = current->domain;
>>> +    uint32_t src_dst = get_user_reg(regs, 1);
>>> +    uint32_t bitmap_lo = get_user_reg(regs, 3);
>>> +    uint32_t bitmap_hi = get_user_reg(regs, 4);
>>> +
>>> +    if ( !notif_enabled )
>>> +        return FFA_RET_NOT_SUPPORTED;
>>> +
>>> +    if ( (src_dst & 0xFFFFU) != ffa_get_vm_id(d) )
>>> +        return FFA_RET_INVALID_PARAMETERS;
>>> +
>>> +    /*
>>> +     * We only support notifications from SP so no need to check the
>>> +     * destination endpoint ID, the SPMC will take care of that for us.
>>> +     */
>>> +    return  ffa_simple_call(FFA_NOTIFICATION_UNBIND, src_dst, 0, bitmap_hi,
>>> +                            bitmap_lo);
>>> +}
>>> +
>>> +void ffa_handle_notification_info_get(struct cpu_user_regs *regs)
>>> +{
>>> +    struct domain *d = current->domain;
>>> +    struct ffa_ctx *ctx = d->arch.tee;
>>> +    bool pending_global;
>>> +
>>> +    if ( !notif_enabled )
>>> +    {
>>> +        ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>> +        return;
>>> +    }
>>> +
>>> +    spin_lock(&ctx->notif.lock);
>>> +    pending_global = ctx->notif.secure_pending;
>>> +    ctx->notif.secure_pending = false;
>>> +    spin_unlock(&ctx->notif.lock);
>>> +
>>> +    if ( pending_global )
>>> +    {
>>> +        /* A pending global notification for the guest */
>>> +        ffa_set_regs(regs, FFA_SUCCESS_64, 0,
>>> +                     1U << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT, 
>>> ffa_get_vm_id(d),
>>> +                     0, 0, 0, 0);
>>> +    }
>>> +    else
>>> +    {
>>> +        /* Report an error if there were no pending global notifications */
>>> +        ffa_set_regs_error(regs, FFA_RET_NO_DATA);
>>> +    }
>>> +}
>>> +
>>> +void ffa_handle_notification_get(struct cpu_user_regs *regs)
>>> +{
>>> +    struct domain *d = current->domain;
>>> +    uint32_t recv = get_user_reg(regs, 1);
>>> +    uint32_t flags = get_user_reg(regs, 2);
>>> +    uint32_t w2 = 0;
>>> +    uint32_t w3 = 0;
>>> +    uint32_t w4 = 0;
>>> +    uint32_t w5 = 0;
>>> +    uint32_t w6 = 0;
>>> +    uint32_t w7 = 0;
>>> +
>>> +    if ( !notif_enabled )
>>> +    {
>>> +        ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>>> +        return;
>>> +    }
>>> +
>>> +    if ( (recv & 0xFFFFU) != ffa_get_vm_id(d) )
>>> +    {
>>> +        ffa_set_regs_error(regs, FFA_RET_INVALID_PARAMETERS);
>>> +        return;
>>> +    }
>>> +
>>> +    if ( flags & ( FFA_NOTIF_FLAG_BITMAP_SP | FFA_NOTIF_FLAG_BITMAP_SPM ) )
>>> +    {
>>> +        struct arm_smccc_1_2_regs arg = {
>>> +            .a0 = FFA_NOTIFICATION_GET,
>>> +            .a1 = recv,
>>> +            .a2 = flags & ( FFA_NOTIF_FLAG_BITMAP_SP |
>>> +                            FFA_NOTIF_FLAG_BITMAP_SPM ),
>>> +        };
>>> +        struct arm_smccc_1_2_regs resp;
>>> +        int32_t e;
>>> +
>>> +        arm_smccc_1_2_smc(&arg, &resp);
>>> +        e = ffa_get_ret_code(&resp);
>>> +        if ( e )
>>> +        {
>>> +            ffa_set_regs_error(regs, e);
>>> +            return;
>>> +        }
>>> +
>>> +        if ( flags & FFA_NOTIF_FLAG_BITMAP_SP )
>>> +        {
>>> +            w2 = resp.a2;
>>> +            w3 = resp.a3;
>>> +        }
>>> +
>>> +        if ( flags & FFA_NOTIF_FLAG_BITMAP_SPM )
>>> +            w6 = resp.a6;
>>> +    }
>> 
>> In here you never clear the secure_pending flag, but in practice there
>> would be no more pending notifications if the SP and SPM flags are set,
>> so the response to notification_info_get would wrongly say there is
>> something pending.
>> 
>> I am not completely sure how this could be handled if both SP and SPM are
>> not set, so I would say for now we should at least put a comment in info_get
>> to keep a note of this fact.
>> Do you agree?
> 
> Good point. I'll add a comment and clear secure_pending.
> 
>> 
>>> +
>>> +    ffa_set_regs(regs, FFA_SUCCESS_32, 0, w2, w3, w4, w5, w6, w7);
>>> +}
>>> +
>>> +int ffa_handle_notification_set(struct cpu_user_regs *regs)
>>> +{
>>> +    struct domain *d = current->domain;
>>> +    uint32_t src_dst = get_user_reg(regs, 1);
>>> +    uint32_t flags = get_user_reg(regs, 2);
>>> +    uint32_t bitmap_lo = get_user_reg(regs, 3);
>>> +    uint32_t bitmap_hi = get_user_reg(regs, 4);
>>> +
>>> +    if ( !notif_enabled )
>>> +        return FFA_RET_NOT_SUPPORTED;
>>> +
>>> +    if ( (src_dst >> 16) != ffa_get_vm_id(d) )
>>> +        return FFA_RET_INVALID_PARAMETERS;
>>> +
>>> +    /* Let the SPMC check the destination of the notification */
>>> +    return ffa_simple_call(FFA_NOTIFICATION_SET, src_dst, flags, bitmap_lo,
>>> +                           bitmap_hi);
>>> +}
>>> +
>>> +/*
>>> + * Extract a 16-bit ID (index n) from the successful return value from
>>> + * FFA_NOTIFICATION_INFO_GET_64 or FFA_NOTIFICATION_INFO_GET_32. IDs are
>>> + * returned in registers 3 to 7 with four IDs per register for 64-bit
>>> + * calling convention and two IDs per register for 32-bit calling
>>> + * convention.
>>> + */
>>> +static uint16_t get_id_from_resp(struct arm_smccc_1_2_regs *resp,
>>> +                                 unsigned int n)
>>> +{
>>> +    unsigned int ids_per_reg;
>>> +    unsigned int reg_idx;
>>> +    unsigned int reg_shift;
>>> +
>>> +    if ( smccc_is_conv_64(resp->a0) )
>>> +        ids_per_reg = 4;
>>> +    else
>>> +        ids_per_reg = 2;
>>> +
>>> +    reg_idx = n / ids_per_reg + 3;
>>> +    reg_shift = ( n % ids_per_reg ) * 16;
>>> +
>>> +    switch ( reg_idx )
>>> +    {
>>> +    case 3:
>>> +        return resp->a3 >> reg_shift;
>>> +    case 4:
>>> +        return resp->a4 >> reg_shift;
>>> +    case 5:
>>> +        return resp->a5 >> reg_shift;
>>> +    case 6:
>>> +        return resp->a6 >> reg_shift;
>>> +    case 7:
>>> +        return resp->a7 >> reg_shift;
>>> +    default:
>>> +        ASSERT(0); /* "Can't happen" */
>>> +        return 0;
>>> +    }
>>> +}
>>> +
>>> +static void notif_irq_handler(int irq, void *data)
>>> +{
>>> +    const struct arm_smccc_1_2_regs arg = {
>>> +        .a0 = FFA_NOTIFICATION_INFO_GET_64,
>>> +    };
>>> +    struct arm_smccc_1_2_regs resp;
>>> +    unsigned int id_pos;
>>> +    unsigned int list_count;
>>> +    uint64_t ids_count;
>>> +    unsigned int n;
>>> +    int32_t res;
>>> +
>>> +    do {
>>> +        arm_smccc_1_2_smc(&arg, &resp);
>>> +        res = ffa_get_ret_code(&resp);
>>> +        if ( res )
>>> +        {
>>> +            if ( res != FFA_RET_NO_DATA )
>>> +                printk(XENLOG_ERR "ffa: notification info get failed: 
>>> error %d\n",
>>> +                       res);
>>> +            return;
>>> +        }
>>> +
>>> +        ids_count = resp.a2 >> FFA_NOTIF_INFO_GET_ID_LIST_SHIFT;
>>> +        list_count = ( resp.a2 >> FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT ) &
>>> +                     FFA_NOTIF_INFO_GET_ID_COUNT_MASK;
>>> +
>>> +        id_pos = 0;
>>> +        for ( n = 0; n < list_count; n++ )
>>> +        {
>>> +            unsigned int count = ((ids_count >> 2 * n) & 0x3) + 1;
>>> +            struct domain *d;
>>> +
>> 
>> If a notification is pending for a secure partition at this stage we are not
>> signaling anything so I think this fact should be listed in the limitations 
>> to
>> say that we do not signal any secondary scheduler if a notification is
>> pending for a secure partition.
> 
> I agree, I'll add a note in the limitation.
> 
>> 
>>> +            d = ffa_get_domain_by_vm_id(get_id_from_resp(&resp, id_pos));
>>> +
>>> +            if ( d )
>>> +            {
>>> +                struct ffa_ctx *ctx = d->arch.tee;
>>> +
>>> +                spin_lock(&ctx->notif.lock);
>>> +                ctx->notif.secure_pending = true;
>>> +                spin_unlock(&ctx->notif.lock);
>>> +
>>> +                /*
>>> +                 * Since we're only delivering global notification, always
>>> +                 * deliver to the first vCPU. It doesn't matter which we
>>> +                 * chose, as long as it's available.
>>> +                 */
>>> +                vgic_inject_irq(d, d->vcpu[0], 
>>> GUEST_FFA_NOTIF_PEND_INTR_ID,
>>> +                                true);
>>> +
>>> +                put_domain(d);
>>> +            }
>>> +
>>> +            id_pos += count;
>>> +        }
>>> +
>>> +    } while (resp.a2 & FFA_NOTIF_INFO_GET_MORE_FLAG);
>>> +}
>>> +
>>> +static int32_t ffa_notification_bitmap_create(uint16_t vm_id,
>>> +                                              uint32_t vcpu_count)
>>> +{
>>> +    return ffa_simple_call(FFA_NOTIFICATION_BITMAP_CREATE, vm_id, 
>>> vcpu_count,
>>> +                           0, 0);
>>> +}
>>> +
>>> +static int32_t ffa_notification_bitmap_destroy(uint16_t vm_id)
>>> +{
>>> +    return ffa_simple_call(FFA_NOTIFICATION_BITMAP_DESTROY, vm_id, 0, 0, 
>>> 0);
>>> +}
>>> +
>>> +struct notif_irq_info {
>>> +    unsigned int irq;
>>> +    int ret;
>>> +    struct irqaction *action;
>>> +};
>>> +
>>> +static void notif_irq_enable(void *info)
>>> +{
>>> +    struct notif_irq_info *irq_info = info;
>>> +
>>> +    irq_info->ret = setup_irq(irq_info->irq, 0, irq_info->action);
>>> +    if ( irq_info->ret )
>>> +        printk(XENLOG_ERR "ffa: request_irq irq %u failed: error %d\n",
>>> +               irq_info->irq, irq_info->ret);
>>> +}
>>> +
>>> +void ffa_notif_init(void)
>>> +{
>>> +    const struct arm_smccc_1_2_regs arg = {
>>> +        .a0 = FFA_FEATURES,
>>> +        .a1 = FFA_FEATURE_SCHEDULE_RECV_INTR,
>>> +    };
>>> +    struct notif_irq_info irq_info = { };
>>> +    struct arm_smccc_1_2_regs resp;
>>> +    unsigned int cpu;
>>> +
>>> +    arm_smccc_1_2_smc(&arg, &resp);
>>> +    if ( resp.a0 != FFA_SUCCESS_32 )
>>> +        return;
>>> +
>>> +    irq_info.irq = resp.a2;
>>> +    if ( irq_info.irq < GIC_SGI_STATIC_MAX || irq_info.irq >= NR_GIC_SGI )
>>> +    {
>>> +        printk(XENLOG_ERR "ffa: notification initialization failed: 
>>> conflicting SGI %u\n",
>>> +               irq_info.irq);
>>> +        return;
>>> +    }
>>> +
>>> +    /*
>>> +     * SGIs are per-CPU so we must enable the IRQ on each CPU. We use an
>>> +     * IPI to call notif_irq_enable() on each CPU including the current
>>> +     * CPU. The struct irqaction is preallocated since we can't allocate
>>> +     * memory while in interrupt context.
>>> +     */
>>> +    for_each_online_cpu(cpu)
>>> +    {
>>> +        irq_info.action = xmalloc(struct irqaction);
>> 
>> You allocate one action per CPU, but you have only one action pointer in
>> your structure, which means you will overwrite the previously allocated
>> one and lose track of it.
>> 
>> You should have a table of actions in your structure instead — unless one
>> action is enough and can be reused on all CPUs, in which case you should
>> move the allocation out of your loop.
> 
> That shouldn't be needed because this is done in sequence only one CPU
> at a time.

Sorry, I do not understand here.
You have a loop over each online CPU, and on each iteration you assign
irq_info.action a newly allocated struct irqaction, so in practice you are
overwriting on CPU 2 the action that was allocated for CPU 1.

What do you mean by "sequence" here?

Cheers
Bertrand

Reply via email to