On Wed, 2024-05-15 at 20:51 +0800, libao...@huaweicloud.com wrote:
> From: Baokun Li <libaok...@huawei.com>
> 
> Reusing the msg_id after a maliciously completed reopen request may cause
> a read request to remain unprocessed, resulting in a hung task, as shown
> below:
> 
>        t1       |      t2       |      t3
> -------------------------------------------------
> cachefiles_ondemand_select_req
>  cachefiles_ondemand_object_is_close(A)
>  cachefiles_ondemand_set_object_reopening(A)
>  queue_work(fscache_object_wq, &info->work)
>                 ondemand_object_worker
>                  cachefiles_ondemand_init_object(A)
>                   cachefiles_ondemand_send_req(OPEN)
>                     // get msg_id 6
>                     wait_for_completion(&req_A->done)
> cachefiles_ondemand_daemon_read
>  // read msg_id 6 req_A
>  cachefiles_ondemand_get_fd
>  copy_to_user
>                                 // Malicious completion msg_id 6
>                                 copen 6,-1
>                                 cachefiles_ondemand_copen
>                                  complete(&req_A->done)
>                                  // will not set the object to close
>                                  // because ondemand_id && fd is valid.
> 
>                 // ondemand_object_worker() is done
>                 // but the object is still reopening.
> 
>                                 // new open req_B
>                                 cachefiles_ondemand_init_object(B)
>                                  cachefiles_ondemand_send_req(OPEN)
>                                  // reuse msg_id 6
> process_open_req
>  copen 6,A.size
>  // The expected failed copen was executed successfully
> 
> The copen is expected to fail; when it does, it closes the anonymous fd,
> which sets the object to close, and the close in turn triggers a reopen.
> However, because the reused msg_id lets the copen succeed, the anonymous
> fd is not closed until the daemon exits. Read requests waiting for the
> reopen to complete may therefore trigger a hung task.
> 
> To avoid this issue, allocate msg_ids cyclically so that a msg_id is not
> reused within a short window of time.
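
For reference: the patch open-codes the cyclic search below because the
allocation has to happen under the same xas lock as the CLOSE check;
absent that constraint, the stock xa_alloc_cyclic() helper provides the
same wrap-around behaviour. A rough sketch of that variant (not what the
patch does), assuming the request xarray (cache->reqs) is an allocating
xarray (XA_FLAGS_ALLOC):

	u32 msg_id;
	int err;

	/* Find a free index starting at cache->msg_id_next, wrapping
	 * to 0 on exhaustion; returns 0 for a fresh id, 1 if the
	 * search wrapped, or -EBUSY if the id space is full. */
	err = xa_alloc_cyclic(&cache->reqs, &msg_id, req, xa_limit_32b,
			      &cache->msg_id_next, GFP_KERNEL);
	if (err < 0)
		return err;
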
> 
> Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
> Signed-off-by: Baokun Li <libaok...@huawei.com>
> ---
>  fs/cachefiles/internal.h |  1 +
>  fs/cachefiles/ondemand.c | 20 ++++++++++++++++----
>  2 files changed, 17 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
> index 8ecd296cc1c4..9200c00f3e98 100644
> --- a/fs/cachefiles/internal.h
> +++ b/fs/cachefiles/internal.h
> @@ -128,6 +128,7 @@ struct cachefiles_cache {
>       unsigned long                   req_id_next;
>       struct xarray                   ondemand_ids;   /* xarray for ondemand_id allocation */
>       u32                             ondemand_id_next;
> +     u32                             msg_id_next;
>  };
>  
>  static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
> diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> index f6440b3e7368..b10952f77472 100644
> --- a/fs/cachefiles/ondemand.c
> +++ b/fs/cachefiles/ondemand.c
> @@ -433,20 +433,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
>               smp_mb();
>  
>               if (opcode == CACHEFILES_OP_CLOSE &&
> -                     !cachefiles_ondemand_object_is_open(object)) {
> +                 !cachefiles_ondemand_object_is_open(object)) {
>                       WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
>                       xas_unlock(&xas);
>                       ret = -EIO;
>                       goto out;
>               }
>  
> -             xas.xa_index = 0;
> +             /*
> +              * Cyclically find a free xas to avoid msg_id reuse that would
> +              * cause the daemon to successfully copen a stale msg_id.
> +              */
> +             xas.xa_index = cache->msg_id_next;
>               xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
> +             if (xas.xa_node == XAS_RESTART) {
> +                     xas.xa_index = 0;
> +                     xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
> +             }
>               if (xas.xa_node == XAS_RESTART)
>                       xas_set_err(&xas, -EBUSY);
> +
>               xas_store(&xas, req);
> -             xas_clear_mark(&xas, XA_FREE_MARK);
> -             xas_set_mark(&xas, CACHEFILES_REQ_NEW);
> +             if (xas_valid(&xas)) {
> +                     cache->msg_id_next = xas.xa_index + 1;

If you have a long-standing stuck request, could this counter wrap
around so that you still end up with reuse? Maybe this should use
ida_alloc/free instead, which would prevent that too?
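
Something like this rough sketch of what I mean (hypothetical helper
names, assuming a new "struct ida msg_ids" member in cachefiles_cache;
the request itself would still be stored in cache->reqs at the
allocated index):

	/* ida_alloc() hands out the lowest free ID and will not hand
	 * it out again until ida_free() releases it, so even a
	 * long-stuck request keeps its ID reserved for as long as it
	 * lives. */
	static int cachefiles_msg_id_alloc(struct cachefiles_cache *cache)
	{
		return ida_alloc(&cache->msg_ids, GFP_KERNEL);
	}

	static void cachefiles_msg_id_free(struct cachefiles_cache *cache,
					   unsigned int id)
	{
		ida_free(&cache->msg_ids, id);
	}

(Caveat: ida_alloc() is lowest-free, so a just-freed ID can be handed
straight back out; the cyclic cursor may still be wanted for the short
reuse window this patch targets.)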

> +                     xas_clear_mark(&xas, XA_FREE_MARK);
> +                     xas_set_mark(&xas, CACHEFILES_REQ_NEW);
> +             }
>               xas_unlock(&xas);
>       } while (xas_nomem(&xas, GFP_KERNEL));
>  

-- 
Jeff Layton <jlay...@kernel.org>
