On Thu, 3 Jul 2025, John Garry wrote:

> The atomic write unit max value is limited by any stacked device stripe
> size.
> 
> It is required that the atomic write unit is a power-of-2 factor of the
> stripe size.
> 
> Currently we use the io_min limit to hold the stripe size, and check
> for an io_min <= SECTOR_SIZE when deciding if we have a striped
> stacked device.
> 
> Nilay reports that this causes a problem when the physical block size is
> greater than SECTOR_SIZE [0].
> 
> Furthermore, io_min may be mutated when stacking devices, which makes
> it a poor candidate to hold the stripe size. One example of when
> io_min may change is when it is less than the physical block size.
> 
> Use chunk_sectors to hold the stripe size, which is more appropriate.
> 
> [0] https://lore.kernel.org/linux-block/888f3b1d-7817-4007-b3b3-1a2ea04df...@linux.ibm.com/T/#mecca17129f72811137d3c2f1e477634e77f06781
> 
> Reviewed-by: Nilay Shroff <ni...@linux.ibm.com>
> Signed-off-by: John Garry <john.g.ga...@oracle.com>
> ---
>  block/blk-settings.c | 51 +++++++++++++++++++++++++-------------------
>  1 file changed, 29 insertions(+), 22 deletions(-)
> 
> diff --git a/block/blk-settings.c b/block/blk-settings.c
> index 7ca21fb32598..20d3563f5d3f 100644
> --- a/block/blk-settings.c
> +++ b/block/blk-settings.c
> @@ -596,41 +596,47 @@ static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
>       return true;
>  }
>  
> +static inline unsigned int max_pow_of_two_factor(const unsigned int nr)
> +{
> +     return 1 << (ffs(nr) - 1);

This could be changed to "nr & -nr".
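
For nonzero nr the two expressions are equivalent: both isolate the
lowest set bit, i.e. the largest power-of-2 factor. A quick user-space
check (plain C with the POSIX ffs(), only an illustration, not kernel
code):

    #include <assert.h>
    #include <strings.h>   /* ffs() */

    int main(void)
    {
            /*
             * For any nonzero nr, 1 << (ffs(nr) - 1) isolates the lowest
             * set bit, which is exactly what nr & -nr computes.
             */
            for (unsigned int nr = 1; nr < (1u << 20); nr++)
                    assert((1u << (ffs(nr) - 1)) == (nr & -nr));

            return 0;
    }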

> +}
>  
> -/* Check stacking of first bottom device */
> -static bool blk_stack_atomic_writes_head(struct queue_limits *t,
> -                             struct queue_limits *b)
> +static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
>  {
> -     if (b->atomic_write_hw_boundary &&
> -         !blk_stack_atomic_writes_boundary_head(t, b))
> -             return false;
> +     unsigned int chunk_bytes = t->chunk_sectors << SECTOR_SHIFT;

What about integer overflow?
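
To spell out the concern: chunk_sectors is an unsigned int, so for a
chunk of 4 GiB or more (>= 1 << 23 sectors) the shift by SECTOR_SHIFT
wraps. One possible way around it, purely as a sketch and not
necessarily the fix that should go in, is to do the shift in 64 bits
and clamp:

    /*
     * Sketch only: widen before shifting so a huge chunk_sectors cannot
     * wrap, then clamp back to the unsigned int range used by the
     * atomic write limits.
     */
    unsigned int chunk_bytes = min_t(unsigned long long,
                    (unsigned long long)t->chunk_sectors << SECTOR_SHIFT,
                    UINT_MAX);

Clamping should be harmless here since the atomic write limits being
min()'d against it are themselves unsigned int byte counts.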

> -     if (t->io_min <= SECTOR_SIZE) {
> -             /* No chunk sectors, so use bottom device values directly */
> -             t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
> -             t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
> -             t->atomic_write_hw_max = b->atomic_write_hw_max;
> -             return true;
> -     }
> +     if (!t->chunk_sectors)
> +             return;
>  
>       /*
>        * Find values for limits which work for chunk size.
>        * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
> -      * size (t->io_min), as chunk size is not restricted to a power-of-2.
> +      * size, as the chunk size is not restricted to a power-of-2.
>        * So we need to find highest power-of-2 which works for the chunk
>        * size.
> -      * As an example scenario, we could have b->unit_max = 16K and
> -      * t->io_min = 24K. For this case, reduce t->unit_max to a value
> -      * aligned with both limits, i.e. 8K in this example.
> +      * As an example scenario, we could have t->unit_max = 16K and
> +      * t->chunk_sectors = 24KB. For this case, reduce t->unit_max to a
> +      * value aligned with both limits, i.e. 8K in this example.
>        */
> -     t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
> -     while (t->io_min % t->atomic_write_hw_unit_max)
> -             t->atomic_write_hw_unit_max /= 2;
> +     t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
> +                                     max_pow_of_two_factor(chunk_bytes));
>  
> -     t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
> +     t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
>                                         t->atomic_write_hw_unit_max);
> -     t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
> +     t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
> +}
>  
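
As a sanity check of the example in the comment, the reduction can be
mirrored in user space (plain C with the 16K/24K values from the
comment, not the kernel code itself):

    #include <assert.h>

    /* mirrors the kernel helper above: largest power-of-2 factor of nr */
    static unsigned int max_pow_of_two_factor(unsigned int nr)
    {
            return nr & -nr;
    }

    int main(void)
    {
            unsigned int unit_max = 16 * 1024;    /* atomic_write_hw_unit_max */
            unsigned int chunk_bytes = 24 * 1024; /* stripe not a power of 2 */

            if (unit_max > max_pow_of_two_factor(chunk_bytes))
                    unit_max = max_pow_of_two_factor(chunk_bytes);

            /* 24K = 0x6000, lowest set bit 0x2000, so unit_max drops to 8K */
            assert(unit_max == 8 * 1024);
            return 0;
    }
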
> +/* Check stacking of first bottom device */
> +static bool blk_stack_atomic_writes_head(struct queue_limits *t,
> +                             struct queue_limits *b)
> +{
> +     if (b->atomic_write_hw_boundary &&
> +         !blk_stack_atomic_writes_boundary_head(t, b))
> +             return false;
> +
> +     t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
> +     t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
> +     t->atomic_write_hw_max = b->atomic_write_hw_max;
>       return true;
>  }
>  
> @@ -658,6 +664,7 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
>  
>       if (!blk_stack_atomic_writes_head(t, b))
>               goto unsupported;
> +     blk_stack_atomic_writes_chunk_sectors(t);
>       return;
>  
>  unsupported:
> -- 
> 2.43.5
> 

