On Fri, Aug 02, 2024 at 09:03:47AM +0300, Baruch Siach wrote:
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 3b4be4ca3b08..62b36fda44c9 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -20,7 +20,7 @@
>   * it for entirely different regions. In that case the arch code needs to
>   * override the variable below for dma-direct to work properly.
>   */
> -unsigned int zone_dma_bits __ro_after_init = 24;
> +u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);

Using u64 here makes sense even though it may be wider than phys_addr_t. It
matches the type of phys_limit in the swiotlb code, so the compilers should
no longer complain.

> diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
> index d10613eb0f63..7b04f7575796 100644
> --- a/kernel/dma/pool.c
> +++ b/kernel/dma/pool.c
> @@ -70,9 +70,9 @@ static bool cma_in_zone(gfp_t gfp)
>       /* CMA can't cross zone boundaries, see cma_activate_area() */
>       end = cma_get_base(cma) + size - 1;
>       if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
> -             return end <= DMA_BIT_MASK(zone_dma_bits);
> +             return end <= zone_dma_limit;
>       if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
> -             return end <= DMA_BIT_MASK(32);
> +             return end <= max(DMA_BIT_MASK(32), zone_dma_limit);
>       return true;
>  }
>  
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 043b0ecd3e8d..bb51bd5335ad 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -450,9 +450,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
>       if (!remap)
>               io_tlb_default_mem.can_grow = true;
>       if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
> -             io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
> +             io_tlb_default_mem.phys_limit = zone_dma_limit;
>       else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
> -             io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
> +             io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
>       else
>               io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
>  #endif

These two look correct to me now, and this is the least intrusive approach
(the alternative would have been a separate zone_dma32_limit). The arch code,
however, needs to ensure that zone_dma_limit can always support 32-bit
devices even if it is above 4GB (with the relevant DMA offsets in place for
such devices).

-- 
Catalin

Reply via email to