On 25/08/2023 10:02, Vikram Garhwal wrote:
> Add rangesets for IRQs and IOMEMs. This was done to accommodate dynamic overlay
> node addition/removal operations. With overlay operations, new IRQs and IOMEMs
> are added in dt_host and routed. While removing overlay nodes, nodes are remove
s/remove/removed/
> from dt_host and their IRQs and IOMEMs routing is also removed. Storing IRQs and
> IOMEMs in the rangeset will avoid re-parsing the device tree nodes to get the
> IOMEM and IRQ ranges for overlay remove ops.
>
> Dynamic overlay node add/remove will be introduced in follow-up patches.
>
> Signed-off-by: Vikram Garhwal <vikram.garh...@amd.com>
> ---
> xen/arch/arm/device.c | 43 +++++++++++++++++++++++++-------
> xen/arch/arm/domain_build.c | 4 +--
> xen/arch/arm/include/asm/setup.h | 5 +++-
> 3 files changed, 40 insertions(+), 12 deletions(-)
>
> diff --git a/xen/arch/arm/device.c b/xen/arch/arm/device.c
> index 857f171a27..9df37abac8 100644
> --- a/xen/arch/arm/device.c
> +++ b/xen/arch/arm/device.c
> @@ -165,6 +165,14 @@ int map_range_to_domain(const struct dt_device_node *dev,
> dt_dprintk(" - MMIO: %010"PRIx64" - %010"PRIx64" P2MType=%x\n",
> addr, addr + len, mr_data->p2mt);
>
> + if ( mr_data->iomem_ranges )
> + {
> + res = rangeset_add_range(mr_data->iomem_ranges, paddr_to_pfn(addr),
> + paddr_to_pfn(PAGE_ALIGN(addr + len - 1)));
paddr_to_pfn_aligned()?
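i.e. something like this (just a sketch, assuming the existing
paddr_to_pfn_aligned() helper from domain_build.c is made visible to this file):

    /* paddr_to_pfn_aligned() already wraps paddr_to_pfn(PAGE_ALIGN(...)) */
    res = rangeset_add_range(mr_data->iomem_ranges, paddr_to_pfn(addr),
                             paddr_to_pfn_aligned(addr + len - 1));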
> + if ( res )
> + return res;
> + }
> +
> return 0;
> }
>
> @@ -178,9 +186,10 @@ int map_range_to_domain(const struct dt_device_node *dev,
> */
> int handle_device_interrupts(struct domain *d,
This needs to be renamed as stated in one of the first patches.
> struct dt_device_node *dev,
> - bool need_mapping)
> + bool need_mapping,
> + struct rangeset *irq_ranges)
> {
> - unsigned int i, nirq;
> + unsigned int i, nirq, irq;
> int res;
> struct dt_raw_irq rirq;
>
> @@ -208,17 +217,24 @@ int handle_device_interrupts(struct domain *d,
> continue;
> }
>
> - res = platform_get_irq(dev, i);
> - if ( res < 0 )
> + irq = platform_get_irq(dev, i);
platform_get_irq() can return a negative value and you even have a check for < 0 below.
So you cannot assign its return value to an unsigned variable (the irq < 0 check is
always false for an unsigned type).
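One way to keep the error handling correct would be e.g. (untested sketch, reusing
the existing int 'res' for the return value and assigning to irq only after the check):

    /* platform_get_irq() returns int; check for errors before using the value */
    res = platform_get_irq(dev, i);
    if ( res < 0 )
    {
        printk(XENLOG_ERR "Unable to get irq %u for %s\n",
               i, dt_node_full_name(dev));
        return res;
    }
    irq = res;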
> + if ( irq < 0 )
> {
> printk(XENLOG_ERR "Unable to get irq %u for %s\n",
> i, dt_node_full_name(dev));
> - return res;
> + return irq;
> }
>
> - res = map_irq_to_domain(d, res, need_mapping, dt_node_name(dev));
> + res = map_irq_to_domain(d, irq, need_mapping, dt_node_name(dev));
> if ( res )
> return res;
> +
> + if ( irq_ranges )
> + {
> + res = rangeset_add_singleton(irq_ranges, irq);
> + if ( res )
> + return res;
> + }
> }
>
> return 0;
> @@ -249,6 +265,11 @@ static int map_dt_irq_to_domain(const struct dt_device_node *dev,
> }
>
> res = map_irq_to_domain(d, irq, !mr_data->skip_mapping,
> dt_node_name(dev));
> + if ( res )
> + return res;
> +
> + if ( mr_data->irq_ranges )
> + res = rangeset_add_singleton(mr_data->irq_ranges, irq);
>
> return res;
> }
> @@ -289,7 +310,8 @@ static int map_device_children(const struct dt_device_node *dev,
> * - Assign the device to the guest if it's protected by an IOMMU
> * - Map the IRQs and iomem regions to DOM0
> */
> -int handle_device(struct domain *d, struct dt_device_node *dev, p2m_type_t p2mt)
> +int handle_device(struct domain *d, struct dt_device_node *dev, p2m_type_t p2mt,
> + struct rangeset *iomem_ranges, struct rangeset *irq_ranges)
You modified a function but the corresponding prototype change is missing.
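The matching declaration (presumably the one in xen/arch/arm/include/asm/setup.h,
going by the diffstat above) needs to gain the new parameters as well, e.g.:

    int handle_device(struct domain *d, struct dt_device_node *dev,
                      p2m_type_t p2mt, struct rangeset *iomem_ranges,
                      struct rangeset *irq_ranges);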
> {
> unsigned int naddr;
> unsigned int i;
> @@ -304,10 +326,13 @@ int handle_device(struct domain *d, struct dt_device_node *dev, p2m_type_t p2mt)
> * pci_host_bridge_mappings().
> */
> struct map_range_data mr_data = {
> + .d = d,
There's something wrong here: I can see that you removed this line in patch no. 3.
Why? I don't see any reason for that, so please fix it (otherwise the code state
from patch 3 to patch 15 is broken, since nothing else sets mr_data.d).
When moving code (which should be a plain copy/paste), do not perform any other
modification (unless mentioned otherwise).
Also, each patch in a series (except for rare occasions) should not cause any
build/runtime failure.
~Michal