Hi Tang,

I don't think this can work well.
The patch gets the hotpluggable memory ranges from
acpi_numa_memory_affinity_init(), but that is too late.
For example, if we use the log_buf_len boot option, the memblock
allocator runs before the SRAT information is parsed. In that case, the
movablecore_map boot option does not work as intended.
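
For reference, here is a rough sketch of the ordering I mean in
arch/x86/kernel/setup.c. This is simplified and not the actual code,
just to show that setup_log_buf() can already allocate from memblock
while the SRAT is only parsed later, from initmem_init():

/* Simplified sketch of setup_arch(); not a literal excerpt. */
void __init setup_arch(char **cmdline_p)
{
	/* ... e820 / memblock setup ... */

	setup_log_buf(1);	/* log_buf_len=: the enlarged log buffer
				 * is allocated from memblock here */

	/* ... */

	initmem_init();		/* -> x86_numa_init() -> acpi_numa_init()
				 *    -> acpi_numa_memory_affinity_init()
				 * SRAT hotplug info only becomes
				 * available at this point */
}

So the ranges movablecore_map wants to protect can already contain
memblock allocations by the time this patch sees the SRAT.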

Thanks,
Yasuaki Ishimatsu

2012/12/19 18:11, Tang Chen wrote:
> The Hot Pluggable bit in the SRAT flags specifies whether the memory
> range could be hotplugged.
> 
> If the user specified movablecore_map=nn[KMG]@ss[KMG], reset
> movablecore_map.map to the intersection of the hotpluggable ranges from
> the SRAT and the old movablecore_map.map.
> Else, if the user specified movablecore_map=acpi, just use the
> hotpluggable ranges from the SRAT.
> Otherwise, do nothing, and the kernel will use all the memory in all
> nodes evenly.
> 
> The idea of getting the info from the SRAT was from Liu Jiang
> <jiang....@huawei.com>, and the idea of putting further limits on
> memblock was from Wu Jianguo <wujian...@huawei.com>.
> 
> Signed-off-by: Tang Chen <tangc...@cn.fujitsu.com>
> Tested-by: Gu Zheng <guz.f...@cn.fujitsu.com>
> ---
>   arch/x86/mm/srat.c |   55 +++++++++++++++++++++++++++++++++++++++++++++++++--
>   1 files changed, 52 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
> index 4ddf497..a8856d2 100644
> --- a/arch/x86/mm/srat.c
> +++ b/arch/x86/mm/srat.c
> @@ -146,7 +146,12 @@ int __init
>   acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
>   {
>       u64 start, end;
> +     u32 hotpluggable;
>       int node, pxm;
> +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
> +     int overlap;
> +     unsigned long start_pfn, end_pfn;
> +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
>   
>       if (srat_disabled())
>               return -1;
> @@ -157,8 +162,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
>       if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
>               return -1;
>   
> -     if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
> +     hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
> +     if (hotpluggable && !save_add_info())
>               return -1;
> +
>       start = ma->base_address;
>       end = start + ma->length;
>       pxm = ma->proximity_domain;
> @@ -178,9 +185,51 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
>   
>       node_set(node, numa_nodes_parsed);
>   
> -     printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
> +     printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
>              node, pxm,
> -            (unsigned long long) start, (unsigned long long) end - 1);
> +            (unsigned long long) start, (unsigned long long) end - 1,
> +            hotpluggable ? "Hot Pluggable": "");
> +
> +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
> +     start_pfn = PFN_DOWN(start);
> +     end_pfn = PFN_UP(end);
> +
> +     if (!hotpluggable) {
> +             /* Clear the range overlapped in movablecore_map.map */
> +             remove_movablecore_map(start_pfn, end_pfn);
> +             goto out;
> +     }
> +
> +     if (!movablecore_map.acpi) {
> +             for (overlap = 0; overlap < movablecore_map.nr_map; overlap++) {
> +                     if (start_pfn < movablecore_map.map[overlap].end_pfn)
> +                             break;
> +             }
> +
> +             /* If there is no overlapped range, or the overlapped range
> +              * already covers up to end_pfn, then insert nothing.
> +              */
> +             if (overlap == movablecore_map.nr_map ||
> +                 end_pfn <= movablecore_map.map[overlap].end_pfn)
> +                     goto out;
> +
> +             /*
> +              * Otherwise, insert the rest of this range to prevent memblock
> +              * from allocating memory in it.
> +              */
> +             start_pfn = movablecore_map.map[overlap].end_pfn;
> +             start = PFN_PHYS(start_pfn);
> +     }
> +
> +     /* If user chose to use SRAT info, insert the range anyway. */
> +     if (insert_movablecore_map(start_pfn, end_pfn))
> +             pr_err("movablecore_map: too many entries;"
> +                     " ignoring [mem %#010llx-%#010llx]\n",
> +                     (unsigned long long) start,
> +                     (unsigned long long) (end - 1));
> +
> +out:
> +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
>       return 0;
>   }
>   
> 

