On Tue, 16 Jul 2024 14:13:41 +0300
Mike Rapoport <r...@kernel.org> wrote:

> From: "Mike Rapoport (Microsoft)" <r...@kernel.org>
> 
> Move code dealing with numa_memblks from arch/x86 to mm/ and add Kconfig
> options to let x86 select it in its Kconfig.
> 
> This code will be later reused by arch_numa.
> 
> No functional changes.
> 
> Signed-off-by: Mike Rapoport (Microsoft) <r...@kernel.org>
Hi Mike,

My only real concern here is that there are a few places where
the lifted code makes changes to memblocks that are x86-only today.
I need to do some more digging to work out whether those are safe
in all cases.

Jonathan



> +/**
> + * numa_cleanup_meminfo - Cleanup a numa_meminfo
> + * @mi: numa_meminfo to clean up
> + *
> + * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
> + * conflicts and clear unused memblks.
> + *
> + * RETURNS:
> + * 0 on success, -errno on failure.
> + */
> +int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
> +{
> +     const u64 low = 0;

Given it's always zero, why not just use that value inline?
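e.g. (untested; IIRC max() wants matching types, so it would need
max_t() or a u64 literal rather than a plain 0):

	bi->start = max_t(u64, bi->start, 0);

Not a big deal either way, just saves a local.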

> +     const u64 high = PFN_PHYS(max_pfn);
> +     int i, j, k;
> +
> +     /* first, trim all entries */
> +     for (i = 0; i < mi->nr_blks; i++) {
> +             struct numa_memblk *bi = &mi->blk[i];
> +
> +             /* move / save reserved memory ranges */
> +             if (!memblock_overlaps_region(&memblock.memory,
> +                                     bi->start, bi->end - bi->start)) {
> +                     numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
> +                     continue;
> +             }
> +
> +             /* make sure all non-reserved blocks are inside the limits */
> +             bi->start = max(bi->start, low);
> +
> +             /* preserve info for non-RAM areas above 'max_pfn': */
> +             if (bi->end > high) {
> +                     numa_add_memblk_to(bi->nid, high, bi->end,
> +                                        &numa_reserved_meminfo);
> +                     bi->end = high;
> +             }
> +
> +             /* and there's no empty block */
> +             if (bi->start >= bi->end)
> +                     numa_remove_memblk_from(i--, mi);
> +     }
> +
> +     /* merge neighboring / overlapping entries */
> +     for (i = 0; i < mi->nr_blks; i++) {
> +             struct numa_memblk *bi = &mi->blk[i];
> +
> +             for (j = i + 1; j < mi->nr_blks; j++) {
> +                     struct numa_memblk *bj = &mi->blk[j];
> +                     u64 start, end;
> +
> +                     /*
> +                      * See whether there are overlapping blocks.  Whine
> +                      * about but allow overlaps of the same nid.  They
> +                      * will be merged below.
> +                      */
> +                     if (bi->end > bj->start && bi->start < bj->end) {
> +                             if (bi->nid != bj->nid) {
> +                                     pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
> +                                            bi->nid, bi->start, bi->end - 1,
> +                                            bj->nid, bj->start, bj->end - 1);
> +                                     return -EINVAL;
> +                             }
> +                             pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
> +                                     bi->nid, bi->start, bi->end - 1,
> +                                     bj->start, bj->end - 1);
> +                     }
> +
> +                     /*
> +                      * Join together blocks on the same node, holes
> +                      * between which don't overlap with memory on other
> +                      * nodes.
> +                      */
> +                     if (bi->nid != bj->nid)
> +                             continue;
> +                     start = min(bi->start, bj->start);
> +                     end = max(bi->end, bj->end);
> +                     for (k = 0; k < mi->nr_blks; k++) {
> +                             struct numa_memblk *bk = &mi->blk[k];
> +
> +                             if (bi->nid == bk->nid)
> +                                     continue;
> +                             if (start < bk->end && end > bk->start)
> +                                     break;
> +                     }
> +                     if (k < mi->nr_blks)
> +                             continue;
> +                     pr_info("NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
> +                            bi->nid, bi->start, bi->end - 1, bj->start,
> +                            bj->end - 1, start, end - 1);
> +                     bi->start = start;
> +                     bi->end = end;
> +                     numa_remove_memblk_from(j--, mi);
> +             }
> +     }
> +
> +     /* clear unused ones */
> +     for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
> +             mi->blk[i].start = mi->blk[i].end = 0;
> +             mi->blk[i].nid = NUMA_NO_NODE;
> +     }
> +
> +     return 0;
> +}

...


> +/*
> + * Mark all currently memblock-reserved physical memory (which covers the
> + * kernel's own memory ranges) as hot-unswappable.
> + */
> +static void __init numa_clear_kernel_node_hotplug(void)

This will be a change for non-x86 architectures.  It 'should' be fine,
but I'm not 100% sure.

> +{
> +     nodemask_t reserved_nodemask = NODE_MASK_NONE;
> +     struct memblock_region *mb_region;
> +     int i;
> +
> +     /*
> +      * We have to do some preprocessing of memblock regions, to
> +      * make them suitable for reservation.
> +      *
> +      * At this time, all memory regions reserved by memblock are
> +      * used by the kernel, but those regions are not split up
> +      * along node boundaries yet, and don't necessarily have their
> +      * node ID set yet either.
> +      *
> +      * So iterate over all memory known to the x86 architecture,

The comment needs an update at least, given this is no longer x86-specific.
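Maybe just something like:

	 * So iterate over all memory known via numa_meminfo,

or similar.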

> +      * and use those ranges to set the nid in memblock.reserved.
> +      * This will split up the memblock regions along node
> +      * boundaries and will set the node IDs as well.
> +      */
> +     for (i = 0; i < numa_meminfo.nr_blks; i++) {
> +             struct numa_memblk *mb = numa_meminfo.blk + i;
> +             int ret;
> +
> +             ret = memblock_set_node(mb->start, mb->end - mb->start,
> +                                     &memblock.reserved, mb->nid);
> +             WARN_ON_ONCE(ret);
> +     }
> +
> +     /*
> +      * Now go over all reserved memblock regions, to construct a
> +      * node mask of all kernel reserved memory areas.
> +      *
> +      * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
> +      *   numa_meminfo might not include all memblock.reserved
> +      *   memory ranges, because quirks such as trim_snb_memory()
> +      *   reserve specific pages for Sandy Bridge graphics. ]
> +      */
> +     for_each_reserved_mem_region(mb_region) {
> +             int nid = memblock_get_region_node(mb_region);
> +
> +             if (nid != MAX_NUMNODES)
> +                     node_set(nid, reserved_nodemask);
> +     }
> +
> +     /*
> +      * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
> +      * belonging to the reserved node mask.
> +      *
> +      * Note that this will include memory regions that reside
> +      * on nodes that contain kernel memory - entire nodes
> +      * become hot-unpluggable:
> +      */
> +     for (i = 0; i < numa_meminfo.nr_blks; i++) {
> +             struct numa_memblk *mb = numa_meminfo.blk + i;
> +
> +             if (!node_isset(mb->nid, reserved_nodemask))
> +                     continue;
> +
> +             memblock_clear_hotplug(mb->start, mb->end - mb->start);
> +     }
> +}
