On Fri, 16 Jun 2023 at 11:03, Song Gao <gaos...@loongson.cn> wrote:
>
> From: Tianrui Zhao <zhaotian...@loongson.cn>
>
> 1. Implement some functions for LoongArch NUMA support;
> 2. Implement fdt_add_memory_node() for the fdt;
> 3. build_srat() fills in node_id and builds the NUMA memory entries.
>
> Reviewed-by: Song Gao <gaos...@loongson.cn>
> Signed-off-by: Tianrui Zhao <zhaotian...@loongson.cn>
> Signed-off-by: Song Gao <gaos...@loongson.cn>
> Message-Id: <20230613122613.2471743-1-zhaotian...@loongson.cn>

Hi; Coverity has pointed out a memory leak in this commit
(CID 1544773):

> @@ -799,17 +823,43 @@ static void loongarch_init(MachineState *machine)
>          machine->possible_cpus->cpus[i].cpu = OBJECT(cpu);
>      }
>      fdt_add_cpu_nodes(lams);
> -    /* Add memory region */
> -    memory_region_init_alias(&lams->lowmem, NULL, "loongarch.lowram",
> -                             machine->ram, 0, 256 * MiB);
> -    memory_region_add_subregion(address_space_mem, offset, &lams->lowmem);
> -    offset += 256 * MiB;
> -    memmap_add_entry(0, 256 * MiB, 1);
> -    highram_size = ram_size - 256 * MiB;
> -    memory_region_init_alias(&lams->highmem, NULL, "loongarch.highmem",
> -                             machine->ram, offset, highram_size);
> -    memory_region_add_subregion(address_space_mem, 0x90000000, &lams->highmem);
> -    memmap_add_entry(0x90000000, highram_size, 1);
> +
> +    /* Node0 memory */
> +    memmap_add_entry(VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 1);
> +    fdt_add_memory_node(machine, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 0);
> +    memory_region_init_alias(&lams->lowmem, NULL, "loongarch.node0.lowram",
> +                             machine->ram, offset, VIRT_LOWMEM_SIZE);
> +    memory_region_add_subregion(address_space_mem, phyAddr, &lams->lowmem);
> +
> +    offset += VIRT_LOWMEM_SIZE;
> +    if (nb_numa_nodes > 0) {
> +        assert(numa_info[0].node_mem > VIRT_LOWMEM_SIZE);
> +        highram_size = numa_info[0].node_mem - VIRT_LOWMEM_SIZE;
> +    } else {
> +        highram_size = ram_size - VIRT_LOWMEM_SIZE;
> +    }
> +    phyAddr = VIRT_HIGHMEM_BASE;
> +    memmap_add_entry(phyAddr, highram_size, 1);
> +    fdt_add_memory_node(machine, phyAddr, highram_size, 0);
> +    memory_region_init_alias(&lams->highmem, NULL, "loongarch.node0.highram",
> +                              machine->ram, offset, highram_size);
> +    memory_region_add_subregion(address_space_mem, phyAddr, &lams->highmem);
> +
> +    /* Node1 - Nodemax memory */
> +    offset += highram_size;
> +    phyAddr += highram_size;
> +
> +    for (i = 1; i < nb_numa_nodes; i++) {
> +        MemoryRegion *nodemem = g_new(MemoryRegion, 1);
> +        ramName = g_strdup_printf("loongarch.node%d.ram", i);
> +        memory_region_init_alias(nodemem, NULL, ramName, machine->ram,
> +                                 offset,  numa_info[i].node_mem);
> +        memory_region_add_subregion(address_space_mem, phyAddr, nodemem);
> +        memmap_add_entry(phyAddr, numa_info[i].node_mem, 1);
> +        fdt_add_memory_node(machine, phyAddr, numa_info[i].node_mem, i);
> +        offset += numa_info[i].node_mem;
> +        phyAddr += numa_info[i].node_mem;

In this loop, we allocate memory via g_strdup_printf(),
but never free it. The nicest fix for this is to use the
g_autofree mechanism so that the memory is automatically
freed when execution reaches the end of the block:
   g_autofree char *ramName = g_strdup_printf("....", ...);
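
For example, the loop body might then look roughly like this (an
untested sketch; it assumes the existing declaration of ramName
earlier in the function is dropped so the variable can be declared
inside the loop):

    for (i = 1; i < nb_numa_nodes; i++) {
        MemoryRegion *nodemem = g_new(MemoryRegion, 1);
        /* freed automatically at the end of each loop iteration */
        g_autofree char *ramName = g_strdup_printf("loongarch.node%d.ram", i);

        memory_region_init_alias(nodemem, NULL, ramName, machine->ram,
                                 offset, numa_info[i].node_mem);
        memory_region_add_subregion(address_space_mem, phyAddr, nodemem);
        memmap_add_entry(phyAddr, numa_info[i].node_mem, 1);
        fdt_add_memory_node(machine, phyAddr, numa_info[i].node_mem, i);
        offset += numa_info[i].node_mem;
        phyAddr += numa_info[i].node_mem;
    }

This is safe because memory_region_init_alias() takes its own copy of
the name string internally, so the region does not keep a pointer to
the caller's buffer.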

thanks
-- PMM
