From: Ye Liu <li...@kylinos.cn>

Replace the repeated open-coded (20 - PAGE_SHIFT) shifts with helper macros:
- MB_TO_PAGES(mb)    converts megabytes to a page count
- PAGES_TO_MB(pages) converts a page count to megabytes

No functional change.

Signed-off-by: Ye Liu <li...@kylinos.cn>
---
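[Note, not part of the patch: a minimal userspace sketch of the same shift
arithmetic, for anyone reviewing the conversion. It assumes PAGE_SHIFT == 12
(4KB pages); the DEMO_* names are illustrative only and do not exist in the
kernel.]

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                              /* assumed 4KB pages */
#define DEMO_PAGES_TO_MB(pages) ((pages) >> (20 - DEMO_PAGE_SHIFT))
#define DEMO_MB_TO_PAGES(mb)    ((mb) << (20 - DEMO_PAGE_SHIFT))

int main(void)
{
        /* With 4KB pages, 512MB is 131072 pages; the conversion round-trips. */
        printf("512 MB -> %lu pages\n", (unsigned long)DEMO_MB_TO_PAGES(512UL));
        printf("131072 pages -> %lu MB\n", (unsigned long)DEMO_PAGES_TO_MB(131072UL));
        return 0;
}

Since both macros are pure shifts, they remain constant expressions when given
constant arguments, which keeps them usable in definitions such as INIT_BW below.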
 include/linux/mm.h    | 9 +++++++++
 kernel/rcu/rcuscale.c | 2 +-
 kernel/sched/fair.c   | 5 ++---
 mm/backing-dev.c      | 2 +-
 mm/huge_memory.c      | 2 +-
 mm/swap.c             | 2 +-
 6 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 957acde6ae62..0c1b2c074142 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -69,6 +69,15 @@ static inline void totalram_pages_add(long count)
 
 extern void * high_memory;
 
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_legacy_va_layout;
 #else
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index b521d0455992..7484d8ad5767 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -796,7 +796,7 @@ kfree_scale_thread(void *arg)
                pr_alert("Total time taken by all kfree'ers: %llu ns, loops: 
%d, batches: %ld, memory footprint: %lldMB\n",
                       (unsigned long long)(end_time - start_time), kfree_loops,
                       rcuscale_seq_diff(b_rcu_gp_test_finished, 
b_rcu_gp_test_started),
-                      (mem_begin - mem_during) >> (20 - PAGE_SHIFT));
+                      PAGES_TO_MB(mem_begin - mem_during));
 
                if (shutdown) {
                        smp_mb(); /* Assign before wake. */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9b4bbbf0af6..ae1d9a7ef202 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1489,7 +1489,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
         * by the PTE scanner and NUMA hinting faults should be trapped based
         * on resident pages
         */
-       nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
+       nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size);
        rss = get_mm_rss(p->mm);
        if (!rss)
                rss = nr_scan_pages;
@@ -1926,8 +1926,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
                }
 
                def_th = sysctl_numa_balancing_hot_threshold;
-               rate_limit = sysctl_numa_balancing_promote_rate_limit << \
-                       (20 - PAGE_SHIFT);
+               rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit);
                numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
 
                th = pgdat->nbp_threshold ? : def_th;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 783904d8c5ef..e4d578e6121c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -510,7 +510,7 @@ static void wb_update_bandwidth_workfn(struct work_struct *work)
 /*
  * Initial write bandwidth: 100 MB/s
  */
-#define INIT_BW                (100 << (20 - PAGE_SHIFT))
+#define INIT_BW                MB_TO_PAGES(100)
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   gfp_t gfp)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 389620c65a5f..dcc33d9c300f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -911,7 +911,7 @@ static int __init hugepage_init(void)
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save.  The admin can still enable it through /sys.
         */
-       if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
+       if (totalram_pages() < MB_TO_PAGES(512)) {
                transparent_hugepage_flags = 0;
                return 0;
        }
diff --git a/mm/swap.c b/mm/swap.c
index 3632dd061beb..cb164f9ef9e3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1096,7 +1096,7 @@ static const struct ctl_table swap_sysctl_table[] = {
  */
 void __init swap_setup(void)
 {
-       unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
+       unsigned long megs = PAGES_TO_MB(totalram_pages());
 
        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
-- 
2.43.0

