The current kcov KCOV_TRACE_PC mode stores every executed PC in
sequence, so the shared buffer quickly fills up with duplicate entries.

Introduce a KCOV_TRACE_UNIQ_PC mode that records each PC only once.
In unique PC mode:
- store unique PCs as kcov_entry items in a hashmap
- allocate entries with gen_pool_alloc() in __sanitizer_cov_trace_pc(),
  since kmalloc() may sleep and must not be called from that path

Signed-off-by: Jiao, Joey <quic_jiang...@quicinc.com>
---
 include/linux/kcov.h      |   6 +-
 include/uapi/linux/kcov.h |   2 +
 kernel/kcov.c             | 190 ++++++++++++++++++++++++++++++++++++++++------
 3 files changed, 173 insertions(+), 25 deletions(-)
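
A minimal userspace sketch (for reviewers) of how the new mode is
expected to be exercised. It assumes the existing kcov debugfs API from
Documentation/dev-tools/kcov.rst and that unique PC mode keeps the
current buffer layout (word 0 holds the entry count, PCs follow), which
kcov_map_add() below preserves:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define KCOV_INIT_TRACE        _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE            _IO('c', 100)
    #define KCOV_DISABLE           _IO('c', 101)
    #define KCOV_TRACE_UNIQ_PC     2
    #define COVER_SIZE             (64 << 10)

    int main(void)
    {
            unsigned long *cover, n, i;
            int fd;

            fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd == -1)
                    exit(1);
            /* Size the buffer; with this patch it also sets up the hashmap. */
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    exit(1);
            cover = (unsigned long *)mmap(NULL,
                            COVER_SIZE * sizeof(unsigned long),
                            PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if ((void *)cover == MAP_FAILED)
                    exit(1);
            /* Enable deduplicated PC collection for this task. */
            if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_UNIQ_PC))
                    exit(1);
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

            read(-1, NULL, 0); /* Exercise some kernel code. */

            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            if (ioctl(fd, KCOV_DISABLE, 0))
                    exit(1);
            return 0;
    }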

diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 75a2fb8b16c32917817b8ec7f5e45421793431ae..aafd9f88450cb8672c701349300b54662bc38079 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -20,9 +20,11 @@ enum kcov_mode {
         */
        KCOV_MODE_TRACE_PC = 2,
        /* Collecting comparison operands mode. */
-       KCOV_MODE_TRACE_CMP = 3,
+       KCOV_MODE_TRACE_CMP = 4,
        /* The process owns a KCOV remote reference. */
-       KCOV_MODE_REMOTE = 4,
+       KCOV_MODE_REMOTE = 8,
+       /* Collecting unique PC mode. */
+       KCOV_MODE_TRACE_UNIQ_PC = 16,
 };
 
 #define KCOV_IN_CTXSW  (1 << 30)
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index ed95dba9fa37e291e9e9e0109eb8481bb7a5e9da..d2a2bff36f285a5e3a03395f8890fcb716cf3f07 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -35,6 +35,8 @@ enum {
        KCOV_TRACE_PC = 0,
        /* Collecting comparison operands mode. */
        KCOV_TRACE_CMP = 1,
+       /* Collecting unique PC mode. */
+       KCOV_TRACE_UNIQ_PC = 2,
 };
 
 /*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 28a6be6e64fdd721d49c4040ed10ce33f9d890a1..bbd7b7503206fe595976458ab685b95f784607d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -9,9 +9,11 @@
 #include <linux/types.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/genalloc.h>
 #include <linux/hashtable.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/jhash.h>
 #include <linux/kmsan-checks.h>
 #include <linux/mm.h>
 #include <linux/preempt.h>
@@ -32,6 +34,29 @@
 /* Number of 64-bit words written per one comparison: */
 #define KCOV_WORDS_PER_CMP 4
 
+/* One deduplicated coverage record: a single canonicalized PC. */
+struct kcov_entry {
+       unsigned long           ent;
+
+       struct hlist_node       node;
+};
+
+/* Min gen pool alloc order. */
+#define MIN_POOL_ALLOC_ORDER ilog2(roundup_pow_of_two(sizeof(struct kcov_entry)))
+
+/*
+ * kcov hashmap that stores unique PCs, the preallocated memory backing
+ * kcov_entry allocations, and the area shared between kernel and
+ * user space.
+ */
+struct kcov_map {
+       /* A 15-bit table (32768 buckets) balances hash collisions, memory and performance. */
+       DECLARE_HASHTABLE(buckets, 15);
+       struct gen_pool         *pool;
+       /* Preallocated memory added to the pool for kcov_entry allocations. */
+       void                    *mem;
+       /* Buffer shared with user space. */
+       void                    *area;
+};
+
 /*
  * kcov descriptor (one per opened debugfs file).
  * State transitions of the descriptor:
@@ -60,6 +85,8 @@ struct kcov {
        unsigned int            size;
        /* Coverage buffer shared with user space. */
        void                    *area;
+       /* Coverage hashmap for unique PCs. */
+       struct kcov_map         *map;
        /* Task for which we collect coverage, or NULL. */
        struct task_struct      *t;
        /* Collecting coverage from remote (background) threads. */
@@ -171,7 +198,7 @@ static inline bool in_softirq_really(void)
        return in_serving_softirq() && !in_hardirq() && !in_nmi();
 }
 
-static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace unsigned int check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
        unsigned int mode;
 
@@ -191,7 +218,94 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
         * kcov_start().
         */
        barrier();
-       return mode == needed_mode;
+       return mode & needed_mode;
+}
+
+static int kcov_map_init(struct kcov *kcov, unsigned long size)
+{
+       struct kcov_map *map;
+       void *area;
+       unsigned long flags;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       area = vmalloc_user(size * sizeof(unsigned long));
+       if (!area)
+               goto fail_area;
+
+       hash_init(map->buckets);
+
+       map->pool = gen_pool_create(MIN_POOL_ALLOC_ORDER, -1);
+       if (!map->pool)
+               goto fail_pool;
+
+       /* Preallocate one pool chunk per possible buffer slot. */
+       map->mem = vmalloc(size * (1 << MIN_POOL_ALLOC_ORDER));
+       if (!map->mem)
+               goto fail_mem;
+
+       if (gen_pool_add(map->pool, (unsigned long)map->mem, size *
+           (1 << MIN_POOL_ALLOC_ORDER), -1))
+               goto fail_pool_add;
+
+       /* Publish the map only once it is fully initialized. */
+       spin_lock_irqsave(&kcov->lock, flags);
+       map->area = area;
+       kcov->map = map;
+       kcov->area = area;
+       spin_unlock_irqrestore(&kcov->lock, flags);
+
+       return 0;
+
+fail_pool_add:
+       vfree(map->mem);
+fail_mem:
+       gen_pool_destroy(map->pool);
+fail_pool:
+       vfree(area);
+fail_area:
+       kfree(map);
+       return -ENOMEM;
+}
+
+static inline u32 hash_key(const struct kcov_entry *k)
+{
+       return jhash((u32 *)k, offsetof(struct kcov_entry, node), 0);
+}
+
+static notrace inline void kcov_map_add(struct kcov_map *map, struct kcov_entry *ent,
+                                       struct task_struct *t)
+{
+       struct kcov_entry *entry;
+       unsigned int key = hash_key(ent);
+       unsigned long pos, *area;
+
+       /* Bail out if this PC was already recorded. */
+       hash_for_each_possible_rcu(map->buckets, entry, node, key) {
+               if (entry->ent == ent->ent)
+                       return;
+       }
+
+       entry = (struct kcov_entry *)gen_pool_alloc(map->pool, 1 << MIN_POOL_ALLOC_ORDER);
+       if (unlikely(!entry))
+               return;
+
+       barrier();
+       memcpy(entry, ent, sizeof(*entry));
+       hash_add_rcu(map->buckets, &entry->node, key);
+
+       area = t->kcov_area;
+
+       pos = READ_ONCE(area[0]) + 1;
+       if (likely(pos < t->kcov_size)) {
+               WRITE_ONCE(area[0], pos);
+               barrier();
+               area[pos] = ent->ent;
+       }
 }
 
 static notrace unsigned long canonicalize_ip(unsigned long ip)
@@ -212,25 +326,34 @@ void notrace __sanitizer_cov_trace_pc(void)
        unsigned long *area;
        unsigned long ip = canonicalize_ip(_RET_IP_);
        unsigned long pos;
+       struct kcov_entry entry = {0};
+       unsigned int mode;
 
        t = current;
-       if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+       if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_TRACE_UNIQ_PC, t))
                return;
 
        area = t->kcov_area;
-       /* The first 64-bit word is the number of subsequent PCs. */
-       pos = READ_ONCE(area[0]) + 1;
-       if (likely(pos < t->kcov_size)) {
-               /* Previously we write pc before updating pos. However, some
-                * early interrupt code could bypass check_kcov_mode() check
-                * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
-                * raised between writing pc and updating pos, the pc could be
-                * overitten by the recursive __sanitizer_cov_trace_pc().
-                * Update pos before writing pc to avoid such interleaving.
-                */
-               WRITE_ONCE(area[0], pos);
-               barrier();
-               area[pos] = ip;
+       mode = t->kcov_mode;
+       if (mode == KCOV_MODE_TRACE_PC) {
+               /* The first 64-bit word is the number of subsequent PCs. */
+               pos = READ_ONCE(area[0]) + 1;
+               if (likely(pos < t->kcov_size)) {
+                       /* Previously we write pc before updating pos. However, some
+                        * early interrupt code could bypass check_kcov_mode() check
+                        * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
+                        * raised between writing pc and updating pos, the pc could be
+                        * overwritten by the recursive __sanitizer_cov_trace_pc().
+                        * Update pos before writing pc to avoid such interleaving.
+                        */
+                       WRITE_ONCE(area[0], pos);
+                       barrier();
+                       area[pos] = ip;
+               }
+       } else {
+               entry.ent = ip;
+               kcov_map_add(t->kcov->map, &entry, t);
        }
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
@@ -432,11 +555,33 @@ static void kcov_get(struct kcov *kcov)
        refcount_inc(&kcov->refcount);
 }
 
+static void kcov_map_free(struct kcov *kcov)
+{
+       int bkt;
+       struct hlist_node *tmp;
+       struct kcov_entry *entry;
+       struct kcov_map *map;
+
+       map = kcov->map;
+       if (!map)
+               return;
+       /*
+        * The last reference is gone, so there are no concurrent
+        * readers; entries can be freed immediately.
+        */
+       rcu_read_lock();
+       hash_for_each_safe(map->buckets, bkt, tmp, entry, node) {
+               hash_del_rcu(&entry->node);
+               gen_pool_free(map->pool, (unsigned long)entry,
+                             1 << MIN_POOL_ALLOC_ORDER);
+       }
+       rcu_read_unlock();
+       vfree(map->area);
+       vfree(map->mem);
+       gen_pool_destroy(map->pool);
+       kfree(map);
+}
+
 static void kcov_put(struct kcov *kcov)
 {
        if (refcount_dec_and_test(&kcov->refcount)) {
                kcov_remote_reset(kcov);
-               vfree(kcov->area);
+               kcov_map_free(kcov);
                kfree(kcov);
        }
 }
@@ -546,6 +691,8 @@ static int kcov_get_mode(unsigned long arg)
 #else
                return -ENOTSUPP;
 #endif
+       else if (arg == KCOV_TRACE_UNIQ_PC)
+               return KCOV_MODE_TRACE_UNIQ_PC;
        else
                return -EINVAL;
 }
@@ -698,7 +845,6 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
        unsigned int remote_num_handles;
        unsigned long remote_arg_size;
        unsigned long size, flags;
-       void *area;
 
        kcov = filep->private_data;
        switch (cmd) {
@@ -713,16 +859,14 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
                size = arg;
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
-               area = vmalloc_user(size * sizeof(unsigned long));
-               if (area == NULL)
-                       return -ENOMEM;
+               res = kcov_map_init(kcov, size);
+               if (res)
+                       return res;
                spin_lock_irqsave(&kcov->lock, flags);
                if (kcov->mode != KCOV_MODE_DISABLED) {
                        spin_unlock_irqrestore(&kcov->lock, flags);
-                       vfree(area);
                        return -EBUSY;
                }
-               kcov->area = area;
                kcov->size = size;
                kcov->mode = KCOV_MODE_INIT;
                spin_unlock_irqrestore(&kcov->lock, flags);

-- 
2.47.1

