This patch builds the call graph during the insn scan inside check_subprogs,
then does recursive and unreachable subprog detection using the call graph.
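
For reference, the detection is the classic DFS colouring scheme: a
node met again while still on the current path (EXPLORING) indicates a
recursive call, and any node not EXPLORED after the walk from subprog 0
is unreachable. A minimal recursive userspace sketch of the same idea
(hypothetical names, not the kernel code below, which is iterative
since deep recursion on the kernel stack is not an option):

	#define MAX_SUBPROGS	256

	/* status: 0 = unvisited, 1 = EXPLORING, 2 = EXPLORED */
	static int dfs(int node, int *status,
		       int adj[][MAX_SUBPROGS], const int *deg)
	{
		int i;

		status[node] = 1;		/* on the current DFS path */
		for (i = 0; i < deg[node]; i++) {
			int next = adj[node][i];

			if (status[next] == 1)
				return -1;	/* back edge: recursion */
			if (!status[next] && dfs(next, status, adj, deg))
				return -1;
		}
		status[node] = 2;		/* fully explored */
		return 0;
	}

After dfs(0, ...) succeeds, any status[i] != 2 marks an unreachable
subprog.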

Signed-off-by: Jiong Wang <jiong.w...@netronome.com>
---
 include/linux/bpf_verifier.h |   1 +
 kernel/bpf/cfg.c             | 133 +++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/cfg.h             |   4 ++
 kernel/bpf/verifier.c        |  32 +++++++++--
 4 files changed, 166 insertions(+), 4 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 9c0a808..96337ba 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -176,6 +176,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 struct bpf_subprog_info {
        u32 start; /* insn idx of function entry point */
        u16 stack_depth; /* max. stack depth used by this function */
+       struct list_head callees; /* callees list. */
        struct list_head bbs; /* basic blocks list. */
        u32 bb_num; /* total basic block num. */
        unsigned long *dtree;
diff --git a/kernel/bpf/cfg.c b/kernel/bpf/cfg.c
index a34a95c..174e3f0 100644
--- a/kernel/bpf/cfg.c
+++ b/kernel/bpf/cfg.c
@@ -13,6 +13,12 @@
 
 #include "cfg.h"
 
+struct cedge_node {
+       struct list_head l;
+       u8 caller_idx;  /* subprog index of the caller */
+       u8 callee_idx;  /* subprog index of the callee */
+};
+
 struct edge_node {
        struct list_head l;
        struct bb_node *src;
@@ -652,6 +658,126 @@ int add_subprog(struct bpf_verifier_env *env, int off)
        return 0;
 }
 
+struct callee_iter {
+       struct list_head *list_head;
+       struct cedge_node *callee;
+};
+
+#define first_callee(c_list)   list_first_entry(c_list, struct cedge_node, l)
+#define next_callee(c)         list_next_entry(c, l)
+
+static bool ci_end_p(struct callee_iter *ci)
+{
+       return &ci->callee->l == ci->list_head;
+}
+
+static void ci_next(struct callee_iter *ci)
+{
+       struct cedge_node *c = ci->callee;
+
+       ci->callee = next_callee(c);
+}
+
+#define EXPLORING      1
+#define EXPLORED       2
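+/* Walk the call graph rooted at subprog 0 with an iterative DFS.
+ * Seeing an EXPLORING node again means a back edge, i.e. a recursive
+ * call; any node left un-EXPLORED afterwards is unreachable from the
+ * program entry.
+ */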
+int cgraph_check_recursive_unreachable(struct bpf_verifier_env *env,
+                                      struct bpf_subprog_info *subprog)
+{
+       struct callee_iter *stack;
+       struct cedge_node *callee;
+       int sp = 0, idx = 0, ret;
+       struct callee_iter ci;
+       int *status;
+
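+       /* Explicit DFS stack: one saved iterator per open call frame,
+        * bounded to 128 entries (checked below).
+        */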
+       stack = kmalloc_array(128, sizeof(struct callee_iter), GFP_KERNEL);
+       if (!stack)
+               return -ENOMEM;
+       status = kcalloc(env->subprog_cnt, sizeof(int), GFP_KERNEL);
+       if (!status) {
+               kfree(stack);
+               return -ENOMEM;
+       }
+       ci.callee = first_callee(&subprog->callees);
+       ci.list_head = &subprog->callees;
+       status[0] = EXPLORING;
+
+       while (1) {
+               while (!ci_end_p(&ci)) {
+                       callee = ci.callee;
+                       idx = callee->callee_idx;
+                       if (status[idx] == EXPLORING) {
+                               bpf_verifier_log_write(env, "cgraph - recursive call\n");
+                               ret = -EINVAL;
+                               goto err_free;
+                       }
+
+                       status[idx] = EXPLORING;
+
+                       if (sp == 127) {
+                               bpf_verifier_log_write(env, "cgraph - call frame too deep\n");
+                               ret = -EINVAL;
+                               goto err_free;
+                       }
+
+                       stack[sp++] = ci;
+                       ci.callee = first_callee(&subprog[idx].callees);
+                       ci.list_head = &subprog[idx].callees;
+               }
+
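+               /* Finished walking a callees list: mark the subprog that
+                * owns it EXPLORED.  An empty list means a leaf function,
+                * whose index is still in idx.
+                */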
+               if (!list_empty(ci.list_head))
+                       status[first_callee(ci.list_head)->caller_idx] =
+                               EXPLORED;
+               else
+                       /* leaf func. */
+                       status[idx] = EXPLORED;
+
+               if (!sp)
+                       break;
+
+               ci = stack[--sp];
+               ci_next(&ci);
+       }
+
+       for (idx = 0; idx < env->subprog_cnt; idx++)
+               if (status[idx] != EXPLORED) {
+                       bpf_verifier_log_write(env, "cgraph - unreachable subprog\n");
+                       ret = -EINVAL;
+                       goto err_free;
+               }
+
+       ret = 0;
+err_free:
+       kfree(status);
+       kfree(stack);
+       return ret;
+}
+
+int subprog_append_callee(struct bpf_verifier_env *env,
+                         struct list_head *callees_list,
+                         int caller_idx, int off)
+{
+       int callee_idx = find_subprog(env, off);
+       struct cedge_node *new_callee, *callee;
+
+       if (callee_idx < 0)
+               return callee_idx;
+
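+       /* De-duplicate: record each caller->callee edge only once. */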
+       list_for_each_entry(callee, callees_list, l) {
+               if (callee->callee_idx == callee_idx)
+                       return 0;
+       }
+
+       new_callee = kzalloc(sizeof(*new_callee), GFP_KERNEL);
+       if (!new_callee)
+               return -ENOMEM;
+
+       new_callee->caller_idx = caller_idx;
+       new_callee->callee_idx = callee_idx;
+       list_add_tail(&new_callee->l, callees_list);
+
+       return 0;
+}
+
 static void subprog_free_edge(struct bb_node *bb)
 {
        struct list_head *succs = &bb->e_succs;
@@ -669,7 +795,9 @@ void subprog_free(struct bpf_subprog_info *subprog, int end_idx)
        int i = 0;
 
        for (; i <= end_idx; i++) {
+               struct list_head *callees = &subprog[i].callees;
                struct list_head *bbs = &subprog[i].bbs;
+               struct cedge_node *callee, *tmp_callee;
                struct bb_node *bb, *tmp, *exit;
 
                bb = entry_bb(bbs);
@@ -680,6 +808,11 @@ void subprog_free(struct bpf_subprog_info *subprog, int end_idx)
                        kfree(bb);
                }
 
+               list_for_each_entry_safe(callee, tmp_callee, callees, l) {
+                       list_del(&callee->l);
+                       kfree(callee);
+               }
+
                if (subprog[i].dtree_avail)
                        kfree(subprog[i].dtree);
        }
diff --git a/kernel/bpf/cfg.h b/kernel/bpf/cfg.h
index 57eab9b..577c22c 100644
--- a/kernel/bpf/cfg.h
+++ b/kernel/bpf/cfg.h
@@ -9,9 +9,13 @@
 #define __BPF_CFG_H__
 
 int add_subprog(struct bpf_verifier_env *env, int off);
+int cgraph_check_recursive_unreachable(struct bpf_verifier_env *env,
+                                      struct bpf_subprog_info *subprog);
 int find_subprog(struct bpf_verifier_env *env, int off);
 int subprog_add_bb_edges(struct bpf_insn *insns, struct list_head *bb_list);
 int subprog_append_bb(struct list_head *bb_list, int head);
+int subprog_append_callee(struct bpf_verifier_env *env,
+                         struct list_head *callees_list, int caller_idx, int off);
 int subprog_build_dom_info(struct bpf_verifier_env *env,
                           struct bpf_subprog_info *subprog);
 int subprog_fini_bb(struct list_head *bb_list, int subprog_end);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 72bda84..12f5399 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -739,9 +739,9 @@ static int check_subprogs(struct bpf_verifier_env *env)
 {
        int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
        struct bpf_subprog_info *subprog = env->subprog_info;
+       struct list_head *cur_bb_list, *cur_callee_list;
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
-       struct list_head *cur_bb_list;
 
        /* Add entry function. */
        ret = add_subprog(env, 0);
@@ -772,16 +772,23 @@ static int check_subprogs(struct bpf_verifier_env *env)
         */
        subprog[env->subprog_cnt].start = insn_cnt;
 
-       if (env->log.level > 1)
-               for (i = 0; i < env->subprog_cnt; i++)
+       for (i = 0; i < env->subprog_cnt; i++) {
+               if (env->log.level > 1)
                        verbose(env, "func#%d @%d\n", i, subprog[i].start);
 
+               /* Don't init callees inside add_subprog: add_subprog sorts
+                * the subprog array, which would break the list links.
+                */
+               INIT_LIST_HEAD(&subprog[i].callees);
+       }
+
        subprog_start = subprog[cur_subprog].start;
        subprog_end = subprog[cur_subprog + 1].start;
        cur_bb_list = &subprog[cur_subprog].bbs;
        ret = subprog_init_bb(cur_bb_list, subprog_start);
        if (ret < 0)
                goto free_nodes;
+       cur_callee_list = &env->subprog_info[cur_subprog].callees;
        /* now check that all jumps are within the same subprog */
        for (i = 0; i < insn_cnt; i++) {
                u8 code = insn[i].code;
@@ -798,8 +805,20 @@ static int check_subprogs(struct bpf_verifier_env *env)
                        goto next;
                }
 
-               if (BPF_OP(code) == BPF_CALL)
+               if (BPF_OP(code) == BPF_CALL) {
+                       if (insn[i].src_reg == BPF_PSEUDO_CALL) {
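+                               /* For pseudo calls, imm is the insn-relative
+                                * offset of the callee entry point.
+                                */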
+                               int target = i + insn[i].imm + 1;
+
+                               ret = subprog_append_callee(env,
+                                                           cur_callee_list,
+                                                           cur_subprog,
+                                                           target);
+                               if (ret < 0)
+                                       goto free_nodes;
+                       }
+
                        goto next;
+               }
 
                off = i + insn[i].off + 1;
                if (off < subprog_start || off >= subprog_end) {
@@ -855,10 +874,15 @@ static int check_subprogs(struct bpf_verifier_env *env)
                                                      subprog_start);
                                if (ret < 0)
                                        goto free_nodes;
+                               cur_callee_list = &subprog[cur_subprog].callees;
                        }
                }
        }
 
+       ret = cgraph_check_recursive_unreachable(env, env->subprog_info);
+       if (ret < 0)
+               goto free_nodes;
+
        ret = 0;
 
 free_nodes:
-- 
2.7.4
