Does this patch (which is only compile tested) reduce the number of pending
work items when hitting "BUG: MAX_LOCKDEP_KEYS too low!"?
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 07cb5d15e743..c6c6902090f0 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -41,6 +41,7 @@ struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
+extern struct workqueue_struct *bpf_free_wq;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1f8453343bf2..8b1cf6aab089 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -994,7 +994,7 @@ static void prog_array_map_clear(struct bpf_map *map)
struct bpf_array_aux *aux = container_of(map, struct bpf_array,
map)->aux;
bpf_map_inc(map);
- schedule_work(&aux->work);
+ queue_work(bpf_free_wq, &aux->work);
}
static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 96555a8a2c54..f272844163df 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -160,7 +160,7 @@ static void cgroup_bpf_release_fn(struct percpu_ref *ref)
struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
- queue_work(system_wq, &cgrp->bpf.release_work);
+ queue_work(bpf_free_wq, &cgrp->bpf.release_work);
}
/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 261f8692d0d2..9d76c0d77687 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -34,6 +34,15 @@
#include <linux/log2.h>
#include <asm/unaligned.h>
+struct workqueue_struct *bpf_free_wq;
+
+static int __init bpf_free_wq_init(void)
+{
+ bpf_free_wq = alloc_workqueue("bpf_free_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+ return 0;
+}
+subsys_initcall(bpf_free_wq_init);
+
/* Registers */
#define BPF_R0 regs[BPF_REG_0]
#define BPF_R1 regs[BPF_REG_1]
@@ -2152,7 +2161,7 @@ void bpf_prog_free(struct bpf_prog *fp)
if (aux->dst_prog)
bpf_prog_put(aux->dst_prog);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
- schedule_work(&aux->work);
+ queue_work(bpf_free_wq, &aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 747313698178..6507cc8263fc 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -515,7 +515,7 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
if (old_rcpu) {
call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
- schedule_work(&old_rcpu->kthread_stop_wq);
+ queue_work(bpf_free_wq, &old_rcpu->kthread_stop_wq);
}
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e5999d86c76e..084b903b4ee6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -477,7 +477,7 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
bpf_map_free_id(map, do_idr_lock);
btf_put(map->btf);
INIT_WORK(&map->work, bpf_map_free_deferred);
- schedule_work(&map->work);
+ queue_work(bpf_free_wq, &map->work);
}
}
@@ -2343,7 +2343,7 @@ void bpf_link_put(struct bpf_link *link)
if (in_atomic()) {
INIT_WORK(&link->work, bpf_link_put_deferred);
- schedule_work(&link->work);
+ queue_work(bpf_free_wq, &link->work);
} else {
bpf_link_free(link);
}
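
For anyone reading along, the deferred-free pattern the patch switches to
looks roughly like the sketch below. The "foo"/"example" names are made up
for illustration; only the workqueue calls (alloc_workqueue, INIT_WORK,
queue_work) mirror what the patch itself uses.

/* Illustrative sketch only -- "foo" is a made-up object, not kernel code. */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_free_wq;

struct foo {
	struct work_struct work;
	/* ... payload ... */
};

static void foo_free_deferred(struct work_struct *work)
{
	kfree(container_of(work, struct foo, work));
}

static void foo_put(struct foo *f)
{
	INIT_WORK(&f->work, foo_free_deferred);
	/* queued on the dedicated workqueue instead of system_wq */
	queue_work(example_free_wq, &f->work);
}

static int __init example_free_wq_init(void)
{
	example_free_wq = alloc_workqueue("example_free_wq",
					  WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	return example_free_wq ? 0 : -ENOMEM;
}
subsys_initcall(example_free_wq_init);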