On Sat, Jul 6, 2019 at 1:47 AM Toke Høiland-Jørgensen <t...@redhat.com> wrote:
>
> From: Toke Høiland-Jørgensen <t...@redhat.com>
>
> The subsequent patch to add a new devmap sub-type can re-use much of the
> initialisation and allocation code, so refactor it into separate functions.
>
> Signed-off-by: Toke Høiland-Jørgensen <t...@redhat.com>

Acked-by: Yonghong Song <y...@fb.com>

> ---
>  kernel/bpf/devmap.c | 137 +++++++++++++++++++++++++++++++--------------------
>  1 file changed, 84 insertions(+), 53 deletions(-)
>
> diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
> index d83cf8ccc872..a2fe16362129 100644
> --- a/kernel/bpf/devmap.c
> +++ b/kernel/bpf/devmap.c
> @@ -60,7 +60,7 @@ struct xdp_bulk_queue {
>  struct bpf_dtab_netdev {
>          struct net_device *dev; /* must be first member, due to tracepoint */
>          struct bpf_dtab *dtab;
> -        unsigned int bit;
> +        unsigned int idx; /* keep track of map index for tracepoint */
>          struct xdp_bulk_queue __percpu *bulkq;
>          struct rcu_head rcu;
>  };
> @@ -75,28 +75,22 @@ struct bpf_dtab {
>  static DEFINE_SPINLOCK(dev_map_lock);
>  static LIST_HEAD(dev_map_list);
>
> -static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
> +static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr,
> +                            bool check_memlock)
>  {
> -        struct bpf_dtab *dtab;
>          int err, cpu;
>          u64 cost;
>
> -        if (!capable(CAP_NET_ADMIN))
> -                return ERR_PTR(-EPERM);
> -
>          /* check sanity of attributes */
>          if (attr->max_entries == 0 || attr->key_size != 4 ||
>              attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
> -                return ERR_PTR(-EINVAL);
> +                return -EINVAL;
>
>          /* Lookup returns a pointer straight to dev->ifindex, so make sure the
>           * verifier prevents writes from the BPF side
>           */
>          attr->map_flags |= BPF_F_RDONLY_PROG;
>
> -        dtab = kzalloc(sizeof(*dtab), GFP_USER);
> -        if (!dtab)
> -                return ERR_PTR(-ENOMEM);
>
>          bpf_map_init_from_attr(&dtab->map, attr);
>
> @@ -107,9 +101,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
>          /* if map size is larger than memlock limit, reject it */
>          err = bpf_map_charge_init(&dtab->map.memory, cost);
>          if (err)
> -                goto free_dtab;
> -
> -        err = -ENOMEM;
> +                return -EINVAL;
>
>          dtab->flush_list = alloc_percpu(struct list_head);
>          if (!dtab->flush_list)
>                  goto free_charge;
> @@ -124,19 +116,38 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
>          if (!dtab->netdev_map)
>                  goto free_percpu;
>
> -        spin_lock(&dev_map_lock);
> -        list_add_tail_rcu(&dtab->list, &dev_map_list);
> -        spin_unlock(&dev_map_lock);
> -
> -        return &dtab->map;
> +        return 0;
>
>  free_percpu:
>          free_percpu(dtab->flush_list);
>  free_charge:
>          bpf_map_charge_finish(&dtab->map.memory);
> -free_dtab:
> -        kfree(dtab);
> -        return ERR_PTR(err);
> +        return -ENOMEM;
> +}
> +
[...]
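
For anyone skimming the diff: the updated dev_map_alloc() itself is in the trimmed part of
the patch, but judging from the lines removed above it presumably boils down to allocating
the dtab, delegating to the new dev_map_init_map() helper, and keeping the dev_map_list
bookkeeping outside the shared helper. A rough sketch, not the literal patch text (the
check_memlock value passed here is assumed):

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
        struct bpf_dtab *dtab;
        int err;

        /* privilege check stays in the allocation path */
        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        dtab = kzalloc(sizeof(*dtab), GFP_USER);
        if (!dtab)
                return ERR_PTR(-ENOMEM);

        /* shared attribute checks and per-cpu/netdev_map allocation */
        err = dev_map_init_map(dtab, attr, true /* check_memlock, assumed */);
        if (err) {
                kfree(dtab);
                return ERR_PTR(err);
        }

        /* registration on dev_map_list is kept out of the shared helper */
        spin_lock(&dev_map_lock);
        list_add_tail_rcu(&dtab->list, &dev_map_list);
        spin_unlock(&dev_map_lock);

        return &dtab->map;
}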