On Tue, Jan 15, 2019 at 09:08:22PM -0800, Alexei Starovoitov wrote:
[ ... ]

> +/* copy everything but bpf_spin_lock */
> +static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
> +{
> +     if (unlikely(map_value_has_spin_lock(map))) {
> +             u32 off = map->spin_lock_off;
> +
> +             memcpy(dst, src, off);
> +             memcpy(dst + off + sizeof(struct bpf_spin_lock),
> +                    src + off + sizeof(struct bpf_spin_lock),
> +                    map->value_size - off - sizeof(struct bpf_spin_lock));
> +     } else {
> +             memcpy(dst, src, map->value_size);
> +     }
> +}
> +
[ ... ]

> +int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
> +{
> +     const struct btf_member *member;
> +     u32 i, off = -ENOENT;
> +
> +     if (BTF_INFO_KIND(t->info) != BTF_KIND_STRUCT)
> +             return -EINVAL;
> +
> +     for_each_member(i, t, member) {
> +             const struct btf_type *member_type = btf_type_by_id(btf,
> +                                                                 member->type);
> +             if (!btf_type_is_struct(member_type))
Maybe use "BTF_INFO_KIND(t->info) != BTF_KIND_STRUCT" here also.

> +                     continue;
> +             if (member_type->size != sizeof(struct bpf_spin_lock))
> +                     continue;
> +             if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
> +                        "bpf_spin_lock"))
> +                     continue;
> +             if (off != -ENOENT)
> +                     /* only one 'struct bpf_spin_lock' is allowed */
> +                     return -E2BIG;
> +             off = btf_member_bit_offset(t, member);
> +             if (off % 8)
> +                     /* valid C code cannot generate such BTF */
> +                     return -EINVAL;
> +             off /= 8;
> +             if (off % __alignof__(struct bpf_spin_lock))
> +                     /* valid struct bpf_spin_lock will be 4 byte aligned */
> +                     return -EINVAL;
> +     }
> +     return off;
> +}
> +
[ ... ]


> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index b155cd17c1bd..ebf0a673cb83 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
[ ... ]

>       err = security_bpf_map_alloc(map);
> @@ -740,7 +757,7 @@ static int map_lookup_elem(union bpf_attr *attr)
>                       err = -ENOENT;
>               } else {
>                       err = 0;
> -                     memcpy(value, ptr, value_size);
> +                     copy_map_value(map, value, ptr);
copy_map_value() skips the bpf_spin_lock portion, and "value" has not been zeroed.
"value" is then copied to the "__user *uvalue".
Maybe initialize the bpf_spin_lock part of "uvalue" to 0?

btw, somehow patch 6 and 7 are missing:
https://patchwork.ozlabs.org/cover/1025640/

Reply via email to