The very first check in test_pkt_md_access fails on s390, because
loading a part of a struct __sk_buff field produces an incorrect
result.

The preprocessed code of the check is:

{
        __u8 tmp = *((volatile __u8 *)&skb->len +
                ((sizeof(skb->len) - sizeof(__u8)) / sizeof(__u8)));
        if (tmp != ((*(volatile __u32 *)&skb->len) & 0xFF)) return 2;
};

The index expression evaluates to 3, so the check reads the last byte
of the 4-byte len field. clang generates the following code for it:

      0:        71 21 00 03 00 00 00 00 r2 = *(u8 *)(r1 + 3)
      1:        61 31 00 00 00 00 00 00 r3 = *(u32 *)(r1 + 0)
      2:        57 30 00 00 00 00 00 ff r3 &= 255
      3:        5d 23 00 1d 00 00 00 00 if r2 != r3 goto +29 <LBB0_10>

Finally, the verifier transforms it to:

  0: (61) r2 = *(u32 *)(r1 +104)
  1: (bc) w2 = w2
  2: (74) w2 >>= 24
  3: (bc) w2 = w2
  4: (54) w2 &= 255
  5: (bc) w2 = w2

The problem is that when the verifier emits the code to replace a
partial load of a struct __sk_buff field (*(u8 *)(r1 + 3)) with a full
load of the struct sk_buff field (*(u32 *)(r1 + 104)), an optional
shift and a bitwise AND, it assumes that the machine is little endian
and incorrectly decides to use a shift. On big endian the byte at
offset 3 is the least significant byte of the 4-byte field, so no
shift is needed; the emitted w2 >>= 24 discards the very byte the
check is supposed to read.
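
For illustration only (not part of the patch), a minimal user-space
sketch of the same shift calculation. It uses the compiler-provided
__BYTE_ORDER__ macros instead of the kernel's __LITTLE_ENDIAN from
asm/byteorder.h, and checks that a full load plus an endianness-aware
shift reproduces a narrow load at byte offset 3:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same formula as the helper added below, adapted to user space. */
static uint8_t shift_for_narrow_load(uint32_t size_default, uint32_t size,
				     uint32_t off)
{
	uint32_t load_off = off & (size_default - 1);

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return load_off * 8;
#else
	return (size_default - (load_off + size)) * 8;
#endif
}

int main(void)
{
	uint32_t len = 0x11223344;
	uint8_t byte3;

	/* Narrow load: byte at offset 3 of the 4-byte field. */
	memcpy(&byte3, (uint8_t *)&len + 3, sizeof(byte3));

	/* Full load + endianness-aware shift must yield the same byte:
	 * shift is 24 on little endian, 0 on big endian.
	 */
	uint8_t via_shift = (len >> shift_for_narrow_load(4, 1, 3)) & 0xff;

	printf("narrow=%#x shifted=%#x\n", byte3, via_shift);
	return byte3 == via_shift ? 0 : 1;
}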

Adjust shift count calculation to account for endianness.

Fixes: 31fd85816dbe ("bpf: permits narrower load from bpf program context fields")
Signed-off-by: Ilya Leoshkevich <i...@linux.ibm.com>
---
 include/linux/filter.h | 13 +++++++++++++
 kernel/bpf/verifier.c  |  4 ++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index ff65d22cf336..4fe88e43f0fe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -24,6 +24,8 @@
 
 #include <net/sch_generic.h>
 
+#include <asm/byteorder.h>
+
 #include <uapi/linux/filter.h>
 #include <uapi/linux/bpf.h>
 
@@ -1216,4 +1218,15 @@ struct bpf_sockopt_kern {
        s32             retval;
 };
 
+static inline u8 bpf_narrower_load_shift(u32 size_default, u32 size, u32 off)
+{
+       u8 load_off = off & (size_default - 1);
+
+#ifdef __LITTLE_ENDIAN
+       return load_off * 8;
+#else
+       return (size_default - (load_off + size)) * 8;
+#endif
+}
+
 #endif /* __LINUX_FILTER_H__ */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5900cbb966b1..48edc9c9a879 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8616,8 +8616,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                }
 
                if (is_narrower_load && size < target_size) {
-                       u8 shift = (off & (size_default - 1)) * 8;
-
+                       u8 shift = bpf_narrower_load_shift(size_default, size,
+                                                          off);
                        if (ctx_field_size <= 4) {
                                if (shift)
                                        insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
-- 
2.21.0
