GIT Browse > SLE12-SP4-AZURE
author    Gary Lin <glin@suse.com>  2019-02-20 12:20:01 +0800
committer Gary Lin <glin@suse.com>  2019-02-20 12:20:03 +0800
commit    800750b0f2b2ca3676200cb09ef4546e0e0001d6
tree      23ebbf834ea39a43a8e6c573860b4432e47749c7
parent    af79ae70d277ba984abef395aedcd0cc1486475d
bpf: move tmp variable into ax register in interpreter (bsc#1124055 CVE-2019-7308).
-rw-r--r--  patches.fixes/bpf-move-tmp-variable-into-ax-register-in-interprete.patch | 144
-rw-r--r--  series.conf                                                              |   1
2 files changed, 145 insertions(+), 0 deletions(-)
diff --git a/patches.fixes/bpf-move-tmp-variable-into-ax-register-in-interprete.patch b/patches.fixes/bpf-move-tmp-variable-into-ax-register-in-interprete.patch
new file mode 100644
index 0000000000..51961e7db9
--- /dev/null
+++ b/patches.fixes/bpf-move-tmp-variable-into-ax-register-in-interprete.patch
@@ -0,0 +1,144 @@
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 3 Jan 2019 00:58:28 +0100
+Subject: bpf: move tmp variable into ax register in interpreter
+Patch-mainline: v5.0-rc1
+Git-commit: 144cd91c4c2bced6eb8a7e25e590f6618a11e854
+References: bsc#1124055 CVE-2019-7308
+
+This change moves the on-stack 64 bit tmp variable in ___bpf_prog_run()
+into the hidden ax register. The latter is currently only used in JITs
+for constant blinding as a temporary scratch register, meaning the BPF
+interpreter will never see the use of ax. Therefore it is safe to use
+it for the cases where tmp has been used earlier. This is needed to later
+on allow restricted hidden use of ax in both interpreter and JITs.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ include/linux/filter.h | 3 ++-
+ kernel/bpf/core.c | 38 +++++++++++++++++++-------------------
+ 2 files changed, 21 insertions(+), 20 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -55,7 +55,8 @@ struct bpf_prog_aux;
+ * constants. See JIT pre-step in bpf_jit_blind_constants().
+ */
+ #define BPF_REG_AX MAX_BPF_REG
+-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
++#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
++#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
+
+ /* unused opcode to mark special call to bpf_tail_call() helper */
+ #define BPF_TAIL_CALL 0xf0
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -51,6 +51,7 @@
+ #define DST regs[insn->dst_reg]
+ #define SRC regs[insn->src_reg]
+ #define FP regs[BPF_REG_FP]
++#define AX regs[BPF_REG_AX]
+ #define ARG1 regs[BPF_REG_ARG1]
+ #define CTX regs[BPF_REG_CTX]
+ #define IMM insn->imm
+@@ -771,7 +772,6 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
+ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
+ u64 *stack)
+ {
+- u64 tmp;
+ static const void *jumptable[256] = {
+ [0 ... 255] = &&default_label,
+ /* Now overwrite non-defaults ... */
+@@ -945,22 +945,22 @@ select_insn:
+ ALU64_MOD_X:
+ if (unlikely(SRC == 0))
+ return 0;
+- div64_u64_rem(DST, SRC, &tmp);
+- DST = tmp;
++ div64_u64_rem(DST, SRC, &AX);
++ DST = AX;
+ CONT;
+ ALU_MOD_X:
+ if (unlikely((u32)SRC == 0))
+ return 0;
+- tmp = (u32) DST;
+- DST = do_div(tmp, (u32) SRC);
++ AX = (u32) DST;
++ DST = do_div(AX, (u32) SRC);
+ CONT;
+ ALU64_MOD_K:
+- div64_u64_rem(DST, IMM, &tmp);
+- DST = tmp;
++ div64_u64_rem(DST, IMM, &AX);
++ DST = AX;
+ CONT;
+ ALU_MOD_K:
+- tmp = (u32) DST;
+- DST = do_div(tmp, (u32) IMM);
++ AX = (u32) DST;
++ DST = do_div(AX, (u32) IMM);
+ CONT;
+ ALU64_DIV_X:
+ if (unlikely(SRC == 0))
+@@ -970,17 +970,17 @@ select_insn:
+ ALU_DIV_X:
+ if (unlikely((u32)SRC == 0))
+ return 0;
+- tmp = (u32) DST;
+- do_div(tmp, (u32) SRC);
+- DST = (u32) tmp;
++ AX = (u32) DST;
++ do_div(AX, (u32) SRC);
++ DST = (u32) AX;
+ CONT;
+ ALU64_DIV_K:
+ DST = div64_u64(DST, IMM);
+ CONT;
+ ALU_DIV_K:
+- tmp = (u32) DST;
+- do_div(tmp, (u32) IMM);
+- DST = (u32) tmp;
++ AX = (u32) DST;
++ do_div(AX, (u32) IMM);
++ DST = (u32) AX;
+ CONT;
+ ALU_END_TO_BE:
+ switch (IMM) {
+@@ -1235,7 +1235,7 @@ load_word:
+ * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
+ */
+
+- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
++ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &AX);
+ if (likely(ptr != NULL)) {
+ BPF_R0 = get_unaligned_be32(ptr);
+ CONT;
+@@ -1245,7 +1245,7 @@ load_word:
+ LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
+ off = IMM;
+ load_half:
+- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
++ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &AX);
+ if (likely(ptr != NULL)) {
+ BPF_R0 = get_unaligned_be16(ptr);
+ CONT;
+@@ -1255,7 +1255,7 @@ load_half:
+ LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
+ off = IMM;
+ load_byte:
+- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
++ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &AX);
+ if (likely(ptr != NULL)) {
+ BPF_R0 = *(u8 *)ptr;
+ CONT;
+@@ -1284,7 +1284,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run
+ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
+ { \
+ u64 stack[stack_size / sizeof(u64)]; \
+- u64 regs[MAX_BPF_REG]; \
++ u64 regs[MAX_BPF_EXT_REG]; \
+ \
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ ARG1 = (u64) (unsigned long) ctx; \
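
The hunks above swap every use of the on-stack tmp for the hidden AX slot and
widen the interpreter's register array from MAX_BPF_REG to MAX_BPF_EXT_REG.
The following standalone C sketch illustrates the resulting pattern on the
ALU_MOD_K case; the register constants mirror the patch, but the harness
itself is illustrative and not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BPF_REG      11                 /* r0..r10                  */
    #define BPF_REG_AX       MAX_BPF_REG        /* hidden scratch register  */
    #define MAX_BPF_EXT_REG  (MAX_BPF_REG + 1)  /* register file incl. AX   */

    /* ALU_MOD_K pattern after the patch: the 32-bit dividend is staged in
     * the AX slot of the register file instead of a local "u64 tmp", so the
     * interpreter's scratch value lives where the verifier and JITs can
     * later reason about (and restrict) its use. */
    static uint64_t alu_mod_k(uint64_t *regs, int dst, uint32_t imm)
    {
        regs[BPF_REG_AX] = (uint32_t)regs[dst];
        regs[dst] = (uint32_t)(regs[BPF_REG_AX] % imm);
        return regs[dst];
    }

    int main(void)
    {
        uint64_t regs[MAX_BPF_EXT_REG] = { 0 };

        regs[2] = 17;   /* r2 = 17 */
        printf("r2 %%= 5 -> %llu\n",
               (unsigned long long)alu_mod_k(regs, 2, 5));
        return 0;
    }
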
diff --git a/series.conf b/series.conf
index 9501388702..fe57c7a291 100644
--- a/series.conf
+++ b/series.conf
@@ -20189,6 +20189,7 @@
patches.suse/net-hamradio-6pack-use-mod_timer-to-rearm-timers.patch
patches.drivers/isdn-fix-kernel-infoleak-in-capi_unlocked_ioctl.patch
patches.fixes/bpf-move-prev_-insn_idx-into-verifier-env.patch
+ patches.fixes/bpf-move-tmp-variable-into-ax-register-in-interprete.patch
patches.fixes/bpf-restrict-map-value-pointer-arithmetic-for-unpriv.patch
patches.fixes/bpf-restrict-stack-pointer-arithmetic-for-unprivileg.patch
patches.fixes/bpf-restrict-unknown-scalars-of-mixed-signed-bounds-.patch
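
For context, the upstream description notes that AX was previously used only
by JITs for constant blinding (the bpf_jit_blind_constants() pre-step cited in
the filter.h hunk), which is why the interpreter could safely claim it: AX is
only ever live inside a short rewritten instruction window. A hedged sketch of
that rewrite follows; the struct, the OP_* opcodes, and blind_imm() are
illustrative stand-ins, not the kernel's real types or helpers.

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative instruction encoding; not the kernel's struct bpf_insn. */
    struct insn {
        int     op;     /* opcode               */
        int     dst;    /* destination register */
        int     src;    /* source register      */
        int32_t imm;    /* immediate operand    */
    };

    enum { OP_MOV_IMM = 1, OP_XOR_IMM = 2, OP_ADD_REG = 3 };

    #define BPF_REG_AX 11   /* hidden scratch register, as above */

    /* Expand "dst = dst OP imm" into a blinded three-instruction sequence
     * so an attacker-chosen constant never appears verbatim in executable
     * memory. AX is live only within this window, never across
     * interpreter-visible instructions. */
    static int blind_imm(const struct insn *orig, struct insn out[3])
    {
        int32_t rnd = rand();                   /* per-rewrite random key */

        out[0] = (struct insn){ OP_MOV_IMM, BPF_REG_AX, 0, orig->imm ^ rnd };
        out[1] = (struct insn){ OP_XOR_IMM, BPF_REG_AX, 0, rnd };
        out[2] = (struct insn){ orig->op, orig->dst, BPF_REG_AX, 0 };
        return 3;                               /* expanded length */
    }

    int main(void)
    {
        struct insn orig = { .op = OP_ADD_REG, .dst = 2,
                             .src = 0, .imm = 0x12345678 };
        struct insn out[3];

        return blind_imm(&orig, out) == 3 ? 0 : 1;
    }

With tmp moved into AX, the interpreter's scratch state now sits in the same
register slot, which the commit message flags as groundwork for the restricted
hidden use of AX that the subsequent bpf-restrict-* patches queued in
series.conf above build on.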