author     Kernel Build Daemon <kbuild@suse.de>  2019-12-27 07:35:30 +0100
committer  Kernel Build Daemon <kbuild@suse.de>  2019-12-27 07:35:30 +0100
commit     c7651c5834d33e49a5fb7088422c023be66b8bb5 (patch)
tree       570645a7481303c7ccbf7fdf1e9325ca3b5faa46
parent     a55955e7879efb78dcea7686a0b968c4592d7b25 (diff)
parent     cdfb8b3db93398248aff37df1fc44977732afc31 (diff)

Merge branch 'SLE15-SP2' into SLE15-SP2-AZURE
-rw-r--r--  patches.suse/0001-futex-Move-futex-exit-handling-into-futex-code.patch  227
-rw-r--r--  patches.suse/0002-futex-Replace-PF_EXITPIDONE-with-a-state.patch  203
-rw-r--r--  patches.suse/0003-exit-exec-Seperate-mm_release.patch  108
-rw-r--r--  patches.suse/0004-futex-Split-futex_mm_release-for-exit-exec.patch  105
-rw-r--r--  patches.suse/0005-futex-Set-task-futex_state-to-DEAD-right-after-handl.patch  53
-rw-r--r--  patches.suse/0006-futex-Mark-the-begin-of-futex-exit-explicitly.patch  168
-rw-r--r--  patches.suse/0007-futex-Sanitize-exit-state-handling.patch  55
-rw-r--r--  patches.suse/0008-futex-Provide-state-handling-for-exec-as-well.patch  102
-rw-r--r--  patches.suse/0009-futex-Add-mutex-around-futex-exit.patch  90
-rw-r--r--  patches.suse/0010-futex-Provide-distinct-return-value-when-owner-is-ex.patch  86
-rw-r--r--  patches.suse/0011-futex-Prevent-exit-livelock.patch  347
-rw-r--r--  patches.suse/bpf-Add-cb-access-in-kfree_skb-test.patch  183
-rw-r--r--  patches.suse/bpf-Fix-build-in-minimal-configurations-again.patch  37
-rw-r--r--  patches.suse/bpf-Fix-cgroup-local-storage-prog-tracking.patch  141
-rw-r--r--  patches.suse/bpf-Fix-missing-prog-untrack-in-release_maps.patch  96
-rw-r--r--  patches.suse/bpf-Fix-record_func_key-to-perform-backtracking-on-r.patch  184
-rw-r--r--  patches.suse/bpf-Introduce-BPF_TRACE_x-helper-for-the-tracing-tes.patch  541
-rw-r--r--  patches.suse/bpf-clarify-when-bpf_trace_printk-discards-lines.patch  34
-rw-r--r--  patches.suse/bpf-fix-struct-pt_reg-typo-in-documentation.patch  51
-rw-r--r--  patches.suse/bpf-libbpf-Add-kernel-version-section-parsing-back.patch  67
-rw-r--r--  patches.suse/bpf-sync-bpf.h-to-tools-1f8919b1.patch  60
-rw-r--r--  patches.suse/bpftool-Fix-bpftool-build-by-switching-to-bpf_object.patch  131
-rw-r--r--  patches.suse/ext4-check-for-directory-entries-too-close-to-block-.patch  43
-rw-r--r--  patches.suse/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch  100
-rw-r--r--  patches.suse/libbpf-Add-BPF-side-definitions-of-supported-field-r.patch  62
-rw-r--r--  patches.suse/libbpf-Add-BPF_CORE_READ-BPF_CORE_READ_INTO-helpers.patch  362
-rw-r--r--  patches.suse/libbpf-Add-auto-pinning-of-maps-when-loading-BPF-obj.patch  329
-rw-r--r--  patches.suse/libbpf-Add-bpf_program__get_-type-expected_attach_ty.patch  73
-rw-r--r--  patches.suse/libbpf-Add-cscope-and-tags-targets-to-Makefile.patch  69
-rw-r--r--  patches.suse/libbpf-Add-getter-for-program-size.patch  61
-rw-r--r--  patches.suse/libbpf-Add-support-for-attaching-BPF-programs-to-oth.patch  224
-rw-r--r--  patches.suse/libbpf-Add-support-for-field-existance-CO-RE-relocat.patch  176
-rw-r--r--  patches.suse/libbpf-Add-support-for-field-size-relocations.patch  116
-rw-r--r--  patches.suse/libbpf-Add-support-for-prog_tracing.patch  270
-rw-r--r--  patches.suse/libbpf-Add-support-for-relocatable-bitfields.patch  494
-rw-r--r--  patches.suse/libbpf-Add-support-to-attach-to-fentry-fexit-tracing.patch  230
-rw-r--r--  patches.suse/libbpf-Add-uprobe-uretprobe-and-tp-raw_tp-section-su.patch  36
-rw-r--r--  patches.suse/libbpf-Bump-current-version-to-v0.0.6.patch  27
-rw-r--r--  patches.suse/libbpf-Fix-BTF-defined-map-s-__type-macro-handling-o.patch  50
-rw-r--r--  patches.suse/libbpf-Fix-bpf_object-name-determination-for-bpf_obj.patch  35
-rw-r--r--  patches.suse/libbpf-Fix-error-handling-in-bpf_map__reuse_fd.patch  61
-rw-r--r--  patches.suse/libbpf-Fix-global-variable-relocation.patch  283
-rw-r--r--  patches.suse/libbpf-Fix-up-generation-of-bpf_helper_defs.h.patch  71
-rw-r--r--  patches.suse/libbpf-Fix-usage-of-u32-in-userspace-code.patch  30
-rw-r--r--  patches.suse/libbpf-Fix-various-errors-and-warning-reported-by-ch.patch  150
-rw-r--r--  patches.suse/libbpf-Generate-more-efficient-BPF_CORE_READ-code.patch  61
-rw-r--r--  patches.suse/libbpf-Introduce-btf__find_by_name_kind.patch  72
-rw-r--r--  patches.suse/libbpf-Make-DECLARE_LIBBPF_OPTS-macro-strictly-a-var.patch  149
-rw-r--r--  patches.suse/libbpf-Move-bpf_-helpers-helper_defs-endian-tracing-.patch  824
-rw-r--r--  patches.suse/libbpf-Move-directory-creation-into-_pin-functions.patch  141
-rw-r--r--  patches.suse/libbpf-Refactor-bpf_object__open-APIs-to-use-common-.patch  170
-rw-r--r--  patches.suse/libbpf-Refactor-relocation-handling.patch  376
-rw-r--r--  patches.suse/libbpf-Store-map-pin-path-and-status-in-struct-bpf_m.patch  310
-rw-r--r--  patches.suse/libbpf-Support-initialized-global-variables.patch  257
-rw-r--r--  patches.suse/libbpf-Teach-bpf_object__open-to-guess-program-types.patch  146
-rw-r--r--  patches.suse/libbpf-Unpin-auto-pinned-maps-if-loading-fails.patch  68
-rw-r--r--  patches.suse/libbpf-Update-BTF-reloc-support-to-latest-Clang-form.patch  211
-rw-r--r--  patches.suse/libbpf-add-bpf_object__open_-file-mem-w-extensible-o.patch  284
-rw-r--r--  patches.suse/libbpf-auto-generate-list-of-BPF-helper-definitions.patch  346
-rw-r--r--  patches.suse/libbpf-fix-bpf_object__name-to-actually-return-objec.patch  27
-rw-r--r--  patches.suse/libbpf-fix-sym-st_value-print-on-32-bit-arches.patch  24
-rw-r--r--  patches.suse/libbpf-relicense-bpf_helpers.h-and-bpf_endian.h.patch  64
-rw-r--r--  patches.suse/libbpf-stop-enforcing-kern_version-populate-it-for-u.patch  258
-rw-r--r--  patches.suse/net-ibmvnic-Fix-typo-in-retry-check.patch  3
-rw-r--r--  patches.suse/samples-bpf-Fix-broken-xdp_rxq_info-due-to-map-order.patch  64
-rw-r--r--  patches.suse/samples-bpf-convert-xdp_sample_pkts_user-to-perf_buf.patch  154
-rw-r--r--  patches.suse/samples-bpf-switch-trace_output-sample-to-perf_buffe.patch  117
-rw-r--r--  patches.suse/scripts-bpf-Fix-xdp_md-forward-declaration-typo.patch  30
-rw-r--r--  patches.suse/scripts-bpf-teach-bpf_helpers_doc.py-to-dump-BPF-hel.patch  193
-rw-r--r--  patches.suse/selftest-bpf-Add-relocatable-bitfield-reading-tests.patch  404
-rw-r--r--  patches.suse/selftest-bpf-Get-rid-of-a-bunch-of-explicit-BPF-prog.patch  79
-rw-r--r--  patches.suse/selftest-bpf-Simple-test-for-fentry-fexit.patch  164
-rw-r--r--  patches.suse/selftests-Add-tests-for-automatic-map-pinning.patch  293
-rw-r--r--  patches.suse/selftests-bpf-Add-BPF-trampoline-performance-test.patch  242
-rw-r--r--  patches.suse/selftests-bpf-Add-BPF_CORE_READ-and-BPF_CORE_READ_ST.patch  140
-rw-r--r--  patches.suse/selftests-bpf-Add-BPF_TYPE_MAP_ARRAY-mmap-tests.patch  397
-rw-r--r--  patches.suse/selftests-bpf-Add-a-test-for-attaching-BPF-prog-to-a.patch  198
-rw-r--r--  patches.suse/selftests-bpf-Add-combined-fentry-fexit-test.patch  112
-rw-r--r--  patches.suse/selftests-bpf-Add-fexit-tests-for-BPF-trampoline.patch  190
-rw-r--r--  patches.suse/selftests-bpf-Add-field-existence-CO-RE-relocs-tests.patch  341
-rw-r--r--  patches.suse/selftests-bpf-Add-field-size-relocation-tests.patch  194
-rw-r--r--  patches.suse/selftests-bpf-Add-read-only-map-values-propagation-t.patch  212
-rw-r--r--  patches.suse/selftests-bpf-Add-static-to-enable_all_controllers.patch  43
-rw-r--r--  patches.suse/selftests-bpf-Add-stress-test-for-maximum-number-of-.patch  98
-rw-r--r--  patches.suse/selftests-bpf-Add-test-for-BPF-trampoline.patch  203
-rw-r--r--  patches.suse/selftests-bpf-Adjust-CO-RE-reloc-tests-for-new-bpf_c.patch  300
-rw-r--r--  patches.suse/selftests-bpf-Enforce-libbpf-build-before-BPF-progra.patch  32
-rw-r--r--  patches.suse/selftests-bpf-Ensure-core_reloc_kernel-is-reading-te.patch  100
-rw-r--r--  patches.suse/selftests-bpf-Extend-test_pkt_access-test.patch  75
-rw-r--r--  patches.suse/selftests-bpf-Fix-dependency-ordering-for-attach_pro.patch  45
-rw-r--r--  patches.suse/selftests-bpf-Integrate-verbose-verifier-log-into-te.patch  173
-rw-r--r--  patches.suse/selftests-bpf-Make-CO-RE-reloc-test-impartial-to-tes.patch  59
-rw-r--r--  patches.suse/selftests-bpf-Make-a-copy-of-subtest-name.patch  67
-rw-r--r--  patches.suse/selftests-bpf-Make-reference_tracking-test-use-subte.patch  117
-rw-r--r--  patches.suse/selftests-bpf-Move-test_section_names-into-test_prog.patch  476
-rw-r--r--  patches.suse/selftests-bpf-Remove-too-strict-field-offset-relo-te.patch  169
-rw-r--r--  patches.suse/selftests-bpf-Split-off-tracing-only-helpers-into-bp.patch  595
-rw-r--r--  patches.suse/selftests-bpf-Undo-GCC-specific-bpf_helpers.h-change.patch  51
-rw-r--r--  patches.suse/selftests-bpf-add-BPF_CORE_READ-relocatable-read-mac.patch  44
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-array-tests.patch  286
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-enum-ptr-func_proto-t.patch  247
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-ints-tests.patch  299
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-misc-tests.patch  148
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-modifiers-typedef-tes.patch  231
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-nesting-tests.patch  532
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-ptr-as-array-tests.patch  128
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-struct-flavors-tests.patch  170
-rw-r--r--  patches.suse/selftests-bpf-add-CO-RE-relocs-testing-setup.patch  193
-rw-r--r--  patches.suse/selftests-bpf-add-bpf-gcc-support.patch  248
-rw-r--r--  patches.suse/selftests-bpf-bpf_tcp_gen_syncookie-bpf_helpers.patch  29
-rw-r--r--  patches.suse/selftests-bpf-samples-bpf-Split-off-legacy-stuff-fro.patch  445
-rw-r--r--  patches.suse/selftests-bpf-switch-tests-to-new-bpf_object__open_-.patch  163
-rw-r--r--  patches.suse/selftests-bpf-xdping-is-not-meant-to-be-run-standalo.patch  6
-rw-r--r--  patches.suse/selftests-bpftool-Set-EXIT-trap-after-usage-function.patch  74
-rw-r--r--  patches.suse/selftests-bpftool-Skip-the-build-test-if-not-in-tree.patch  35
-rw-r--r--  patches.suse/selftests-bps-Clean-up-removed-ints-relocations-nega.patch  36
-rw-r--r--  patches.suse/tools-bpf-fix-core_reloc.c-compilation-error.patch  45
-rw-r--r--  patches.suse/uapi-bpf-fix-helper-docs.patch  298
-rw-r--r--  series.conf  117
119 files changed, 20094 insertions(+), 19 deletions(-)
diff --git a/patches.suse/0001-futex-Move-futex-exit-handling-into-futex-code.patch b/patches.suse/0001-futex-Move-futex-exit-handling-into-futex-code.patch
new file mode 100644
index 0000000000..4f7a20cfda
--- /dev/null
+++ b/patches.suse/0001-futex-Move-futex-exit-handling-into-futex-code.patch
@@ -0,0 +1,227 @@
+From ba31c1a48538992316cc71ce94fa9cd3e7b427c0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:36 +0100
+Subject: [PATCH 01/11] futex: Move futex exit handling into futex code
+Git-commit: ba31c1a48538992316cc71ce94fa9cd3e7b427c0
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+The futex exit handling is #ifdeffed into mm_release() which is not pretty
+to begin with. But upcoming changes to address futex exit races need to add
+more functionality to this exit code.
+
+Split it out into a function, move it into futex code and make the various
+futex exit functions static.
+
+Preparatory only and no functional change.
+
+Folded build fix from Borislav.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.049705556@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ include/linux/compat.h | 2 --
+ include/linux/futex.h | 29 ++++++++++++++++-------------
+ kernel/fork.c | 25 +++----------------------
+ kernel/futex.c | 33 +++++++++++++++++++++++++++++----
+ 4 files changed, 48 insertions(+), 41 deletions(-)
+
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 16dafd9f4b86..c4c389c7e1b4 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -410,8 +410,6 @@ struct compat_kexec_segment;
+ struct compat_mq_attr;
+ struct compat_msgbuf;
+
+-extern void compat_exit_robust_list(struct task_struct *curr);
+-
+ #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+ #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index ccaef0097785..d6ed11c51a8e 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -2,7 +2,9 @@
+ #ifndef _LINUX_FUTEX_H
+ #define _LINUX_FUTEX_H
+
++#include <linux/sched.h>
+ #include <linux/ktime.h>
++
+ #include <uapi/linux/futex.h>
+
+ struct inode;
+@@ -48,15 +50,24 @@ union futex_key {
+ #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
+ #ifdef CONFIG_FUTEX
+-extern void exit_robust_list(struct task_struct *curr);
+
+-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+- u32 __user *uaddr2, u32 val2, u32 val3);
+-#else
+-static inline void exit_robust_list(struct task_struct *curr)
++static inline void futex_init_task(struct task_struct *tsk)
+ {
++ tsk->robust_list = NULL;
++#ifdef CONFIG_COMPAT
++ tsk->compat_robust_list = NULL;
++#endif
++ INIT_LIST_HEAD(&tsk->pi_state_list);
++ tsk->pi_state_cache = NULL;
+ }
+
++void futex_mm_release(struct task_struct *tsk);
++
++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
++ u32 __user *uaddr2, u32 val2, u32 val3);
++#else
++static inline void futex_init_task(struct task_struct *tsk) { }
++static inline void futex_mm_release(struct task_struct *tsk) { }
+ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
+@@ -65,12 +76,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ }
+ #endif
+
+-#ifdef CONFIG_FUTEX_PI
+-extern void exit_pi_state_list(struct task_struct *curr);
+-#else
+-static inline void exit_pi_state_list(struct task_struct *curr)
+-{
+-}
+-#endif
+-
+ #endif
+diff --git a/kernel/fork.c b/kernel/fork.c
+index bcdf53125210..bd7c218691d4 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1286,20 +1286,7 @@ static int wait_for_vfork_done(struct task_struct *child,
+ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ /* Get rid of any futexes when releasing the mm */
+-#ifdef CONFIG_FUTEX
+- if (unlikely(tsk->robust_list)) {
+- exit_robust_list(tsk);
+- tsk->robust_list = NULL;
+- }
+-#ifdef CONFIG_COMPAT
+- if (unlikely(tsk->compat_robust_list)) {
+- compat_exit_robust_list(tsk);
+- tsk->compat_robust_list = NULL;
+- }
+-#endif
+- if (unlikely(!list_empty(&tsk->pi_state_list)))
+- exit_pi_state_list(tsk);
+-#endif
++ futex_mm_release(tsk);
+
+ uprobe_free_utask(tsk);
+
+@@ -2062,14 +2049,8 @@ static __latent_entropy struct task_struct *copy_process(
+ #ifdef CONFIG_BLOCK
+ p->plug = NULL;
+ #endif
+-#ifdef CONFIG_FUTEX
+- p->robust_list = NULL;
+-#ifdef CONFIG_COMPAT
+- p->compat_robust_list = NULL;
+-#endif
+- INIT_LIST_HEAD(&p->pi_state_list);
+- p->pi_state_cache = NULL;
+-#endif
++ futex_init_task(p);
++
+ /*
+ * sigaltstack should be cleared when sharing the same VM
+ */
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 49eaf5be851a..f8f00d47c821 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -325,6 +325,12 @@ static inline bool should_fail_futex(bool fshared)
+ }
+ #endif /* CONFIG_FAIL_FUTEX */
+
++#ifdef CONFIG_COMPAT
++static void compat_exit_robust_list(struct task_struct *curr);
++#else
++static inline void compat_exit_robust_list(struct task_struct *curr) { }
++#endif
++
+ static inline void futex_get_mm(union futex_key *key)
+ {
+ mmgrab(key->private.mm);
+@@ -890,7 +896,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ * Kernel cleans up PI-state, but userspace is likely hosed.
+ * (Robust-futex cleanup is separate and might save the day for userspace.)
+ */
+-void exit_pi_state_list(struct task_struct *curr)
++static void exit_pi_state_list(struct task_struct *curr)
+ {
+ struct list_head *next, *head = &curr->pi_state_list;
+ struct futex_pi_state *pi_state;
+@@ -960,7 +966,8 @@ void exit_pi_state_list(struct task_struct *curr)
+ }
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+-
++#else
++static inline void exit_pi_state_list(struct task_struct *curr) { }
+ #endif
+
+ /*
+@@ -3588,7 +3595,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+-void exit_robust_list(struct task_struct *curr)
++static void exit_robust_list(struct task_struct *curr)
+ {
+ struct robust_list_head __user *head = curr->robust_list;
+ struct robust_list __user *entry, *next_entry, *pending;
+@@ -3653,6 +3660,24 @@ void exit_robust_list(struct task_struct *curr)
+ }
+ }
+
++void futex_mm_release(struct task_struct *tsk)
++{
++ if (unlikely(tsk->robust_list)) {
++ exit_robust_list(tsk);
++ tsk->robust_list = NULL;
++ }
++
++#ifdef CONFIG_COMPAT
++ if (unlikely(tsk->compat_robust_list)) {
++ compat_exit_robust_list(tsk);
++ tsk->compat_robust_list = NULL;
++ }
++#endif
++
++ if (unlikely(!list_empty(&tsk->pi_state_list)))
++ exit_pi_state_list(tsk);
++}
++
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3)
+ {
+@@ -3780,7 +3805,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+-void compat_exit_robust_list(struct task_struct *curr)
++static void compat_exit_robust_list(struct task_struct *curr)
+ {
+ struct compat_robust_list_head __user *head = curr->compat_robust_list;
+ struct robust_list __user *entry, *next_entry, *pending;
+--
+2.16.4
+
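For background on what exit_robust_list() cleans up: user space registers a robust futex list with the kernel via set_robust_list(2), and on task exit the path consolidated above walks that list and flags still-held futexes OWNER_DIED. A minimal registration sketch in user-space C (illustrative only, not part of this series; glibc normally performs this per thread during pthread setup):

#define _GNU_SOURCE
#include <linux/futex.h>	/* struct robust_list_head */
#include <sys/syscall.h>
#include <unistd.h>

/* Empty circular list; on exit the kernel walks it in exit_robust_list()
 * and marks any still-held entries OWNER_DIED. */
static struct robust_list_head head = {
	.list		 = { .next = &head.list },
	.futex_offset	 = 0,
	.list_op_pending = NULL,
};

int main(void)
{
	/* Register the list for the calling thread. */
	if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0)
		return 1;
	return 0;
}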
diff --git a/patches.suse/0002-futex-Replace-PF_EXITPIDONE-with-a-state.patch b/patches.suse/0002-futex-Replace-PF_EXITPIDONE-with-a-state.patch
new file mode 100644
index 0000000000..bdafbae951
--- /dev/null
+++ b/patches.suse/0002-futex-Replace-PF_EXITPIDONE-with-a-state.patch
@@ -0,0 +1,203 @@
+From 3d4775df0a89240f671861c6ab6e8d59af8e9e41 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:37 +0100
+Subject: [PATCH 02/11] futex: Replace PF_EXITPIDONE with a state
+Git-commit: 3d4775df0a89240f671861c6ab6e8d59af8e9e41
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+The futex exit handling relies on PF_ flags. That's suboptimal as it
+requires a smp_mb() and an ugly lock/unlock of the exiting tasks pi_lock in
+the middle of do_exit() to enforce the observability of PF_EXITING in the
+futex code.
+
+Add a futex_state member to task_struct and convert the PF_EXITPIDONE logic
+over to the new state. The PF_EXITING dependency will be cleaned up in a
+later step.
+
+This prepares for handling various futex exit issues later.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.149449274@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ include/linux/futex.h | 33 +++++++++++++++++++++++++++++++++
+ include/linux/sched.h | 2 +-
+ kernel/exit.c | 18 ++----------------
+ kernel/futex.c | 25 +++++++++++++------------
+ 4 files changed, 49 insertions(+), 29 deletions(-)
+
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index d6ed11c51a8e..025ad96bcf9d 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -50,6 +50,10 @@ union futex_key {
+ #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
+ #ifdef CONFIG_FUTEX
++enum {
++ FUTEX_STATE_OK,
++ FUTEX_STATE_DEAD,
++};
+
+ static inline void futex_init_task(struct task_struct *tsk)
+ {
+@@ -59,6 +63,34 @@ static inline void futex_init_task(struct task_struct *tsk)
+ #endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
++ tsk->futex_state = FUTEX_STATE_OK;
++}
++
++/**
++ * futex_exit_done - Sets the tasks futex state to FUTEX_STATE_DEAD
++ * @tsk: task to set the state on
++ *
++ * Set the futex exit state of the task lockless. The futex waiter code
++ * observes that state when a task is exiting and loops until the task has
++ * actually finished the futex cleanup. The worst case for this is that the
++ * waiter runs through the wait loop until the state becomes visible.
++ *
++ * This has two callers:
++ *
++ * - futex_mm_release() after the futex exit cleanup has been done
++ *
++ * - do_exit() from the recursive fault handling path.
++ *
++ * In case of a recursive fault this is best effort. Either the futex exit
++ * code has run already or not. If the OWNER_DIED bit has been set on the
++ * futex then the waiter can take it over. If not, the problem is pushed
++ * back to user space. If the futex exit code did not run yet, then an
++ * already queued waiter might block forever, but there is nothing which
++ * can be done about that.
++ */
++static inline void futex_exit_done(struct task_struct *tsk)
++{
++ tsk->futex_state = FUTEX_STATE_DEAD;
+ }
+
+ void futex_mm_release(struct task_struct *tsk);
+@@ -68,6 +100,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ #else
+ static inline void futex_init_task(struct task_struct *tsk) { }
+ static inline void futex_mm_release(struct task_struct *tsk) { }
++static inline void futex_exit_done(struct task_struct *tsk) { }
+ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 2c2e56bd8913..85dab2f721c9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1053,6 +1053,7 @@ struct task_struct {
+ #endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
++ unsigned int futex_state;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+ struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+@@ -1441,7 +1442,6 @@ extern struct pid *cad_pid;
+ */
+ #define PF_IDLE 0x00000002 /* I am an IDLE thread */
+ #define PF_EXITING 0x00000004 /* Getting shut down */
+-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
+ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
+ #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
+diff --git a/kernel/exit.c b/kernel/exit.c
+index a46a50d67002..d11bdcaac2e1 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -746,16 +746,7 @@ void __noreturn do_exit(long code)
+ */
+ if (unlikely(tsk->flags & PF_EXITING)) {
+ pr_alert("Fixing recursive fault but reboot is needed!\n");
+- /*
+- * We can do this unlocked here. The futex code uses
+- * this flag just to verify whether the pi state
+- * cleanup has been done or not. In the worst case it
+- * loops once more. We pretend that the cleanup was
+- * done as there is no way to return. Either the
+- * OWNER_DIED bit is set by now or we push the blocked
+- * task into the wait for ever nirwana as well.
+- */
+- tsk->flags |= PF_EXITPIDONE;
++ futex_exit_done(tsk);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ }
+@@ -846,12 +837,7 @@ void __noreturn do_exit(long code)
+ * Make sure we are holding no locks:
+ */
+ debug_check_no_locks_held();
+- /*
+- * We can do this unlocked here. The futex code uses this flag
+- * just to verify whether the pi state cleanup has been done
+- * or not. In the worst case it loops once more.
+- */
+- tsk->flags |= PF_EXITPIDONE;
++ futex_exit_done(tsk);
+
+ if (tsk->io_context)
+ exit_io_context(tsk);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f8f00d47c821..41c75277d7d1 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1182,9 +1182,10 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ u32 uval2;
+
+ /*
+- * If PF_EXITPIDONE is not yet set, then try again.
++ * If the futex exit state is not yet FUTEX_STATE_DEAD, wait
++ * for it to finish.
+ */
+- if (tsk && !(tsk->flags & PF_EXITPIDONE))
++ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
+ return -EAGAIN;
+
+ /*
+@@ -1203,8 +1204,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ * *uaddr = 0xC0000000; tsk = get_task(PID);
+ * } if (!tsk->flags & PF_EXITING) {
+ * ... attach();
+- * tsk->flags |= PF_EXITPIDONE; } else {
+- * if (!(tsk->flags & PF_EXITPIDONE))
++ * tsk->futex_state = } else {
++ * FUTEX_STATE_DEAD; if (tsk->futex_state !=
++ * FUTEX_STATE_DEAD)
+ * return -EAGAIN;
+ * return -ESRCH; <--- FAIL
+ * }
+@@ -1260,17 +1262,16 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
+ }
+
+ /*
+- * We need to look at the task state flags to figure out,
+- * whether the task is exiting. To protect against the do_exit
+- * change of the task flags, we do this protected by
+- * p->pi_lock:
++ * We need to look at the task state to figure out, whether the
++ * task is exiting. To protect against the change of the task state
++ * in futex_exit_release(), we do this protected by p->pi_lock:
+ */
+ raw_spin_lock_irq(&p->pi_lock);
+- if (unlikely(p->flags & PF_EXITING)) {
++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
+ /*
+- * The task is on the way out. When PF_EXITPIDONE is
+- * set, we know that the task has finished the
+- * cleanup:
++ * The task is on the way out. When the futex state is
++ * FUTEX_STATE_DEAD, we know that the task has finished
++ * the cleanup:
+ */
+ int ret = handle_exit_race(uaddr, uval, p);
+
+--
+2.16.4
+
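The waiter side that observes this state arrives via FUTEX_LOCK_PI: the futex word holds the owner's TID, and on contention the kernel resolves that TID to a task and, after this patch, consults tsk->futex_state instead of PF_ flags in attach_to_pi_owner()/handle_exit_race(). A sketch of that user-space entry point (illustrative; futex(2) has no glibc wrapper, hence the raw syscall):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static uint32_t lock_word;	/* 0 = unlocked, otherwise the owner's TID */

static int pi_lock(pid_t tid)
{
	uint32_t expected = 0;

	/* Fast path: CAS 0 -> TID entirely in user space. */
	if (__atomic_compare_exchange_n(&lock_word, &expected, (uint32_t)tid,
					0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;
	/* Slow path: the kernel looks up the task named by lock_word and
	 * runs the attach/exit-race handling shown above. */
	return (int)syscall(SYS_futex, &lock_word, FUTEX_LOCK_PI, 0,
			    NULL, NULL, 0);
}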
diff --git a/patches.suse/0003-exit-exec-Seperate-mm_release.patch b/patches.suse/0003-exit-exec-Seperate-mm_release.patch
new file mode 100644
index 0000000000..837bb47c17
--- /dev/null
+++ b/patches.suse/0003-exit-exec-Seperate-mm_release.patch
@@ -0,0 +1,108 @@
+From 4610ba7ad877fafc0a25a30c6c82015304120426 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:38 +0100
+Subject: [PATCH 03/11] exit/exec: Seperate mm_release()
+Git-commit: 4610ba7ad877fafc0a25a30c6c82015304120426
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+mm_release() contains the futex exit handling. mm_release() is called from
+do_exit()->exit_mm() and from exec()->exec_mm().
+
+In the exit_mm() case PF_EXITING and the futex state is updated. In the
+exec_mm() case these states are not touched.
+
+As the futex exit code needs further protections against exit races, this
+needs to be split into two functions.
+
+Preparatory only, no functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.240518241@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ fs/exec.c | 2 +-
+ include/linux/sched/mm.h | 6 ++++--
+ kernel/exit.c | 2 +-
+ kernel/fork.c | 12 +++++++++++-
+ 4 files changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 555e93c7dec8..c27231234764 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1015,7 +1015,7 @@ static int exec_mmap(struct mm_struct *mm)
+ /* Notify parent that we're no longer interested in the old VM */
+ tsk = current;
+ old_mm = current->mm;
+- mm_release(tsk, old_mm);
++ exec_mm_release(tsk, old_mm);
+
+ if (old_mm) {
+ sync_mm_rss(old_mm);
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index e6770012db18..c49257a3b510 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
+ * succeeds.
+ */
+ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
+-/* Remove the current tasks stale references to the old mm_struct */
+-extern void mm_release(struct task_struct *, struct mm_struct *);
++/* Remove the current tasks stale references to the old mm_struct on exit() */
++extern void exit_mm_release(struct task_struct *, struct mm_struct *);
++/* Remove the current tasks stale references to the old mm_struct on exec() */
++extern void exec_mm_release(struct task_struct *, struct mm_struct *);
+
+ #ifdef CONFIG_MEMCG
+ extern void mm_update_next_owner(struct mm_struct *mm);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index d11bdcaac2e1..cd893b530902 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -437,7 +437,7 @@ static void exit_mm(void)
+ struct mm_struct *mm = current->mm;
+ struct core_state *core_state;
+
+- mm_release(current, mm);
++ exit_mm_release(current, mm);
+ if (!mm)
+ return;
+ sync_mm_rss(mm);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index bd7c218691d4..096f9d840bb8 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1283,7 +1283,7 @@ static int wait_for_vfork_done(struct task_struct *child,
+ * restoring the old one. . .
+ * Eric Biederman 10 January 1998
+ */
+-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
++static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ /* Get rid of any futexes when releasing the mm */
+ futex_mm_release(tsk);
+@@ -1320,6 +1320,16 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ complete_vfork_done(tsk);
+ }
+
++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
++{
++ mm_release(tsk, mm);
++}
++
++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
++{
++ mm_release(tsk, mm);
++}
++
+ /**
+ * dup_mm() - duplicates an existing mm structure
+ * @tsk: the task_struct with which the new mm will be associated.
+--
+2.16.4
+
diff --git a/patches.suse/0004-futex-Split-futex_mm_release-for-exit-exec.patch b/patches.suse/0004-futex-Split-futex_mm_release-for-exit-exec.patch
new file mode 100644
index 0000000000..c34cccb021
--- /dev/null
+++ b/patches.suse/0004-futex-Split-futex_mm_release-for-exit-exec.patch
@@ -0,0 +1,105 @@
+From 150d71584b12809144b8145b817e83b81158ae5f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:39 +0100
+Subject: [PATCH 04/11] futex: Split futex_mm_release() for exit/exec
+Git-commit: 150d71584b12809144b8145b817e83b81158ae5f
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+To allow separate handling of the futex exit state in the futex exit code
+for exit and exec, split futex_mm_release() into two functions and invoke
+them from the corresponding exit/exec_mm_release() callsites.
+
+Preparatory only, no functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.332094221@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ include/linux/futex.h | 6 ++++--
+ kernel/fork.c | 5 ++---
+ kernel/futex.c | 7 ++++++-
+ 3 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index 025ad96bcf9d..6414cfaf88e0 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -93,14 +93,16 @@ static inline void futex_exit_done(struct task_struct *tsk)
+ tsk->futex_state = FUTEX_STATE_DEAD;
+ }
+
+-void futex_mm_release(struct task_struct *tsk);
++void futex_exit_release(struct task_struct *tsk);
++void futex_exec_release(struct task_struct *tsk);
+
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+ #else
+ static inline void futex_init_task(struct task_struct *tsk) { }
+-static inline void futex_mm_release(struct task_struct *tsk) { }
+ static inline void futex_exit_done(struct task_struct *tsk) { }
++static inline void futex_exit_release(struct task_struct *tsk) { }
++static inline void futex_exec_release(struct task_struct *tsk) { }
+ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 096f9d840bb8..f1eb4d1f1a3b 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1285,9 +1285,6 @@ static int wait_for_vfork_done(struct task_struct *child,
+ */
+ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
+- /* Get rid of any futexes when releasing the mm */
+- futex_mm_release(tsk);
+-
+ uprobe_free_utask(tsk);
+
+ /* Get rid of any cached register state */
+@@ -1322,11 +1319,13 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+
+ void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
++ futex_exit_release(tsk);
+ mm_release(tsk, mm);
+ }
+
+ void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
++ futex_exec_release(tsk);
+ mm_release(tsk, mm);
+ }
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 41c75277d7d1..909e4d3c3099 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3661,7 +3661,7 @@ static void exit_robust_list(struct task_struct *curr)
+ }
+ }
+
+-void futex_mm_release(struct task_struct *tsk)
++void futex_exec_release(struct task_struct *tsk)
+ {
+ if (unlikely(tsk->robust_list)) {
+ exit_robust_list(tsk);
+@@ -3679,6 +3679,11 @@ void futex_mm_release(struct task_struct *tsk)
+ exit_pi_state_list(tsk);
+ }
+
++void futex_exit_release(struct task_struct *tsk)
++{
++ futex_exec_release(tsk);
++}
++
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3)
+ {
+--
+2.16.4
+
diff --git a/patches.suse/0005-futex-Set-task-futex_state-to-DEAD-right-after-handl.patch b/patches.suse/0005-futex-Set-task-futex_state-to-DEAD-right-after-handl.patch
new file mode 100644
index 0000000000..c8bc1677e0
--- /dev/null
+++ b/patches.suse/0005-futex-Set-task-futex_state-to-DEAD-right-after-handl.patch
@@ -0,0 +1,53 @@
+From f24f22435dcc11389acc87e5586239c1819d217c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:40 +0100
+Subject: [PATCH 05/11] futex: Set task::futex_state to DEAD right after handling futex exit
+Git-commit: f24f22435dcc11389acc87e5586239c1819d217c
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+Setting task::futex_state in do_exit() is rather arbitrarily placed for no
+reason. Move it into the futex code.
+
+Note, this is only done for the exit cleanup as the exec cleanup cannot set
+the state to FUTEX_STATE_DEAD because the task struct is still in active
+use.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.439511191@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/exit.c | 1 -
+ kernel/futex.c | 1 +
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index cd893b530902..f3b8fa1b8945 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -837,7 +837,6 @@ void __noreturn do_exit(long code)
+ * Make sure we are holding no locks:
+ */
+ debug_check_no_locks_held();
+- futex_exit_done(tsk);
+
+ if (tsk->io_context)
+ exit_io_context(tsk);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 909e4d3c3099..426dd71e170d 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3682,6 +3682,7 @@ void futex_exec_release(struct task_struct *tsk)
+ void futex_exit_release(struct task_struct *tsk)
+ {
+ futex_exec_release(tsk);
++ futex_exit_done(tsk);
+ }
+
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+--
+2.16.4
+
diff --git a/patches.suse/0006-futex-Mark-the-begin-of-futex-exit-explicitly.patch b/patches.suse/0006-futex-Mark-the-begin-of-futex-exit-explicitly.patch
new file mode 100644
index 0000000000..f952dda6d3
--- /dev/null
+++ b/patches.suse/0006-futex-Mark-the-begin-of-futex-exit-explicitly.patch
@@ -0,0 +1,168 @@
+From 18f694385c4fd77a09851fd301236746ca83f3cb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:41 +0100
+Subject: [PATCH 06/11] futex: Mark the begin of futex exit explicitly
+Git-commit: 18f694385c4fd77a09851fd301236746ca83f3cb
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+Instead of relying on PF_EXITING use an explicit state for the futex exit
+and set it in the futex exit function. This moves the smp barrier and the
+lock/unlock serialization into the futex code.
+
+As with the DEAD state this is restricted to the exit path as exec
+continues to use the same task struct.
+
+This allows to simplify that logic in a next step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.539409004@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ include/linux/futex.h | 31 +++----------------------------
+ kernel/exit.c | 13 +------------
+ kernel/futex.c | 37 ++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 40 insertions(+), 41 deletions(-)
+
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index 6414cfaf88e0..9f2792427d64 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -52,6 +52,7 @@ union futex_key {
+ #ifdef CONFIG_FUTEX
+ enum {
+ FUTEX_STATE_OK,
++ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+ };
+
+@@ -66,33 +67,7 @@ static inline void futex_init_task(struct task_struct *tsk)
+ tsk->futex_state = FUTEX_STATE_OK;
+ }
+
+-/**
+- * futex_exit_done - Sets the tasks futex state to FUTEX_STATE_DEAD
+- * @tsk: task to set the state on
+- *
+- * Set the futex exit state of the task lockless. The futex waiter code
+- * observes that state when a task is exiting and loops until the task has
+- * actually finished the futex cleanup. The worst case for this is that the
+- * waiter runs through the wait loop until the state becomes visible.
+- *
+- * This has two callers:
+- *
+- * - futex_mm_release() after the futex exit cleanup has been done
+- *
+- * - do_exit() from the recursive fault handling path.
+- *
+- * In case of a recursive fault this is best effort. Either the futex exit
+- * code has run already or not. If the OWNER_DIED bit has been set on the
+- * futex then the waiter can take it over. If not, the problem is pushed
+- * back to user space. If the futex exit code did not run yet, then an
+- * already queued waiter might block forever, but there is nothing which
+- * can be done about that.
+- */
+-static inline void futex_exit_done(struct task_struct *tsk)
+-{
+- tsk->futex_state = FUTEX_STATE_DEAD;
+-}
+-
++void futex_exit_recursive(struct task_struct *tsk);
+ void futex_exit_release(struct task_struct *tsk);
+ void futex_exec_release(struct task_struct *tsk);
+
+@@ -100,7 +75,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+ #else
+ static inline void futex_init_task(struct task_struct *tsk) { }
+-static inline void futex_exit_done(struct task_struct *tsk) { }
++static inline void futex_exit_recursive(struct task_struct *tsk) { }
+ static inline void futex_exit_release(struct task_struct *tsk) { }
+ static inline void futex_exec_release(struct task_struct *tsk) { }
+ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+diff --git a/kernel/exit.c b/kernel/exit.c
+index f3b8fa1b8945..d351fd09e739 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -746,23 +746,12 @@ void __noreturn do_exit(long code)
+ */
+ if (unlikely(tsk->flags & PF_EXITING)) {
+ pr_alert("Fixing recursive fault but reboot is needed!\n");
+- futex_exit_done(tsk);
++ futex_exit_recursive(tsk);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ }
+
+ exit_signals(tsk); /* sets PF_EXITING */
+- /*
+- * Ensure that all new tsk->pi_lock acquisitions must observe
+- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
+- */
+- smp_mb();
+- /*
+- * Ensure that we must observe the pi_state in exit_mm() ->
+- * mm_release() -> exit_pi_state_list().
+- */
+- raw_spin_lock_irq(&tsk->pi_lock);
+- raw_spin_unlock_irq(&tsk->pi_lock);
+
+ if (unlikely(in_atomic())) {
+ pr_info("note: %s[%d] exited with preempt_count %d\n",
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 426dd71e170d..3488fb024a20 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3679,10 +3679,45 @@ void futex_exec_release(struct task_struct *tsk)
+ exit_pi_state_list(tsk);
+ }
+
++/**
++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
++ * @tsk: task to set the state on
++ *
++ * Set the futex exit state of the task lockless. The futex waiter code
++ * observes that state when a task is exiting and loops until the task has
++ * actually finished the futex cleanup. The worst case for this is that the
++ * waiter runs through the wait loop until the state becomes visible.
++ *
++ * This is called from the recursive fault handling path in do_exit().
++ *
++ * This is best effort. Either the futex exit code has run already or
++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
++ * take it over. If not, the problem is pushed back to user space. If the
++ * futex exit code did not run yet, then an already queued waiter might
++ * block forever, but there is nothing which can be done about that.
++ */
++void futex_exit_recursive(struct task_struct *tsk)
++{
++ tsk->futex_state = FUTEX_STATE_DEAD;
++}
++
+ void futex_exit_release(struct task_struct *tsk)
+ {
++ tsk->futex_state = FUTEX_STATE_EXITING;
++ /*
++ * Ensure that all new tsk->pi_lock acquisitions must observe
++ * FUTEX_STATE_EXITING. Serializes against attach_to_pi_owner().
++ */
++ smp_mb();
++ /*
++ * Ensure that we must observe the pi_state in exit_pi_state_list().
++ */
++ raw_spin_lock_irq(&tsk->pi_lock);
++ raw_spin_unlock_irq(&tsk->pi_lock);
++
+ futex_exec_release(tsk);
+- futex_exit_done(tsk);
++
++ tsk->futex_state = FUTEX_STATE_DEAD;
+ }
+
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+--
+2.16.4
+
diff --git a/patches.suse/0007-futex-Sanitize-exit-state-handling.patch b/patches.suse/0007-futex-Sanitize-exit-state-handling.patch
new file mode 100644
index 0000000000..d1bedfcce9
--- /dev/null
+++ b/patches.suse/0007-futex-Sanitize-exit-state-handling.patch
@@ -0,0 +1,55 @@
+From 4a8e991b91aca9e20705d434677ac013974e0e30 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:42 +0100
+Subject: [PATCH 07/11] futex: Sanitize exit state handling
+Git-commit: 4a8e991b91aca9e20705d434677ac013974e0e30
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+Instead of having a smp_mb() and an empty lock/unlock of task::pi_lock move
+the state setting into to the lock section.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.645603214@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/futex.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 3488fb024a20..f618562b4f5f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3703,16 +3703,19 @@ void futex_exit_recursive(struct task_struct *tsk)
+
+ void futex_exit_release(struct task_struct *tsk)
+ {
+- tsk->futex_state = FUTEX_STATE_EXITING;
+- /*
+- * Ensure that all new tsk->pi_lock acquisitions must observe
+- * FUTEX_STATE_EXITING. Serializes against attach_to_pi_owner().
+- */
+- smp_mb();
+ /*
+- * Ensure that we must observe the pi_state in exit_pi_state_list().
++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
++ *
++ * This ensures that all subsequent checks of tsk->futex_state in
++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
++ * tsk->pi_lock held.
++ *
++ * It guarantees also that a pi_state which was queued right before
++ * the state change under tsk->pi_lock by a concurrent waiter must
++ * be observed in exit_pi_state_list().
+ */
+ raw_spin_lock_irq(&tsk->pi_lock);
++ tsk->futex_state = FUTEX_STATE_EXITING;
+ raw_spin_unlock_irq(&tsk->pi_lock);
+
+ futex_exec_release(tsk);
+--
+2.16.4
+
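The pattern this patch settles on, publishing the state change inside the pi_lock section instead of pairing a lockless store with smp_mb(), can be distilled as follows. A user-space analogy with pthread_mutex_t standing in for tsk->pi_lock (purely illustrative, not the kernel code):

#include <pthread.h>

enum state { STATE_OK, STATE_EXITING, STATE_DEAD };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum state owner_state = STATE_OK;

/* Exiting side: the store happens inside the critical section, so any
 * reader that acquires the lock afterwards must observe it. No separate
 * memory barrier is required. */
static void begin_exit(void)
{
	pthread_mutex_lock(&lock);
	owner_state = STATE_EXITING;
	pthread_mutex_unlock(&lock);
}

/* Waiter side, mirroring attach_to_pi_owner(): check under the same lock. */
static int try_attach(void)
{
	int busy;

	pthread_mutex_lock(&lock);
	busy = (owner_state != STATE_OK);
	pthread_mutex_unlock(&lock);
	return busy ? -1 : 0;
}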
diff --git a/patches.suse/0008-futex-Provide-state-handling-for-exec-as-well.patch b/patches.suse/0008-futex-Provide-state-handling-for-exec-as-well.patch
new file mode 100644
index 0000000000..53d7722280
--- /dev/null
+++ b/patches.suse/0008-futex-Provide-state-handling-for-exec-as-well.patch
@@ -0,0 +1,102 @@
+From af8cbda2cfcaa5515d61ec500498d46e9a8247e2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:43 +0100
+Subject: [PATCH 08/11] futex: Provide state handling for exec() as well
+Git-commit: af8cbda2cfcaa5515d61ec500498d46e9a8247e2
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+exec() attempts to handle potentially held futexes gracefully by running
+the futex exit handling code like exit() does.
+
+The current implementation has no protection against concurrent incoming
+waiters. The reason is that the futex state cannot be set to
+FUTEX_STATE_DEAD after the cleanup because the task struct is still active
+and just about to execute the new binary.
+
+While it's arguably buggy when a task holds a futex over exec(), for
+consistency's sake the state handling can at least cover the actual futex
+exit cleanup section. This provides state consistency protection across
+the cleanup. As the futex state of the task becomes FUTEX_STATE_OK after the
+cleanup has been finished, this cannot prevent subsequent attempts to
+attach to the task in case the cleanup was not successful in mopping
+up all leftovers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.753355618@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/futex.c | 38 ++++++++++++++++++++++++++++++++++----
+ 1 file changed, 34 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f618562b4f5f..0c9850af2724 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3661,7 +3661,7 @@ static void exit_robust_list(struct task_struct *curr)
+ }
+ }
+
+-void futex_exec_release(struct task_struct *tsk)
++static void futex_cleanup(struct task_struct *tsk)
+ {
+ if (unlikely(tsk->robust_list)) {
+ exit_robust_list(tsk);
+@@ -3701,7 +3701,7 @@ void futex_exit_recursive(struct task_struct *tsk)
+ tsk->futex_state = FUTEX_STATE_DEAD;
+ }
+
+-void futex_exit_release(struct task_struct *tsk)
++static void futex_cleanup_begin(struct task_struct *tsk)
+ {
+ /*
+ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
+@@ -3717,10 +3717,40 @@ void futex_exit_release(struct task_struct *tsk)
+ raw_spin_lock_irq(&tsk->pi_lock);
+ tsk->futex_state = FUTEX_STATE_EXITING;
+ raw_spin_unlock_irq(&tsk->pi_lock);
++}
+
+- futex_exec_release(tsk);
++static void futex_cleanup_end(struct task_struct *tsk, int state)
++{
++ /*
++ * Lockless store. The only side effect is that an observer might
++ * take another loop until it becomes visible.
++ */
++ tsk->futex_state = state;
++}
+
+- tsk->futex_state = FUTEX_STATE_DEAD;
++void futex_exec_release(struct task_struct *tsk)
++{
++ /*
++ * The state handling is done for consistency, but in the case of
++	 * exec() there is no way to prevent further damage as the PID stays
++ * the same. But for the unlikely and arguably buggy case that a
++ * futex is held on exec(), this provides at least as much state
++ * consistency protection which is possible.
++ */
++ futex_cleanup_begin(tsk);
++ futex_cleanup(tsk);
++ /*
++	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
++	 * exec a new binary.
++ */
++ futex_cleanup_end(tsk, FUTEX_STATE_OK);
++}
++
++void futex_exit_release(struct task_struct *tsk)
++{
++ futex_cleanup_begin(tsk);
++ futex_cleanup(tsk);
++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
+ }
+
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+--
+2.16.4
+
diff --git a/patches.suse/0009-futex-Add-mutex-around-futex-exit.patch b/patches.suse/0009-futex-Add-mutex-around-futex-exit.patch
new file mode 100644
index 0000000000..095d9cd245
--- /dev/null
+++ b/patches.suse/0009-futex-Add-mutex-around-futex-exit.patch
@@ -0,0 +1,90 @@
+From 3f186d974826847a07bc7964d79ec4eded475ad9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:44 +0100
+Subject: [PATCH 09/11] futex: Add mutex around futex exit
+Git-commit: 3f186d974826847a07bc7964d79ec4eded475ad9
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+The mutex will be used in subsequent changes to replace the busy looping of
+a waiter when the futex owner is currently executing the exit cleanup to
+prevent a potential live lock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.845798895@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ include/linux/futex.h | 1 +
+ include/linux/sched.h | 1 +
+ kernel/futex.c | 16 ++++++++++++++++
+ 3 files changed, 18 insertions(+)
+
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index 9f2792427d64..5cc3fed27d4c 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -65,6 +65,7 @@ static inline void futex_init_task(struct task_struct *tsk)
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
++ mutex_init(&tsk->futex_exit_mutex);
+ }
+
+ void futex_exit_recursive(struct task_struct *tsk);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 85dab2f721c9..1ebe540f8a08 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1053,6 +1053,7 @@ struct task_struct {
+ #endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
++ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 0c9850af2724..46a81e611065 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3698,11 +3698,22 @@ static void futex_cleanup(struct task_struct *tsk)
+ */
+ void futex_exit_recursive(struct task_struct *tsk)
+ {
++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
++ if (tsk->futex_state == FUTEX_STATE_EXITING)
++ mutex_unlock(&tsk->futex_exit_mutex);
+ tsk->futex_state = FUTEX_STATE_DEAD;
+ }
+
+ static void futex_cleanup_begin(struct task_struct *tsk)
+ {
++ /*
++ * Prevent various race issues against a concurrent incoming waiter
++ * including live locks by forcing the waiter to block on
++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
++ * attach_to_pi_owner().
++ */
++ mutex_lock(&tsk->futex_exit_mutex);
++
+ /*
+ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
+ *
+@@ -3726,6 +3737,11 @@ static void futex_cleanup_end(struct task_struct *tsk, int state)
+ * take another loop until it becomes visible.
+ */
+ tsk->futex_state = state;
++ /*
++ * Drop the exit protection. This unblocks waiters which observed
++ * FUTEX_STATE_EXITING to reevaluate the state.
++ */
++ mutex_unlock(&tsk->futex_exit_mutex);
+ }
+
+ void futex_exec_release(struct task_struct *tsk)
+--
+2.16.4
+
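The intended use of the new mutex is a begin/end bracket around the cleanup, with waiters performing an empty lock/unlock to synchronize; patch 11 wires up the waiter side. A distilled user-space sketch with assumed names, pthreads standing in for the kernel mutex:

#include <pthread.h>

static pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
static int state;	/* 0 = OK, 1 = EXITING, 2 = DEAD; stands in for futex_state */

static void cleanup_begin(void)
{
	pthread_mutex_lock(&exit_mutex);	/* held across the whole cleanup */
	state = 1;				/* EXITING */
}

static void cleanup_end(int final_state)
{
	state = final_state;			/* DEAD on exit, OK after exec */
	pthread_mutex_unlock(&exit_mutex);	/* unblocks waiters to reevaluate */
}

/* A waiter that observed EXITING blocks here instead of busy-looping; once
 * it acquires the mutex, the cleanup has finished and the state is stable. */
static void wait_for_exit_done(void)
{
	pthread_mutex_lock(&exit_mutex);
	pthread_mutex_unlock(&exit_mutex);
}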
diff --git a/patches.suse/0010-futex-Provide-distinct-return-value-when-owner-is-ex.patch b/patches.suse/0010-futex-Provide-distinct-return-value-when-owner-is-ex.patch
new file mode 100644
index 0000000000..257b40a3e6
--- /dev/null
+++ b/patches.suse/0010-futex-Provide-distinct-return-value-when-owner-is-ex.patch
@@ -0,0 +1,86 @@
+From ac31c7ff8624409ba3c4901df9237a616c187a5d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:45 +0100
+Subject: [PATCH 10/11] futex: Provide distinct return value when owner is exiting
+Git-commit: ac31c7ff8624409ba3c4901df9237a616c187a5d
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+attach_to_pi_owner() returns -EAGAIN for various cases:
+
+ - Owner task is exiting
+ - Futex value has changed
+
+The caller drops the held locks (hash bucket, mmap_sem) and retries the
+operation. In case of the owner task exiting this can result in a live
+lock.
+
+As a preparatory step for separating those cases, provide a distinct return
+value (EBUSY) for the owner exiting case.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20191106224556.935606117@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/futex.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 46a81e611065..4f9d7a4b6dbf 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1182,11 +1182,11 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ u32 uval2;
+
+ /*
+- * If the futex exit state is not yet FUTEX_STATE_DEAD, wait
+- * for it to finish.
++ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
++ * caller that the alleged owner is busy.
+ */
+ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
+- return -EAGAIN;
++ return -EBUSY;
+
+ /*
+ * Reread the user space value to handle the following situation:
+@@ -2092,12 +2092,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ if (!ret)
+ goto retry;
+ goto out;
++ case -EBUSY:
+ case -EAGAIN:
+ /*
+ * Two reasons for this:
+- * - Owner is exiting and we just wait for the
++ * - EBUSY: Owner is exiting and we just wait for the
+ * exit to complete.
+- * - The user space value changed.
++ * - EAGAIN: The user space value changed.
+ */
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+@@ -2843,12 +2844,13 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ goto out_unlock_put_key;
+ case -EFAULT:
+ goto uaddr_faulted;
++ case -EBUSY:
+ case -EAGAIN:
+ /*
+ * Two reasons for this:
+- * - Task is exiting and we just wait for the
++ * - EBUSY: Task is exiting and we just wait for the
+ * exit to complete.
+- * - The user space value changed.
++ * - EAGAIN: The user space value changed.
+ */
+ queue_unlock(hb);
+ put_futex_key(&q.key);
+--
+2.16.4
+
diff --git a/patches.suse/0011-futex-Prevent-exit-livelock.patch b/patches.suse/0011-futex-Prevent-exit-livelock.patch
new file mode 100644
index 0000000000..ea29290848
--- /dev/null
+++ b/patches.suse/0011-futex-Prevent-exit-livelock.patch
@@ -0,0 +1,347 @@
+From 3ef240eaff36b8119ac9e2ea17cbf41179c930ba Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Nov 2019 22:55:46 +0100
+Subject: [PATCH 11/11] futex: Prevent exit livelock
+Git-commit: 3ef240eaff36b8119ac9e2ea17cbf41179c930ba
+Patch-mainline: v5.5-rc1
+References: bsc#1149032
+
+Oleg provided the following test case:
+
+int main(void)
+{
+ struct sched_param sp = {};
+
+ sp.sched_priority = 2;
+ assert(sched_setscheduler(0, SCHED_FIFO, &sp) == 0);
+
+ int lock = vfork();
+ if (!lock) {
+ sp.sched_priority = 1;
+ assert(sched_setscheduler(0, SCHED_FIFO, &sp) == 0);
+ _exit(0);
+ }
+
+ syscall(__NR_futex, &lock, FUTEX_LOCK_PI, 0,0,0);
+ return 0;
+}
+
+This creates an unkillable RT process spinning in futex_lock_pi() on a UP
+machine or if the process is affine to a single CPU. The reason is:
+
+ parent child
+
+ set FIFO prio 2
+
+ vfork() -> set FIFO prio 1
+ implies wait_for_child() sched_setscheduler(...)
+ exit()
+ do_exit()
+ ....
+ mm_release()
+ tsk->futex_state = FUTEX_STATE_EXITING;
+ exit_futex(); (NOOP in this case)
+ complete() --> wakes parent
+ sys_futex()
+ loop infinite because
+ tsk->futex_state == FUTEX_STATE_EXITING
+
+The same problem can happen just by regular preemption as well:
+
+ task holds futex
+ ...
+ do_exit()
+ tsk->futex_state = FUTEX_STATE_EXITING;
+
+ --> preemption (unrelated wakeup of some other higher prio task, e.g. timer)
+
+ switch_to(other_task)
+
+ return to user
+ sys_futex()
+ loop infinite as above
+
+Just for the fun of it the futex exit cleanup could trigger the wakeup
+itself before the task sets its futex state to DEAD.
+
+To cure this, the handling of the exiting owner is changed so:
+
+ - A refcount is held on the task
+
+ - The task pointer is stored in a caller visible location
+
+ - The caller drops all locks (hash bucket, mmap_sem) and blocks
+ on task::futex_exit_mutex. When the mutex is acquired then
+ the exiting task has completed the cleanup and the state
+ is consistent and can be reevaluated.
+
+This is not a pretty solution, but there is no choice other than returning
+an error code to user space, which would break the state consistency
+guarantee and open another can of problems including regressions.
+
+For stable backports the preparatory commits ac31c7ff8624 .. ba31c1a48538
+are required as well, but for anything older than 5.3.y the backports are
+going to be provided when this hits mainline as the other dependencies for
+those kernels are definitely not stable material.
+
+Fixes: 778e9a9c3e71 ("pi-futex: fix exit races and locking problems")
+Reported-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Stable Team <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20191106224557.041676471@linutronix.de
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/futex.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 91 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 4f9d7a4b6dbf..03c518e9747e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1176,6 +1176,36 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
+ return ret;
+ }
+
++/**
++ * wait_for_owner_exiting - Block until the owner has exited
++ * @exiting: Pointer to the exiting task
++ *
++ * Caller must hold a refcount on @exiting.
++ */
++static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
++{
++ if (ret != -EBUSY) {
++ WARN_ON_ONCE(exiting);
++ return;
++ }
++
++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
++ return;
++
++ mutex_lock(&exiting->futex_exit_mutex);
++ /*
++ * No point in doing state checking here. If the waiter got here
++ * while the task was in exec()->exec_futex_release() then it can
++ * have any FUTEX_STATE_* value when the waiter has acquired the
++ * mutex. OK, if running, EXITING or DEAD if it reached exit()
++ * already. Highly unlikely and not a problem. Just one more round
++ * through the futex maze.
++ */
++ mutex_unlock(&exiting->futex_exit_mutex);
++
++ put_task_struct(exiting);
++}
++
+ static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ struct task_struct *tsk)
+ {
+@@ -1237,7 +1267,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ * it after doing proper sanity checks.
+ */
+ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
+- struct futex_pi_state **ps)
++ struct futex_pi_state **ps,
++ struct task_struct **exiting)
+ {
+ pid_t pid = uval & FUTEX_TID_MASK;
+ struct futex_pi_state *pi_state;
+@@ -1276,7 +1307,19 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
+ int ret = handle_exit_race(uaddr, uval, p);
+
+ raw_spin_unlock_irq(&p->pi_lock);
+- put_task_struct(p);
++ /*
++ * If the owner task is between FUTEX_STATE_EXITING and
++ * FUTEX_STATE_DEAD then store the task pointer and keep
++ * the reference on the task struct. The calling code will
++ * drop all locks, wait for the task to reach
++ * FUTEX_STATE_DEAD and then drop the refcount. This is
++ * required to prevent a live lock when the current task
++ * preempted the exiting task between the two states.
++ */
++ if (ret == -EBUSY)
++ *exiting = p;
++ else
++ put_task_struct(p);
+ return ret;
+ }
+
+@@ -1315,7 +1358,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
+
+ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
+ struct futex_hash_bucket *hb,
+- union futex_key *key, struct futex_pi_state **ps)
++ union futex_key *key, struct futex_pi_state **ps,
++ struct task_struct **exiting)
+ {
+ struct futex_q *top_waiter = futex_top_waiter(hb, key);
+
+@@ -1330,7 +1374,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
+ * We are the first waiter - try to look up the owner based on
+ * @uval and attach to it.
+ */
+- return attach_to_pi_owner(uaddr, uval, key, ps);
++ return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
+ }
+
+ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+@@ -1358,6 +1402,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+ * lookup
+ * @task: the task to perform the atomic lock work for. This will
+ * be "current" except in the case of requeue pi.
++ * @exiting: Pointer to store the task pointer of the owner task
++ * which is in the middle of exiting
+ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
+ *
+ * Return:
+@@ -1366,11 +1412,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+ * - <0 - error
+ *
+ * The hb->lock and futex_key refs shall be held by the caller.
++ *
++ * @exiting is only set when the return value is -EBUSY. If so, this holds
++ * a refcount on the exiting task on return and the caller needs to drop it
++ * after waiting for the exit to complete.
+ */
+ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ union futex_key *key,
+ struct futex_pi_state **ps,
+- struct task_struct *task, int set_waiters)
++ struct task_struct *task,
++ struct task_struct **exiting,
++ int set_waiters)
+ {
+ u32 uval, newval, vpid = task_pid_vnr(task);
+ struct futex_q *top_waiter;
+@@ -1440,7 +1492,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ * attach to the owner. If that fails, no harm done, we only
+ * set the FUTEX_WAITERS bit in the user space variable.
+ */
+- return attach_to_pi_owner(uaddr, newval, key, ps);
++ return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
+ }
+
+ /**
+@@ -1858,6 +1910,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ * @key1: the from futex key
+ * @key2: the to futex key
+ * @ps: address to store the pi_state pointer
++ * @exiting: Pointer to store the task pointer of the owner task
++ * which is in the middle of exiting
+ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
+ *
+ * Try and get the lock on behalf of the top waiter if we can do it atomically.
+@@ -1865,16 +1919,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
+ * hb1 and hb2 must be held by the caller.
+ *
++ * @exiting is only set when the return value is -EBUSY. If so, this holds
++ * a refcount on the exiting task on return and the caller needs to drop it
++ * after waiting for the exit to complete.
++ *
+ * Return:
+ * - 0 - failed to acquire the lock atomically;
+ * - >0 - acquired the lock, return value is vpid of the top_waiter
+ * - <0 - error
+ */
+-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+- struct futex_hash_bucket *hb1,
+- struct futex_hash_bucket *hb2,
+- union futex_key *key1, union futex_key *key2,
+- struct futex_pi_state **ps, int set_waiters)
++static int
++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
++ struct futex_hash_bucket *hb2, union futex_key *key1,
++ union futex_key *key2, struct futex_pi_state **ps,
++ struct task_struct **exiting, int set_waiters)
+ {
+ struct futex_q *top_waiter = NULL;
+ u32 curval;
+@@ -1911,7 +1969,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ */
+ vpid = task_pid_vnr(top_waiter->task);
+ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+- set_waiters);
++ exiting, set_waiters);
+ if (ret == 1) {
+ requeue_pi_wake_futex(top_waiter, key2, hb2);
+ return vpid;
+@@ -2040,6 +2098,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ }
+
+ if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
++ struct task_struct *exiting = NULL;
++
+ /*
+ * Attempt to acquire uaddr2 and wake the top waiter. If we
+ * intend to requeue waiters, force setting the FUTEX_WAITERS
+@@ -2047,7 +2107,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ * faults rather in the requeue loop below.
+ */
+ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
+- &key2, &pi_state, nr_requeue);
++ &key2, &pi_state,
++ &exiting, nr_requeue);
+
+ /*
+ * At this point the top_waiter has either taken uaddr2 or is
+@@ -2074,7 +2135,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ * If that call succeeds then we have pi_state and an
+ * initial refcount on it.
+ */
+- ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
++ ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
++ &pi_state, &exiting);
+ }
+
+ switch (ret) {
+@@ -2104,6 +2166,12 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ hb_waiters_dec(hb2);
+ put_futex_key(&key2);
+ put_futex_key(&key1);
++ /*
++ * Handle the case where the owner is in the middle of
++ * exiting. Wait for the exit to complete otherwise
++ * this task might loop forever, aka. live lock.
++ */
++ wait_for_owner_exiting(ret, exiting);
+ cond_resched();
+ goto retry;
+ default:
+@@ -2810,6 +2878,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ {
+ struct hrtimer_sleeper timeout, *to;
+ struct futex_pi_state *pi_state = NULL;
++ struct task_struct *exiting = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+ struct futex_q q = futex_q_init;
+@@ -2831,7 +2900,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ retry_private:
+ hb = queue_lock(&q);
+
+- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
++ &exiting, 0);
+ if (unlikely(ret)) {
+ /*
+ * Atomic work succeeded and we got the lock,
+@@ -2854,6 +2924,12 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ */
+ queue_unlock(hb);
+ put_futex_key(&q.key);
++ /*
++ * Handle the case where the owner is in the middle of
++ * exiting. Wait for the exit to complete otherwise
++ * this task might loop forever, aka. live lock.
++ */
++ wait_for_owner_exiting(ret, exiting);
+ cond_resched();
+ goto retry;
+ default:
+--
+2.16.4
+
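Both call sites converted by this patch follow the same shape: the atomic
lock attempt publishes the exiting owner through @exiting, the caller drops
every lock, blocks in wait_for_owner_exiting(), and retries. A condensed
sketch of that pattern, with names as in the patch and the futex key and
hash bucket reacquisition on retry elided:

        while (1) {
                struct task_struct *exiting = NULL;
                int ret;

                ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state,
                                           current, &exiting, 0);
                if (ret != -EBUSY && ret != -EAGAIN)
                        break;  /* locked, or an error that is not a retry */

                queue_unlock(hb);
                put_futex_key(&q.key);
                /*
                 * For -EBUSY this blocks on the owner's futex_exit_mutex
                 * and drops the refcount taken in attach_to_pi_owner();
                 * for -EAGAIN no owner was published and it is a no-op.
                 */
                wait_for_owner_exiting(ret, exiting);
                cond_resched();
        }
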
diff --git a/patches.suse/bpf-Add-cb-access-in-kfree_skb-test.patch b/patches.suse/bpf-Add-cb-access-in-kfree_skb-test.patch
new file mode 100644
index 0000000000..7138cd65b6
--- /dev/null
+++ b/patches.suse/bpf-Add-cb-access-in-kfree_skb-test.patch
@@ -0,0 +1,183 @@
+From: Martin KaFai Lau <kafai@fb.com>
+Date: Thu, 7 Nov 2019 10:09:05 -0800
+Subject: bpf: Add cb access in kfree_skb test
+Patch-mainline: v5.5-rc1
+Git-commit: ed5941af3f67ba9062b98aba1a6ca398867dc0de
+References: bsc#1155518
+
+Access the skb->cb[] in the kfree_skb test.
+
+Signed-off-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191107180905.4097871-1-kafai@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 54 ++++++++++++++++-----
+ tools/testing/selftests/bpf/progs/kfree_skb.c | 25 +++++++--
+ 2 files changed, 63 insertions(+), 16 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+@@ -1,15 +1,38 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <test_progs.h>
+
++struct meta {
++ int ifindex;
++ __u32 cb32_0;
++ __u8 cb8_0;
++};
++
++static union {
++ __u32 cb32[5];
++ __u8 cb8[20];
++} cb = {
++ .cb32[0] = 0x81828384,
++};
++
+ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
+ {
+- int ifindex = *(int *)data, duration = 0;
+- struct ipv6_packet *pkt_v6 = data + 4;
++ struct meta *meta = (struct meta *)data;
++ struct ipv6_packet *pkt_v6 = data + sizeof(*meta);
++ int duration = 0;
+
+- if (ifindex != 1)
++ if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n",
++ size, 72 + sizeof(*meta)))
++ return;
++ if (CHECK(meta->ifindex != 1, "check_meta_ifindex",
++ "meta->ifindex = %d\n", meta->ifindex))
+ /* spurious kfree_skb not on loopback device */
+ return;
+- if (CHECK(size != 76, "check_size", "size %u != 76\n", size))
++ if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
++ meta->cb8_0, cb.cb8[0]))
++ return;
++ if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
++ "cb32_0 %x != %x\n",
++ meta->cb32_0, cb.cb32[0]))
+ return;
+ if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
+ "h_proto %x\n", pkt_v6->eth.h_proto))
+@@ -26,6 +49,13 @@ static void on_sample(void *ctx, int cpu
+
+ void test_kfree_skb(void)
+ {
++ struct __sk_buff skb = {};
++ struct bpf_prog_test_run_attr tattr = {
++ .data_in = &pkt_v6,
++ .data_size_in = sizeof(pkt_v6),
++ .ctx_in = &skb,
++ .ctx_size_in = sizeof(skb),
++ };
+ struct bpf_prog_load_attr attr = {
+ .file = "./kfree_skb.o",
+ };
+@@ -36,11 +66,12 @@ void test_kfree_skb(void)
+ struct bpf_link *link = NULL;
+ struct bpf_map *perf_buf_map;
+ struct bpf_program *prog;
+- __u32 duration, retval;
+- int err, pkt_fd, kfree_skb_fd;
++ int err, kfree_skb_fd;
+ bool passed = false;
++ __u32 duration = 0;
+
+- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &pkt_fd);
++ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
++ &obj, &tattr.prog_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+
+@@ -66,11 +97,12 @@ void test_kfree_skb(void)
+ if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ goto close_prog;
+
+- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+- NULL, NULL, &retval, &duration);
+- CHECK(err || retval, "ipv6",
++ memcpy(skb.cb, &cb, sizeof(cb));
++ err = bpf_prog_test_run_xattr(&tattr);
++ duration = tattr.duration;
++ CHECK(err || tattr.retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+- err, errno, retval, duration);
++ err, errno, tattr.retval, duration);
+
+ /* read perf buffer */
+ err = perf_buffer__poll(pb, 100);
+--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
++++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
+@@ -43,6 +43,7 @@ struct sk_buff {
+ refcount_t users;
+ unsigned char *data;
+ char __pkt_type_offset[0];
++ char cb[48];
+ };
+
+ /* copy arguments from
+@@ -57,28 +58,41 @@ struct trace_kfree_skb {
+ void *location;
+ };
+
++struct meta {
++ int ifindex;
++ __u32 cb32_0;
++ __u8 cb8_0;
++};
++
+ SEC("tp_btf/kfree_skb")
+ int trace_kfree_skb(struct trace_kfree_skb *ctx)
+ {
+ struct sk_buff *skb = ctx->skb;
+ struct net_device *dev;
+- int ifindex;
+ struct callback_head *ptr;
+ void *func;
+ int users;
+ unsigned char *data;
+ unsigned short pkt_data;
++ struct meta meta = {};
+ char pkt_type;
++ __u32 *cb32;
++ __u8 *cb8;
+
+ __builtin_preserve_access_index(({
+ users = skb->users.refs.counter;
+ data = skb->data;
+ dev = skb->dev;
+- ifindex = dev->ifindex;
+ ptr = dev->ifalias->rcuhead.next;
+ func = ptr->func;
++ cb8 = (__u8 *)&skb->cb;
++ cb32 = (__u32 *)&skb->cb;
+ }));
+
++ meta.ifindex = _(dev->ifindex);
++ meta.cb8_0 = cb8[8];
++ meta.cb32_0 = cb32[2];
++
+ bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
+ pkt_type &= 7;
+
+@@ -90,14 +104,15 @@ int trace_kfree_skb(struct trace_kfree_s
+ _(skb->len), users, pkt_type);
+ bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
+ bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
+- ifindex, data, pkt_data);
++ meta.ifindex, data, pkt_data);
++ bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);
+
+- if (users != 1 || pkt_data != bpf_htons(0x86dd) || ifindex != 1)
++ if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
+ /* raw tp ignores return value */
+ return 0;
+
+ /* send first 72 byte of the packet to user space */
+ bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
+- &ifindex, sizeof(ifindex));
++ &meta, sizeof(meta));
+ return 0;
+ }
diff --git a/patches.suse/bpf-Fix-build-in-minimal-configurations-again.patch b/patches.suse/bpf-Fix-build-in-minimal-configurations-again.patch
new file mode 100644
index 0000000000..edcb5c8ed9
--- /dev/null
+++ b/patches.suse/bpf-Fix-build-in-minimal-configurations-again.patch
@@ -0,0 +1,37 @@
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 10 Dec 2019 21:35:46 +0100
+Subject: bpf: Fix build in minimal configurations, again
+Patch-mainline: v5.5-rc3
+Git-commit: 4c80c7bc583a87ded5f61906f81256b57c795806
+References: bsc#1155518
+
+Building with -Werror showed another failure:
+
+kernel/bpf/btf.c: In function 'btf_get_prog_ctx_type.isra.31':
+kernel/bpf/btf.c:3508:63: error: array subscript 0 is above array bounds of 'u8[0]' {aka 'unsigned char[0]'} [-Werror=array-bounds]
+ ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
+
+I don't actually understand why the array is empty, but a similar
+fix has addressed a related problem, so I suppose we can do the
+same thing here.
+
+Fixes: ce27709b8162 ("bpf: Fix build in minimal configurations")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20191210203553.2941035-1-arnd@arndb.de
+Acked-by: Gary Lin <glin@suse.com>
+---
+ kernel/bpf/btf.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = {
+ [_id] = __ctx_convert##_id,
+ #include <linux/bpf_types.h>
+ #undef BPF_PROG_TYPE
++ 0, /* avoid empty array */
+ };
+ #undef BPF_MAP_TYPE
+
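The sentinel works because the array body is macro-generated: in a minimal
configuration nothing is generated, the array has size zero (a GCC
extension), and any subscript is out of bounds. A hypothetical reduction,
with CONFIG_SOME_PROG_TYPE standing in for the generated entries:

/* Not the kernel source; illustrates the empty-array failure mode. */
static unsigned char ctx_convert_map[] = {
#ifdef CONFIG_SOME_PROG_TYPE    /* hypothetical; unset in minimal configs */
        [1] = 1,
#endif
        0,      /* avoid empty array */
};
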
diff --git a/patches.suse/bpf-Fix-cgroup-local-storage-prog-tracking.patch b/patches.suse/bpf-Fix-cgroup-local-storage-prog-tracking.patch
new file mode 100644
index 0000000000..d09ec755b7
--- /dev/null
+++ b/patches.suse/bpf-Fix-cgroup-local-storage-prog-tracking.patch
@@ -0,0 +1,141 @@
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 17 Dec 2019 13:28:16 +0100
+Subject: bpf: Fix cgroup local storage prog tracking
+Patch-mainline: v5.5-rc3
+Git-commit: e47304232b373362228bf233f17bd12b11c9aafc
+References: bsc#1155518
+
+We recently noticed that we're tracking programs related to local storage maps
+through their prog pointer. This is a wrong assumption since the prog pointer
+can still change throughout the verification process, for example, whenever
+bpf_patch_insn_single() is called.
+
+Therefore, the prog pointer that was assigned via bpf_cgroup_storage_assign()
+is not guaranteed to be the same as the one we pass to bpf_cgroup_storage_release(),
+and the map would therefore remain in a busy state forever. Fix this by using
+the prog's aux pointer which is stable throughout verification and beyond.
+
+Fixes: de9cbbaadba5 ("bpf: introduce cgroup storage maps")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/1471c69eca3022218666f909bc927a92388fd09e.1576580332.git.daniel@iogearbox.net
+Acked-by: Gary Lin <glin@suse.com>
+---
+ include/linux/bpf-cgroup.h | 8 ++++----
+ kernel/bpf/core.c | 3 +--
+ kernel/bpf/local_storage.c | 24 ++++++++++++------------
+ kernel/bpf/verifier.c | 2 +-
+ 4 files changed, 18 insertions(+), 19 deletions(-)
+
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
+-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
++int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
++void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
+
+ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
+@@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(
+
+ static inline void bpf_cgroup_storage_set(
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
+-static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
++static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
+ struct bpf_map *map) { return 0; }
+-static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
++static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
+ struct bpf_map *map) {}
+ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2043,8 +2043,7 @@ static void bpf_free_cgroup_storage(stru
+ for_each_cgroup_storage_type(stype) {
+ if (!aux->cgroup_storage[stype])
+ continue;
+- bpf_cgroup_storage_release(aux->prog,
+- aux->cgroup_storage[stype]);
++ bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
+ }
+ }
+
+--- a/kernel/bpf/local_storage.c
++++ b/kernel/bpf/local_storage.c
+@@ -20,7 +20,7 @@ struct bpf_cgroup_storage_map {
+ struct bpf_map map;
+
+ spinlock_t lock;
+- struct bpf_prog *prog;
++ struct bpf_prog_aux *aux;
+ struct rb_root root;
+ struct list_head list;
+ };
+@@ -420,7 +420,7 @@ const struct bpf_map_ops cgroup_storage_
+ .map_seq_show_elem = cgroup_storage_seq_show_elem,
+ };
+
+-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
++int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
+ {
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+@@ -428,14 +428,14 @@ int bpf_cgroup_storage_assign(struct bpf
+
+ spin_lock_bh(&map->lock);
+
+- if (map->prog && map->prog != prog)
++ if (map->aux && map->aux != aux)
+ goto unlock;
+- if (prog->aux->cgroup_storage[stype] &&
+- prog->aux->cgroup_storage[stype] != _map)
++ if (aux->cgroup_storage[stype] &&
++ aux->cgroup_storage[stype] != _map)
+ goto unlock;
+
+- map->prog = prog;
+- prog->aux->cgroup_storage[stype] = _map;
++ map->aux = aux;
++ aux->cgroup_storage[stype] = _map;
+ ret = 0;
+ unlock:
+ spin_unlock_bh(&map->lock);
+@@ -443,16 +443,16 @@ unlock:
+ return ret;
+ }
+
+-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
++void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map)
+ {
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+
+ spin_lock_bh(&map->lock);
+- if (map->prog == prog) {
+- WARN_ON(prog->aux->cgroup_storage[stype] != _map);
+- map->prog = NULL;
+- prog->aux->cgroup_storage[stype] = NULL;
++ if (map->aux == aux) {
++ WARN_ON(aux->cgroup_storage[stype] != _map);
++ map->aux = NULL;
++ aux->cgroup_storage[stype] = NULL;
+ }
+ spin_unlock_bh(&map->lock);
+ }
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8268,7 +8268,7 @@ static int replace_map_fd_with_map_ptr(s
+ env->used_maps[env->used_map_cnt++] = map;
+
+ if (bpf_map_is_cgroup_storage(map) &&
+- bpf_cgroup_storage_assign(env->prog, map)) {
++ bpf_cgroup_storage_assign(env->prog->aux, map)) {
+ verbose(env, "only one cgroup storage of each type is allowed\n");
+ fdput(f);
+ return -EBUSY;
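
The hazard is a general one: tracking an object by a pointer that may be
reallocated is unreliable, while the separately allocated aux stays put
(whether realloc() actually moves the block depends on the allocator; the
point is that callers must not assume it stays put). A self-contained user
space analogy (not kernel code) of why map->aux is the stable handle:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct aux  { int id; };
struct prog { struct aux *aux; char body[16]; };

int main(void)
{
        struct prog *p = calloc(1, sizeof(*p));
        uintptr_t old_prog, old_aux;

        p->aux = calloc(1, sizeof(*p->aux));
        old_prog = (uintptr_t)p;        /* like the old map->prog */
        old_aux  = (uintptr_t)p->aux;   /* like the new map->aux  */

        /* "instruction patching" grows the program; it may move */
        p = realloc(p, sizeof(*p) + 4096);

        printf("prog moved: %s\n", old_prog == (uintptr_t)p ? "no" : "yes");
        printf("aux  moved: %s\n", old_aux == (uintptr_t)p->aux ? "no" : "yes");
        free(p->aux);
        free(p);
        return 0;
}
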
diff --git a/patches.suse/bpf-Fix-missing-prog-untrack-in-release_maps.patch b/patches.suse/bpf-Fix-missing-prog-untrack-in-release_maps.patch
new file mode 100644
index 0000000000..e8fccce1fc
--- /dev/null
+++ b/patches.suse/bpf-Fix-missing-prog-untrack-in-release_maps.patch
@@ -0,0 +1,96 @@
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 16 Dec 2019 17:49:00 +0100
+Subject: bpf: Fix missing prog untrack in release_maps
+Patch-mainline: v5.5-rc3
+Git-commit: a2ea07465c8d7984cc6b8b1f0b3324f9b138094a
+References: bsc#1155518
+
+Commit da765a2f5993 ("bpf: Add poke dependency tracking for prog array
+maps") wrongly assumed that in case of prog load errors, we're cleaning
+up all program tracking via bpf_free_used_maps().
+
+However, it can happen that we're still at the point where we didn't copy
+map pointers into the prog's aux section such that env->prog->aux->used_maps
+is still zero, running into a UAF. In such a case, the verifier has a similar
+release_maps() helper that drops references to used maps from its env.
+
+Consolidate the release code into __bpf_free_used_maps() and call it from
+all sides to fix it.
+
+Fixes: da765a2f5993 ("bpf: Add poke dependency tracking for prog array maps")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/1c2909484ca524ae9f55109b06f22b6213e76376.1576514756.git.daniel@iogearbox.net
+Acked-by: Gary Lin <glin@suse.com>
+---
+ include/linux/bpf.h | 2 ++
+ kernel/bpf/core.c | 14 ++++++++++----
+ kernel/bpf/verifier.c | 14 ++------------
+ 3 files changed, 14 insertions(+), 16 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -817,6 +817,8 @@ struct bpf_prog * __must_check bpf_prog_
+ void bpf_prog_put(struct bpf_prog *prog);
+ int __bpf_prog_charge(struct user_struct *user, u32 pages);
+ void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
++void __bpf_free_used_maps(struct bpf_prog_aux *aux,
++ struct bpf_map **used_maps, u32 len);
+
+ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2048,18 +2048,24 @@ static void bpf_free_cgroup_storage(stru
+ }
+ }
+
+-static void bpf_free_used_maps(struct bpf_prog_aux *aux)
++void __bpf_free_used_maps(struct bpf_prog_aux *aux,
++ struct bpf_map **used_maps, u32 len)
+ {
+ struct bpf_map *map;
+- int i;
++ u32 i;
+
+ bpf_free_cgroup_storage(aux);
+- for (i = 0; i < aux->used_map_cnt; i++) {
+- map = aux->used_maps[i];
++ for (i = 0; i < len; i++) {
++ map = used_maps[i];
+ if (map->ops->map_poke_untrack)
+ map->ops->map_poke_untrack(map, aux);
+ bpf_map_put(map);
+ }
++}
++
++static void bpf_free_used_maps(struct bpf_prog_aux *aux)
++{
++ __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
+ kfree(aux->used_maps);
+ }
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8298,18 +8298,8 @@ next_insn:
+ /* drop refcnt of maps used by the rejected program */
+ static void release_maps(struct bpf_verifier_env *env)
+ {
+- enum bpf_cgroup_storage_type stype;
+- int i;
+-
+- for_each_cgroup_storage_type(stype) {
+- if (!env->prog->aux->cgroup_storage[stype])
+- continue;
+- bpf_cgroup_storage_release(env->prog,
+- env->prog->aux->cgroup_storage[stype]);
+- }
+-
+- for (i = 0; i < env->used_map_cnt; i++)
+- bpf_map_put(env->used_maps[i]);
++ __bpf_free_used_maps(env->prog->aux, env->used_maps,
++ env->used_map_cnt);
+ }
+
+ /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
diff --git a/patches.suse/bpf-Fix-record_func_key-to-perform-backtracking-on-r.patch b/patches.suse/bpf-Fix-record_func_key-to-perform-backtracking-on-r.patch
new file mode 100644
index 0000000000..ad1e665011
--- /dev/null
+++ b/patches.suse/bpf-Fix-record_func_key-to-perform-backtracking-on-r.patch
@@ -0,0 +1,184 @@
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 19 Dec 2019 22:19:50 +0100
+Subject: bpf: Fix record_func_key to perform backtracking on r3
+Patch-mainline: v5.5-rc3
+Git-commit: cc52d9140aa920d8d61c7f6de3fff5fea6692ea9
+References: bsc#1155518
+
+While testing Cilium with an /unreleased/ Linus' tree under the BPF-based
+NodePort implementation, I noticed strange BPF SNAT engine behavior from
+time to time. In some cases it would do the correct SNAT/DNAT service
+translation, but at a random point in time it would just stop and perform an
+unexpected translation after SYN and SYN/ACK, and the stack would send a RST
+back. I initially assumed some sort of race condition in the BPF code, but
+adding trace_printk()s for debugging purposes at some point seemed to
+resolve the issue auto-magically.
+
+Digging deeper on this Heisenbug and reducing the trace_printk() calls to
+an absolute minimum, it turns out that a single call would suffice to
+trigger / not trigger the seen RST issue, even though the logic of the
+program itself remains unchanged. Turns out the single call changed verifier
+pruning behavior to get everything to work. Reconstructing a minimal test
+case, the incorrect JIT dump looked as follows:
+
+ # bpftool p d j i 11346
+ 0xffffffffc0cba96c:
+ [...]
+ 21: movzbq 0x30(%rdi),%rax
+ 26: cmp $0xd,%rax
+ 2a: je 0x000000000000003a
+ 2c: xor %edx,%edx
+ 2e: movabs $0xffff89cc74e85800,%rsi
+ 38: jmp 0x0000000000000049
+ 3a: mov $0x2,%edx
+ 3f: movabs $0xffff89cc74e85800,%rsi
+ 49: mov -0x224(%rbp),%eax
+ 4f: cmp $0x20,%eax
+ 52: ja 0x0000000000000062
+ 54: add $0x1,%eax
+ 57: mov %eax,-0x224(%rbp)
+ 5d: jmpq 0xffffffffffff6911
+ 62: mov $0x1,%eax
+ [...]
+
+Hence, unexpectedly, the JIT emitted a direct jump even though a retpoline
+based one would have been needed, since in lines 2c and 3a we have different
+slot keys in BPF reg r3. The verifier log of the test case reveals what happened:
+
+ 0: (b7) r0 = 14
+ 1: (73) *(u8 *)(r1 +48) = r0
+ 2: (71) r0 = *(u8 *)(r1 +48)
+ 3: (15) if r0 == 0xd goto pc+4
+ R0_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 4: (b7) r3 = 0
+ 5: (18) r2 = 0xffff89cc74d54a00
+ 7: (05) goto pc+3
+ 11: (85) call bpf_tail_call#12
+ 12: (b7) r0 = 1
+ 13: (95) exit
+ from 3 to 8: R0_w=inv13 R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 8: (b7) r3 = 2
+ 9: (18) r2 = 0xffff89cc74d54a00
+ 11: safe
+ processed 13 insns (limit 1000000) [...]
+
+The second branch is pruned by the verifier since it is considered safe, but
+the issue is that record_func_key() couldn't have seen the index in line 3a
+and therefore decided that emitting a direct jump at this location was okay.
+
+Fix this by reusing our backtracking logic for precise scalar verification
+in order to prevent pruning on the slot key. This means the verifier will
+track the content of r3 all the way backwards and only prune if both scalars
+were unknown in the state equivalence check and therefore poisoned in the
+first place in record_func_key(). The range is [x,x] in the record_func_key()
+case since the slot would always have to be a constant immediate. Correct
+verification after the fix:
+
+ 0: (b7) r0 = 14
+ 1: (73) *(u8 *)(r1 +48) = r0
+ 2: (71) r0 = *(u8 *)(r1 +48)
+ 3: (15) if r0 == 0xd goto pc+4
+ R0_w=invP(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 4: (b7) r3 = 0
+ 5: (18) r2 = 0x0
+ 7: (05) goto pc+3
+ 11: (85) call bpf_tail_call#12
+ 12: (b7) r0 = 1
+ 13: (95) exit
+ from 3 to 8: R0_w=invP13 R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 8: (b7) r3 = 2
+ 9: (18) r2 = 0x0
+ 11: (85) call bpf_tail_call#12
+ 12: (b7) r0 = 1
+ 13: (95) exit
+ processed 15 insns (limit 1000000) [...]
+
+And correct corresponding JIT dump:
+
+ # bpftool p d j i 11
+ 0xffffffffc0dc34c4:
+ [...]
+ 21: movzbq 0x30(%rdi),%rax
+ 26: cmp $0xd,%rax
+ 2a: je 0x000000000000003a
+ 2c: xor %edx,%edx
+ 2e: movabs $0xffff9928b4c02200,%rsi
+ 38: jmp 0x0000000000000049
+ 3a: mov $0x2,%edx
+ 3f: movabs $0xffff9928b4c02200,%rsi
+ 49: cmp $0x4,%rdx
+ 4d: jae 0x0000000000000093
+ 4f: and $0x3,%edx
+ 52: mov %edx,%edx
+ 54: cmp %edx,0x24(%rsi)
+ 57: jbe 0x0000000000000093
+ 59: mov -0x224(%rbp),%eax
+ 5f: cmp $0x20,%eax
+ 62: ja 0x0000000000000093
+ 64: add $0x1,%eax
+ 67: mov %eax,-0x224(%rbp)
+ 6d: mov 0x110(%rsi,%rdx,8),%rax
+ 75: test %rax,%rax
+ 78: je 0x0000000000000093
+ 7a: mov 0x30(%rax),%rax
+ 7e: add $0x19,%rax
+ 82: callq 0x000000000000008e
+ 87: pause
+ 89: lfence
+ 8c: jmp 0x0000000000000087
+ 8e: mov %rax,(%rsp)
+ 92: retq
+ 93: mov $0x1,%eax
+ [...]
+
+Also explicitly add env->allow_ptr_leaks to fixup_bpf_calls() since
+backtracking is enabled under the former (direct jumps as well, but using a
+different test). In the case of only tracking different map pointers as in
+c93552c443eb ("bpf: properly enforce index mask to prevent out-of-bounds
+speculation"), pruning cannot make such short-cuts, nor can it if there are
+paths with scalar and non-scalar types as r3. mark_chain_precision() is only
+needed after we know that register_is_const(). If that is not the case, we
+already poison the key on the first path, and a non-const key in later paths
+will not match the scalar range in regsafe() either. Cilium NodePort testing
+passes fine as well now. Note that released kernels are not affected.
+
+Fixes: d2e4c1e6c294 ("bpf: Constant map key tracking for prog array pokes")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/ac43ffdeb7386c5bd688761ed266f3722bb39823.1576789878.git.daniel@iogearbox.net
+Acked-by: Gary Lin <glin@suse.com>
+---
+ kernel/bpf/verifier.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4134,6 +4134,7 @@ record_func_key(struct bpf_verifier_env
+ struct bpf_map *map = meta->map_ptr;
+ struct tnum range;
+ u64 val;
++ int err;
+
+ if (func_id != BPF_FUNC_tail_call)
+ return 0;
+@@ -4150,6 +4151,10 @@ record_func_key(struct bpf_verifier_env
+ return 0;
+ }
+
++ err = mark_chain_precision(env, BPF_REG_3);
++ if (err)
++ return err;
++
+ val = reg->var_off.value;
+ if (bpf_map_key_unseen(aux))
+ bpf_map_key_store(aux, val);
+@@ -9272,7 +9277,8 @@ static int fixup_bpf_calls(struct bpf_ve
+ insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+ aux = &env->insn_aux_data[i + delta];
+- if (prog->jit_requested && !expect_blinding &&
++ if (env->allow_ptr_leaks && !expect_blinding &&
++ prog->jit_requested &&
+ !bpf_map_key_poisoned(aux) &&
+ !bpf_map_ptr_poisoned(aux) &&
+ !bpf_map_ptr_unpriv(aux)) {
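
The fix is easiest to see from the BPF side. A hypothetical C reduction of
the reproducer described above (map name, section name and program body are
illustrative): two paths reach one bpf_tail_call() with different constant
keys, and without backtracking on r3 the verifier may prune the second path,
letting the JIT specialize the direct jump for the first key only.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("classifier")
int tail_call_two_keys(struct __sk_buff *skb)
{
        __u32 key = 0;

        if (skb->cb[0] == 0xd)  /* mirrors the r0 == 0xd test in the log */
                key = 2;
        /* both paths land here; key is 0 or 2 depending on the branch */
        bpf_tail_call(skb, &jmp_table, key);
        return 1;
}

char _license[] SEC("license") = "GPL";
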
diff --git a/patches.suse/bpf-Introduce-BPF_TRACE_x-helper-for-the-tracing-tes.patch b/patches.suse/bpf-Introduce-BPF_TRACE_x-helper-for-the-tracing-tes.patch
new file mode 100644
index 0000000000..532914c5ee
--- /dev/null
+++ b/patches.suse/bpf-Introduce-BPF_TRACE_x-helper-for-the-tracing-tes.patch
@@ -0,0 +1,541 @@
+From: Martin KaFai Lau <kafai@fb.com>
+Date: Sat, 23 Nov 2019 12:25:04 -0800
+Subject: bpf: Introduce BPF_TRACE_x helper for the tracing tests
+Patch-mainline: v5.5-rc1
+Git-commit: f9a7cf6eb17cd0110c8c47d9e7969fc2716e5772
+References: bsc#1155518
+
+For BPF_PROG_TYPE_TRACING, the bpf_prog's ctx is an array of u64.
+This patch borrows the idea from BPF_CALL_x in filter.h to
+convert a u64 to the arg type of the traced function.
+
+The new BPF_TRACE_x has an arg to specify the return type of a bpf_prog.
+It will be used in the future TCP-ops bpf_prog that may return "void".
+
+The new macros are defined in the new header file "bpf_trace_helpers.h".
+It is under selftests/bpf/ for now. It could be moved to libbpf later
+after seeing more upcoming non-tracing use cases.
+
+The tests are changed to use these new macros as well. Hence, the
+k[s]u8/16/32/64 typedefs are no longer needed and they are removed
+from bpf_helpers.h.
+
+Signed-off-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191123202504.1502696-1-kafai@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_helpers.h | 13 ---
+ tools/testing/selftests/bpf/bpf_trace_helpers.h | 58 +++++++++++++++
+ tools/testing/selftests/bpf/progs/fentry_test.c | 72 ++++---------------
+ tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 27 ++-----
+ tools/testing/selftests/bpf/progs/fexit_test.c | 83 +++++-----------------
+ tools/testing/selftests/bpf/progs/kfree_skb.c | 43 +++--------
+ tools/testing/selftests/bpf/progs/test_overhead.c | 16 +---
+ 7 files changed, 125 insertions(+), 187 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/bpf_trace_helpers.h
+
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -44,17 +44,4 @@ enum libbpf_pin_type {
+ LIBBPF_PIN_BY_NAME,
+ };
+
+-/* The following types should be used by BPF_PROG_TYPE_TRACING program to
+- * access kernel function arguments. BPF trampoline and raw tracepoints
+- * typecast arguments to 'unsigned long long'.
+- */
+-typedef int __attribute__((aligned(8))) ks32;
+-typedef char __attribute__((aligned(8))) ks8;
+-typedef short __attribute__((aligned(8))) ks16;
+-typedef long long __attribute__((aligned(8))) ks64;
+-typedef unsigned int __attribute__((aligned(8))) ku32;
+-typedef unsigned char __attribute__((aligned(8))) ku8;
+-typedef unsigned short __attribute__((aligned(8))) ku16;
+-typedef unsigned long long __attribute__((aligned(8))) ku64;
+-
+ #endif
+--- /dev/null
++++ b/tools/testing/selftests/bpf/bpf_trace_helpers.h
+@@ -0,0 +1,58 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __BPF_TRACE_HELPERS_H
++#define __BPF_TRACE_HELPERS_H
++
++#include "bpf_helpers.h"
++
++#define __BPF_MAP_0(i, m, v, ...) v
++#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i])
++#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_4(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__)
++#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__)
++
++/* BPF sizeof(void *) is always 8, so no need to cast to long first
++ * for ptr to avoid compiler warning.
++ */
++#define __BPF_CAST(t, a, ctx) (t) ctx
++#define __BPF_V void
++#define __BPF_N
++
++#define __BPF_DECL_ARGS(t, a, ctx) t a
++
++#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...) \
++static __always_inline ret_type \
++____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
++ \
++SEC(sec_name) \
++ret_type fname(__u64 *ctx) \
++{ \
++ return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\
++} \
++ \
++static __always_inline \
++ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
++
++#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__)
++#define BPF_TRACE_12(sec, fname, ...) BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__)
++
++#endif
+--- a/tools/testing/selftests/bpf/progs/fentry_test.c
++++ b/tools/testing/selftests/bpf/progs/fentry_test.c
+@@ -2,89 +2,53 @@
+ /* Copyright (c) 2019 Facebook */
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_trace_helpers.h"
+
+ char _license[] SEC("license") = "GPL";
+
+-struct test1 {
+- ks32 a;
+-};
+ static volatile __u64 test1_result;
+-SEC("fentry/bpf_fentry_test1")
+-int test1(struct test1 *ctx)
++BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a)
+ {
+- test1_result = ctx->a == 1;
++ test1_result = a == 1;
+ return 0;
+ }
+
+-struct test2 {
+- ks32 a;
+- ku64 b;
+-};
+ static volatile __u64 test2_result;
+-SEC("fentry/bpf_fentry_test2")
+-int test2(struct test2 *ctx)
++BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
+ {
+- test2_result = ctx->a == 2 && ctx->b == 3;
++ test2_result = a == 2 && b == 3;
+ return 0;
+ }
+
+-struct test3 {
+- ks8 a;
+- ks32 b;
+- ku64 c;
+-};
+ static volatile __u64 test3_result;
+-SEC("fentry/bpf_fentry_test3")
+-int test3(struct test3 *ctx)
++BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c)
+ {
+- test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6;
++ test3_result = a == 4 && b == 5 && c == 6;
+ return 0;
+ }
+
+-struct test4 {
+- void *a;
+- ks8 b;
+- ks32 c;
+- ku64 d;
+-};
+ static volatile __u64 test4_result;
+-SEC("fentry/bpf_fentry_test4")
+-int test4(struct test4 *ctx)
++BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
++ void *, a, char, b, int, c, __u64, d)
+ {
+- test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
+- ctx->d == 10;
++ test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10;
+ return 0;
+ }
+
+-struct test5 {
+- ku64 a;
+- void *b;
+- ks16 c;
+- ks32 d;
+- ku64 e;
+-};
+ static volatile __u64 test5_result;
+-SEC("fentry/bpf_fentry_test5")
+-int test5(struct test5 *ctx)
++BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
++ __u64, a, void *, b, short, c, int, d, __u64, e)
+ {
+- test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
+- ctx->d == 14 && ctx->e == 15;
++ test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
++ e == 15;
+ return 0;
+ }
+
+-struct test6 {
+- ku64 a;
+- void *b;
+- ks16 c;
+- ks32 d;
+- void *e;
+- ks64 f;
+-};
+ static volatile __u64 test6_result;
+-SEC("fentry/bpf_fentry_test6")
+-int test6(struct test6 *ctx)
++BPF_TRACE_6("fentry/bpf_fentry_test6", test6,
++ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f)
+ {
+- test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
+- ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21;
++ test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
++ e == (void *)20 && f == 21;
+ return 0;
+ }
+--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
++++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+@@ -2,46 +2,37 @@
+ /* Copyright (c) 2019 Facebook */
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_trace_helpers.h"
+
+ struct sk_buff {
+ unsigned int len;
+ };
+
+-struct args {
+- struct sk_buff *skb;
+- ks32 ret;
+-};
+ static volatile __u64 test_result;
+-SEC("fexit/test_pkt_access")
+-int test_main(struct args *ctx)
++BPF_TRACE_2("fexit/test_pkt_access", test_main,
++ struct sk_buff *, skb, int, ret)
+ {
+- struct sk_buff *skb = ctx->skb;
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+- if (len != 74 || ctx->ret != 0)
++ if (len != 74 || ret != 0)
+ return 0;
+ test_result = 1;
+ return 0;
+ }
+
+-struct args_subprog1 {
+- struct sk_buff *skb;
+- ks32 ret;
+-};
+ static volatile __u64 test_result_subprog1;
+-SEC("fexit/test_pkt_access_subprog1")
+-int test_subprog1(struct args_subprog1 *ctx)
++BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
++ struct sk_buff *, skb, int, ret)
+ {
+- struct sk_buff *skb = ctx->skb;
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+- if (len != 74 || ctx->ret != 148)
++ if (len != 74 || ret != 148)
+ return 0;
+ test_result_subprog1 = 1;
+ return 0;
+@@ -62,8 +53,8 @@ int test_subprog1(struct args_subprog1 *
+ * instead of accurate types.
+ */
+ struct args_subprog2 {
+- ku64 args[5];
+- ku64 ret;
++ __u64 args[5];
++ __u64 ret;
+ };
+ static volatile __u64 test_result_subprog2;
+ SEC("fexit/test_pkt_access_subprog2")
+--- a/tools/testing/selftests/bpf/progs/fexit_test.c
++++ b/tools/testing/selftests/bpf/progs/fexit_test.c
+@@ -2,97 +2,56 @@
+ /* Copyright (c) 2019 Facebook */
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_trace_helpers.h"
+
+ char _license[] SEC("license") = "GPL";
+
+-struct test1 {
+- ks32 a;
+- ks32 ret;
+-};
+ static volatile __u64 test1_result;
+-SEC("fexit/bpf_fentry_test1")
+-int test1(struct test1 *ctx)
++BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret)
+ {
+- test1_result = ctx->a == 1 && ctx->ret == 2;
++ test1_result = a == 1 && ret == 2;
+ return 0;
+ }
+
+-struct test2 {
+- ks32 a;
+- ku64 b;
+- ks32 ret;
+-};
+ static volatile __u64 test2_result;
+-SEC("fexit/bpf_fentry_test2")
+-int test2(struct test2 *ctx)
++BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret)
+ {
+- test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5;
++ test2_result = a == 2 && b == 3 && ret == 5;
+ return 0;
+ }
+
+-struct test3 {
+- ks8 a;
+- ks32 b;
+- ku64 c;
+- ks32 ret;
+-};
+ static volatile __u64 test3_result;
+-SEC("fexit/bpf_fentry_test3")
+-int test3(struct test3 *ctx)
++BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret)
+ {
+- test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 &&
+- ctx->ret == 15;
++ test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
+ return 0;
+ }
+
+-struct test4 {
+- void *a;
+- ks8 b;
+- ks32 c;
+- ku64 d;
+- ks32 ret;
+-};
+ static volatile __u64 test4_result;
+-SEC("fexit/bpf_fentry_test4")
+-int test4(struct test4 *ctx)
++BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
++ void *, a, char, b, int, c, __u64, d, int, ret)
+ {
+- test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
+- ctx->d == 10 && ctx->ret == 34;
++
++ test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10 &&
++ ret == 34;
+ return 0;
+ }
+
+-struct test5 {
+- ku64 a;
+- void *b;
+- ks16 c;
+- ks32 d;
+- ku64 e;
+- ks32 ret;
+-};
+ static volatile __u64 test5_result;
+-SEC("fexit/bpf_fentry_test5")
+-int test5(struct test5 *ctx)
++BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
++ __u64, a, void *, b, short, c, int, d, __u64, e, int, ret)
+ {
+- test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
+- ctx->d == 14 && ctx->e == 15 && ctx->ret == 65;
++ test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
++ e == 15 && ret == 65;
+ return 0;
+ }
+
+-struct test6 {
+- ku64 a;
+- void *b;
+- ks16 c;
+- ks32 d;
+- void *e;
+- ks64 f;
+- ks32 ret;
+-};
+ static volatile __u64 test6_result;
+-SEC("fexit/bpf_fentry_test6")
+-int test6(struct test6 *ctx)
++BPF_TRACE_7("fexit/bpf_fentry_test6", test6,
++ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f,
++ int, ret)
+ {
+- test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
+- ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 &&
+- ctx->ret == 111;
++ test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
++ e == (void *)20 && f == 21 && ret == 111;
+ return 0;
+ }
+--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
++++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
+@@ -4,6 +4,7 @@
+ #include <stdbool.h>
+ #include "bpf_helpers.h"
+ #include "bpf_endian.h"
++#include "bpf_trace_helpers.h"
+
+ char _license[] SEC("license") = "GPL";
+ struct {
+@@ -47,28 +48,18 @@ struct sk_buff {
+ char cb[48];
+ };
+
+-/* copy arguments from
+- * include/trace/events/skb.h:
+- * TRACE_EVENT(kfree_skb,
+- * TP_PROTO(struct sk_buff *skb, void *location),
+- *
+- * into struct below:
+- */
+-struct trace_kfree_skb {
+- struct sk_buff *skb;
+- void *location;
+-};
+-
+ struct meta {
+ int ifindex;
+ __u32 cb32_0;
+ __u8 cb8_0;
+ };
+
+-SEC("tp_btf/kfree_skb")
+-int trace_kfree_skb(struct trace_kfree_skb *ctx)
++/* TRACE_EVENT(kfree_skb,
++ * TP_PROTO(struct sk_buff *skb, void *location),
++ */
++BPF_TRACE_2("tp_btf/kfree_skb", trace_kfree_skb,
++ struct sk_buff *, skb, void *, location)
+ {
+- struct sk_buff *skb = ctx->skb;
+ struct net_device *dev;
+ struct callback_head *ptr;
+ void *func;
+@@ -123,17 +114,10 @@ static volatile struct {
+ bool fexit_test_ok;
+ } result;
+
+-struct eth_type_trans_args {
+- struct sk_buff *skb;
+- struct net_device *dev;
+- unsigned short protocol; /* return value available to fexit progs */
+-};
+-
+-SEC("fentry/eth_type_trans")
+-int fentry_eth_type_trans(struct eth_type_trans_args *ctx)
++BPF_TRACE_3("fentry/eth_type_trans", fentry_eth_type_trans,
++ struct sk_buff *, skb, struct net_device *, dev,
++ unsigned short, protocol)
+ {
+- struct sk_buff *skb = ctx->skb;
+- struct net_device *dev = ctx->dev;
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+@@ -148,11 +132,10 @@ int fentry_eth_type_trans(struct eth_typ
+ return 0;
+ }
+
+-SEC("fexit/eth_type_trans")
+-int fexit_eth_type_trans(struct eth_type_trans_args *ctx)
++BPF_TRACE_3("fexit/eth_type_trans", fexit_eth_type_trans,
++ struct sk_buff *, skb, struct net_device *, dev,
++ unsigned short, protocol)
+ {
+- struct sk_buff *skb = ctx->skb;
+- struct net_device *dev = ctx->dev;
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+@@ -163,7 +146,7 @@ int fexit_eth_type_trans(struct eth_type
+ /* fexit sees packet without L2 header that eth_type_trans should have
+ * consumed.
+ */
+- if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1)
++ if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
+ return 0;
+ result.fexit_test_ok = true;
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_overhead.c
++++ b/tools/testing/selftests/bpf/progs/test_overhead.c
+@@ -3,6 +3,7 @@
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
+ #include "bpf_tracing.h"
++#include "bpf_trace_helpers.h"
+
+ SEC("kprobe/__set_task_comm")
+ int prog1(struct pt_regs *ctx)
+@@ -22,20 +23,15 @@ int prog3(struct bpf_raw_tracepoint_args
+ return 0;
+ }
+
+-struct __set_task_comm_args {
+- struct task_struct *tsk;
+- const char *buf;
+- ku8 exec;
+-};
+-
+-SEC("fentry/__set_task_comm")
+-int prog4(struct __set_task_comm_args *ctx)
++struct task_struct;
++BPF_TRACE_3("fentry/__set_task_comm", prog4,
++ struct task_struct *, tsk, const char *, buf, __u8, exec)
+ {
+ return 0;
+ }
+
+-SEC("fexit/__set_task_comm")
+-int prog5(struct __set_task_comm_args *ctx)
++BPF_TRACE_3("fexit/__set_task_comm", prog5,
++ struct task_struct *, tsk, const char *, buf, __u8, exec)
+ {
+ return 0;
+ }
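
The macro expansion is mechanical. Assuming the definitions added above,
BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b) generates
roughly the following (whitespace adjusted), casting each raw u64 context
slot to the declared argument type:

static __always_inline int ____test2(int a, __u64 b);

SEC("fentry/bpf_fentry_test2")
int test2(__u64 *ctx)
{
        return ____test2((int)ctx[0], (__u64)ctx[1]);
}

static __always_inline int ____test2(int a, __u64 b)
{
        /* the body written after the macro invocation goes here */
        return 0;
}
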
diff --git a/patches.suse/bpf-clarify-when-bpf_trace_printk-discards-lines.patch b/patches.suse/bpf-clarify-when-bpf_trace_printk-discards-lines.patch
new file mode 100644
index 0000000000..fa7739416e
--- /dev/null
+++ b/patches.suse/bpf-clarify-when-bpf_trace_printk-discards-lines.patch
@@ -0,0 +1,34 @@
+From: Peter Wu <peter@lekensteyn.nl>
+Date: Wed, 21 Aug 2019 00:08:59 +0100
+Subject: bpf: clarify when bpf_trace_printk discards lines
+Patch-mainline: v5.4-rc1
+Git-commit: 55c33dfbeb831eb3ab7cc1a3e295b0d4d57f23a3
+References: bsc#1155518
+
+I opened /sys/kernel/tracing/trace once and kept reading from it.
+bpf_trace_printk somehow did not seem to work; no entries were appended
+to that trace file. It turns out that tracing is disabled when that file
+is open. Save the next person some time and document this.
+
+The trace file is described in Documentation/trace/ftrace.rst; however,
+the implication "tracing is disabled" did not immediately translate to
+"bpf_trace_printk silently discards entries".
+
+Signed-off-by: Peter Wu <peter@lekensteyn.nl>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ include/uapi/linux/bpf.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -580,6 +580,8 @@ union bpf_attr {
+ * limited to five).
+ *
+ * Each time the helper is called, it appends a line to the trace.
++ * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
++ * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
+ * The format of the trace is customizable, and the exact output
+ * one will get depends on the options set in
+ * *\/sys/kernel/debug/tracing/trace_options* (see also the
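
A minimal sketch of a program whose output is affected by the documented
behavior (the section name and body are illustrative, not from the patch):
its lines are dropped while trace is held open by a reader, but stream
normally through trace_pipe.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
        char fmt[] = "execve entered\n";

        bpf_trace_printk(fmt, sizeof(fmt));
        return 0;
}

char _license[] SEC("license") = "GPL";
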
diff --git a/patches.suse/bpf-fix-struct-pt_reg-typo-in-documentation.patch b/patches.suse/bpf-fix-struct-pt_reg-typo-in-documentation.patch
new file mode 100644
index 0000000000..3212cddc97
--- /dev/null
+++ b/patches.suse/bpf-fix-struct-pt_reg-typo-in-documentation.patch
@@ -0,0 +1,51 @@
+From: Peter Wu <peter@lekensteyn.nl>
+Date: Wed, 21 Aug 2019 00:08:58 +0100
+Subject: bpf: fix 'struct pt_reg' typo in documentation
+Patch-mainline: v5.4-rc1
+Git-commit: 8050a395112db5bedb7caa6cb673e711a3c04cd9
+References: bsc#1155518
+
+There is no 'struct pt_reg'.
+
+Signed-off-by: Peter Wu <peter@lekensteyn.nl>
+Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ include/uapi/linux/bpf.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 8aa6126f0b6e..267544e140be 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1018,7 +1018,7 @@ union bpf_attr {
+ * The realm of the route for the packet associated to *skb*, or 0
+ * if none was found.
+ *
+- * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
++ * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+@@ -1080,7 +1080,7 @@ union bpf_attr {
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+- * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags)
++ * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * Description
+ * Walk a user or a kernel stack and return its id. To achieve
+ * this, the helper needs *ctx*, which is a pointer to the context
+@@ -1729,7 +1729,7 @@ union bpf_attr {
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+- * int bpf_override_return(struct pt_reg *regs, u64 rc)
++ * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * Description
+ * Used for error injection, this helper uses kprobes to override
+ * the return value of the probed function, and to set it to *rc*.
+--
+2.24.0
+
diff --git a/patches.suse/bpf-libbpf-Add-kernel-version-section-parsing-back.patch b/patches.suse/bpf-libbpf-Add-kernel-version-section-parsing-back.patch
new file mode 100644
index 0000000000..6bd39ba526
--- /dev/null
+++ b/patches.suse/bpf-libbpf-Add-kernel-version-section-parsing-back.patch
@@ -0,0 +1,67 @@
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Fri, 18 Oct 2019 07:41:26 -0700
+Subject: bpf, libbpf: Add kernel version section parsing back
+Patch-mainline: v5.5-rc1
+Git-commit: 54b8625cd940b6baace0bd9b1cf26b2de68ba307
+References: bsc#1155518
+
+With commit "libbpf: stop enforcing kern_version,..." we removed the
+kernel version section parsing in favor of querying the kernel version
+via uname() and populating the version field with the result of the
+query. After this, any version sections were simply ignored.
+
+Unfortunately, the world of kernels is not so friendly. I've found some
+customized kernels where uname() does not match the in-kernel version.
+To fix this so programs can load in this environment, this patch adds
+back parsing of the section and, if it exists, uses the user-specified
+kernel version to override the uname() result. However, we keep most of
+the kernel uname() discovery bits so users are not required to insert
+the version except in these odd cases.
+
+Fixes: 5e61f27070292 ("libbpf: stop enforcing kern_version, populate it for users")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157140968634.9073.6407090804163937103.stgit@john-XPS-13-9370
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -656,6 +656,21 @@ bpf_object__init_license(struct bpf_obje
+ return 0;
+ }
+
++static int
++bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
++{
++ __u32 kver;
++
++ if (size != sizeof(kver)) {
++ pr_warning("invalid kver section in %s\n", obj->path);
++ return -LIBBPF_ERRNO__FORMAT;
++ }
++ memcpy(&kver, data, sizeof(kver));
++ obj->kern_version = kver;
++ pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
++ return 0;
++}
++
+ static int compare_bpf_map(const void *_a, const void *_b)
+ {
+ const struct bpf_map *a = _a;
+@@ -1573,7 +1588,11 @@ static int bpf_object__elf_collect(struc
+ if (err)
+ return err;
+ } else if (strcmp(name, "version") == 0) {
+- /* skip, we don't need it anymore */
++ err = bpf_object__init_kversion(obj,
++ data->d_buf,
++ data->d_size);
++ if (err)
++ return err;
+ } else if (strcmp(name, "maps") == 0) {
+ obj->efile.maps_shndx = idx;
+ } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
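
For reference, the section this parser handles is the classic explicit
kernel-version pin in a BPF object. A minimal, hypothetical example that
would override the uname()-derived default at load time:

#include <linux/types.h>
#include <linux/version.h>
#include "bpf_helpers.h"

__u32 _version SEC("version") = LINUX_VERSION_CODE;

char _license[] SEC("license") = "GPL";
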
diff --git a/patches.suse/bpf-sync-bpf.h-to-tools-1f8919b1.patch b/patches.suse/bpf-sync-bpf.h-to-tools-1f8919b1.patch
new file mode 100644
index 0000000000..fdf5eec750
--- /dev/null
+++ b/patches.suse/bpf-sync-bpf.h-to-tools-1f8919b1.patch
@@ -0,0 +1,60 @@
+From: Peter Wu <peter@lekensteyn.nl>
+Date: Wed, 21 Aug 2019 00:09:00 +0100
+Subject: bpf: sync bpf.h to tools/
+Patch-mainline: v5.4-rc1
+Git-commit: 1f8919b170318e7e13e303eedac363d44057995f
+References: bsc#1155518
+
+Fix a 'struct pt_reg' typo and clarify when bpf_trace_printk discards
+lines. Affects documentation only.
+
+Signed-off-by: Peter Wu <peter@lekensteyn.nl>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/include/uapi/linux/bpf.h | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 8aa6126f0b6e..b5889257cc33 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -580,6 +580,8 @@ union bpf_attr {
+ * limited to five).
+ *
+ * Each time the helper is called, it appends a line to the trace.
++ * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
++ * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
+ * The format of the trace is customizable, and the exact output
+ * one will get depends on the options set in
+ * *\/sys/kernel/debug/tracing/trace_options* (see also the
+@@ -1018,7 +1020,7 @@ union bpf_attr {
+ * The realm of the route for the packet associated to *skb*, or 0
+ * if none was found.
+ *
+- * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
++ * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+@@ -1080,7 +1082,7 @@ union bpf_attr {
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+- * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags)
++ * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * Description
+ * Walk a user or a kernel stack and return its id. To achieve
+ * this, the helper needs *ctx*, which is a pointer to the context
+@@ -1729,7 +1731,7 @@ union bpf_attr {
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+- * int bpf_override_return(struct pt_reg *regs, u64 rc)
++ * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * Description
+ * Used for error injection, this helper uses kprobes to override
+ * the return value of the probed function, and to set it to *rc*.
+--
+2.24.0
+
diff --git a/patches.suse/bpftool-Fix-bpftool-build-by-switching-to-bpf_object.patch b/patches.suse/bpftool-Fix-bpftool-build-by-switching-to-bpf_object.patch
new file mode 100644
index 0000000000..aaffbf6e12
--- /dev/null
+++ b/patches.suse/bpftool-Fix-bpftool-build-by-switching-to-bpf_object.patch
@@ -0,0 +1,131 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Mon, 7 Oct 2019 15:56:04 -0700
+Subject: bpftool: Fix bpftool build by switching to bpf_object__open_file()
+Patch-mainline: v5.5-rc1
+Git-commit: 32e3e58e4c5910bb0d6024d151c2f559bb7e973c
+References: bsc#1155518
+
+As part of libbpf commit 5e61f2707029 ("libbpf: stop enforcing
+kern_version, populate it for users"), the non-LIBBPF_API
+__bpf_object__open_xattr() API was removed from the libbpf.h header.
+This broke bpftool, which relied on that function. This patch fixes the
+build by switching to the newly added bpf_object__open_file(), which
+provides the same capabilities but is an official and future-proof API.
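+
+As a hedged sketch (mirroring the variables used in the diff below),
+the new open path boils down to:
+
+    /* open with explicit, extensible options instead of open_attr */
+    LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+        .relaxed_maps = relaxed_maps,
+    );
+    obj = bpf_object__open_file(file, &open_opts);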
+
+v1->v2:
+- fix prog_type shadowing (Stanislav).
+
+Fixes: 5e61f2707029 ("libbpf: stop enforcing kern_version, populate it for users")
+Reported-by: Stanislav Fomichev <sdf@google.com>
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Reviewed-by: Stanislav Fomichev <sdf@google.com>
+Link: https://lore.kernel.org/bpf/20191007225604.2006146-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/bpf/bpftool/main.c | 4 ++--
+ tools/bpf/bpftool/main.h | 2 +-
+ tools/bpf/bpftool/prog.c | 22 ++++++++++++----------
+ 3 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/tools/bpf/bpftool/main.c
++++ b/tools/bpf/bpftool/main.c
+@@ -27,7 +27,7 @@ bool json_output;
+ bool show_pinned;
+ bool block_mount;
+ bool verifier_logs;
+-int bpf_flags;
++bool relaxed_maps;
+ struct pinned_obj_table prog_table;
+ struct pinned_obj_table map_table;
+
+@@ -396,7 +396,7 @@ int main(int argc, char **argv)
+ show_pinned = true;
+ break;
+ case 'm':
+- bpf_flags = MAPS_RELAX_COMPAT;
++ relaxed_maps = true;
+ break;
+ case 'n':
+ block_mount = true;
+--- a/tools/bpf/bpftool/main.h
++++ b/tools/bpf/bpftool/main.h
+@@ -94,7 +94,7 @@ extern bool json_output;
+ extern bool show_pinned;
+ extern bool block_mount;
+ extern bool verifier_logs;
+-extern int bpf_flags;
++extern bool relaxed_maps;
+ extern struct pinned_obj_table prog_table;
+ extern struct pinned_obj_table map_table;
+
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1092,9 +1092,7 @@ free_data_in:
+ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ {
+ struct bpf_object_load_attr load_attr = { 0 };
+- struct bpf_object_open_attr open_attr = {
+- .prog_type = BPF_PROG_TYPE_UNSPEC,
+- };
++ enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
+ enum bpf_attach_type expected_attach_type;
+ struct map_replace *map_replace = NULL;
+ struct bpf_program *prog = NULL, *pos;
+@@ -1105,11 +1103,16 @@ static int load_with_options(int argc, c
+ const char *pinfile;
+ unsigned int i, j;
+ __u32 ifindex = 0;
++ const char *file;
+ int idx, err;
+
++ LIBBPF_OPTS(bpf_object_open_opts, open_opts,
++ .relaxed_maps = relaxed_maps,
++ );
++
+ if (!REQ_ARGS(2))
+ return -1;
+- open_attr.file = GET_ARG();
++ file = GET_ARG();
+ pinfile = GET_ARG();
+
+ while (argc) {
+@@ -1118,7 +1121,7 @@ static int load_with_options(int argc, c
+
+ NEXT_ARG();
+
+- if (open_attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
++ if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
+ p_err("program type already specified");
+ goto err_free_reuse_maps;
+ }
+@@ -1135,8 +1138,7 @@ static int load_with_options(int argc, c
+ strcat(type, *argv);
+ strcat(type, "/");
+
+- err = libbpf_prog_type_by_name(type,
+- &open_attr.prog_type,
++ err = libbpf_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0)
+@@ -1224,16 +1226,16 @@ static int load_with_options(int argc, c
+
+ set_max_rlimit();
+
+- obj = __bpf_object__open_xattr(&open_attr, bpf_flags);
++ obj = bpf_object__open_file(file, &open_opts);
+ if (IS_ERR_OR_NULL(obj)) {
+ p_err("failed to open object file");
+ goto err_free_reuse_maps;
+ }
+
+ bpf_object__for_each_program(pos, obj) {
+- enum bpf_prog_type prog_type = open_attr.prog_type;
++ enum bpf_prog_type prog_type = common_prog_type;
+
+- if (open_attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
++ if (prog_type == BPF_PROG_TYPE_UNSPEC) {
+ const char *sec_name = bpf_program__title(pos, false);
+
+ err = libbpf_prog_type_by_name(sec_name, &prog_type,
diff --git a/patches.suse/ext4-check-for-directory-entries-too-close-to-block-.patch b/patches.suse/ext4-check-for-directory-entries-too-close-to-block-.patch
new file mode 100644
index 0000000000..8e0396b4f9
--- /dev/null
+++ b/patches.suse/ext4-check-for-directory-entries-too-close-to-block-.patch
@@ -0,0 +1,43 @@
+From 109ba779d6cca2d519c5dd624a3276d03e21948e Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 2 Dec 2019 18:02:13 +0100
+Subject: [PATCH] ext4: check for directory entries too close to block end
+Git-commit: 109ba779d6cca2d519c5dd624a3276d03e21948e
+Patch-mainline: v5.5-rc3
+References: bsc#1157717 CVE-2019-19037
+
+ext4_check_dir_entry() currently does not catch a case when a directory
+entry ends so close to the block end that the header of the next
+directory entry would not fit in the remaining space. This can lead to
+directory iteration code trying to access an address beyond the end of
+the current buffer head, leading to an oops.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20191202170213.4761-3-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Acked-by: Jan Kara <jack@suse.cz>
+
+---
+ fs/ext4/dir.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 9fdd2b269d61..6305d5ec25af 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -81,6 +81,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+ error_msg = "rec_len is too small for name_len";
+ else if (unlikely(((char *) de - buf) + rlen > size))
+ error_msg = "directory entry overrun";
++ else if (unlikely(((char *) de - buf) + rlen >
++ size - EXT4_DIR_REC_LEN(1) &&
++ ((char *) de - buf) + rlen != size)) {
++ error_msg = "directory entry too close to block end";
++ }
+ else if (unlikely(le32_to_cpu(de->inode) >
+ le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
+ error_msg = "inode out of bounds";
+--
+2.16.4
+
diff --git a/patches.suse/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch b/patches.suse/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch
new file mode 100644
index 0000000000..ec42b29e44
--- /dev/null
+++ b/patches.suse/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch
@@ -0,0 +1,100 @@
+From 64d4ce892383b2ad6d782e080d25502f91bf2a38 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 2 Dec 2019 18:02:12 +0100
+Subject: [PATCH] ext4: fix ext4_empty_dir() for directories with holes
+Git-commit: 64d4ce892383b2ad6d782e080d25502f91bf2a38
+Patch-mainline: v5.5-rc3
+
+Function ext4_empty_dir() doesn't correctly handle directories with
+holes and crashes on bh->b_data dereference when bh is NULL. Reorganize
+the loop to use the 'offset' variable all the time instead of comparing
+pointers to the current direntry with the bh->b_data pointer. Also add
+stricter checking of the '.' and '..' directory entries to avoid
+entering the loop in a possibly invalid state on corrupted filesystems.
+
+References: CVE-2019-19037
+Cc: stable@vger.kernel.org
+Fixes: 4e19d6b65fb4 ("ext4: allow directory holes")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20191202170213.4761-2-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Acked-by: Jan Kara <jack@suse.cz>
+
+---
+ fs/ext4/namei.c | 32 ++++++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index a856997d87b5..9e6d14748b5a 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2822,7 +2822,7 @@ bool ext4_empty_dir(struct inode *inode)
+ {
+ unsigned int offset;
+ struct buffer_head *bh;
+- struct ext4_dir_entry_2 *de, *de1;
++ struct ext4_dir_entry_2 *de;
+ struct super_block *sb;
+
+ if (ext4_has_inline_data(inode)) {
+@@ -2847,19 +2847,25 @@ bool ext4_empty_dir(struct inode *inode)
+ return true;
+
+ de = (struct ext4_dir_entry_2 *) bh->b_data;
+- de1 = ext4_next_entry(de, sb->s_blocksize);
+- if (le32_to_cpu(de->inode) != inode->i_ino ||
+- le32_to_cpu(de1->inode) == 0 ||
+- strcmp(".", de->name) || strcmp("..", de1->name)) {
+- ext4_warning_inode(inode, "directory missing '.' and/or '..'");
++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++ 0) ||
++ le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
++ ext4_warning_inode(inode, "directory missing '.'");
++ brelse(bh);
++ return true;
++ }
++ offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
++ de = ext4_next_entry(de, sb->s_blocksize);
++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++ offset) ||
++ le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
++ ext4_warning_inode(inode, "directory missing '..'");
+ brelse(bh);
+ return true;
+ }
+- offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
+- ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
+- de = ext4_next_entry(de1, sb->s_blocksize);
++ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+ while (offset < inode->i_size) {
+- if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
++ if (!(offset & (sb->s_blocksize - 1))) {
+ unsigned int lblock;
+ brelse(bh);
+ lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -2870,12 +2876,11 @@ bool ext4_empty_dir(struct inode *inode)
+ }
+ if (IS_ERR(bh))
+ return true;
+- de = (struct ext4_dir_entry_2 *) bh->b_data;
+ }
++ de = (struct ext4_dir_entry_2 *) (bh->b_data +
++ (offset & (sb->s_blocksize - 1)));
+ if (ext4_check_dir_entry(inode, NULL, de, bh,
+ bh->b_data, bh->b_size, offset)) {
+- de = (struct ext4_dir_entry_2 *)(bh->b_data +
+- sb->s_blocksize);
+ offset = (offset | (sb->s_blocksize - 1)) + 1;
+ continue;
+ }
+@@ -2884,7 +2889,6 @@ bool ext4_empty_dir(struct inode *inode)
+ return false;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+- de = ext4_next_entry(de, sb->s_blocksize);
+ }
+ brelse(bh);
+ return true;
+--
+2.16.4
+
diff --git a/patches.suse/libbpf-Add-BPF-side-definitions-of-supported-field-r.patch b/patches.suse/libbpf-Add-BPF-side-definitions-of-supported-field-r.patch
new file mode 100644
index 0000000000..26f77d7a8a
--- /dev/null
+++ b/patches.suse/libbpf-Add-BPF-side-definitions-of-supported-field-r.patch
@@ -0,0 +1,62 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 11:28:48 -0700
+Subject: libbpf: Add BPF-side definitions of supported field relocation kinds
+Patch-mainline: v5.5-rc1
+Git-commit: 01340e31915bc73bf33a8f912ff1b74d514b8d79
+References: bsc#1155518
+
+Add an enum definition for the second argument (info_kind) of Clang's
+__builtin_preserve_field_info(). Currently only byte offset and
+existence are supported. The corresponding Clang changes introducing
+this built-in can be found at [0].
+
+ [0] https://reviews.llvm.org/D67980
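+
+A hedged usage sketch (the task pointer and field are placeholders, not
+part of this patch):
+
+    /* evaluates to 1 if 'state' exists in the target kernel's BTF */
+    if (bpf_core_field_exists(task->state))
+        bpf_core_read(&state, sizeof(state), &task->state);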
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191015182849.3922287-5-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_core_read.h | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -3,6 +3,28 @@
+ #define __BPF_CORE_READ_H__
+
+ /*
++ * enum bpf_field_info_kind is passed as a second argument into
++ * __builtin_preserve_field_info() built-in to get a specific aspect of
++ * a field, captured as a first argument. __builtin_preserve_field_info(field,
++ * info_kind) returns __u32 integer and produces BTF field relocation, which
++ * is understood and processed by libbpf during BPF object loading. See
++ * selftests/bpf for examples.
++ */
++enum bpf_field_info_kind {
++ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
++ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
++};
++
++/*
++ * Convenience macro to check that a field actually exists in the target kernel's BTF.
++ * Returns:
++ * 1, if matching field is present in target kernel;
++ * 0, if no matching field found.
++ */
++#define bpf_core_field_exists(field) \
++ __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
++
++/*
+ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+@@ -12,7 +34,7 @@
+ * a relocation, which records BTF type ID describing root struct/union and an
+ * accessor string which describes exact embedded field that was used to take
+ * an address. See detailed description of this relocation format and
+- * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
++ * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ *
+ * This relocation allows libbpf to adjust BPF instruction to use correct
+ * actual field offset, based on target kernel BTF type that matches original
diff --git a/patches.suse/libbpf-Add-BPF_CORE_READ-BPF_CORE_READ_INTO-helpers.patch b/patches.suse/libbpf-Add-BPF_CORE_READ-BPF_CORE_READ_INTO-helpers.patch
new file mode 100644
index 0000000000..c398746b73
--- /dev/null
+++ b/patches.suse/libbpf-Add-BPF_CORE_READ-BPF_CORE_READ_INTO-helpers.patch
@@ -0,0 +1,362 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 8 Oct 2019 10:59:41 -0700
+Subject: libbpf: Add BPF_CORE_READ/BPF_CORE_READ_INTO helpers
+Patch-mainline: v5.5-rc1
+Git-commit: 7db3822ab99157e16c41caa5e7d788834d5a3c7c
+References: bsc#1155518
+
+Add a few macros simplifying BCC-like multi-level probe reads, while also
+emitting CO-RE relocations for each read.
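+
+A short usage sketch (the task_struct field chain is illustrative):
+
+    /* like BCC's task->real_parent->tgid, but with CO-RE relocations */
+    pid_t ppid = BPF_CORE_READ(task, real_parent, tgid);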
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191008175942.1769476-7-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/Makefile | 3
+ tools/lib/bpf/bpf_core_read.h | 167 ++++++++++
+ tools/lib/bpf/bpf_helpers.h | 32 -
+ tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_ints.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_misc.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_mods.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c | 1
+ tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c | 1
+ 12 files changed, 187 insertions(+), 24 deletions(-)
+ create mode 100644 tools/lib/bpf/bpf_core_read.h
+
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -259,7 +259,8 @@ install_headers: bpf_helper_defs.h
+ $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
+- $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644);
++ $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
++ $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
+
+ install_pkgconfig: $(PC_FILE)
+ $(call QUIET_INSTALL, $(PC_FILE)) \
+--- /dev/null
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -0,0 +1,167 @@
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
++#ifndef __BPF_CORE_READ_H__
++#define __BPF_CORE_READ_H__
++
++/*
++ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
++ * relocation for source address using __builtin_preserve_access_index()
++ * built-in, provided by Clang.
++ *
++ * __builtin_preserve_access_index() takes as an argument an expression of
++ * taking an address of a field within struct/union. It makes compiler emit
++ * a relocation, which records BTF type ID describing root struct/union and an
++ * accessor string which describes exact embedded field that was used to take
++ * an address. See detailed description of this relocation format and
++ * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
++ *
++ * This relocation allows libbpf to adjust BPF instruction to use correct
++ * actual field offset, based on target kernel BTF type that matches original
++ * (local) BTF, used to record relocation.
++ */
++#define bpf_core_read(dst, sz, src) \
++ bpf_probe_read(dst, sz, \
++ (const void *)__builtin_preserve_access_index(src))
++
++/*
++ * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
++ * additionally emitting BPF CO-RE field relocation for specified source
++ * argument.
++ */
++#define bpf_core_read_str(dst, sz, src) \
++ bpf_probe_read_str(dst, sz, \
++ (const void *)__builtin_preserve_access_index(src))
++
++#define ___concat(a, b) a ## b
++#define ___apply(fn, n) ___concat(fn, n)
++#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
++
++/*
++ * return number of provided arguments; used for switch-based variadic macro
++ * definitions (see ___last, ___arrow, etc below)
++ */
++#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
++/*
++ * return 0 if no arguments are passed, N - otherwise; used for
++ * recursively-defined macros to specify termination (0) case, and generic
++ * (N) case (e.g., ___read_ptrs, ___core_read)
++ */
++#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
++
++#define ___last1(x) x
++#define ___last2(a, x) x
++#define ___last3(a, b, x) x
++#define ___last4(a, b, c, x) x
++#define ___last5(a, b, c, d, x) x
++#define ___last6(a, b, c, d, e, x) x
++#define ___last7(a, b, c, d, e, f, x) x
++#define ___last8(a, b, c, d, e, f, g, x) x
++#define ___last9(a, b, c, d, e, f, g, h, x) x
++#define ___last10(a, b, c, d, e, f, g, h, i, x) x
++#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)
++
++#define ___nolast2(a, _) a
++#define ___nolast3(a, b, _) a, b
++#define ___nolast4(a, b, c, _) a, b, c
++#define ___nolast5(a, b, c, d, _) a, b, c, d
++#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
++#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
++#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
++#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
++#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
++#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)
++
++#define ___arrow1(a) a
++#define ___arrow2(a, b) a->b
++#define ___arrow3(a, b, c) a->b->c
++#define ___arrow4(a, b, c, d) a->b->c->d
++#define ___arrow5(a, b, c, d, e) a->b->c->d->e
++#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
++#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
++#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
++#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
++#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
++#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
++
++#define ___type(...) typeof(___arrow(__VA_ARGS__))
++
++#define ___read(read_fn, dst, src_type, src, accessor) \
++ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
++
++/* "recursively" read a sequence of inner pointers using local __t var */
++#define ___rd_last(...) \
++ ___read(bpf_core_read, &__t, \
++ ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
++#define ___rd_p0(src) const void *__t = src;
++#define ___rd_p1(...) ___rd_p0(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___read_ptrs(src, ...) \
++ ___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)
++
++#define ___core_read0(fn, dst, src, a) \
++ ___read(fn, dst, ___type(src), src, a);
++#define ___core_readN(fn, dst, src, ...) \
++ ___read_ptrs(src, ___nolast(__VA_ARGS__)) \
++ ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
++ ___last(__VA_ARGS__));
++#define ___core_read(fn, dst, src, a, ...) \
++ ___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \
++ src, a, ##__VA_ARGS__)
++
++/*
++ * BPF_CORE_READ_INTO() is a more performance-conscious variant of
++ * BPF_CORE_READ(), in which final field is read into user-provided storage.
++ * See BPF_CORE_READ() below for more details on general usage.
++ */
++#define BPF_CORE_READ_INTO(dst, src, a, ...) \
++ ({ \
++ ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \
++ })
++
++/*
++ * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
++ * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
++ * corresponding error code) bpf_core_read_str() for final string read.
++ */
++#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \
++ ({ \
++ ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
++ })
++
++/*
++ * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
++ * when there are few pointer chasing steps.
++ * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
++ * int x = s->a.b.c->d.e->f->g;
++ * can be succinctly achieved using BPF_CORE_READ as:
++ * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
++ *
++ * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
++ * CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
++ * 1. const void *__t = s->a.b.c;
++ * 2. __t = __t->d.e;
++ * 3. __t = __t->f;
++ * 4. return __t->g;
++ *
++ * Equivalence is logical, because there is a heavy type casting/preservation
++ * involved, as well as all the reads are happening through bpf_probe_read()
++ * calls using __builtin_preserve_access_index() to emit CO-RE relocations.
++ *
++ * N.B. Only up to 9 "field accessors" are supported, which should be more
++ * than enough for any practical purpose.
++ */
++#define BPF_CORE_READ(src, a, ...) \
++ ({ \
++ ___type(src, a, ##__VA_ARGS__) __r; \
++ BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \
++ __r; \
++ })
++
++#endif
++
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -7,7 +7,7 @@
+ #define __uint(name, val) int (*name)[val]
+ #define __type(name, val) typeof(val) *name
+
+-/* helper macro to print out debug messages */
++/* Helper macro to print out debug messages */
+ #define bpf_printk(fmt, ...) \
+ ({ \
+ char ____fmt[] = fmt; \
+@@ -15,13 +15,19 @@
+ ##__VA_ARGS__); \
+ })
+
+-/* helper macro to place programs, maps, license in
++/*
++ * Helper macro to place programs, maps, license in
+ * different sections in elf_bpf file. Section names
+ * are interpreted by elf_bpf loader
+ */
+ #define SEC(NAME) __attribute__((section(NAME), used))
+
+-/* a helper structure used by eBPF C program
++#ifndef __always_inline
++#define __always_inline __attribute__((always_inline))
++#endif
++
++/*
++ * Helper structure used by eBPF C program
+ * to describe BPF map attributes to libbpf loader
+ */
+ struct bpf_map_def {
+@@ -32,24 +38,4 @@ struct bpf_map_def {
+ unsigned int map_flags;
+ };
+
+-/*
+- * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+- * relocation for source address using __builtin_preserve_access_index()
+- * built-in, provided by Clang.
+- *
+- * __builtin_preserve_access_index() takes as an argument an expression of
+- * taking an address of a field within struct/union. It makes compiler emit
+- * a relocation, which records BTF type ID describing root struct/union and an
+- * accessor string which describes exact embedded field that was used to take
+- * an address. See detailed description of this relocation format and
+- * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
+- *
+- * This relocation allows libbpf to adjust BPF instruction to use correct
+- * actual field offset, based on target kernel BTF type that matches original
+- * (local) BTF, used to record relocation.
+- */
+-#define bpf_core_read(dst, sz, src) \
+- bpf_probe_read(dst, sz, \
+- (const void *)__builtin_preserve_access_index(src))
+-
+ #endif
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
+@@ -4,6 +4,7 @@
+ #include <linux/bpf.h>
+ #include <stdint.h>
+ #include "bpf_helpers.h"
++#include "bpf_core_read.h"
+
+ char _license[] SEC("license") = "GPL";
+
diff --git a/patches.suse/libbpf-Add-auto-pinning-of-maps-when-loading-BPF-obj.patch b/patches.suse/libbpf-Add-auto-pinning-of-maps-when-loading-BPF-obj.patch
new file mode 100644
index 0000000000..5baa378aa3
--- /dev/null
+++ b/patches.suse/libbpf-Add-auto-pinning-of-maps-when-loading-BPF-obj.patch
@@ -0,0 +1,329 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 2 Nov 2019 12:09:41 +0100
+Subject: libbpf: Add auto-pinning of maps when loading BPF objects
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: 57a00f41644f20b11c12a27061d814655f633544
+References: bsc#1155518
+
+This adds support to libbpf for setting map pinning information as part of
+the BTF map declaration, to get automatic map pinning (and reuse) on load.
+The pinning type currently only supports a single PIN_BY_NAME mode, where
+each map will be pinned by its name in a path that can be overridden, but
+defaults to /sys/fs/bpf.
+
+Since auto-pinning only does something if any maps actually have a
+'pinning' BTF attribute set, we default the new option to enabled, on the
+assumption that seamless pinning is what most callers want.
+
+When a map has a pin_path set at load time, libbpf will compare it
+against the map pinned at that location (if any); if the attributes
+match, it will re-use that map instead of creating a new one. If no
+existing map is found, the newly created map will be pinned at that
+location instead.
+
+Programs wanting to customise the pinning can override the pinning paths
+using bpf_map__set_pin_path() before calling bpf_object__load() (including
+setting it to NULL to disable pinning of a particular map).
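+
+A minimal BTF-defined map using the new attribute might look like this
+(map name, sizes, and type are illustrative):
+
+    struct {
+        __uint(type, BPF_MAP_TYPE_HASH);
+        __uint(max_entries, 128);
+        __type(key, __u32);
+        __type(value, __u64);
+        __uint(pinning, LIBBPF_PIN_BY_NAME); /* -> /sys/fs/bpf/counters */
+    } counters SEC(".maps");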
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157269298092.394725.3966306029218559681.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_helpers.h | 6 +
+ tools/lib/bpf/libbpf.c | 146 +++++++++++++++++++++++++++++++++++++++++---
+ tools/lib/bpf/libbpf.h | 13 +++
+ 3 files changed, 156 insertions(+), 9 deletions(-)
+
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -38,4 +38,10 @@ struct bpf_map_def {
+ unsigned int map_flags;
+ };
+
++enum libbpf_pin_type {
++ LIBBPF_PIN_NONE,
++ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
++ LIBBPF_PIN_BY_NAME,
++};
++
+ #endif
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1093,10 +1093,32 @@ static bool get_map_field_int(const char
+ return true;
+ }
+
++static int build_map_pin_path(struct bpf_map *map, const char *path)
++{
++ char buf[PATH_MAX];
++ int err, len;
++
++ if (!path)
++ path = "/sys/fs/bpf";
++
++ len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
++ if (len < 0)
++ return -EINVAL;
++ else if (len >= PATH_MAX)
++ return -ENAMETOOLONG;
++
++ err = bpf_map__set_pin_path(map, buf);
++ if (err)
++ return err;
++
++ return 0;
++}
++
+ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
+ const struct btf_type *sec,
+ int var_idx, int sec_idx,
+- const Elf_Data *data, bool strict)
++ const Elf_Data *data, bool strict,
++ const char *pin_root_path)
+ {
+ const struct btf_type *var, *def, *t;
+ const struct btf_var_secinfo *vi;
+@@ -1272,6 +1294,30 @@ static int bpf_object__init_user_btf_map
+ }
+ map->def.value_size = sz;
+ map->btf_value_type_id = t->type;
++ } else if (strcmp(name, "pinning") == 0) {
++ __u32 val;
++ int err;
++
++ if (!get_map_field_int(map_name, obj->btf, def, m,
++ &val))
++ return -EINVAL;
++ pr_debug("map '%s': found pinning = %u.\n",
++ map_name, val);
++
++ if (val != LIBBPF_PIN_NONE &&
++ val != LIBBPF_PIN_BY_NAME) {
++ pr_warning("map '%s': invalid pinning value %u.\n",
++ map_name, val);
++ return -EINVAL;
++ }
++ if (val == LIBBPF_PIN_BY_NAME) {
++ err = build_map_pin_path(map, pin_root_path);
++ if (err) {
++ pr_warning("map '%s': couldn't build pin path.\n",
++ map_name);
++ return err;
++ }
++ }
+ } else {
+ if (strict) {
+ pr_warning("map '%s': unknown field '%s'.\n",
+@@ -1291,7 +1337,8 @@ static int bpf_object__init_user_btf_map
+ return 0;
+ }
+
+-static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
++static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
++ const char *pin_root_path)
+ {
+ const struct btf_type *sec = NULL;
+ int nr_types, i, vlen, err;
+@@ -1333,7 +1380,7 @@ static int bpf_object__init_user_btf_map
+ for (i = 0; i < vlen; i++) {
+ err = bpf_object__init_user_btf_map(obj, sec, i,
+ obj->efile.btf_maps_shndx,
+- data, strict);
++ data, strict, pin_root_path);
+ if (err)
+ return err;
+ }
+@@ -1341,7 +1388,8 @@ static int bpf_object__init_user_btf_map
+ return 0;
+ }
+
+-static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps)
++static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
++ const char *pin_root_path)
+ {
+ bool strict = !relaxed_maps;
+ int err;
+@@ -1350,7 +1398,7 @@ static int bpf_object__init_maps(struct
+ if (err)
+ return err;
+
+- err = bpf_object__init_user_btf_maps(obj, strict);
++ err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
+ if (err)
+ return err;
+
+@@ -1540,7 +1588,8 @@ static int bpf_object__sanitize_and_load
+ return 0;
+ }
+
+-static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps)
++static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
++ const char *pin_root_path)
+ {
+ Elf *elf = obj->efile.elf;
+ GElf_Ehdr *ep = &obj->efile.ehdr;
+@@ -1675,7 +1724,7 @@ static int bpf_object__elf_collect(struc
+ }
+ err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
+ if (!err)
+- err = bpf_object__init_maps(obj, relaxed_maps);
++ err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
+ if (!err)
+ err = bpf_object__sanitize_and_load_btf(obj);
+ if (!err)
+@@ -2131,6 +2180,66 @@ bpf_object__probe_caps(struct bpf_object
+ return 0;
+ }
+
++static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
++{
++ struct bpf_map_info map_info = {};
++ char msg[STRERR_BUFSIZE];
++ __u32 map_info_len;
++
++ map_info_len = sizeof(map_info);
++
++ if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
++ pr_warning("failed to get map info for map FD %d: %s\n",
++ map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
++ return false;
++ }
++
++ return (map_info.type == map->def.type &&
++ map_info.key_size == map->def.key_size &&
++ map_info.value_size == map->def.value_size &&
++ map_info.max_entries == map->def.max_entries &&
++ map_info.map_flags == map->def.map_flags);
++}
++
++static int
++bpf_object__reuse_map(struct bpf_map *map)
++{
++ char *cp, errmsg[STRERR_BUFSIZE];
++ int err, pin_fd;
++
++ pin_fd = bpf_obj_get(map->pin_path);
++ if (pin_fd < 0) {
++ err = -errno;
++ if (err == -ENOENT) {
++ pr_debug("found no pinned map to reuse at '%s'\n",
++ map->pin_path);
++ return 0;
++ }
++
++ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
++ pr_warning("couldn't retrieve pinned map '%s': %s\n",
++ map->pin_path, cp);
++ return err;
++ }
++
++ if (!map_is_reuse_compat(map, pin_fd)) {
++ pr_warning("couldn't reuse pinned map at '%s': parameter mismatch\n",
++ map->pin_path);
++ close(pin_fd);
++ return -EINVAL;
++ }
++
++ err = bpf_map__reuse_fd(map, pin_fd);
++ if (err) {
++ close(pin_fd);
++ return err;
++ }
++ map->pinned = true;
++ pr_debug("reused pinned map at '%s'\n", map->pin_path);
++
++ return 0;
++}
++
+ static int
+ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+ {
+@@ -2173,6 +2282,15 @@ bpf_object__create_maps(struct bpf_objec
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int *pfd = &map->fd;
+
++ if (map->pin_path) {
++ err = bpf_object__reuse_map(map);
++ if (err) {
++ pr_warning("error reusing pinned map %s\n",
++ map->name);
++ return err;
++ }
++ }
++
+ if (map->fd >= 0) {
+ pr_debug("skip map create (preset) %s: fd=%d\n",
+ map->name, map->fd);
+@@ -2251,6 +2369,15 @@ err_out:
+ }
+ }
+
++ if (map->pin_path && !map->pinned) {
++ err = bpf_map__pin(map, NULL);
++ if (err) {
++ pr_warning("failed to auto-pin map name '%s' at '%s'\n",
++ map->name, map->pin_path);
++ return err;
++ }
++ }
++
+ pr_debug("created map %s: fd=%d\n", map->name, *pfd);
+ }
+
+@@ -3628,6 +3755,7 @@ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+ {
++ const char *pin_root_path;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ const char *obj_name;
+@@ -3662,11 +3790,13 @@ __bpf_object__open(const char *path, con
+
+ obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
+ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
++ pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
+
+ CHECK_ERR(bpf_object__elf_init(obj), err, out);
+ CHECK_ERR(bpf_object__check_endianness(obj), err, out);
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
+- CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
++ CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
++ err, out);
+ CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
+ bpf_object__elf_finish(obj);
+
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -103,8 +103,13 @@ struct bpf_object_open_opts {
+ bool relaxed_maps;
+ /* process CO-RE relocations non-strictly, allowing them to fail */
+ bool relaxed_core_relocs;
++ /* maps that set the 'pinning' attribute in their definition will have
++ * their pin_path attribute set to a file in this directory, and be
++ * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
++ */
++ const char *pin_root_path;
+ };
+-#define bpf_object_open_opts__last_field relaxed_core_relocs
++#define bpf_object_open_opts__last_field pin_root_path
+
+ LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+ LIBBPF_API struct bpf_object *
+@@ -125,6 +130,12 @@ int bpf_object__section_size(const struc
+ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+ __u32 *off);
+
++enum libbpf_pin_type {
++ LIBBPF_PIN_NONE,
++ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
++ LIBBPF_PIN_BY_NAME,
++};
++
+ /* pin_maps and unpin_maps can both be called with a NULL path, in which case
+ * they will use the pin_path attribute of each map (and ignore all maps that
+ * don't have a pin_path set).
diff --git a/patches.suse/libbpf-Add-bpf_program__get_-type-expected_attach_ty.patch b/patches.suse/libbpf-Add-bpf_program__get_-type-expected_attach_ty.patch
new file mode 100644
index 0000000000..fa83d111db
--- /dev/null
+++ b/patches.suse/libbpf-Add-bpf_program__get_-type-expected_attach_ty.patch
@@ -0,0 +1,73 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:38:57 -0700
+Subject: libbpf: Add bpf_program__get_{type, expected_attach_type} APIs
+Patch-mainline: v5.5-rc1
+Git-commit: f1eead9e3ceef67b98be4b55ed1bfcfa4497b7db
+References: bsc#1155518
+
+There are bpf_program__set_type() and
+bpf_program__set_expected_attach_type(), but no corresponding getters,
+which seems rather incomplete. Fix this.
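+
+Illustrative use of the new getters (the program handle is assumed to
+come from an opened object):
+
+    /* only force a type if one wasn't already set from the section name */
+    if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC)
+        bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);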
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 11 +++++++++++
+ tools/lib/bpf/libbpf.h | 5 +++++
+ tools/lib/bpf/libbpf.map | 2 ++
+ 3 files changed, 18 insertions(+)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4468,6 +4468,11 @@ int bpf_program__nth_fd(const struct bpf
+ return fd;
+ }
+
++enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
++{
++ return prog->type;
++}
++
+ void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+ {
+ prog->type = type;
+@@ -4502,6 +4507,12 @@ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PR
+ BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
+ BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+
++enum bpf_attach_type
++bpf_program__get_expected_attach_type(struct bpf_program *prog)
++{
++ return prog->expected_attach_type;
++}
++
+ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+ enum bpf_attach_type type)
+ {
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -302,8 +302,13 @@ LIBBPF_API int bpf_program__set_sched_cl
+ LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
+ LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
+ LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
++
++LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
+ LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
++
++LIBBPF_API enum bpf_attach_type
++bpf_program__get_expected_attach_type(struct bpf_program *prog);
+ LIBBPF_API void
+ bpf_program__set_expected_attach_type(struct bpf_program *prog,
+ enum bpf_attach_type type);
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -195,4 +195,6 @@ LIBBPF_0.0.6 {
+ global:
+ bpf_object__open_file;
+ bpf_object__open_mem;
++ bpf_program__get_expected_attach_type;
++ bpf_program__get_type;
+ } LIBBPF_0.0.5;
diff --git a/patches.suse/libbpf-Add-cscope-and-tags-targets-to-Makefile.patch b/patches.suse/libbpf-Add-cscope-and-tags-targets-to-Makefile.patch
new file mode 100644
index 0000000000..616ced719b
--- /dev/null
+++ b/patches.suse/libbpf-Add-cscope-and-tags-targets-to-Makefile.patch
@@ -0,0 +1,69 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Fri, 4 Oct 2019 17:34:44 +0200
+Subject: libbpf: Add cscope and tags targets to Makefile
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: a9eb048d5615152dc4b8aedb7e704a4e59bc2205
+References: bsc#1155518
+
+Using cscope and/or TAGS files for navigating the source code is useful.
+Add simple targets to the Makefile to generate the index files for both
+tools.
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Tested-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20191004153444.1711278-1-toke@redhat.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/.gitignore | 3 +++
+ tools/lib/bpf/Makefile | 12 +++++++++++-
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/.gitignore
++++ b/tools/lib/bpf/.gitignore
+@@ -3,3 +3,6 @@ libbpf.pc
+ FEATURE-DUMP.libbpf
+ test_libbpf
+ libbpf.so.*
++TAGS
++tags
++cscope.*
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -139,6 +139,8 @@ LIB_TARGET := $(addprefix $(OUTPUT),$(LI
+ LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+ PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
+
++TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags)
++
+ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
+@@ -271,7 +273,7 @@ clean:
+
+
+
+-PHONY += force elfdep bpfdep
++PHONY += force elfdep bpfdep cscope tags
+ force:
+
+ elfdep:
+@@ -280,6 +282,14 @@ elfdep:
+ bpfdep:
+ @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
+
++cscope:
++ ls *.c *.h > cscope.files
++ cscope -b -q -I $(srctree)/include -f cscope.out
++
++tags:
++ rm -f TAGS tags
++ ls *.c *.h | xargs $(TAGS_PROG) -a
++
+ # Declare the contents of the .PHONY variable as phony. We keep that
+ # information in a variable so we can use it in if_changed and friends.
+ .PHONY: $(PHONY)
diff --git a/patches.suse/libbpf-Add-getter-for-program-size.patch b/patches.suse/libbpf-Add-getter-for-program-size.patch
new file mode 100644
index 0000000000..4f5701673a
--- /dev/null
+++ b/patches.suse/libbpf-Add-getter-for-program-size.patch
@@ -0,0 +1,61 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 9 Nov 2019 21:37:32 +0100
+Subject: libbpf: Add getter for program size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: 1a734efe06948c17122808f74f0c8cc550c10cf5
+References: bsc#1155518
+
+This adds a new getter for the BPF program size (in bytes). This is useful
+for a caller that is trying to predict how much memory will be locked by
+loading a BPF object into the kernel.
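+
+For example (sketch only; 'obj' is an already-opened bpf_object):
+
+    struct bpf_program *pos;
+    size_t total = 0;
+
+    bpf_object__for_each_program(pos, obj)
+        total += bpf_program__size(pos); /* instruction bytes per prog */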
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/157333185272.88376.10996937115395724683.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 5 +++++
+ tools/lib/bpf/libbpf.h | 3 +++
+ tools/lib/bpf/libbpf.map | 1 +
+ 3 files changed, 9 insertions(+)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4670,6 +4670,11 @@ int bpf_program__fd(const struct bpf_pro
+ return bpf_program__nth_fd(prog, 0);
+ }
+
++size_t bpf_program__size(const struct bpf_program *prog)
++{
++ return prog->insns_cnt * sizeof(struct bpf_insn);
++}
++
+ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
+ bpf_program_prep_t prep)
+ {
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -214,6 +214,9 @@ LIBBPF_API void bpf_program__set_ifindex
+ LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
+ bool needs_copy);
+
++/* returns program size in bytes */
++LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
++
+ LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
+ __u32 kern_version);
+ LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -202,4 +202,5 @@ LIBBPF_0.0.6 {
+ bpf_program__get_type;
+ bpf_program__is_tracing;
+ bpf_program__set_tracing;
++ bpf_program__size;
+ } LIBBPF_0.0.5;
diff --git a/patches.suse/libbpf-Add-support-for-attaching-BPF-programs-to-oth.patch b/patches.suse/libbpf-Add-support-for-attaching-BPF-programs-to-oth.patch
new file mode 100644
index 0000000000..adcb1acd5b
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-for-attaching-BPF-programs-to-oth.patch
@@ -0,0 +1,224 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:18 -0800
+Subject: libbpf: Add support for attaching BPF programs to other BPF programs
+Patch-mainline: v5.5-rc1
+Git-commit: e7bf94dbb882b7d679a6a18e40e4f28076eb249f
+References: bsc#1155518
+
+Extend the libbpf API to pass attach_prog_fd into bpf_object__open().
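+
+A hedged sketch of the new option (the target FD and object file name
+are invented for illustration):
+
+    LIBBPF_OPTS(bpf_object_open_opts, opts,
+        .attach_prog_fd = target_prog_fd, /* assumed: FD of prog to attach to */
+    );
+    struct bpf_object *obj = bpf_object__open_file("fentry_prog.o", &opts);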
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-19-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/include/uapi/linux/bpf.h | 1
+ tools/lib/bpf/bpf.c | 8 ++--
+ tools/lib/bpf/bpf.h | 5 ++
+ tools/lib/bpf/libbpf.c | 71 ++++++++++++++++++++++++++++++++++-------
+ tools/lib/bpf/libbpf.h | 3 +
+ 5 files changed, 71 insertions(+), 17 deletions(-)
+
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -425,6 +425,7 @@ union bpf_attr {
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
+ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
++ __u32 attach_prog_fd; /* 0 to attach to vmlinux */
+ };
+
+ struct { /* anonymous struct used by BPF_OBJ_* commands */
+--- a/tools/lib/bpf/bpf.c
++++ b/tools/lib/bpf/bpf.c
+@@ -228,10 +228,13 @@ int bpf_load_program_xattr(const struct
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = load_attr->prog_type;
+ attr.expected_attach_type = load_attr->expected_attach_type;
+- if (attr.prog_type == BPF_PROG_TYPE_TRACING)
++ if (attr.prog_type == BPF_PROG_TYPE_TRACING) {
+ attr.attach_btf_id = load_attr->attach_btf_id;
+- else
++ attr.attach_prog_fd = load_attr->attach_prog_fd;
++ } else {
+ attr.prog_ifindex = load_attr->prog_ifindex;
++ attr.kern_version = load_attr->kern_version;
++ }
+ attr.insn_cnt = (__u32)load_attr->insns_cnt;
+ attr.insns = ptr_to_u64(load_attr->insns);
+ attr.license = ptr_to_u64(load_attr->license);
+@@ -245,7 +248,6 @@ int bpf_load_program_xattr(const struct
+ attr.log_size = 0;
+ }
+
+- attr.kern_version = load_attr->kern_version;
+ attr.prog_btf_fd = load_attr->prog_btf_fd;
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -77,7 +77,10 @@ struct bpf_load_program_attr {
+ const struct bpf_insn *insns;
+ size_t insns_cnt;
+ const char *license;
+- __u32 kern_version;
++ union {
++ __u32 kern_version;
++ __u32 attach_prog_fd;
++ };
+ union {
+ __u32 prog_ifindex;
+ __u32 attach_btf_id;
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -189,6 +189,7 @@ struct bpf_program {
+
+ enum bpf_attach_type expected_attach_type;
+ __u32 attach_btf_id;
++ __u32 attach_prog_fd;
+ void *func_info;
+ __u32 func_info_rec_size;
+ __u32 func_info_cnt;
+@@ -3570,8 +3571,13 @@ load_program(struct bpf_program *prog, s
+ load_attr.insns = insns;
+ load_attr.insns_cnt = insns_cnt;
+ load_attr.license = license;
+- load_attr.kern_version = kern_version;
+- load_attr.prog_ifindex = prog->prog_ifindex;
++ if (prog->type == BPF_PROG_TYPE_TRACING) {
++ load_attr.attach_prog_fd = prog->attach_prog_fd;
++ load_attr.attach_btf_id = prog->attach_btf_id;
++ } else {
++ load_attr.kern_version = kern_version;
++ load_attr.prog_ifindex = prog->prog_ifindex;
++ }
+ /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
+ if (prog->obj->btf_ext)
+ btf_fd = bpf_object__btf_fd(prog->obj);
+@@ -3586,7 +3592,6 @@ load_program(struct bpf_program *prog, s
+ load_attr.line_info_cnt = prog->line_info_cnt;
+ load_attr.log_level = prog->log_level;
+ load_attr.prog_flags = prog->prog_flags;
+- load_attr.attach_btf_id = prog->attach_btf_id;
+
+ retry_load:
+ log_buf = malloc(log_buf_size);
+@@ -3749,9 +3754,9 @@ bpf_object__load_progs(struct bpf_object
+ return 0;
+ }
+
+-static int libbpf_attach_btf_id_by_name(const char *name,
+- enum bpf_attach_type attach_type);
+-
++static int libbpf_find_attach_btf_id(const char *name,
++ enum bpf_attach_type attach_type,
++ __u32 attach_prog_fd);
+ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+@@ -3762,6 +3767,7 @@ __bpf_object__open(const char *path, con
+ const char *obj_name;
+ char tmp_name[64];
+ bool relaxed_maps;
++ __u32 attach_prog_fd;
+ int err;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+@@ -3792,6 +3798,7 @@ __bpf_object__open(const char *path, con
+ obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
+ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+ pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
++ attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
+
+ CHECK_ERR(bpf_object__elf_init(obj), err, out);
+ CHECK_ERR(bpf_object__check_endianness(obj), err, out);
+@@ -3816,11 +3823,13 @@ __bpf_object__open(const char *path, con
+ bpf_program__set_type(prog, prog_type);
+ bpf_program__set_expected_attach_type(prog, attach_type);
+ if (prog_type == BPF_PROG_TYPE_TRACING) {
+- err = libbpf_attach_btf_id_by_name(prog->section_name,
+- attach_type);
++ err = libbpf_find_attach_btf_id(prog->section_name,
++ attach_type,
++ attach_prog_fd);
+ if (err <= 0)
+ goto out;
+ prog->attach_btf_id = err;
++ prog->attach_prog_fd = attach_prog_fd;
+ }
+ }
+
+@@ -4974,8 +4983,42 @@ int libbpf_find_vmlinux_btf_id(const cha
+ return err;
+ }
+
+-static int libbpf_attach_btf_id_by_name(const char *name,
+- enum bpf_attach_type attach_type)
++static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
++{
++ struct bpf_prog_info_linear *info_linear;
++ struct bpf_prog_info *info;
++ struct btf *btf = NULL;
++ int err = -EINVAL;
++
++ info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
++ if (IS_ERR_OR_NULL(info_linear)) {
++ pr_warning("failed get_prog_info_linear for FD %d\n",
++ attach_prog_fd);
++ return -EINVAL;
++ }
++ info = &info_linear->info;
++ if (!info->btf_id) {
++ pr_warning("The target program doesn't have BTF\n");
++ goto out;
++ }
++ if (btf__get_from_id(info->btf_id, &btf)) {
++ pr_warning("Failed to get BTF of the program\n");
++ goto out;
++ }
++ err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
++ btf__free(btf);
++ if (err <= 0) {
++ pr_warning("%s is not found in prog's BTF\n", name);
++ goto out;
++ }
++out:
++ free(info_linear);
++ return err;
++}
++
++static int libbpf_find_attach_btf_id(const char *name,
++ enum bpf_attach_type attach_type,
++ __u32 attach_prog_fd)
+ {
+ int i, err;
+
+@@ -4987,8 +5030,12 @@ static int libbpf_attach_btf_id_by_name(
+ continue;
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+- err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
+- attach_type);
++ if (attach_prog_fd)
++ err = libbpf_find_prog_btf_id(name + section_names[i].len,
++ attach_prog_fd);
++ else
++ err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
++ attach_type);
+ if (err <= 0)
+ pr_warning("%s is not found in vmlinux BTF\n", name);
+ return err;
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -108,8 +108,9 @@ struct bpf_object_open_opts {
+ * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
+ */
+ const char *pin_root_path;
++ __u32 attach_prog_fd;
+ };
+-#define bpf_object_open_opts__last_field pin_root_path
++#define bpf_object_open_opts__last_field attach_prog_fd
+
+ LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+ LIBBPF_API struct bpf_object *
diff --git a/patches.suse/libbpf-Add-support-for-field-existance-CO-RE-relocat.patch b/patches.suse/libbpf-Add-support-for-field-existance-CO-RE-relocat.patch
new file mode 100644
index 0000000000..4891b227ca
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-for-field-existance-CO-RE-relocat.patch
@@ -0,0 +1,176 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 11:28:47 -0700
+Subject: libbpf: Add support for field existence CO-RE relocation
+Patch-mainline: v5.5-rc1
+Git-commit: 62561eb442bd095f06534ce637b116b278e5e912
+References: bsc#1155518
+
+Add support for the BPF_FIELD_EXISTS relocation kind to detect existence of
+a captured field in a destination BTF, allowing conditional logic to
+handle incompatible differences between kernels.
+
+Also introduce an opt-in relaxed CO-RE relocation handling option, which
+makes libbpf emit a warning for a failed relocation but proceed with the
+remaining ones. The instruction whose relocation failed is patched with a
+(u32)-1 value.
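+
+For illustration, a minimal sketch of the intended usage (the task/prio
+field and the object path are hypothetical, not part of this patch):
+
+	/* userspace: opt in to relaxed CO-RE relocation handling */
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+		.relaxed_core_relocs = true,
+	);
+	struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
+
+	/* BPF side: guard access to a field that may not exist */
+	if (bpf_core_field_exists(task->prio))
+		prio = BPF_CORE_READ(task, prio);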
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191015182849.3922287-4-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 74 ++++++++++++++++++++++++++++++++++++++-----------
+ tools/lib/bpf/libbpf.h | 4 +-
+ 2 files changed, 61 insertions(+), 17 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -249,6 +249,7 @@ struct bpf_object {
+
+ bool loaded;
+ bool has_pseudo_calls;
++ bool relaxed_core_relocs;
+
+ /*
+ * Information when doing elf related work. Only valid if fd
+@@ -2771,26 +2772,54 @@ static int bpf_core_spec_match(struct bp
+
+ /*
+ * Patch relocatable BPF instruction.
+- * Expected insn->imm value is provided for validation, as well as the new
+- * relocated value.
++ *
++ * Patched value is determined by relocation kind and target specification.
++ * For a field existence relocation, the target spec will be NULL if the
++ * field is not found.
++ * Expected insn->imm value is determined using relocation kind and local
++ * spec, and is checked before patching instruction. If actual insn->imm value
++ * is wrong, bail out with error.
+ *
+ * Currently three kinds of BPF instructions are supported:
+ * 1. rX = <imm> (assignment with immediate operand);
+ * 2. rX += <imm> (arithmetic operations with immediate operand);
+- * 3. *(rX) = <imm> (indirect memory assignment with immediate operand).
+- *
+- * If actual insn->imm value is wrong, bail out.
+ */
+-static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
+- __u32 orig_off, __u32 new_off)
++static int bpf_core_reloc_insn(struct bpf_program *prog,
++ const struct bpf_field_reloc *relo,
++ const struct bpf_core_spec *local_spec,
++ const struct bpf_core_spec *targ_spec)
+ {
++ __u32 orig_val, new_val;
+ struct bpf_insn *insn;
+ int insn_idx;
+ __u8 class;
+
+- if (insn_off % sizeof(struct bpf_insn))
++ if (relo->insn_off % sizeof(struct bpf_insn))
++ return -EINVAL;
++ insn_idx = relo->insn_off / sizeof(struct bpf_insn);
++
++ switch (relo->kind) {
++ case BPF_FIELD_BYTE_OFFSET:
++ orig_val = local_spec->offset;
++ if (targ_spec) {
++ new_val = targ_spec->offset;
++ } else {
++ pr_warning("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n",
++ bpf_program__title(prog, false), insn_idx,
++ orig_val, -1);
++ new_val = (__u32)-1;
++ }
++ break;
++ case BPF_FIELD_EXISTS:
++ orig_val = 1; /* can't generate EXISTS relo w/o local field */
++ new_val = targ_spec ? 1 : 0;
++ break;
++ default:
++ pr_warning("prog '%s': unknown relo %d at insn #%d'\n",
++ bpf_program__title(prog, false),
++ relo->kind, insn_idx);
+ return -EINVAL;
+- insn_idx = insn_off / sizeof(struct bpf_insn);
++ }
+
+ insn = &prog->insns[insn_idx];
+ class = BPF_CLASS(insn->code);
+@@ -2798,12 +2827,12 @@ static int bpf_core_reloc_insn(struct bp
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (BPF_SRC(insn->code) != BPF_K)
+ return -EINVAL;
+- if (insn->imm != orig_off)
++ if (insn->imm != orig_val)
+ return -EINVAL;
+- insn->imm = new_off;
++ insn->imm = new_val;
+ pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
+ bpf_program__title(prog, false),
+- insn_idx, orig_off, new_off);
++ insn_idx, orig_val, new_val);
+ } else {
+ pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
+ bpf_program__title(prog, false),
+@@ -2811,6 +2840,7 @@ static int bpf_core_reloc_insn(struct bp
+ insn->off, insn->imm);
+ return -EINVAL;
+ }
++
+ return 0;
+ }
+
+@@ -3087,15 +3117,26 @@ static int bpf_core_reloc_field(struct b
+ cand_ids->data[j++] = cand_spec.spec[0].type_id;
+ }
+
+- cand_ids->len = j;
+- if (cand_ids->len == 0) {
++ /*
++ * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
++ * requested, it's expected that we might not find any candidates.
++ * In this case, if the field wasn't found in any candidate, the list of
++ * candidates shouldn't change at all; we'll just handle the relocation
++ * appropriately, depending on the relo's kind.
++ */
++ if (j > 0)
++ cand_ids->len = j;
++
++ if (j == 0 && !prog->obj->relaxed_core_relocs &&
++ relo->kind != BPF_FIELD_EXISTS) {
+ pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
+ prog_name, relo_idx, local_id, local_name, spec_str);
+ return -ESRCH;
+ }
+
+- err = bpf_core_reloc_insn(prog, relo->insn_off,
+- local_spec.offset, targ_spec.offset);
++ /* bpf_core_reloc_insn should know how to handle missing targ_spec */
++ err = bpf_core_reloc_insn(prog, relo, &local_spec,
++ j ? &targ_spec : NULL);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
+ prog_name, relo_idx, relo->insn_off, err);
+@@ -3587,6 +3628,7 @@ __bpf_object__open(const char *path, con
+ if (IS_ERR(obj))
+ return obj;
+
++ obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
+ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+
+ CHECK_ERR(bpf_object__elf_init(obj), err, out);
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -96,8 +96,10 @@ struct bpf_object_open_opts {
+ const char *object_name;
+ /* parse map definitions non-strictly, allowing extra attributes/data */
+ bool relaxed_maps;
++ /* process CO-RE relocations non-strictly, allowing them to fail */
++ bool relaxed_core_relocs;
+ };
+-#define bpf_object_open_opts__last_field relaxed_maps
++#define bpf_object_open_opts__last_field relaxed_core_relocs
+
+ LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+ LIBBPF_API struct bpf_object *
diff --git a/patches.suse/libbpf-Add-support-for-field-size-relocations.patch b/patches.suse/libbpf-Add-support-for-field-size-relocations.patch
new file mode 100644
index 0000000000..60b175d34a
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-for-field-size-relocations.patch
@@ -0,0 +1,116 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 1 Nov 2019 15:28:08 -0700
+Subject: libbpf: Add support for field size relocations
+Patch-mainline: v5.5-rc1
+Git-commit: 94f060e98495e26fd02e18c14e78c61f0d643fd6
+References: bsc#1155518
+
+Add a bpf_core_field_size() macro, capturing a relocation against a field's
+size. Adjust bits of internal libbpf relocation logic to allow capturing size
+relocations of various field types: arrays, structs/unions, enums, etc.
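+
+A rough usage sketch (struct/field chosen for illustration only):
+
+	struct task_struct *task = (void *)bpf_get_current_task();
+	char buf[16] = {};
+	__u32 sz = bpf_core_field_size(task->comm);
+
+	if (sz > sizeof(buf))
+		sz = sizeof(buf);
+	bpf_probe_read(buf, sz, task->comm);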
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191101222810.1246166-4-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_core_read.h | 7 +++++++
+ tools/lib/bpf/libbpf.c | 40 ++++++++++++++++++++++++++++++++--------
+ 2 files changed, 39 insertions(+), 8 deletions(-)
+
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -97,6 +97,13 @@ enum bpf_field_info_kind {
+ __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+
+ /*
++ * Convenience macro to get byte size of a field. Works for integers,
++ * struct/unions, pointers, arrays, and enums.
++ */
++#define bpf_core_field_size(field) \
++ __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
++
++/*
+ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -2706,8 +2706,10 @@ err_out:
+ /* Check two types for compatibility, skipping const/volatile/restrict and
+ * typedefs, to ensure we are relocating compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+- * - any two FWDs are compatible;
++ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
++ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
++ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+@@ -2735,11 +2737,23 @@ recur:
+ return 0;
+
+ switch (btf_kind(local_type)) {
+- case BTF_KIND_FWD:
+ case BTF_KIND_PTR:
+ return 1;
+- case BTF_KIND_ENUM:
+- return local_type->size == targ_type->size;
++ case BTF_KIND_FWD:
++ case BTF_KIND_ENUM: {
++ const char *local_name, *targ_name;
++ size_t local_len, targ_len;
++
++ local_name = btf__name_by_offset(local_btf,
++ local_type->name_off);
++ targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
++ local_len = bpf_core_essential_name_len(local_name);
++ targ_len = bpf_core_essential_name_len(targ_name);
++ /* one of them is anonymous or both w/ same flavor-less names */
++ return local_len == 0 || targ_len == 0 ||
++ (local_len == targ_len &&
++ strncmp(local_name, targ_name, local_len) == 0);
++ }
+ case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible between each other
+@@ -2928,16 +2942,23 @@ static int bpf_core_calc_field_relo(cons
+ const struct btf_member *m;
+ const struct btf_type *mt;
+ bool bitfield;
++ __s64 sz;
+
+ /* a[n] accessor needs special handling */
+ if (!acc->name) {
+- if (relo->kind != BPF_FIELD_BYTE_OFFSET) {
+- pr_warning("prog '%s': relo %d at insn #%d can't be applied to array access'\n",
++ if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
++ *val = spec->bit_offset / 8;
++ } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
++ sz = btf__resolve_size(spec->btf, acc->type_id);
++ if (sz < 0)
++ return -EINVAL;
++ *val = sz;
++ } else {
++ pr_warning("prog '%s': relo %d at insn #%d can't be applied to array access\n",
+ bpf_program__title(prog, false),
+ relo->kind, relo->insn_off / 8);
+ return -EINVAL;
+ }
+- *val = spec->bit_offset / 8;
+ if (validate)
+ *validate = true;
+ return 0;
+@@ -2965,7 +2986,10 @@ static int bpf_core_calc_field_relo(cons
+ byte_off = bit_off / 8 / byte_sz * byte_sz;
+ }
+ } else {
+- byte_sz = mt->size;
++ sz = btf__resolve_size(spec->btf, m->type);
++ if (sz < 0)
++ return -EINVAL;
++ byte_sz = sz;
+ byte_off = spec->bit_offset / 8;
+ bit_sz = byte_sz * 8;
+ }
diff --git a/patches.suse/libbpf-Add-support-for-prog_tracing.patch b/patches.suse/libbpf-Add-support-for-prog_tracing.patch
new file mode 100644
index 0000000000..e1abe3c239
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-for-prog_tracing.patch
@@ -0,0 +1,270 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Wed, 30 Oct 2019 15:32:12 -0700
+Subject: libbpf: Add support for prog_tracing
+Patch-mainline: v5.5-rc1
+Git-commit: 12a8654b2e5aab37b22c9608d008f9f0565862c0
+References: bsc#1155518
+
+Clean up libbpf by removing the expected_attach_type == attach_btf_id hack
+and introduce BPF_PROG_TYPE_TRACING.
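+
+For example, a typed raw tracepoint now looks roughly like this (tracepoint
+and handler names are illustrative):
+
+	SEC("tp_btf/sched_process_fork")
+	int handle_fork(__u64 *ctx)
+	{
+		struct task_struct *parent = (struct task_struct *)ctx[0];
+
+		bpf_printk("fork by pid %d\n", BPF_CORE_READ(parent, pid));
+		return 0;
+	}
+
+The section name selects BPF_PROG_TYPE_TRACING with expected attach type
+BPF_TRACE_RAW_TP, and the resolved BTF id is passed as attach_btf_id.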
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20191030223212.953010-3-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/include/uapi/linux/bpf.h | 2 +
+ tools/lib/bpf/bpf.c | 8 ++--
+ tools/lib/bpf/bpf.h | 5 ++
+ tools/lib/bpf/libbpf.c | 79 +++++++++++++++++++++++++++++------------
+ tools/lib/bpf/libbpf.h | 2 +
+ tools/lib/bpf/libbpf.map | 2 +
+ tools/lib/bpf/libbpf_probes.c | 1
+ 7 files changed, 71 insertions(+), 28 deletions(-)
+
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -173,6 +173,7 @@ enum bpf_prog_type {
+ BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ BPF_PROG_TYPE_CGROUP_SOCKOPT,
++ BPF_PROG_TYPE_TRACING,
+ };
+
+ enum bpf_attach_type {
+@@ -199,6 +200,7 @@ enum bpf_attach_type {
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
++ BPF_TRACE_RAW_TP,
+ __MAX_BPF_ATTACH_TYPE
+ };
+
+--- a/tools/lib/bpf/bpf.c
++++ b/tools/lib/bpf/bpf.c
+@@ -228,9 +228,10 @@ int bpf_load_program_xattr(const struct
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = load_attr->prog_type;
+ attr.expected_attach_type = load_attr->expected_attach_type;
+- if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT)
+- /* expected_attach_type is ignored for tracing progs */
+- attr.attach_btf_id = attr.expected_attach_type;
++ if (attr.prog_type == BPF_PROG_TYPE_TRACING)
++ attr.attach_btf_id = load_attr->attach_btf_id;
++ else
++ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.insn_cnt = (__u32)load_attr->insns_cnt;
+ attr.insns = ptr_to_u64(load_attr->insns);
+ attr.license = ptr_to_u64(load_attr->license);
+@@ -245,7 +246,6 @@ int bpf_load_program_xattr(const struct
+ }
+
+ attr.kern_version = load_attr->kern_version;
+- attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.prog_btf_fd = load_attr->prog_btf_fd;
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -78,7 +78,10 @@ struct bpf_load_program_attr {
+ size_t insns_cnt;
+ const char *license;
+ __u32 kern_version;
+- __u32 prog_ifindex;
++ union {
++ __u32 prog_ifindex;
++ __u32 attach_btf_id;
++ };
+ __u32 prog_btf_fd;
+ __u32 func_info_rec_size;
+ const void *func_info;
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -188,6 +188,7 @@ struct bpf_program {
+ bpf_program_clear_priv_t clear_priv;
+
+ enum bpf_attach_type expected_attach_type;
++ __u32 attach_btf_id;
+ void *func_info;
+ __u32 func_info_rec_size;
+ __u32 func_info_cnt;
+@@ -3450,6 +3451,7 @@ load_program(struct bpf_program *prog, s
+ load_attr.line_info_cnt = prog->line_info_cnt;
+ load_attr.log_level = prog->log_level;
+ load_attr.prog_flags = prog->prog_flags;
++ load_attr.attach_btf_id = prog->attach_btf_id;
+
+ retry_load:
+ log_buf = malloc(log_buf_size);
+@@ -3612,6 +3614,8 @@ bpf_object__load_progs(struct bpf_object
+ return 0;
+ }
+
++static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
++
+ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+@@ -3661,6 +3665,7 @@ __bpf_object__open(const char *path, con
+ bpf_object__for_each_program(prog, obj) {
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type attach_type;
++ __u32 btf_id;
+
+ err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
+ &attach_type);
+@@ -3672,6 +3677,12 @@ __bpf_object__open(const char *path, con
+
+ bpf_program__set_type(prog, prog_type);
+ bpf_program__set_expected_attach_type(prog, attach_type);
++ if (prog_type == BPF_PROG_TYPE_TRACING) {
++ err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id);
++ if (err)
++ goto out;
++ prog->attach_btf_id = btf_id;
++ }
+ }
+
+ return obj;
+@@ -4523,6 +4534,7 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_T
+ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
+ BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
+ BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
++BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
+
+ enum bpf_attach_type
+ bpf_program__get_expected_attach_type(struct bpf_program *prog)
+@@ -4551,7 +4563,8 @@ void bpf_program__set_expected_attach_ty
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
+
+ /* Programs that use BTF to identify attach point */
+-#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0)
++#define BPF_PROG_BTF(string, ptype, eatype) \
++ BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
+
+ /* Programs that can be attached but attach type can't be identified by section
+ * name. Kept for backward compatibility.
+@@ -4578,7 +4591,8 @@ static const struct {
+ BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+- BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
++ BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
++ BPF_TRACE_RAW_TP),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+ BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
+@@ -4683,27 +4697,6 @@ int libbpf_prog_type_by_name(const char
+ continue;
+ *prog_type = section_names[i].prog_type;
+ *expected_attach_type = section_names[i].expected_attach_type;
+- if (section_names[i].is_attach_btf) {
+- struct btf *btf = bpf_core_find_kernel_btf();
+- char raw_tp_btf_name[128] = "btf_trace_";
+- char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1;
+- int ret;
+-
+- if (IS_ERR(btf)) {
+- pr_warning("vmlinux BTF is not found\n");
+- return -EINVAL;
+- }
+- /* prepend "btf_trace_" prefix per kernel convention */
+- strncat(dst, name + section_names[i].len,
+- sizeof(raw_tp_btf_name) - sizeof("btf_trace_"));
+- ret = btf__find_by_name(btf, raw_tp_btf_name);
+- btf__free(btf);
+- if (ret <= 0) {
+- pr_warning("%s is not found in vmlinux BTF\n", dst);
+- return -EINVAL;
+- }
+- *expected_attach_type = ret;
+- }
+ return 0;
+ }
+ pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+@@ -4716,6 +4709,46 @@ int libbpf_prog_type_by_name(const char
+ return -ESRCH;
+ }
+
++#define BTF_PREFIX "btf_trace_"
++static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
++{
++ struct btf *btf = bpf_core_find_kernel_btf();
++ char raw_tp_btf_name[128] = BTF_PREFIX;
++ char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
++ int ret, i, err = -EINVAL;
++
++ if (IS_ERR(btf)) {
++ pr_warning("vmlinux BTF is not found\n");
++ return -EINVAL;
++ }
++
++ if (!name)
++ goto out;
++
++ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
++ if (!section_names[i].is_attach_btf)
++ continue;
++ if (strncmp(name, section_names[i].sec, section_names[i].len))
++ continue;
++ /* prepend "btf_trace_" prefix per kernel convention */
++ strncat(dst, name + section_names[i].len,
++ sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
++ ret = btf__find_by_name(btf, raw_tp_btf_name);
++ if (ret <= 0) {
++ pr_warning("%s is not found in vmlinux BTF\n", dst);
++ goto out;
++ }
++ *btf_id = ret;
++ err = 0;
++ goto out;
++ }
++ pr_warning("failed to identify btf_id based on ELF section name '%s'\n", name);
++ err = -ESRCH;
++out:
++ btf__free(btf);
++ return err;
++}
++
+ int libbpf_attach_type_by_name(const char *name,
+ enum bpf_attach_type *attach_type)
+ {
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -307,6 +307,7 @@ LIBBPF_API int bpf_program__set_sched_cl
+ LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
+ LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
+ LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
++LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
+
+ LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
+ LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
+@@ -326,6 +327,7 @@ LIBBPF_API bool bpf_program__is_sched_cl
+ LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
+ LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
+ LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
++LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
+
+ /*
+ * No need for __attribute__((packed)), all members of 'bpf_map_def'
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -197,4 +197,6 @@ LIBBPF_0.0.6 {
+ bpf_object__open_mem;
+ bpf_program__get_expected_attach_type;
+ bpf_program__get_type;
++ bpf_program__is_tracing;
++ bpf_program__set_tracing;
+ } LIBBPF_0.0.5;
+--- a/tools/lib/bpf/libbpf_probes.c
++++ b/tools/lib/bpf/libbpf_probes.c
+@@ -102,6 +102,7 @@ probe_load(enum bpf_prog_type prog_type,
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
++ case BPF_PROG_TYPE_TRACING:
+ default:
+ break;
+ }
diff --git a/patches.suse/libbpf-Add-support-for-relocatable-bitfields.patch b/patches.suse/libbpf-Add-support-for-relocatable-bitfields.patch
new file mode 100644
index 0000000000..cc8f18601f
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-for-relocatable-bitfields.patch
@@ -0,0 +1,494 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 1 Nov 2019 15:28:07 -0700
+Subject: libbpf: Add support for relocatable bitfields
+Patch-mainline: v5.5-rc1
+Git-commit: ee26dade0e3bcd8a34ae7520e373fb69365fce7a
+References: bsc#1155518
+
+Add support for the new field relocation kinds necessary to support
+relocatable bitfield reads. Provide macros abstracting the code needed to do
+a full relocatable bitfield extraction into a u64 value. Two separate macros
+are provided:
+- BPF_CORE_READ_BITFIELD, for direct-memory-read-enabled BPF programs
+(e.g., typed raw tracepoints). It uses a direct memory dereference to extract
+the bitfield's backing integer value.
+- BPF_CORE_READ_BITFIELD_PROBED, for cases where bpf_probe_read() needs
+to be used to extract the same backing integer value.
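+
+A short sketch of both flavors (the tracepoint and sk_buff fields are shown
+for illustration only):
+
+	SEC("tp_btf/kfree_skb")
+	int trace_kfree_skb(__u64 *ctx)
+	{
+		struct sk_buff *skb = (struct sk_buff *)ctx[0];
+		__u64 pkt_type = 0, cloned = 0;
+
+		/* direct-read flavor, for typed raw tracepoints and the like */
+		pkt_type = BPF_CORE_READ_BITFIELD(skb, pkt_type);
+
+		/* probed flavor, for programs limited to bpf_probe_read() */
+		if (BPF_CORE_READ_BITFIELD_PROBED(skb, cloned, &cloned))
+			return 0;
+
+		bpf_printk("pkt_type %llu cloned %llu\n", pkt_type, cloned);
+		return 0;
+	}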
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191101222810.1246166-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_core_read.h | 72 +++++++++++++
+ tools/lib/bpf/libbpf.c | 211 ++++++++++++++++++++++++++++------------
+ tools/lib/bpf/libbpf_internal.h | 4
+ 3 files changed, 227 insertions(+), 60 deletions(-)
+
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -12,9 +12,81 @@
+ */
+ enum bpf_field_info_kind {
+ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
++ BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
++ BPF_FIELD_SIGNED = 3,
++ BPF_FIELD_LSHIFT_U64 = 4,
++ BPF_FIELD_RSHIFT_U64 = 5,
+ };
+
++#define __CORE_RELO(src, field, info) \
++ __builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
++
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
++ bpf_probe_read((void *)dst, \
++ __CORE_RELO(src, fld, BYTE_SIZE), \
++ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
++#else
++/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
++ * for big-endian we need to adjust destination pointer accordingly, based on
++ * field byte size
++ */
++#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
++ bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
++ __CORE_RELO(src, fld, BYTE_SIZE), \
++ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
++#endif
++
++/*
++ * Extract bitfield, identified by src->field, and put its value into u64
++ * *res. All this is done in a relocatable manner, so bitfield changes such
++ * as signedness, bit size, or offset are handled automatically.
++ * This version of the macro uses bpf_probe_read() to read the underlying
++ * integer storage. The macro works as an expression; its value is
++ * bpf_probe_read()'s return value: 0 on success, <0 on error.
++ */
++#define BPF_CORE_READ_BITFIELD_PROBED(src, field, res) ({ \
++ unsigned long long val; \
++ \
++ *res = 0; \
++ val = __CORE_BITFIELD_PROBE_READ(res, src, field); \
++ if (!val) { \
++ *res <<= __CORE_RELO(src, field, LSHIFT_U64); \
++ val = __CORE_RELO(src, field, RSHIFT_U64); \
++ if (__CORE_RELO(src, field, SIGNED)) \
++ *res = ((long long)*res) >> val; \
++ else \
++ *res = ((unsigned long long)*res) >> val; \
++ val = 0; \
++ } \
++ val; \
++})
++
++/*
++ * Extract bitfield, identified by src->field, and return its value as u64.
++ * This version of the macro uses direct memory reads and should be used from
++ * BPF program types that support such functionality (e.g., typed raw
++ * tracepoints).
++ */
++#define BPF_CORE_READ_BITFIELD(s, field) ({ \
++ const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
++ unsigned long long val; \
++ \
++ switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
++	case 1: val = *(const unsigned char *)p; break;		      \
++	case 2: val = *(const unsigned short *)p; break;	      \
++	case 4: val = *(const unsigned int *)p; break;		      \
++	case 8: val = *(const unsigned long long *)p; break;	      \
++ } \
++ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
++ if (__CORE_RELO(s, field, SIGNED)) \
++ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
++ else \
++ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
++ val; \
++})
++
+ /*
+ * Convenience macro to check that field actually exists in target kernel's.
+ * Returns:
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -2472,8 +2472,8 @@ struct bpf_core_spec {
+ int raw_spec[BPF_CORE_SPEC_MAX_LEN];
+ /* raw spec length */
+ int raw_len;
+- /* field byte offset represented by spec */
+- __u32 offset;
++ /* field bit offset represented by spec */
++ __u32 bit_offset;
+ };
+
+ static bool str_is_empty(const char *s)
+@@ -2484,8 +2484,8 @@ static bool str_is_empty(const char *s)
+ /*
+ * Turn bpf_field_reloc into a low- and high-level spec representation,
+ * validating correctness along the way, as well as calculating resulting
+- * field offset (in bytes), specified by accessor string. Low-level spec
+- * captures every single level of nestedness, including traversing anonymous
++ * field bit offset, specified by accessor string. Low-level spec captures
++ * every single level of nestedness, including traversing anonymous
+ * struct/union members. High-level one only captures semantically meaningful
+ * "turning points": named fields and array indicies.
+ * E.g., for this case:
+@@ -2557,7 +2557,7 @@ static int bpf_core_spec_parse(const str
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+- spec->offset = access_idx * sz;
++ spec->bit_offset = access_idx * sz * 8;
+
+ for (i = 1; i < spec->raw_len; i++) {
+ t = skip_mods_and_typedefs(btf, id, &id);
+@@ -2568,17 +2568,13 @@ static int bpf_core_spec_parse(const str
+
+ if (btf_is_composite(t)) {
+ const struct btf_member *m;
+- __u32 offset;
++ __u32 bit_offset;
+
+ if (access_idx >= btf_vlen(t))
+ return -EINVAL;
+- if (btf_member_bitfield_size(t, access_idx))
+- return -EINVAL;
+
+- offset = btf_member_bit_offset(t, access_idx);
+- if (offset % 8)
+- return -EINVAL;
+- spec->offset += offset / 8;
++ bit_offset = btf_member_bit_offset(t, access_idx);
++ spec->bit_offset += bit_offset;
+
+ m = btf_members(t) + access_idx;
+ if (m->name_off) {
+@@ -2607,7 +2603,7 @@ static int bpf_core_spec_parse(const str
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+- spec->offset += access_idx * sz;
++ spec->bit_offset += access_idx * sz * 8;
+ } else {
+ pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
+ type_id, spec_str, i, id, btf_kind(t));
+@@ -2708,12 +2704,12 @@ err_out:
+ }
+
+ /* Check two types for compatibility, skipping const/volatile/restrict and
+- * typedefs, to ensure we are relocating offset to the compatible entities:
++ * typedefs, to ensure we are relocating compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible;
+ * - any two PTRs are always compatible;
+ * - for ENUMs, check sizes, names are ignored;
+- * - for INT, size and bitness should match, signedness is ignored;
++ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - everything else shouldn't be ever a target of relocation.
+@@ -2745,10 +2741,11 @@ recur:
+ case BTF_KIND_ENUM:
+ return local_type->size == targ_type->size;
+ case BTF_KIND_INT:
++ /* just reject deprecated bitfield-like integers; all other
++ * integers are by default compatible between each other
++ */
+ return btf_int_offset(local_type) == 0 &&
+- btf_int_offset(targ_type) == 0 &&
+- local_type->size == targ_type->size &&
+- btf_int_bits(local_type) == btf_int_bits(targ_type);
++ btf_int_offset(targ_type) == 0;
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+@@ -2764,7 +2761,7 @@ recur:
+ * Given single high-level named field accessor in local type, find
+ * corresponding high-level accessor for a target type. Along the way,
+ * maintain low-level spec for target as well. Also keep updating target
+- * offset.
++ * bit offset.
+ *
+ * Searching is performed through recursive exhaustive enumeration of all
+ * fields of a struct/union. If there are any anonymous (embedded)
+@@ -2803,21 +2800,16 @@ static int bpf_core_match_member(const s
+ n = btf_vlen(targ_type);
+ m = btf_members(targ_type);
+ for (i = 0; i < n; i++, m++) {
+- __u32 offset;
++ __u32 bit_offset;
+
+- /* bitfield relocations not supported */
+- if (btf_member_bitfield_size(targ_type, i))
+- continue;
+- offset = btf_member_bit_offset(targ_type, i);
+- if (offset % 8)
+- continue;
++ bit_offset = btf_member_bit_offset(targ_type, i);
+
+ /* too deep struct/union/array nesting */
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ /* speculate this member will be the good one */
+- spec->offset += offset / 8;
++ spec->bit_offset += bit_offset;
+ spec->raw_spec[spec->raw_len++] = i;
+
+ targ_name = btf__name_by_offset(targ_btf, m->name_off);
+@@ -2846,7 +2838,7 @@ static int bpf_core_match_member(const s
+ return found;
+ }
+ /* member turned out not to be what we looked for */
+- spec->offset -= offset / 8;
++ spec->bit_offset -= bit_offset;
+ spec->raw_len--;
+ }
+
+@@ -2855,7 +2847,7 @@ static int bpf_core_match_member(const s
+
+ /*
+ * Try to match local spec to a target type and, if successful, produce full
+- * target spec (high-level, low-level + offset).
++ * target spec (high-level, low-level + bit offset).
+ */
+ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
+ const struct btf *targ_btf, __u32 targ_id,
+@@ -2918,13 +2910,110 @@ static int bpf_core_spec_match(struct bp
+ sz = btf__resolve_size(targ_btf, targ_id);
+ if (sz < 0)
+ return sz;
+- targ_spec->offset += local_acc->idx * sz;
++ targ_spec->bit_offset += local_acc->idx * sz * 8;
+ }
+ }
+
+ return 1;
+ }
+
++static int bpf_core_calc_field_relo(const struct bpf_program *prog,
++ const struct bpf_field_reloc *relo,
++ const struct bpf_core_spec *spec,
++ __u32 *val, bool *validate)
++{
++ const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
++ const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
++ __u32 byte_off, byte_sz, bit_off, bit_sz;
++ const struct btf_member *m;
++ const struct btf_type *mt;
++ bool bitfield;
++
++ /* a[n] accessor needs special handling */
++ if (!acc->name) {
++ if (relo->kind != BPF_FIELD_BYTE_OFFSET) {
++ pr_warning("prog '%s': relo %d at insn #%d can't be applied to array access'\n",
++ bpf_program__title(prog, false),
++ relo->kind, relo->insn_off / 8);
++ return -EINVAL;
++ }
++ *val = spec->bit_offset / 8;
++ if (validate)
++ *validate = true;
++ return 0;
++ }
++
++ m = btf_members(t) + acc->idx;
++ mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
++ bit_off = spec->bit_offset;
++ bit_sz = btf_member_bitfield_size(t, acc->idx);
++
++ bitfield = bit_sz > 0;
++ if (bitfield) {
++ byte_sz = mt->size;
++ byte_off = bit_off / 8 / byte_sz * byte_sz;
++ /* figure out smallest int size necessary for bitfield load */
++ while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
++ if (byte_sz >= 8) {
++ /* bitfield can't be read with 64-bit read */
++ pr_warning("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
++ bpf_program__title(prog, false),
++ relo->kind, relo->insn_off / 8);
++ return -E2BIG;
++ }
++ byte_sz *= 2;
++ byte_off = bit_off / 8 / byte_sz * byte_sz;
++ }
++ } else {
++ byte_sz = mt->size;
++ byte_off = spec->bit_offset / 8;
++ bit_sz = byte_sz * 8;
++ }
++
++ /* for bitfields, all the relocatable aspects are ambiguous and we
++ * might disagree with compiler, so turn off validation of expected
++ * value, except for signedness
++ */
++ if (validate)
++ *validate = !bitfield;
++
++ switch (relo->kind) {
++ case BPF_FIELD_BYTE_OFFSET:
++ *val = byte_off;
++ break;
++ case BPF_FIELD_BYTE_SIZE:
++ *val = byte_sz;
++ break;
++ case BPF_FIELD_SIGNED:
++ /* enums will be assumed unsigned */
++ *val = btf_is_enum(mt) ||
++ (btf_int_encoding(mt) & BTF_INT_SIGNED);
++ if (validate)
++ *validate = true; /* signedness is never ambiguous */
++ break;
++ case BPF_FIELD_LSHIFT_U64:
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++ *val = 64 - (bit_off + bit_sz - byte_off * 8);
++#else
++ *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
++#endif
++ break;
++ case BPF_FIELD_RSHIFT_U64:
++ *val = 64 - bit_sz;
++ if (validate)
++ *validate = true; /* right shift is never ambiguous */
++ break;
++ case BPF_FIELD_EXISTS:
++ default:
++ pr_warning("prog '%s': unknown relo %d at insn #%d\n",
++ bpf_program__title(prog, false),
++ relo->kind, relo->insn_off / 8);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ /*
+ * Patch relocatable BPF instruction.
+ *
+@@ -2944,36 +3033,31 @@ static int bpf_core_reloc_insn(struct bp
+ const struct bpf_core_spec *local_spec,
+ const struct bpf_core_spec *targ_spec)
+ {
++ bool failed = false, validate = true;
+ __u32 orig_val, new_val;
+ struct bpf_insn *insn;
+- int insn_idx;
++ int insn_idx, err;
+ __u8 class;
+
+ if (relo->insn_off % sizeof(struct bpf_insn))
+ return -EINVAL;
+ insn_idx = relo->insn_off / sizeof(struct bpf_insn);
+
+- switch (relo->kind) {
+- case BPF_FIELD_BYTE_OFFSET:
+- orig_val = local_spec->offset;
+- if (targ_spec) {
+- new_val = targ_spec->offset;
+- } else {
+- pr_warning("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n",
+- bpf_program__title(prog, false), insn_idx,
+- orig_val, -1);
+- new_val = (__u32)-1;
+- }
+- break;
+- case BPF_FIELD_EXISTS:
++ if (relo->kind == BPF_FIELD_EXISTS) {
+ orig_val = 1; /* can't generate EXISTS relo w/o local field */
+ new_val = targ_spec ? 1 : 0;
+- break;
+- default:
+- pr_warning("prog '%s': unknown relo %d at insn #%d'\n",
+- bpf_program__title(prog, false),
+- relo->kind, insn_idx);
+- return -EINVAL;
++ } else if (!targ_spec) {
++ failed = true;
++ new_val = (__u32)-1;
++ } else {
++ err = bpf_core_calc_field_relo(prog, relo, local_spec,
++ &orig_val, &validate);
++ if (err)
++ return err;
++ err = bpf_core_calc_field_relo(prog, relo, targ_spec,
++ &new_val, NULL);
++ if (err)
++ return err;
+ }
+
+ insn = &prog->insns[insn_idx];
+@@ -2982,12 +3066,17 @@ static int bpf_core_reloc_insn(struct bp
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (BPF_SRC(insn->code) != BPF_K)
+ return -EINVAL;
+- if (insn->imm != orig_val)
++ if (!failed && validate && insn->imm != orig_val) {
++ pr_warning("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
++ bpf_program__title(prog, false), insn_idx,
++ insn->imm, orig_val, new_val);
+ return -EINVAL;
++ }
++ orig_val = insn->imm;
+ insn->imm = new_val;
+- pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
+- bpf_program__title(prog, false),
+- insn_idx, orig_val, new_val);
++ pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
++ bpf_program__title(prog, false), insn_idx,
++ failed ? " w/ failed reloc" : "", orig_val, new_val);
+ } else {
+ pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
+ bpf_program__title(prog, false),
+@@ -3105,7 +3194,8 @@ static void bpf_core_dump_spec(int level
+ libbpf_print(level, "%d%s", spec->raw_spec[i],
+ i == spec->raw_len - 1 ? " => " : ":");
+
+- libbpf_print(level, "%u @ &x", spec->offset);
++ libbpf_print(level, "%u.%u @ &x",
++ spec->bit_offset / 8, spec->bit_offset % 8);
+
+ for (i = 0; i < spec->len; i++) {
+ if (spec->spec[i].name)
+@@ -3219,7 +3309,8 @@ static int bpf_core_reloc_field(struct b
+ return -EINVAL;
+ }
+
+- pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
++ pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
++ relo->kind);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
+ libbpf_print(LIBBPF_DEBUG, "\n");
+
+@@ -3259,13 +3350,13 @@ static int bpf_core_reloc_field(struct b
+
+ if (j == 0) {
+ targ_spec = cand_spec;
+- } else if (cand_spec.offset != targ_spec.offset) {
++ } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
+ /* if there are many candidates, they should all
+- * resolve to the same offset
++ * resolve to the same bit offset
+ */
+ pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
+- prog_name, relo_idx, cand_spec.offset,
+- targ_spec.offset);
++ prog_name, relo_idx, cand_spec.bit_offset,
++ targ_spec.bit_offset);
+ return -EINVAL;
+ }
+
+--- a/tools/lib/bpf/libbpf_internal.h
++++ b/tools/lib/bpf/libbpf_internal.h
+@@ -158,7 +158,11 @@ struct bpf_line_info_min {
+ */
+ enum bpf_field_info_kind {
+ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
++ BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
++ BPF_FIELD_SIGNED = 3,
++ BPF_FIELD_LSHIFT_U64 = 4,
++ BPF_FIELD_RSHIFT_U64 = 5,
+ };
+
+ /* The minimum bpf_field_reloc checked by the loader
diff --git a/patches.suse/libbpf-Add-support-to-attach-to-fentry-fexit-tracing.patch b/patches.suse/libbpf-Add-support-to-attach-to-fentry-fexit-tracing.patch
new file mode 100644
index 0000000000..13192f202b
--- /dev/null
+++ b/patches.suse/libbpf-Add-support-to-attach-to-fentry-fexit-tracing.patch
@@ -0,0 +1,230 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:06 -0800
+Subject: libbpf: Add support to attach to fentry/fexit tracing progs
+Patch-mainline: v5.5-rc1
+Git-commit: b8c54ea455dc2e0bda7ea9b0370279c224e21045
+References: bsc#1155518
+
+Teach libbpf to recognize tracing programs types and attach them to
+fentry/fexit.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-7-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/include/uapi/linux/bpf.h | 2
+ tools/lib/bpf/libbpf.c | 99 ++++++++++++++++++++++++++++++-----------
+ tools/lib/bpf/libbpf.h | 4 +
+ tools/lib/bpf/libbpf.map | 2
+ 4 files changed, 82 insertions(+), 25 deletions(-)
+
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -201,6 +201,8 @@ enum bpf_attach_type {
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
++ BPF_TRACE_FENTRY,
++ BPF_TRACE_FEXIT,
+ __MAX_BPF_ATTACH_TYPE
+ };
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3749,7 +3749,8 @@ bpf_object__load_progs(struct bpf_object
+ return 0;
+ }
+
+-static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
++static int libbpf_attach_btf_id_by_name(const char *name,
++ enum bpf_attach_type attach_type);
+
+ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+@@ -3803,7 +3804,6 @@ __bpf_object__open(const char *path, con
+ bpf_object__for_each_program(prog, obj) {
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type attach_type;
+- __u32 btf_id;
+
+ err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
+ &attach_type);
+@@ -3816,10 +3816,11 @@ __bpf_object__open(const char *path, con
+ bpf_program__set_type(prog, prog_type);
+ bpf_program__set_expected_attach_type(prog, attach_type);
+ if (prog_type == BPF_PROG_TYPE_TRACING) {
+- err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id);
+- if (err)
++ err = libbpf_attach_btf_id_by_name(prog->section_name,
++ attach_type);
++ if (err <= 0)
+ goto out;
+- prog->attach_btf_id = btf_id;
++ prog->attach_btf_id = err;
+ }
+ }
+
+@@ -4823,6 +4824,10 @@ static const struct {
+ BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
+ BPF_TRACE_RAW_TP),
++ BPF_PROG_BTF("fentry/", BPF_PROG_TYPE_TRACING,
++ BPF_TRACE_FENTRY),
++ BPF_PROG_BTF("fexit/", BPF_PROG_TYPE_TRACING,
++ BPF_TRACE_FEXIT),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+ BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
+@@ -4940,43 +4945,56 @@ int libbpf_prog_type_by_name(const char
+ }
+
+ #define BTF_PREFIX "btf_trace_"
+-static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
++int libbpf_find_vmlinux_btf_id(const char *name,
++ enum bpf_attach_type attach_type)
+ {
+ struct btf *btf = bpf_core_find_kernel_btf();
+- char raw_tp_btf_name[128] = BTF_PREFIX;
+- char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
+- int ret, i, err = -EINVAL;
++ char raw_tp_btf[128] = BTF_PREFIX;
++ char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
++ const char *btf_name;
++ int err = -EINVAL;
++ u32 kind;
+
+ if (IS_ERR(btf)) {
+ pr_warning("vmlinux BTF is not found\n");
+ return -EINVAL;
+ }
+
++ if (attach_type == BPF_TRACE_RAW_TP) {
++ /* prepend "btf_trace_" prefix per kernel convention */
++ strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
++ btf_name = raw_tp_btf;
++ kind = BTF_KIND_TYPEDEF;
++ } else {
++ btf_name = name;
++ kind = BTF_KIND_FUNC;
++ }
++ err = btf__find_by_name_kind(btf, btf_name, kind);
++ btf__free(btf);
++ return err;
++}
++
++static int libbpf_attach_btf_id_by_name(const char *name,
++ enum bpf_attach_type attach_type)
++{
++ int i, err;
++
+ if (!name)
+- goto out;
++ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (!section_names[i].is_attach_btf)
+ continue;
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+- /* prepend "btf_trace_" prefix per kernel convention */
+- strncat(dst, name + section_names[i].len,
+- sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
+- ret = btf__find_by_name(btf, raw_tp_btf_name);
+- if (ret <= 0) {
+- pr_warning("%s is not found in vmlinux BTF\n", dst);
+- goto out;
+- }
+- *btf_id = ret;
+- err = 0;
+- goto out;
++ err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
++ attach_type);
++ if (err <= 0)
++ pr_warning("%s is not found in vmlinux BTF\n", name);
++ return err;
+ }
+ pr_warning("failed to identify btf_id based on ELF section name '%s'\n", name);
+- err = -ESRCH;
+-out:
+- btf__free(btf);
+- return err;
++ return -ESRCH;
+ }
+
+ int libbpf_attach_type_by_name(const char *name,
+@@ -5601,6 +5619,37 @@ struct bpf_link *bpf_program__attach_raw
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
++ link->fd = pfd;
++ return (struct bpf_link *)link;
++}
++
++struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
++{
++ char errmsg[STRERR_BUFSIZE];
++ struct bpf_link_fd *link;
++ int prog_fd, pfd;
++
++ prog_fd = bpf_program__fd(prog);
++ if (prog_fd < 0) {
++ pr_warning("program '%s': can't attach before loaded\n",
++ bpf_program__title(prog, false));
++ return ERR_PTR(-EINVAL);
++ }
++
++ link = malloc(sizeof(*link));
++ if (!link)
++ return ERR_PTR(-ENOMEM);
++ link->link.destroy = &bpf_link__destroy_fd;
++
++ pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
++ if (pfd < 0) {
++ pfd = -errno;
++ free(link);
++ pr_warning("program '%s': failed to attach to trace: %s\n",
++ bpf_program__title(prog, false),
++ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
++ return ERR_PTR(pfd);
++ }
+ link->fd = pfd;
+ return (struct bpf_link *)link;
+ }
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -188,6 +188,8 @@ libbpf_prog_type_by_name(const char *nam
+ enum bpf_attach_type *expected_attach_type);
+ LIBBPF_API int libbpf_attach_type_by_name(const char *name,
+ enum bpf_attach_type *attach_type);
++LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
++ enum bpf_attach_type attach_type);
+
+ /* Accessors of bpf_program */
+ struct bpf_program;
+@@ -251,6 +253,8 @@ LIBBPF_API struct bpf_link *
+ bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
+ const char *tp_name);
+
++LIBBPF_API struct bpf_link *
++bpf_program__attach_trace(struct bpf_program *prog);
+ struct bpf_insn;
+
+ /*
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -198,10 +198,12 @@ LIBBPF_0.0.6 {
+ bpf_map__set_pin_path;
+ bpf_object__open_file;
+ bpf_object__open_mem;
++ bpf_program__attach_trace;
+ bpf_program__get_expected_attach_type;
+ bpf_program__get_type;
+ bpf_program__is_tracing;
+ bpf_program__set_tracing;
+ bpf_program__size;
+ btf__find_by_name_kind;
++ libbpf_find_vmlinux_btf_id;
+ } LIBBPF_0.0.5;
diff --git a/patches.suse/libbpf-Add-uprobe-uretprobe-and-tp-raw_tp-section-su.patch b/patches.suse/libbpf-Add-uprobe-uretprobe-and-tp-raw_tp-section-su.patch
new file mode 100644
index 0000000000..9ad8c4270a
--- /dev/null
+++ b/patches.suse/libbpf-Add-uprobe-uretprobe-and-tp-raw_tp-section-su.patch
@@ -0,0 +1,36 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:38:58 -0700
+Subject: libbpf: Add uprobe/uretprobe and tp/raw_tp section suffixes
+Patch-mainline: v5.5-rc1
+Git-commit: 32dff6db29acc1d2b9fe0423ab033f15c717d776
+References: bsc#1155518
+
+Map uprobe/uretprobe into the KPROBE program type. tp/raw_tp are just
+aliases for the more verbose tracepoint/raw_tracepoint, respectively.
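+
+For example, both of these now resolve automatically (handler names are
+illustrative):
+
+	SEC("uprobe/readline")		/* BPF_PROG_TYPE_KPROBE */
+	int handle_readline(struct pt_regs *ctx)
+	{
+		return 0;
+	}
+
+	SEC("tp/sched/sched_switch")	/* same as tracepoint/sched/sched_switch */
+	int handle_switch(void *ctx)
+	{
+		return 0;
+	}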
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-4-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4541,11 +4541,15 @@ static const struct {
+ } section_names[] = {
+ BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
+ BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
++ BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
++ BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
+ BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
+ BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
++ BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
++ BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
diff --git a/patches.suse/libbpf-Bump-current-version-to-v0.0.6.patch b/patches.suse/libbpf-Bump-current-version-to-v0.0.6.patch
new file mode 100644
index 0000000000..18e312e3a7
--- /dev/null
+++ b/patches.suse/libbpf-Bump-current-version-to-v0.0.6.patch
@@ -0,0 +1,27 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Mon, 30 Sep 2019 15:25:03 -0700
+Subject: libbpf: Bump current version to v0.0.6
+Patch-mainline: v5.5-rc1
+Git-commit: 03bd4773d898783fe3bc321287e4838e515fea92
+References: bsc#1155518
+
+New release cycle started, let's bump to v0.0.6 proactively.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20190930222503.519782-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.map | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -190,3 +190,6 @@ LIBBPF_0.0.5 {
+ global:
+ bpf_btf_get_next_id;
+ } LIBBPF_0.0.4;
++
++LIBBPF_0.0.6 {
++} LIBBPF_0.0.5;
diff --git a/patches.suse/libbpf-Fix-BTF-defined-map-s-__type-macro-handling-o.patch b/patches.suse/libbpf-Fix-BTF-defined-map-s-__type-macro-handling-o.patch
new file mode 100644
index 0000000000..646bb86b20
--- /dev/null
+++ b/patches.suse/libbpf-Fix-BTF-defined-map-s-__type-macro-handling-o.patch
@@ -0,0 +1,50 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Thu, 3 Oct 2019 21:02:11 -0700
+Subject: libbpf: Fix BTF-defined map's __type macro handling of arrays
+Patch-mainline: v5.5-rc1
+Git-commit: a53ba15d81995868651dd28a85d8045aef3d4e20
+References: bsc#1155518
+
+Due to C's quirky syntax for declaring pointers to arrays or function
+prototypes, the existing __type() macro doesn't work with map key/value types
+that are arrays or function prototypes. One has to create a typedef first
+and use it to specify the key/value type for a BPF map. By using typeof(),
+a pointer to the type is now handled uniformly for all kinds of types. Convert
+one of the self-tests as a demonstration.
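+
+Before and after, sketched (map name and sizes are illustrative):
+
+	/* before: array values required a typedef */
+	typedef __u64 stack_trace_t[64];
+	/* __type(value, stack_trace_t); */
+
+	/* after: the array type can be spelled inline */
+	struct {
+		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+		__uint(max_entries, 1);
+		__type(key, __u32);
+		__type(value, __u64[64]);
+	} data_map SEC(".maps");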
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191004040211.2434033-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/bpf_helpers.h | 2 +-
+ tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c | 3 +--
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ b/tools/testing/selftests/bpf/bpf_helpers.h
+@@ -3,7 +3,7 @@
+ #define __BPF_HELPERS__
+
+ #define __uint(name, val) int (*name)[val]
+-#define __type(name, val) val *name
++#define __type(name, val) typeof(val) *name
+
+ /* helper macro to print out debug messages */
+ #define bpf_printk(fmt, ...) \
+--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
++++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
+@@ -47,12 +47,11 @@ struct {
+ * issue and avoid complicated C programming massaging.
+ * This is an acceptable workaround since there is one entry here.
+ */
+-typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP];
+ struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+- __type(value, raw_stack_trace_t);
++ __type(value, __u64[2 * MAX_STACK_RAWTP]);
+ } rawdata_map SEC(".maps");
+
+ SEC("raw_tracepoint/sys_enter")
diff --git a/patches.suse/libbpf-Fix-bpf_object-name-determination-for-bpf_obj.patch b/patches.suse/libbpf-Fix-bpf_object-name-determination-for-bpf_obj.patch
new file mode 100644
index 0000000000..43bcdc4ab4
--- /dev/null
+++ b/patches.suse/libbpf-Fix-bpf_object-name-determination-for-bpf_obj.patch
@@ -0,0 +1,35 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Thu, 21 Nov 2019 16:35:27 -0800
+Subject: libbpf: Fix bpf_object name determination for bpf_object__open_file()
+Patch-mainline: v5.5-rc1
+Git-commit: 1aace10f41adf1080d1cc54de9b3db98b8b8b0fb
+References: bsc#1155518
+
+If bpf_object__open_file() gets a path like "some/dir/obj.o", it should derive
+the BPF object's name as "obj" (unless overridden through opts->object_name).
+Instead, due to using `path` as a fallback value for opts->obj_name, the path
+is used as-is for the object name, so for the above example the BPF object's
+name will be verbatim "some/dir/obj", which leads to all sorts of trouble,
+especially where internal maps are concerned (they use up to 8 characters of
+the object name). Fix that by ensuring object_name stays NULL unless overridden.
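+
+Sketch of the resulting behavior (paths and names are illustrative):
+
+	/* object name becomes "obj", derived from the basename */
+	struct bpf_object *o1 = bpf_object__open_file("some/dir/obj.o", NULL);
+
+	/* or override it explicitly */
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+		.object_name = "my_obj",
+	);
+	struct bpf_object *o2 = bpf_object__open_file("some/dir/obj.o", &opts);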
+
+Fixes: 291ee02b5e40 ("libbpf: Refactor bpf_object__open APIs to use common opts")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191122003527.551556-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3806,7 +3806,7 @@ __bpf_object__open(const char *path, con
+ if (!OPTS_VALID(opts, bpf_object_open_opts))
+ return ERR_PTR(-EINVAL);
+
+- obj_name = OPTS_GET(opts, object_name, path);
++ obj_name = OPTS_GET(opts, object_name, NULL);
+ if (obj_buf) {
+ if (!obj_name) {
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
diff --git a/patches.suse/libbpf-Fix-error-handling-in-bpf_map__reuse_fd.patch b/patches.suse/libbpf-Fix-error-handling-in-bpf_map__reuse_fd.patch
new file mode 100644
index 0000000000..a8a11ce156
--- /dev/null
+++ b/patches.suse/libbpf-Fix-error-handling-in-bpf_map__reuse_fd.patch
@@ -0,0 +1,61 @@
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+Date: Sat, 2 Nov 2019 12:09:37 +0100
+Subject: libbpf: Fix error handling in bpf_map__reuse_fd()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: d1b4574a4b86565325ef2e545eda8dfc9aa07c60
+References: bsc#1155518
+
+bpf_map__reuse_fd() was calling close() in the error path before returning
+an error value based on errno. However, close can change errno, so that can
+lead to potentially misleading error messages. Instead, explicitly store
+errno in the err variable before each goto.
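+
+The general idiom, for reference (do_something() is a hypothetical helper):
+
+	if (do_something(fd) < 0) {
+		err = -errno;	/* snapshot errno before cleanup */
+		close(fd);	/* close() may clobber errno; err is safe */
+		return err;
+	}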
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157269297769.394725.12634985106772698611.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1918,16 +1918,22 @@ int bpf_map__reuse_fd(struct bpf_map *ma
+ return -errno;
+
+ new_fd = open("/", O_RDONLY | O_CLOEXEC);
+- if (new_fd < 0)
++ if (new_fd < 0) {
++ err = -errno;
+ goto err_free_new_name;
++ }
+
+ new_fd = dup3(fd, new_fd, O_CLOEXEC);
+- if (new_fd < 0)
++ if (new_fd < 0) {
++ err = -errno;
+ goto err_close_new_fd;
++ }
+
+ err = zclose(map->fd);
+- if (err)
++ if (err) {
++ err = -errno;
+ goto err_close_new_fd;
++ }
+ free(map->name);
+
+ map->fd = new_fd;
+@@ -1946,7 +1952,7 @@ err_close_new_fd:
+ close(new_fd);
+ err_free_new_name:
+ free(new_name);
+- return -errno;
++ return err;
+ }
+
+ int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
diff --git a/patches.suse/libbpf-Fix-global-variable-relocation.patch b/patches.suse/libbpf-Fix-global-variable-relocation.patch
new file mode 100644
index 0000000000..13615094b2
--- /dev/null
+++ b/patches.suse/libbpf-Fix-global-variable-relocation.patch
@@ -0,0 +1,283 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 27 Nov 2019 12:06:50 -0800
+Subject: libbpf: Fix global variable relocation
+Patch-mainline: v5.5-rc1
+Git-commit: 53f8dd434b6fe666b1c4e0be80a8727e8fa9839f
+References: bsc#1155518
+
+Similarly to a0d7da26ce86 ("libbpf: Fix call relocation offset calculation
+bug"), relocations against global variables need to take into account
+the referenced symbol's st_value, which holds the offset into the corresponding
+data section (and, subsequently, into the internal backing map). For static
+variables this offset is always zero, and the data offset is completely
+described by the respective instruction's imm field.
+
+Convert a bunch of selftests to global variables. Previously they relied on
+the `static volatile` trick to ensure Clang doesn't inline static variables;
+with global variables that trick is no longer necessary.
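+
+Sketched with a hypothetical global:
+
+	__u64 my_counter = 0;		/* global, placed in .data */
+
+	SEC("tp/syscalls/sys_enter_write")
+	int count_writes(void *ctx)
+	{
+		my_counter++;
+		return 0;
+	}
+
+With this fix the second ld_imm64 word becomes insn[0].imm + sym->st_value,
+so a non-zero offset of my_counter within .data is honored.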
+
+Fixes: 393cdfbee809 ("libbpf: Support initialized global variables")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20191127200651.1381348-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 43 +++++++++-------------
+ tools/testing/selftests/bpf/progs/fentry_test.c | 12 +++---
+ tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 6 +--
+ tools/testing/selftests/bpf/progs/fexit_test.c | 12 +++---
+ tools/testing/selftests/bpf/progs/test_mmap.c | 4 +-
+ 5 files changed, 36 insertions(+), 41 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -169,10 +169,8 @@ struct bpf_program {
+ RELO_DATA,
+ } type;
+ int insn_idx;
+- union {
+- int map_idx;
+- int text_off;
+- };
++ int map_idx;
++ int sym_off;
+ } *reloc_desc;
+ int nr_reloc;
+ int log_level;
+@@ -1819,7 +1817,7 @@ static int bpf_program__record_reloc(str
+ }
+ reloc_desc->type = RELO_CALL;
+ reloc_desc->insn_idx = insn_idx;
+- reloc_desc->text_off = sym->st_value / 8;
++ reloc_desc->sym_off = sym->st_value;
+ obj->has_pseudo_calls = true;
+ return 0;
+ }
+@@ -1863,6 +1861,7 @@ static int bpf_program__record_reloc(str
+ reloc_desc->type = RELO_LD64;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = map_idx;
++ reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
+ return 0;
+ }
+
+@@ -1894,6 +1893,7 @@ static int bpf_program__record_reloc(str
+ reloc_desc->type = RELO_DATA;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = map_idx;
++ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
+@@ -3420,8 +3420,8 @@ bpf_program__reloc_text(struct bpf_progr
+ return -LIBBPF_ERRNO__RELOC;
+
+ if (prog->idx == obj->efile.text_shndx) {
+- pr_warning("relo in .text insn %d into off %d\n",
+- relo->insn_idx, relo->text_off);
++ pr_warning("relo in .text insn %d into off %d (insn #%d)\n",
++ relo->insn_idx, relo->sym_off, relo->sym_off / 8);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+@@ -3456,7 +3456,7 @@ bpf_program__reloc_text(struct bpf_progr
+ prog->section_name);
+ }
+ insn = &prog->insns[relo->insn_idx];
+- insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx;
++ insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
+ return 0;
+ }
+
+@@ -3479,31 +3479,26 @@ bpf_program__relocate(struct bpf_program
+ return 0;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+- if (prog->reloc_desc[i].type == RELO_LD64 ||
+- prog->reloc_desc[i].type == RELO_DATA) {
+- bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
+- struct bpf_insn *insns = prog->insns;
+- int insn_idx, map_idx;
++ struct reloc_desc *relo = &prog->reloc_desc[i];
+
+- insn_idx = prog->reloc_desc[i].insn_idx;
+- map_idx = prog->reloc_desc[i].map_idx;
++ if (relo->type == RELO_LD64 || relo->type == RELO_DATA) {
++ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+
+- if (insn_idx + 1 >= (int)prog->insns_cnt) {
++ if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
+ pr_warning("relocation out of range: '%s'\n",
+ prog->section_name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+- if (!relo_data) {
+- insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
++ if (relo->type != RELO_DATA) {
++ insn[0].src_reg = BPF_PSEUDO_MAP_FD;
+ } else {
+- insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
+- insns[insn_idx + 1].imm = insns[insn_idx].imm;
++ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
++ insn[1].imm = insn[0].imm + relo->sym_off;
+ }
+- insns[insn_idx].imm = obj->maps[map_idx].fd;
+- } else if (prog->reloc_desc[i].type == RELO_CALL) {
+- err = bpf_program__reloc_text(prog, obj,
+- &prog->reloc_desc[i]);
++ insn[0].imm = obj->maps[relo->map_idx].fd;
++ } else if (relo->type == RELO_CALL) {
++ err = bpf_program__reloc_text(prog, obj, relo);
+ if (err)
+ return err;
+ }
+--- a/tools/testing/selftests/bpf/progs/fentry_test.c
++++ b/tools/testing/selftests/bpf/progs/fentry_test.c
+@@ -6,28 +6,28 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile __u64 test1_result;
++__u64 test1_result = 0;
+ BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a)
+ {
+ test1_result = a == 1;
+ return 0;
+ }
+
+-static volatile __u64 test2_result;
++__u64 test2_result = 0;
+ BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
+ {
+ test2_result = a == 2 && b == 3;
+ return 0;
+ }
+
+-static volatile __u64 test3_result;
++__u64 test3_result = 0;
+ BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c)
+ {
+ test3_result = a == 4 && b == 5 && c == 6;
+ return 0;
+ }
+
+-static volatile __u64 test4_result;
++__u64 test4_result = 0;
+ BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
+ void *, a, char, b, int, c, __u64, d)
+ {
+@@ -35,7 +35,7 @@ BPF_TRACE_4("fentry/bpf_fentry_test4", t
+ return 0;
+ }
+
+-static volatile __u64 test5_result;
++__u64 test5_result = 0;
+ BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
+ __u64, a, void *, b, short, c, int, d, __u64, e)
+ {
+@@ -44,7 +44,7 @@ BPF_TRACE_5("fentry/bpf_fentry_test5", t
+ return 0;
+ }
+
+-static volatile __u64 test6_result;
++__u64 test6_result = 0;
+ BPF_TRACE_6("fentry/bpf_fentry_test6", test6,
+ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f)
+ {
+--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
++++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+@@ -8,7 +8,7 @@ struct sk_buff {
+ unsigned int len;
+ };
+
+-static volatile __u64 test_result;
++__u64 test_result = 0;
+ BPF_TRACE_2("fexit/test_pkt_access", test_main,
+ struct sk_buff *, skb, int, ret)
+ {
+@@ -23,7 +23,7 @@ BPF_TRACE_2("fexit/test_pkt_access", tes
+ return 0;
+ }
+
+-static volatile __u64 test_result_subprog1;
++__u64 test_result_subprog1 = 0;
+ BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
+ struct sk_buff *, skb, int, ret)
+ {
+@@ -56,7 +56,7 @@ struct args_subprog2 {
+ __u64 args[5];
+ __u64 ret;
+ };
+-static volatile __u64 test_result_subprog2;
++__u64 test_result_subprog2 = 0;
+ SEC("fexit/test_pkt_access_subprog2")
+ int test_subprog2(struct args_subprog2 *ctx)
+ {
+--- a/tools/testing/selftests/bpf/progs/fexit_test.c
++++ b/tools/testing/selftests/bpf/progs/fexit_test.c
+@@ -6,28 +6,28 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile __u64 test1_result;
++__u64 test1_result = 0;
+ BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret)
+ {
+ test1_result = a == 1 && ret == 2;
+ return 0;
+ }
+
+-static volatile __u64 test2_result;
++__u64 test2_result = 0;
+ BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret)
+ {
+ test2_result = a == 2 && b == 3 && ret == 5;
+ return 0;
+ }
+
+-static volatile __u64 test3_result;
++__u64 test3_result = 0;
+ BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret)
+ {
+ test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
+ return 0;
+ }
+
+-static volatile __u64 test4_result;
++__u64 test4_result = 0;
+ BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
+ void *, a, char, b, int, c, __u64, d, int, ret)
+ {
+@@ -37,7 +37,7 @@ BPF_TRACE_5("fexit/bpf_fentry_test4", te
+ return 0;
+ }
+
+-static volatile __u64 test5_result;
++__u64 test5_result = 0;
+ BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
+ __u64, a, void *, b, short, c, int, d, __u64, e, int, ret)
+ {
+@@ -46,7 +46,7 @@ BPF_TRACE_6("fexit/bpf_fentry_test5", te
+ return 0;
+ }
+
+-static volatile __u64 test6_result;
++__u64 test6_result = 0;
+ BPF_TRACE_7("fexit/bpf_fentry_test6", test6,
+ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f,
+ int, ret)
+--- a/tools/testing/selftests/bpf/progs/test_mmap.c
++++ b/tools/testing/selftests/bpf/progs/test_mmap.c
+@@ -15,8 +15,8 @@ struct {
+ __type(value, __u64);
+ } data_map SEC(".maps");
+
+-static volatile __u64 in_val;
+-static volatile __u64 out_val;
++__u64 in_val = 0;
++__u64 out_val = 0;
+
+ SEC("raw_tracepoint/sys_enter")
+ int test_mmap(void *ctx)
diff --git a/patches.suse/libbpf-Fix-up-generation-of-bpf_helper_defs.h.patch b/patches.suse/libbpf-Fix-up-generation-of-bpf_helper_defs.h.patch
new file mode 100644
index 0000000000..4c7c703f17
--- /dev/null
+++ b/patches.suse/libbpf-Fix-up-generation-of-bpf_helper_defs.h.patch
@@ -0,0 +1,71 @@
+From: Arnaldo Carvalho de Melo <arnaldo.melo@gmail.com>
+Date: Tue, 26 Nov 2019 12:10:45 -0300
+Subject: libbpf: Fix up generation of bpf_helper_defs.h
+Patch-mainline: v5.5-rc1
+Git-commit: 1fd450f99272791df8ea8e1b0f5657678e118e90
+References: bsc#1155518
+
+ $ make -C tools/perf build-test
+
+ends up with these two problems:
+
+ make[3]: *** No rule to make target '/tmp/tmp.zq13cHILGB/perf-5.3.0/include/uapi/linux/bpf.h', needed by 'bpf_helper_defs.h'. Stop.
+ make[3]: *** Waiting for unfinished jobs....
+ make[2]: *** [Makefile.perf:757: /tmp/tmp.zq13cHILGB/perf-5.3.0/tools/lib/bpf/libbpf.a] Error 2
+ make[2]: *** Waiting for unfinished jobs....
+
+Because $(srcdir) points to the /tmp/tmp.zq13cHILGB/perf-5.3.0 directory
+and we need '/tools/' after that variable. After fixing this we
+get to another problem:
+
+ /bin/sh: /home/acme/git/perf/tools/scripts/bpf_helpers_doc.py: No such file or directory
+ make[3]: *** [Makefile:184: bpf_helper_defs.h] Error 127
+ make[3]: *** Deleting file 'bpf_helper_defs.h'
+ LD /tmp/build/perf/libapi-in.o
+ make[2]: *** [Makefile.perf:778: /tmp/build/perf/libbpf.a] Error 2
+ make[2]: *** Waiting for unfinished jobs....
+
+Because this requires something outside the tools/ directories that gets
+collected into perf's detached tarballs, fix it by adding the script to
+tools/perf/MANIFEST, which this patch does. Now it works for that case
+and for all the other cases as well.
+
+Fixes: e01a75c15969 ("libbpf: Move bpf_{helpers, helper_defs, endian, tracing}.h into libbpf")
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andriin@fb.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: https://lkml.kernel.org/n/tip-4pnkg2vmdvq5u6eivc887wen@git.kernel.org
+Link: https://lore.kernel.org/bpf/20191126151045.GB19483@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/Makefile | 4 ++--
+ tools/perf/MANIFEST | 1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -184,9 +184,9 @@ $(BPF_IN_SHARED): force elfdep bpfdep bp
+ $(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
+
+-bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h
++bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h
+ $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
+- --file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h
++ --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h
+
+ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+--- a/tools/perf/MANIFEST
++++ b/tools/perf/MANIFEST
+@@ -19,3 +19,4 @@ tools/lib/bitmap.c
+ tools/lib/str_error_r.c
+ tools/lib/vsprintf.c
+ tools/lib/zalloc.c
++scripts/bpf_helpers_doc.py
diff --git a/patches.suse/libbpf-Fix-usage-of-u32-in-userspace-code.patch b/patches.suse/libbpf-Fix-usage-of-u32-in-userspace-code.patch
new file mode 100644
index 0000000000..c06ad2d7f7
--- /dev/null
+++ b/patches.suse/libbpf-Fix-usage-of-u32-in-userspace-code.patch
@@ -0,0 +1,30 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Mon, 25 Nov 2019 13:29:48 -0800
+Subject: libbpf: Fix usage of u32 in userspace code
+Patch-mainline: v5.5-rc1
+Git-commit: b615e5a1e067dcb327482d1af7463268b89b1629
+References: bsc#1155518
+
+u32 is not defined for libbpf when compiled outside of kernel sources (e.g.,
+in Github projection). Use __u32 instead.
+
+Fixes: b8c54ea455dc ("libbpf: Add support to attach to fentry/fexit tracing progs")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191125212948.1163343-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4994,7 +4994,7 @@ int libbpf_find_vmlinux_btf_id(const cha
+ char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
+ const char *btf_name;
+ int err = -EINVAL;
+- u32 kind;
++ __u32 kind;
+
+ if (IS_ERR(btf)) {
+ pr_warning("vmlinux BTF is not found\n");
diff --git a/patches.suse/libbpf-Fix-various-errors-and-warning-reported-by-ch.patch b/patches.suse/libbpf-Fix-various-errors-and-warning-reported-by-ch.patch
new file mode 100644
index 0000000000..9c2b79ef2f
--- /dev/null
+++ b/patches.suse/libbpf-Fix-various-errors-and-warning-reported-by-ch.patch
@@ -0,0 +1,150 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 20 Nov 2019 23:07:42 -0800
+Subject: libbpf: Fix various errors and warnings reported by checkpatch.pl
+Patch-mainline: v5.5-rc1
+Git-commit: 8983b731ceb42939acaa6158abcf8adb56f834bf
+References: bsc#1155518
+
+Fix a bunch of warnings and errors reported by checkpatch.pl, to make it
+easier to spot new problems.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191121070743.1309473-4-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 40 ++++++++++++++++++++++------------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -105,7 +105,7 @@ void libbpf_print(enum libbpf_print_leve
+ err = action; \
+ if (err) \
+ goto out; \
+-} while(0)
++} while (0)
+
+
+ /* Copied from tools/perf/util/util.h */
+@@ -958,8 +958,7 @@ static int bpf_object__init_user_maps(st
+ obj->path, nr_maps, data->d_size);
+
+ if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
+- pr_warning("unable to determine map definition size "
+- "section %s, %d maps in %zd bytes\n",
++ pr_warning("unable to determine map definition size section %s, %d maps in %zd bytes\n",
+ obj->path, nr_maps, data->d_size);
+ return -EINVAL;
+ }
+@@ -1023,12 +1022,11 @@ static int bpf_object__init_user_maps(st
+ * incompatible.
+ */
+ char *b;
++
+ for (b = ((char *)def) + sizeof(struct bpf_map_def);
+ b < ((char *)def) + map_def_sz; b++) {
+ if (*b != 0) {
+- pr_warning("maps section in %s: \"%s\" "
+- "has unrecognized, non-zero "
+- "options\n",
++ pr_warning("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
+ obj->path, map_name);
+ if (strict)
+ return -EINVAL;
+@@ -1066,7 +1064,8 @@ skip_mods_and_typedefs(const struct btf
+ */
+ static bool get_map_field_int(const char *map_name, const struct btf *btf,
+ const struct btf_type *def,
+- const struct btf_member *m, __u32 *res) {
++ const struct btf_member *m, __u32 *res)
++{
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
+ const char *name = btf__name_by_offset(btf, m->name_off);
+ const struct btf_array *arr_info;
+@@ -1381,7 +1380,8 @@ static int bpf_object__init_user_btf_map
+ for (i = 0; i < vlen; i++) {
+ err = bpf_object__init_user_btf_map(obj, sec, i,
+ obj->efile.btf_maps_shndx,
+- data, strict, pin_root_path);
++ data, strict,
++ pin_root_path);
+ if (err)
+ return err;
+ }
+@@ -1668,12 +1668,14 @@ static int bpf_object__elf_collect(struc
+ if (strcmp(name, ".text") == 0)
+ obj->efile.text_shndx = idx;
+ err = bpf_object__add_program(obj, data->d_buf,
+- data->d_size, name, idx);
++ data->d_size,
++ name, idx);
+ if (err) {
+ char errmsg[STRERR_BUFSIZE];
+- char *cp = libbpf_strerror_r(-err, errmsg,
+- sizeof(errmsg));
++ char *cp;
+
++ cp = libbpf_strerror_r(-err, errmsg,
++ sizeof(errmsg));
+ pr_warning("failed to alloc program %s (%s): %s",
+ name, obj->path, cp);
+ return err;
+@@ -1823,8 +1825,8 @@ static int bpf_program__record_reloc(str
+ }
+
+ if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
+- pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
+- insn_idx, insn->code);
++ pr_warning("invalid relo for insns[%d].code 0x%x\n",
++ insn_idx, insn->code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
+@@ -2138,7 +2140,7 @@ bpf_object__probe_global_data(struct bpf
+
+ static int bpf_object__probe_btf_func(struct bpf_object *obj)
+ {
+- const char strs[] = "\0int\0x\0a";
++ static const char strs[] = "\0int\0x\0a";
+ /* void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+@@ -2164,7 +2166,7 @@ static int bpf_object__probe_btf_func(st
+
+ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
+ {
+- const char strs[] = "\0x\0.data";
++ static const char strs[] = "\0x\0.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+@@ -4971,7 +4973,7 @@ int libbpf_prog_type_by_name(const char
+ *expected_attach_type = section_names[i].expected_attach_type;
+ return 0;
+ }
+- pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
++ pr_warning("failed to guess program type from ELF section '%s'\n", name);
+ type_names = libbpf_get_type_names(false);
+ if (type_names != NULL) {
+ pr_info("supported section(type) names are:%s\n", type_names);
+@@ -6198,7 +6200,8 @@ static struct bpf_prog_info_array_desc b
+
+ };
+
+-static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
++static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
++ int offset)
+ {
+ __u32 *array = (__u32 *)info;
+
+@@ -6207,7 +6210,8 @@ static __u32 bpf_prog_info_read_offset_u
+ return -(int)offset;
+ }
+
+-static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
++static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
++ int offset)
+ {
+ __u64 *array = (__u64 *)info;
+
diff --git a/patches.suse/libbpf-Generate-more-efficient-BPF_CORE_READ-code.patch b/patches.suse/libbpf-Generate-more-efficient-BPF_CORE_READ-code.patch
new file mode 100644
index 0000000000..6b9f5118b1
--- /dev/null
+++ b/patches.suse/libbpf-Generate-more-efficient-BPF_CORE_READ-code.patch
@@ -0,0 +1,61 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Thu, 10 Oct 2019 19:38:47 -0700
+Subject: libbpf: Generate more efficient BPF_CORE_READ code
+Patch-mainline: v5.5-rc1
+Git-commit: 409017847d2014db8ab1da49dd48182af88344b7
+References: bsc#1155518
+
+The existing BPF_CORE_READ() macro generates slightly suboptimal code. If
+there are intermediate pointers to be read, the initial source pointer is
+assigned into a temporary variable, and that temporary variable is then
+uniformly used as the "source" pointer for all intermediate pointer reads.
+Schematically (ignoring all the type casts),
+BPF_CORE_READ(s, a, b, c) is expanded into:
+({
+ const void *__t = src;
+ bpf_probe_read(&__t, sizeof(*__t), &__t->a);
+ bpf_probe_read(&__t, sizeof(*__t), &__t->b);
+
+ typeof(s->a->b->c) __r;
+ bpf_probe_read(&__r, sizeof(*__r), &__t->c);
+})
+
+This initial `__t = src` makes calls more uniform, but sometimes causes
+slightly less optimal register usage when compiled with Clang. This can
+cascade into, e.g., more register spills.
+
+This patch fixes this issue by generating a more optimal sequence:
+({
+ const void *__t;
+ bpf_probe_read(&__t, sizeof(*__t), &src->a); /* <-- src here */
+ bpf_probe_read(&__t, sizeof(*__t), &__t->b);
+
+ typeof(s->a->b->c) __r;
+ bpf_probe_read(&__r, sizeof(*__r), &__t->c);
+})
+
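+Usage of the macro is unchanged; e.g. (the field chain here is hypothetical):
+
+  pid_t pid = BPF_CORE_READ(task, group_leader, pid);
+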
+Fixes: 7db3822ab991 ("libbpf: Add BPF_CORE_READ/BPF_CORE_READ_INTO helpers")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191011023847.275936-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_core_read.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -88,11 +88,11 @@
+ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
+
+ /* "recursively" read a sequence of inner pointers using local __t var */
++#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
+ #define ___rd_last(...) \
+ ___read(bpf_core_read, &__t, \
+ ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
+-#define ___rd_p0(src) const void *__t = src;
+-#define ___rd_p1(...) ___rd_p0(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
++#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
+ #define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+ #define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+ #define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
diff --git a/patches.suse/libbpf-Introduce-btf__find_by_name_kind.patch b/patches.suse/libbpf-Introduce-btf__find_by_name_kind.patch
new file mode 100644
index 0000000000..2efe26a054
--- /dev/null
+++ b/patches.suse/libbpf-Introduce-btf__find_by_name_kind.patch
@@ -0,0 +1,72 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:05 -0800
+Subject: libbpf: Introduce btf__find_by_name_kind()
+Patch-mainline: v5.5-rc1
+Git-commit: 1442e2871b7679271fc9fcbf043ba1be511a7428
+References: bsc#1155518
+
+Introduce btf__find_by_name_kind() helper to search BTF by name and kind, since
+name alone can be ambiguous.
+
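+A minimal usage sketch (the type name here is hypothetical):
+
+  __s32 id = btf__find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
+  if (id < 0)
+          return id; /* -ENOENT when no STRUCT of that name exists */
+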
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-6-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/btf.c | 22 ++++++++++++++++++++++
+ tools/lib/bpf/btf.h | 2 ++
+ tools/lib/bpf/libbpf.map | 1 +
+ 3 files changed, 25 insertions(+)
+
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -316,6 +316,28 @@ __s32 btf__find_by_name(const struct btf
+ return -ENOENT;
+ }
+
++__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
++ __u32 kind)
++{
++ __u32 i;
++
++ if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
++ return 0;
++
++ for (i = 1; i <= btf->nr_types; i++) {
++ const struct btf_type *t = btf->types[i];
++ const char *name;
++
++ if (btf_kind(t) != kind)
++ continue;
++ name = btf__name_by_offset(btf, t->name_off);
++ if (name && !strcmp(type_name, name))
++ return i;
++ }
++
++ return -ENOENT;
++}
++
+ void btf__free(struct btf *btf)
+ {
+ if (!btf)
+--- a/tools/lib/bpf/btf.h
++++ b/tools/lib/bpf/btf.h
+@@ -72,6 +72,8 @@ LIBBPF_API int btf__finalize_data(struct
+ LIBBPF_API int btf__load(struct btf *btf);
+ LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
+ const char *type_name);
++LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
++ const char *type_name, __u32 kind);
+ LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+ LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
+ __u32 id);
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -203,4 +203,5 @@ LIBBPF_0.0.6 {
+ bpf_program__is_tracing;
+ bpf_program__set_tracing;
+ bpf_program__size;
++ btf__find_by_name_kind;
+ } LIBBPF_0.0.5;
diff --git a/patches.suse/libbpf-Make-DECLARE_LIBBPF_OPTS-macro-strictly-a-var.patch b/patches.suse/libbpf-Make-DECLARE_LIBBPF_OPTS-macro-strictly-a-var.patch
new file mode 100644
index 0000000000..31cb4b929d
--- /dev/null
+++ b/patches.suse/libbpf-Make-DECLARE_LIBBPF_OPTS-macro-strictly-a-var.patch
@@ -0,0 +1,149 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 22 Oct 2019 10:21:00 -0700
+Subject: libbpf: Make DECLARE_LIBBPF_OPTS macro strictly a variable
+ declaration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: e00aca65e646da08f8dce31c9b89f11dab76198c
+References: bsc#1155518
+
+LIBBPF_OPTS is implemented as a mix of a field declaration and memset
++ assignment. This makes it neither a variable declaration nor purely
+statements, which is a problem: you can't mix it with either other
+variable declarations or other function statements, because C90
+compiler mode emits a warning on mixing all that together.
+
+This patch changes LIBBPF_OPTS into strictly a variable declaration and
+solves this problem, as can be seen in the case of bpftool, which
+previously would emit a compiler warning if done this way (LIBBPF_OPTS as
+part of the function's variable declaration block).
+
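+As a sketch, the macro can now sit inside a declaration block (mirroring
+the bpftool hunk below):
+
+  DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+          .relaxed_maps = true,
+  );
+  struct bpf_object *obj; /* further declarations remain valid C90 */
+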
+This patch also renames LIBBPF_OPTS into DECLARE_LIBBPF_OPTS to follow
+kernel convention for similar macros more closely.
+
+v1->v2:
+- rename LIBBPF_OPTS into DECLARE_LIBBPF_OPTS (Jakub Sitnicki).
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Link: https://lore.kernel.org/bpf/20191022172100.3281465-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/bpf/bpftool/prog.c | 8 ++---
+ tools/lib/bpf/libbpf.c | 4 +-
+ tools/lib/bpf/libbpf.h | 19 +++++++-----
+ tools/testing/selftests/bpf/prog_tests/attach_probe.c | 2 -
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 2 -
+ tools/testing/selftests/bpf/prog_tests/reference_tracking.c | 2 -
+ 6 files changed, 21 insertions(+), 16 deletions(-)
+
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1091,8 +1091,11 @@ free_data_in:
+
+ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ {
+- struct bpf_object_load_attr load_attr = { 0 };
+ enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
++ .relaxed_maps = relaxed_maps,
++ );
++ struct bpf_object_load_attr load_attr = { 0 };
+ enum bpf_attach_type expected_attach_type;
+ struct map_replace *map_replace = NULL;
+ struct bpf_program *prog = NULL, *pos;
+@@ -1106,9 +1109,6 @@ static int load_with_options(int argc, c
+ const char *file;
+ int idx, err;
+
+- LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+- .relaxed_maps = relaxed_maps,
+- );
+
+ if (!REQ_ARGS(2))
+ return -1;
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3666,7 +3666,7 @@ out:
+ static struct bpf_object *
+ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
+ {
+- LIBBPF_OPTS(bpf_object_open_opts, opts,
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .relaxed_maps = flags & MAPS_RELAX_COMPAT,
+ );
+
+@@ -3718,7 +3718,7 @@ struct bpf_object *
+ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
+ const char *name)
+ {
+- LIBBPF_OPTS(bpf_object_open_opts, opts,
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .object_name = name,
+ /* wrong default, but backwards-compatible */
+ .relaxed_maps = true,
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -75,14 +75,19 @@ struct bpf_object_open_attr {
+ * have all the padding bytes initialized to zero. It's not guaranteed though,
+ * when copying literal, that compiler won't copy garbage in literal's padding
+ * bytes, but that's the best way I've found and it seems to work in practice.
++ *
++ * Macro declares opts struct of given type and name, zero-initializes,
++ * including any extra padding, it with memset() and then assigns initial
++ * values provided by users in struct initializer-syntax as varargs.
+ */
+-#define LIBBPF_OPTS(TYPE, NAME, ...) \
+- struct TYPE NAME; \
+- memset(&NAME, 0, sizeof(struct TYPE)); \
+- NAME = (struct TYPE) { \
+- .sz = sizeof(struct TYPE), \
+- __VA_ARGS__ \
+- }
++#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
++ struct TYPE NAME = ({ \
++ memset(&NAME, 0, sizeof(struct TYPE)); \
++ (struct TYPE) { \
++ .sz = sizeof(struct TYPE), \
++ __VA_ARGS__ \
++ }; \
++ })
+
+ struct bpf_object_open_opts {
+ /* size of this struct, for forward/backward compatiblity */
+--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
++++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+@@ -50,7 +50,7 @@ void test_attach_probe(void)
+ const int kprobe_idx = 0, kretprobe_idx = 1;
+ const int uprobe_idx = 2, uretprobe_idx = 3;
+ const char *obj_name = "attach_probe";
+- LIBBPF_OPTS(bpf_object_open_opts, open_opts,
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .object_name = obj_name,
+ .relaxed_maps = true,
+ );
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -377,7 +377,7 @@ void test_core_reloc(void)
+ if (!test__start_subtest(test_case->case_name))
+ continue;
+
+- LIBBPF_OPTS(bpf_object_open_opts, opts,
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .relaxed_core_relocs = test_case->relaxed_core_relocs,
+ );
+
+--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
++++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+@@ -5,7 +5,7 @@ void test_reference_tracking(void)
+ {
+ const char *file = "test_sk_lookup_kern.o";
+ const char *obj_name = "ref_track";
+- LIBBPF_OPTS(bpf_object_open_opts, open_opts,
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .object_name = obj_name,
+ .relaxed_maps = true,
+ );
diff --git a/patches.suse/libbpf-Move-bpf_-helpers-helper_defs-endian-tracing-.patch b/patches.suse/libbpf-Move-bpf_-helpers-helper_defs-endian-tracing-.patch
new file mode 100644
index 0000000000..ba33642f37
--- /dev/null
+++ b/patches.suse/libbpf-Move-bpf_-helpers-helper_defs-endian-tracing-.patch
@@ -0,0 +1,824 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 8 Oct 2019 10:59:40 -0700
+Subject: libbpf: Move bpf_{helpers, helper_defs, endian, tracing}.h into
+ libbpf
+Patch-mainline: v5.5-rc1
+Git-commit: e01a75c159691714607b8a22daa2ba7be275dd01
+References: bsc#1155518
+
+Move bpf_helpers.h, bpf_tracing.h, and bpf_endian.h into libbpf. Move
+bpf_helper_defs.h generation into libbpf's Makefile. Ensure all those
+headers are installed alongside the other libbpf headers. Also, adjust
+the selftests and samples include paths so they pick up libbpf now.
+
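+Once installed via install_headers, out-of-tree consumers can include them
+as, for example (assuming $(prefix)/include is on the include path):
+
+  #include <bpf/bpf_helpers.h>
+  #include <bpf/bpf_endian.h>
+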
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191008175942.1769476-6-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ samples/bpf/Makefile | 2
+ tools/lib/bpf/.gitignore | 1
+ tools/lib/bpf/Makefile | 19 ++
+ tools/lib/bpf/bpf_endian.h | 72 +++++++++++
+ tools/lib/bpf/bpf_helpers.h | 55 ++++++++
+ tools/lib/bpf/bpf_tracing.h | 195 ++++++++++++++++++++++++++++++
+ tools/testing/selftests/bpf/.gitignore | 1
+ tools/testing/selftests/bpf/Makefile | 10 -
+ tools/testing/selftests/bpf/bpf_endian.h | 72 -----------
+ tools/testing/selftests/bpf/bpf_helpers.h | 55 --------
+ tools/testing/selftests/bpf/bpf_tracing.h | 195 ------------------------------
+ 11 files changed, 341 insertions(+), 336 deletions(-)
+
+--- a/samples/bpf/Makefile
++++ b/samples/bpf/Makefile
+@@ -283,7 +283,7 @@ $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(sr
+ $(obj)/%.o: $(src)/%.c
+ @echo " CLANG-bpf " $@
+ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
+- -I$(srctree)/tools/testing/selftests/bpf/ \
++ -I$(srctree)/tools/testing/selftests/bpf/ -I$(srctree)/tools/lib/bpf/ \
+ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
+ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
+ -Wno-gnu-variable-sized-type-not-at-end \
+--- a/tools/lib/bpf/.gitignore
++++ b/tools/lib/bpf/.gitignore
+@@ -6,3 +6,4 @@ libbpf.so.*
+ TAGS
+ tags
+ cscope.*
++/bpf_helper_defs.h
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -163,7 +163,7 @@ all: fixdep
+
+ all_cmd: $(CMD_TARGETS) check
+
+-$(BPF_IN_SHARED): force elfdep bpfdep
++$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
+ @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
+ (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
+@@ -181,9 +181,13 @@ $(BPF_IN_SHARED): force elfdep bpfdep
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
+
+-$(BPF_IN_STATIC): force elfdep bpfdep
++$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
+
++bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h
++ $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
++ --file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h
++
+ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+ $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
+@@ -245,13 +249,17 @@ install_lib: all_cmd
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
+
+-install_headers:
++install_headers: bpf_helper_defs.h
+ $(call QUIET_INSTALL, headers) \
+ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
+- $(call do_install,xsk.h,$(prefix)/include/bpf,644);
++ $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
++ $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
++ $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
++ $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
++ $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644);
+
+ install_pkgconfig: $(PC_FILE)
+ $(call QUIET_INSTALL, $(PC_FILE)) \
+@@ -268,7 +276,8 @@ config-clean:
+ clean:
+ $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
+ *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
+- *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
++ *.pc LIBBPF-CFLAGS bpf_helper_defs.h \
++ $(SHARED_OBJDIR) $(STATIC_OBJDIR)
+ $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
+
+
+--- /dev/null
++++ b/tools/lib/bpf/bpf_endian.h
+@@ -0,0 +1,72 @@
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
++#ifndef __BPF_ENDIAN__
++#define __BPF_ENDIAN__
++
++#include <linux/stddef.h>
++#include <linux/swab.h>
++
++/* LLVM's BPF target selects the endianness of the CPU
++ * it compiles on, or the user specifies (bpfel/bpfeb),
++ * respectively. The used __BYTE_ORDER__ is defined by
++ * the compiler, we cannot rely on __BYTE_ORDER from
++ * libc headers, since it doesn't reflect the actual
++ * requested byte order.
++ *
++ * Note, LLVM's BPF target has different __builtin_bswapX()
++ * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
++ * in bpfel and bpfeb case, which means below, that we map
++ * to cpu_to_be16(). We could use it unconditionally in BPF
++ * case, but better not rely on it, so that this header here
++ * can be used from application and BPF program side, which
++ * use different targets.
++ */
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++# define __bpf_ntohs(x) __builtin_bswap16(x)
++# define __bpf_htons(x) __builtin_bswap16(x)
++# define __bpf_constant_ntohs(x) ___constant_swab16(x)
++# define __bpf_constant_htons(x) ___constant_swab16(x)
++# define __bpf_ntohl(x) __builtin_bswap32(x)
++# define __bpf_htonl(x) __builtin_bswap32(x)
++# define __bpf_constant_ntohl(x) ___constant_swab32(x)
++# define __bpf_constant_htonl(x) ___constant_swab32(x)
++# define __bpf_be64_to_cpu(x) __builtin_bswap64(x)
++# define __bpf_cpu_to_be64(x) __builtin_bswap64(x)
++# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x)
++# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x)
++#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++# define __bpf_ntohs(x) (x)
++# define __bpf_htons(x) (x)
++# define __bpf_constant_ntohs(x) (x)
++# define __bpf_constant_htons(x) (x)
++# define __bpf_ntohl(x) (x)
++# define __bpf_htonl(x) (x)
++# define __bpf_constant_ntohl(x) (x)
++# define __bpf_constant_htonl(x) (x)
++# define __bpf_be64_to_cpu(x) (x)
++# define __bpf_cpu_to_be64(x) (x)
++# define __bpf_constant_be64_to_cpu(x) (x)
++# define __bpf_constant_cpu_to_be64(x) (x)
++#else
++# error "Fix your compiler's __BYTE_ORDER__?!"
++#endif
++
++#define bpf_htons(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_htons(x) : __bpf_htons(x))
++#define bpf_ntohs(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_ntohs(x) : __bpf_ntohs(x))
++#define bpf_htonl(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_htonl(x) : __bpf_htonl(x))
++#define bpf_ntohl(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_ntohl(x) : __bpf_ntohl(x))
++#define bpf_cpu_to_be64(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
++#define bpf_be64_to_cpu(x) \
++ (__builtin_constant_p(x) ? \
++ __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))
++
++#endif /* __BPF_ENDIAN__ */
+--- /dev/null
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -0,0 +1,55 @@
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
++#ifndef __BPF_HELPERS__
++#define __BPF_HELPERS__
++
++#include "bpf_helper_defs.h"
++
++#define __uint(name, val) int (*name)[val]
++#define __type(name, val) typeof(val) *name
++
++/* helper macro to print out debug messages */
++#define bpf_printk(fmt, ...) \
++({ \
++ char ____fmt[] = fmt; \
++ bpf_trace_printk(____fmt, sizeof(____fmt), \
++ ##__VA_ARGS__); \
++})
++
++/* helper macro to place programs, maps, license in
++ * different sections in elf_bpf file. Section names
++ * are interpreted by elf_bpf loader
++ */
++#define SEC(NAME) __attribute__((section(NAME), used))
++
++/* a helper structure used by eBPF C program
++ * to describe BPF map attributes to libbpf loader
++ */
++struct bpf_map_def {
++ unsigned int type;
++ unsigned int key_size;
++ unsigned int value_size;
++ unsigned int max_entries;
++ unsigned int map_flags;
++};
++
++/*
++ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
++ * relocation for source address using __builtin_preserve_access_index()
++ * built-in, provided by Clang.
++ *
++ * __builtin_preserve_access_index() takes as an argument an expression of
++ * taking an address of a field within struct/union. It makes compiler emit
++ * a relocation, which records BTF type ID describing root struct/union and an
++ * accessor string which describes exact embedded field that was used to take
++ * an address. See detailed description of this relocation format and
++ * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
++ *
++ * This relocation allows libbpf to adjust BPF instruction to use correct
++ * actual field offset, based on target kernel BTF type that matches original
++ * (local) BTF, used to record relocation.
++ */
++#define bpf_core_read(dst, sz, src) \
++ bpf_probe_read(dst, sz, \
++ (const void *)__builtin_preserve_access_index(src))
++
++#endif
+--- /dev/null
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -0,0 +1,195 @@
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
++#ifndef __BPF_TRACING_H__
++#define __BPF_TRACING_H__
++
++/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
++#if defined(__TARGET_ARCH_x86)
++ #define bpf_target_x86
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_s390)
++ #define bpf_target_s390
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_arm)
++ #define bpf_target_arm
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_arm64)
++ #define bpf_target_arm64
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_mips)
++ #define bpf_target_mips
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_powerpc)
++ #define bpf_target_powerpc
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_sparc)
++ #define bpf_target_sparc
++ #define bpf_target_defined
++#else
++ #undef bpf_target_defined
++#endif
++
++/* Fall back to what the compiler says */
++#ifndef bpf_target_defined
++#if defined(__x86_64__)
++ #define bpf_target_x86
++#elif defined(__s390__)
++ #define bpf_target_s390
++#elif defined(__arm__)
++ #define bpf_target_arm
++#elif defined(__aarch64__)
++ #define bpf_target_arm64
++#elif defined(__mips__)
++ #define bpf_target_mips
++#elif defined(__powerpc__)
++ #define bpf_target_powerpc
++#elif defined(__sparc__)
++ #define bpf_target_sparc
++#endif
++#endif
++
++#if defined(bpf_target_x86)
++
++#ifdef __KERNEL__
++#define PT_REGS_PARM1(x) ((x)->di)
++#define PT_REGS_PARM2(x) ((x)->si)
++#define PT_REGS_PARM3(x) ((x)->dx)
++#define PT_REGS_PARM4(x) ((x)->cx)
++#define PT_REGS_PARM5(x) ((x)->r8)
++#define PT_REGS_RET(x) ((x)->sp)
++#define PT_REGS_FP(x) ((x)->bp)
++#define PT_REGS_RC(x) ((x)->ax)
++#define PT_REGS_SP(x) ((x)->sp)
++#define PT_REGS_IP(x) ((x)->ip)
++#else
++#ifdef __i386__
++/* i386 kernel is built with -mregparm=3 */
++#define PT_REGS_PARM1(x) ((x)->eax)
++#define PT_REGS_PARM2(x) ((x)->edx)
++#define PT_REGS_PARM3(x) ((x)->ecx)
++#define PT_REGS_PARM4(x) 0
++#define PT_REGS_PARM5(x) 0
++#define PT_REGS_RET(x) ((x)->esp)
++#define PT_REGS_FP(x) ((x)->ebp)
++#define PT_REGS_RC(x) ((x)->eax)
++#define PT_REGS_SP(x) ((x)->esp)
++#define PT_REGS_IP(x) ((x)->eip)
++#else
++#define PT_REGS_PARM1(x) ((x)->rdi)
++#define PT_REGS_PARM2(x) ((x)->rsi)
++#define PT_REGS_PARM3(x) ((x)->rdx)
++#define PT_REGS_PARM4(x) ((x)->rcx)
++#define PT_REGS_PARM5(x) ((x)->r8)
++#define PT_REGS_RET(x) ((x)->rsp)
++#define PT_REGS_FP(x) ((x)->rbp)
++#define PT_REGS_RC(x) ((x)->rax)
++#define PT_REGS_SP(x) ((x)->rsp)
++#define PT_REGS_IP(x) ((x)->rip)
++#endif
++#endif
++
++#elif defined(bpf_target_s390)
++
++/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
++struct pt_regs;
++#define PT_REGS_S390 const volatile user_pt_regs
++#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
++#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
++#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
++#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
++#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
++#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
++/* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
++#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
++#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
++#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
++
++#elif defined(bpf_target_arm)
++
++#define PT_REGS_PARM1(x) ((x)->uregs[0])
++#define PT_REGS_PARM2(x) ((x)->uregs[1])
++#define PT_REGS_PARM3(x) ((x)->uregs[2])
++#define PT_REGS_PARM4(x) ((x)->uregs[3])
++#define PT_REGS_PARM5(x) ((x)->uregs[4])
++#define PT_REGS_RET(x) ((x)->uregs[14])
++#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_RC(x) ((x)->uregs[0])
++#define PT_REGS_SP(x) ((x)->uregs[13])
++#define PT_REGS_IP(x) ((x)->uregs[12])
++
++#elif defined(bpf_target_arm64)
++
++/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
++struct pt_regs;
++#define PT_REGS_ARM64 const volatile struct user_pt_regs
++#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
++#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
++#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
++#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
++#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
++#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
++/* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
++#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
++#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
++#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
++
++#elif defined(bpf_target_mips)
++
++#define PT_REGS_PARM1(x) ((x)->regs[4])
++#define PT_REGS_PARM2(x) ((x)->regs[5])
++#define PT_REGS_PARM3(x) ((x)->regs[6])
++#define PT_REGS_PARM4(x) ((x)->regs[7])
++#define PT_REGS_PARM5(x) ((x)->regs[8])
++#define PT_REGS_RET(x) ((x)->regs[31])
++#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_RC(x) ((x)->regs[1])
++#define PT_REGS_SP(x) ((x)->regs[29])
++#define PT_REGS_IP(x) ((x)->cp0_epc)
++
++#elif defined(bpf_target_powerpc)
++
++#define PT_REGS_PARM1(x) ((x)->gpr[3])
++#define PT_REGS_PARM2(x) ((x)->gpr[4])
++#define PT_REGS_PARM3(x) ((x)->gpr[5])
++#define PT_REGS_PARM4(x) ((x)->gpr[6])
++#define PT_REGS_PARM5(x) ((x)->gpr[7])
++#define PT_REGS_RC(x) ((x)->gpr[3])
++#define PT_REGS_SP(x) ((x)->sp)
++#define PT_REGS_IP(x) ((x)->nip)
++
++#elif defined(bpf_target_sparc)
++
++#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
++#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
++#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
++#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
++#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
++#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
++#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
++#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
++
++/* Should this also be a bpf_target check for the sparc case? */
++#if defined(__arch64__)
++#define PT_REGS_IP(x) ((x)->tpc)
++#else
++#define PT_REGS_IP(x) ((x)->pc)
++#endif
++
++#endif
++
++#if defined(bpf_target_powerpc)
++#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
++#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
++#elif defined(bpf_target_sparc)
++#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
++#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
++#else
++#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
++ ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
++#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
++ ({ bpf_probe_read(&(ip), sizeof(ip), \
++ (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
++#endif
++
++#endif
+--- a/tools/testing/selftests/bpf/.gitignore
++++ b/tools/testing/selftests/bpf/.gitignore
+@@ -39,7 +39,6 @@ libbpf.so.*
+ test_hashmap
+ test_btf_dump
+ xdping
+-/bpf_helper_defs.h
+ test_sockopt
+ test_sockopt_sk
+ test_sockopt_multi
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -91,10 +91,6 @@ include ../lib.mk
+ TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+ all: $(TEST_CUSTOM_PROGS)
+
+-bpf_helper_defs.h: $(APIDIR)/linux/bpf.h
+- $(BPFDIR)/../../../scripts/bpf_helpers_doc.py --header \
+- --file $(APIDIR)/linux/bpf.h > bpf_helper_defs.h
+-
+ $(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ $< -Wl,--build-id
+
+@@ -132,7 +128,7 @@ $(OUTPUT)/test_tcp_rtt: cgroup_helpers.c
+ # force a rebuild of BPFOBJ when its dependencies are updated
+ force:
+
+-$(BPFOBJ): force bpf_helper_defs.h
++$(BPFOBJ): force
+ $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
+
+ PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+@@ -157,7 +153,7 @@ $(shell $(1) -v -E - </dev/null 2>&1 \
+ endef
+ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
+ BPF_CFLAGS = -I. -I./include/uapi -I../../../include/uapi \
+- -I$(OUTPUT)/../usr/include -D__TARGET_ARCH_$(SRCARCH)
++ -I$(BPFDIR) -I$(OUTPUT)/../usr/include -D__TARGET_ARCH_$(SRCARCH)
+
+ CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
+ -Wno-compare-distinct-pointer-types
+@@ -328,4 +324,4 @@ $(VERIFIER_TESTS_H): $(VERIFIER_TEST_FIL
+
+ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) $(BPF_GCC_BUILD_DIR) \
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
+- feature bpf_helper_defs.h
++ feature
+--- a/tools/testing/selftests/bpf/bpf_endian.h
++++ /dev/null
+@@ -1,72 +0,0 @@
+-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+-#ifndef __BPF_ENDIAN__
+-#define __BPF_ENDIAN__
+-
+-#include <linux/stddef.h>
+-#include <linux/swab.h>
+-
+-/* LLVM's BPF target selects the endianness of the CPU
+- * it compiles on, or the user specifies (bpfel/bpfeb),
+- * respectively. The used __BYTE_ORDER__ is defined by
+- * the compiler, we cannot rely on __BYTE_ORDER from
+- * libc headers, since it doesn't reflect the actual
+- * requested byte order.
+- *
+- * Note, LLVM's BPF target has different __builtin_bswapX()
+- * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
+- * in bpfel and bpfeb case, which means below, that we map
+- * to cpu_to_be16(). We could use it unconditionally in BPF
+- * case, but better not rely on it, so that this header here
+- * can be used from application and BPF program side, which
+- * use different targets.
+- */
+-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+-# define __bpf_ntohs(x) __builtin_bswap16(x)
+-# define __bpf_htons(x) __builtin_bswap16(x)
+-# define __bpf_constant_ntohs(x) ___constant_swab16(x)
+-# define __bpf_constant_htons(x) ___constant_swab16(x)
+-# define __bpf_ntohl(x) __builtin_bswap32(x)
+-# define __bpf_htonl(x) __builtin_bswap32(x)
+-# define __bpf_constant_ntohl(x) ___constant_swab32(x)
+-# define __bpf_constant_htonl(x) ___constant_swab32(x)
+-# define __bpf_be64_to_cpu(x) __builtin_bswap64(x)
+-# define __bpf_cpu_to_be64(x) __builtin_bswap64(x)
+-# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x)
+-# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x)
+-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+-# define __bpf_ntohs(x) (x)
+-# define __bpf_htons(x) (x)
+-# define __bpf_constant_ntohs(x) (x)
+-# define __bpf_constant_htons(x) (x)
+-# define __bpf_ntohl(x) (x)
+-# define __bpf_htonl(x) (x)
+-# define __bpf_constant_ntohl(x) (x)
+-# define __bpf_constant_htonl(x) (x)
+-# define __bpf_be64_to_cpu(x) (x)
+-# define __bpf_cpu_to_be64(x) (x)
+-# define __bpf_constant_be64_to_cpu(x) (x)
+-# define __bpf_constant_cpu_to_be64(x) (x)
+-#else
+-# error "Fix your compiler's __BYTE_ORDER__?!"
+-#endif
+-
+-#define bpf_htons(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_htons(x) : __bpf_htons(x))
+-#define bpf_ntohs(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_ntohs(x) : __bpf_ntohs(x))
+-#define bpf_htonl(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_htonl(x) : __bpf_htonl(x))
+-#define bpf_ntohl(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_ntohl(x) : __bpf_ntohl(x))
+-#define bpf_cpu_to_be64(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
+-#define bpf_be64_to_cpu(x) \
+- (__builtin_constant_p(x) ? \
+- __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))
+-
+-#endif /* __BPF_ENDIAN__ */
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ /dev/null
+@@ -1,55 +0,0 @@
+-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+-#ifndef __BPF_HELPERS__
+-#define __BPF_HELPERS__
+-
+-#include "bpf_helper_defs.h"
+-
+-#define __uint(name, val) int (*name)[val]
+-#define __type(name, val) typeof(val) *name
+-
+-/* helper macro to print out debug messages */
+-#define bpf_printk(fmt, ...) \
+-({ \
+- char ____fmt[] = fmt; \
+- bpf_trace_printk(____fmt, sizeof(____fmt), \
+- ##__VA_ARGS__); \
+-})
+-
+-/* helper macro to place programs, maps, license in
+- * different sections in elf_bpf file. Section names
+- * are interpreted by elf_bpf loader
+- */
+-#define SEC(NAME) __attribute__((section(NAME), used))
+-
+-/* a helper structure used by eBPF C program
+- * to describe BPF map attributes to libbpf loader
+- */
+-struct bpf_map_def {
+- unsigned int type;
+- unsigned int key_size;
+- unsigned int value_size;
+- unsigned int max_entries;
+- unsigned int map_flags;
+-};
+-
+-/*
+- * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+- * relocation for source address using __builtin_preserve_access_index()
+- * built-in, provided by Clang.
+- *
+- * __builtin_preserve_access_index() takes as an argument an expression of
+- * taking an address of a field within struct/union. It makes compiler emit
+- * a relocation, which records BTF type ID describing root struct/union and an
+- * accessor string which describes exact embedded field that was used to take
+- * an address. See detailed description of this relocation format and
+- * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
+- *
+- * This relocation allows libbpf to adjust BPF instruction to use correct
+- * actual field offset, based on target kernel BTF type that matches original
+- * (local) BTF, used to record relocation.
+- */
+-#define bpf_core_read(dst, sz, src) \
+- bpf_probe_read(dst, sz, \
+- (const void *)__builtin_preserve_access_index(src))
+-
+-#endif
+--- a/tools/testing/selftests/bpf/bpf_tracing.h
++++ /dev/null
+@@ -1,195 +0,0 @@
+-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+-#ifndef __BPF_TRACING_H__
+-#define __BPF_TRACING_H__
+-
+-/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+-#if defined(__TARGET_ARCH_x86)
+- #define bpf_target_x86
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_s390)
+- #define bpf_target_s390
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_arm)
+- #define bpf_target_arm
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_arm64)
+- #define bpf_target_arm64
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_mips)
+- #define bpf_target_mips
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_powerpc)
+- #define bpf_target_powerpc
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_sparc)
+- #define bpf_target_sparc
+- #define bpf_target_defined
+-#else
+- #undef bpf_target_defined
+-#endif
+-
+-/* Fall back to what the compiler says */
+-#ifndef bpf_target_defined
+-#if defined(__x86_64__)
+- #define bpf_target_x86
+-#elif defined(__s390__)
+- #define bpf_target_s390
+-#elif defined(__arm__)
+- #define bpf_target_arm
+-#elif defined(__aarch64__)
+- #define bpf_target_arm64
+-#elif defined(__mips__)
+- #define bpf_target_mips
+-#elif defined(__powerpc__)
+- #define bpf_target_powerpc
+-#elif defined(__sparc__)
+- #define bpf_target_sparc
+-#endif
+-#endif
+-
+-#if defined(bpf_target_x86)
+-
+-#ifdef __KERNEL__
+-#define PT_REGS_PARM1(x) ((x)->di)
+-#define PT_REGS_PARM2(x) ((x)->si)
+-#define PT_REGS_PARM3(x) ((x)->dx)
+-#define PT_REGS_PARM4(x) ((x)->cx)
+-#define PT_REGS_PARM5(x) ((x)->r8)
+-#define PT_REGS_RET(x) ((x)->sp)
+-#define PT_REGS_FP(x) ((x)->bp)
+-#define PT_REGS_RC(x) ((x)->ax)
+-#define PT_REGS_SP(x) ((x)->sp)
+-#define PT_REGS_IP(x) ((x)->ip)
+-#else
+-#ifdef __i386__
+-/* i386 kernel is built with -mregparm=3 */
+-#define PT_REGS_PARM1(x) ((x)->eax)
+-#define PT_REGS_PARM2(x) ((x)->edx)
+-#define PT_REGS_PARM3(x) ((x)->ecx)
+-#define PT_REGS_PARM4(x) 0
+-#define PT_REGS_PARM5(x) 0
+-#define PT_REGS_RET(x) ((x)->esp)
+-#define PT_REGS_FP(x) ((x)->ebp)
+-#define PT_REGS_RC(x) ((x)->eax)
+-#define PT_REGS_SP(x) ((x)->esp)
+-#define PT_REGS_IP(x) ((x)->eip)
+-#else
+-#define PT_REGS_PARM1(x) ((x)->rdi)
+-#define PT_REGS_PARM2(x) ((x)->rsi)
+-#define PT_REGS_PARM3(x) ((x)->rdx)
+-#define PT_REGS_PARM4(x) ((x)->rcx)
+-#define PT_REGS_PARM5(x) ((x)->r8)
+-#define PT_REGS_RET(x) ((x)->rsp)
+-#define PT_REGS_FP(x) ((x)->rbp)
+-#define PT_REGS_RC(x) ((x)->rax)
+-#define PT_REGS_SP(x) ((x)->rsp)
+-#define PT_REGS_IP(x) ((x)->rip)
+-#endif
+-#endif
+-
+-#elif defined(bpf_target_s390)
+-
+-/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+-struct pt_regs;
+-#define PT_REGS_S390 const volatile user_pt_regs
+-#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
+-#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
+-#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
+-#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
+-#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
+-#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
+-/* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
+-#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
+-#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
+-#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+-
+-#elif defined(bpf_target_arm)
+-
+-#define PT_REGS_PARM1(x) ((x)->uregs[0])
+-#define PT_REGS_PARM2(x) ((x)->uregs[1])
+-#define PT_REGS_PARM3(x) ((x)->uregs[2])
+-#define PT_REGS_PARM4(x) ((x)->uregs[3])
+-#define PT_REGS_PARM5(x) ((x)->uregs[4])
+-#define PT_REGS_RET(x) ((x)->uregs[14])
+-#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_RC(x) ((x)->uregs[0])
+-#define PT_REGS_SP(x) ((x)->uregs[13])
+-#define PT_REGS_IP(x) ((x)->uregs[12])
+-
+-#elif defined(bpf_target_arm64)
+-
+-/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+-struct pt_regs;
+-#define PT_REGS_ARM64 const volatile struct user_pt_regs
+-#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+-#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
+-#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
+-#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
+-#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
+-#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
+-/* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
+-#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+-#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
+-#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+-
+-#elif defined(bpf_target_mips)
+-
+-#define PT_REGS_PARM1(x) ((x)->regs[4])
+-#define PT_REGS_PARM2(x) ((x)->regs[5])
+-#define PT_REGS_PARM3(x) ((x)->regs[6])
+-#define PT_REGS_PARM4(x) ((x)->regs[7])
+-#define PT_REGS_PARM5(x) ((x)->regs[8])
+-#define PT_REGS_RET(x) ((x)->regs[31])
+-#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_RC(x) ((x)->regs[1])
+-#define PT_REGS_SP(x) ((x)->regs[29])
+-#define PT_REGS_IP(x) ((x)->cp0_epc)
+-
+-#elif defined(bpf_target_powerpc)
+-
+-#define PT_REGS_PARM1(x) ((x)->gpr[3])
+-#define PT_REGS_PARM2(x) ((x)->gpr[4])
+-#define PT_REGS_PARM3(x) ((x)->gpr[5])
+-#define PT_REGS_PARM4(x) ((x)->gpr[6])
+-#define PT_REGS_PARM5(x) ((x)->gpr[7])
+-#define PT_REGS_RC(x) ((x)->gpr[3])
+-#define PT_REGS_SP(x) ((x)->sp)
+-#define PT_REGS_IP(x) ((x)->nip)
+-
+-#elif defined(bpf_target_sparc)
+-
+-#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
+-#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
+-#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
+-#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
+-#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
+-#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
+-#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
+-#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+-
+-/* Should this also be a bpf_target check for the sparc case? */
+-#if defined(__arch64__)
+-#define PT_REGS_IP(x) ((x)->tpc)
+-#else
+-#define PT_REGS_IP(x) ((x)->pc)
+-#endif
+-
+-#endif
+-
+-#if defined(bpf_target_powerpc)
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
+-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+-#elif defined(bpf_target_sparc)
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
+-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+-#else
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
+- ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+-#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
+- ({ bpf_probe_read(&(ip), sizeof(ip), \
+- (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+-#endif
+-
+-#endif
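
A minimal sketch of how the PT_REGS_*() accessors being relocated above are
typically used in a kprobe program. The probed function, section name, and
header name are illustrative assumptions, not part of this patch set (the
exact header depends on where this series places the macros):

    #include <linux/ptrace.h>
    #include "bpf_helpers.h"	/* assumed home of PT_REGS_*() here */

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
    	/* first argument: rdi on x86_64, gprs[2] on s390, ... */
    	long dfd = PT_REGS_PARM1(ctx);
    	char fmt[] = "open entered, dfd=%ld\n";

    	bpf_trace_printk(fmt, sizeof(fmt), dfd);
    	return 0;
    }

    char _license[] SEC("license") = "GPL";
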
diff --git a/patches.suse/libbpf-Move-directory-creation-into-_pin-functions.patch b/patches.suse/libbpf-Move-directory-creation-into-_pin-functions.patch
new file mode 100644
index 0000000000..476f290ccd
--- /dev/null
+++ b/patches.suse/libbpf-Move-directory-creation-into-_pin-functions.patch
@@ -0,0 +1,141 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 2 Nov 2019 12:09:39 +0100
+Subject: libbpf: Move directory creation into _pin() functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: 196f8487f51ee6e2a46f51e10ac3f4ca67574ba9
+References: bsc#1155518
+
+The existing pin_*() functions all try to create the parent directory
+before pinning. Move this directory creation into the per-object _pin()
+functions instead. This ensures consistent behaviour when auto-pinning is
+added (which doesn't go through the top-level pin_maps() function), at the
+cost of a few more calls to mkdir().
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157269297985.394725.5882630952992598610.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 61 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 34 insertions(+), 27 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3793,6 +3793,28 @@ int bpf_object__load(struct bpf_object *
+ return bpf_object__load_xattr(&attr);
+ }
+
++static int make_parent_dir(const char *path)
++{
++ char *cp, errmsg[STRERR_BUFSIZE];
++ char *dname, *dir;
++ int err = 0;
++
++ dname = strdup(path);
++ if (dname == NULL)
++ return -ENOMEM;
++
++ dir = dirname(dname);
++ if (mkdir(dir, 0700) && errno != EEXIST)
++ err = -errno;
++
++ free(dname);
++ if (err) {
++ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
++ pr_warning("failed to mkdir %s: %s\n", path, cp);
++ }
++ return err;
++}
++
+ static int check_path(const char *path)
+ {
+ char *cp, errmsg[STRERR_BUFSIZE];
+@@ -3829,6 +3851,10 @@ int bpf_program__pin_instance(struct bpf
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int err;
+
++ err = make_parent_dir(path);
++ if (err)
++ return err;
++
+ err = check_path(path);
+ if (err)
+ return err;
+@@ -3882,25 +3908,14 @@ int bpf_program__unpin_instance(struct b
+ return 0;
+ }
+
+-static int make_dir(const char *path)
+-{
+- char *cp, errmsg[STRERR_BUFSIZE];
+- int err = 0;
+-
+- if (mkdir(path, 0700) && errno != EEXIST)
+- err = -errno;
+-
+- if (err) {
+- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+- pr_warning("failed to mkdir %s: %s\n", path, cp);
+- }
+- return err;
+-}
+-
+ int bpf_program__pin(struct bpf_program *prog, const char *path)
+ {
+ int i, err;
+
++ err = make_parent_dir(path);
++ if (err)
++ return err;
++
+ err = check_path(path);
+ if (err)
+ return err;
+@@ -3921,10 +3936,6 @@ int bpf_program__pin(struct bpf_program
+ return bpf_program__pin_instance(prog, path, 0);
+ }
+
+- err = make_dir(path);
+- if (err)
+- return err;
+-
+ for (i = 0; i < prog->instances.nr; i++) {
+ char buf[PATH_MAX];
+ int len;
+@@ -4047,6 +4058,10 @@ int bpf_map__pin(struct bpf_map *map, co
+ }
+ }
+
++ err = make_parent_dir(map->pin_path);
++ if (err)
++ return err;
++
+ err = check_path(map->pin_path);
+ if (err)
+ return err;
+@@ -4141,10 +4156,6 @@ int bpf_object__pin_maps(struct bpf_obje
+ return -ENOENT;
+ }
+
+- err = make_dir(path);
+- if (err)
+- return err;
+-
+ bpf_object__for_each_map(map, obj) {
+ char *pin_path = NULL;
+ char buf[PATH_MAX];
+@@ -4231,10 +4242,6 @@ int bpf_object__pin_programs(struct bpf_
+ return -ENOENT;
+ }
+
+- err = make_dir(path);
+- if (err)
+- return err;
+-
+ bpf_object__for_each_program(prog, obj) {
+ char buf[PATH_MAX];
+ int len;
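
As a usage sketch of the effect (paths and object names are illustrative):
after this change the parent directory of a pin path no longer has to exist
up front, since each _pin() function now creates it itself:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    struct bpf_object *obj = bpf_object__open("prog.o");
    struct bpf_program *prog;

    if (!libbpf_get_error(obj) && !bpf_object__load(obj)) {
    	prog = bpf_program__next(NULL, obj);
    	/* /sys/fs/bpf/myapp/ need not exist beforehand */
    	if (bpf_program__pin(prog, "/sys/fs/bpf/myapp/prog"))
    		fprintf(stderr, "pin failed\n");
    }
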
diff --git a/patches.suse/libbpf-Refactor-bpf_object__open-APIs-to-use-common-.patch b/patches.suse/libbpf-Refactor-bpf_object__open-APIs-to-use-common-.patch
new file mode 100644
index 0000000000..564de028a0
--- /dev/null
+++ b/patches.suse/libbpf-Refactor-bpf_object__open-APIs-to-use-common-.patch
@@ -0,0 +1,170 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 11:28:46 -0700
+Subject: libbpf: Refactor bpf_object__open APIs to use common opts
+Patch-mainline: v5.5-rc1
+Git-commit: 291ee02b5e407eb1eb99c9eeaa968ef8a0c16949
+References: bsc#1155518
+
+Refactor all the various bpf_object__open variations to ultimately
+specify a common bpf_object_open_opts struct. This makes it easy to keep
+extending this common struct with extra parameters without having to
+update all the legacy APIs.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191015182849.3922287-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 71 ++++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 36 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1322,9 +1322,9 @@ static int bpf_object__init_user_btf_map
+ return 0;
+ }
+
+-static int bpf_object__init_maps(struct bpf_object *obj, int flags)
++static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps)
+ {
+- bool strict = !(flags & MAPS_RELAX_COMPAT);
++ bool strict = !relaxed_maps;
+ int err;
+
+ err = bpf_object__init_user_maps(obj, strict);
+@@ -1521,7 +1521,7 @@ static int bpf_object__sanitize_and_load
+ return 0;
+ }
+
+-static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
++static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps)
+ {
+ Elf *elf = obj->efile.elf;
+ GElf_Ehdr *ep = &obj->efile.ehdr;
+@@ -1652,7 +1652,7 @@ static int bpf_object__elf_collect(struc
+ }
+ err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
+ if (!err)
+- err = bpf_object__init_maps(obj, flags);
++ err = bpf_object__init_maps(obj, relaxed_maps);
+ if (!err)
+ err = bpf_object__sanitize_and_load_btf(obj);
+ if (!err)
+@@ -3554,24 +3554,45 @@ bpf_object__load_progs(struct bpf_object
+
+ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+- const char *obj_name, int flags)
++ struct bpf_object_open_opts *opts)
+ {
+ struct bpf_object *obj;
++ const char *obj_name;
++ char tmp_name[64];
++ bool relaxed_maps;
+ int err;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+- pr_warning("failed to init libelf for %s\n", path);
++ pr_warning("failed to init libelf for %s\n",
++ path ? : "(mem buf)");
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
++ if (!OPTS_VALID(opts, bpf_object_open_opts))
++ return ERR_PTR(-EINVAL);
++
++ obj_name = OPTS_GET(opts, object_name, path);
++ if (obj_buf) {
++ if (!obj_name) {
++ snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
++ (unsigned long)obj_buf,
++ (unsigned long)obj_buf_sz);
++ obj_name = tmp_name;
++ }
++ path = obj_name;
++ pr_debug("loading object '%s' from buffer\n", obj_name);
++ }
++
+ obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
+ if (IS_ERR(obj))
+ return obj;
+
++ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
++
+ CHECK_ERR(bpf_object__elf_init(obj), err, out);
+ CHECK_ERR(bpf_object__check_endianness(obj), err, out);
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
+- CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
++ CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
+ CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
+
+ bpf_object__elf_finish(obj);
+@@ -3584,13 +3605,16 @@ out:
+ static struct bpf_object *
+ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
+ {
++ LIBBPF_OPTS(bpf_object_open_opts, opts,
++ .relaxed_maps = flags & MAPS_RELAX_COMPAT,
++ );
++
+ /* param validation */
+ if (!attr->file)
+ return NULL;
+
+ pr_debug("loading %s\n", attr->file);
+-
+- return __bpf_object__open(attr->file, NULL, 0, NULL, flags);
++ return __bpf_object__open(attr->file, NULL, 0, &opts);
+ }
+
+ struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
+@@ -3611,47 +3635,22 @@ struct bpf_object *bpf_object__open(cons
+ struct bpf_object *
+ bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
+ {
+- const char *obj_name;
+- bool relaxed_maps;
+-
+- if (!OPTS_VALID(opts, bpf_object_open_opts))
+- return ERR_PTR(-EINVAL);
+ if (!path)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("loading %s\n", path);
+
+- obj_name = OPTS_GET(opts, object_name, path);
+- relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+- return __bpf_object__open(path, NULL, 0, obj_name,
+- relaxed_maps ? MAPS_RELAX_COMPAT : 0);
++ return __bpf_object__open(path, NULL, 0, opts);
+ }
+
+ struct bpf_object *
+ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+ {
+- char tmp_name[64];
+- const char *obj_name;
+- bool relaxed_maps;
+-
+- if (!OPTS_VALID(opts, bpf_object_open_opts))
+- return ERR_PTR(-EINVAL);
+ if (!obj_buf || obj_buf_sz == 0)
+ return ERR_PTR(-EINVAL);
+
+- obj_name = OPTS_GET(opts, object_name, NULL);
+- if (!obj_name) {
+- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+- (unsigned long)obj_buf,
+- (unsigned long)obj_buf_sz);
+- obj_name = tmp_name;
+- }
+- pr_debug("loading object '%s' from buffer\n", obj_name);
+-
+- relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+- return __bpf_object__open(obj_name, obj_buf, obj_buf_sz, obj_name,
+- relaxed_maps ? MAPS_RELAX_COMPAT : 0);
++ return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
+ }
+
+ struct bpf_object *
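
A sketch of the opts-based entry point all the open variants now funnel
into; the object name and the relaxed_maps choice are illustrative:

    struct bpf_object_open_opts opts = {
    	.sz = sizeof(opts),		/* mandatory size field */
    	.object_name = "my_obj",
    	.relaxed_maps = true,
    };
    struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);

    if (libbpf_get_error(obj))
    	/* handle open error */;
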
diff --git a/patches.suse/libbpf-Refactor-relocation-handling.patch b/patches.suse/libbpf-Refactor-relocation-handling.patch
new file mode 100644
index 0000000000..e6099372ae
--- /dev/null
+++ b/patches.suse/libbpf-Refactor-relocation-handling.patch
@@ -0,0 +1,376 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 20 Nov 2019 23:07:41 -0800
+Subject: libbpf: Refactor relocation handling
+Patch-mainline: v5.5-rc1
+Git-commit: 1f8e2bcb2cd5ee1a731fb625a5438e2c305f6a7c
+References: bsc#1155518
+
+Relocation handling code is convoluted and unnecessarily deeply nested. Split
+out per-relocation logic into a separate function. Also refactor the logic to
+be more of a sequence of per-relocation type checks and processing steps,
+making it simpler to follow the control flow. This makes it easier to further
+extend it to new kinds of relocations (e.g., support for extern variables).
+
+This patch also makes a relocation's section verification more robust.
+Previously, relocations against not-yet-supported externs were silently ignored
+because obj->efile.text_shndx was zero when all BPF programs had custom
+section names and there was no .text section. Also, invalid LDIMM64 relocations
+against non-map sections were passed through if they were pointing to a .text
+section (or 0, which is an invalid section). All these bugs are fixed within this
+refactoring, and checks are made more appropriate for each type of relocation.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191121070743.1309473-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 264 ++++++++++++++++++++++++++-----------------------
+ 1 file changed, 144 insertions(+), 120 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -269,8 +269,8 @@ struct bpf_object {
+ struct {
+ GElf_Shdr shdr;
+ Elf_Data *data;
+- } *reloc;
+- int nr_reloc;
++ } *reloc_sects;
++ int nr_reloc_sects;
+ int maps_shndx;
+ int btf_maps_shndx;
+ int text_shndx;
+@@ -568,8 +568,8 @@ static void bpf_object__elf_finish(struc
+ obj->efile.rodata = NULL;
+ obj->efile.bss = NULL;
+
+- zfree(&obj->efile.reloc);
+- obj->efile.nr_reloc = 0;
++ zfree(&obj->efile.reloc_sects);
++ obj->efile.nr_reloc_sects = 0;
+ zclose(obj->efile.fd);
+ obj->efile.obj_buf = NULL;
+ obj->efile.obj_buf_sz = 0;
+@@ -1635,8 +1635,8 @@ static int bpf_object__elf_collect(struc
+ pr_debug("skip section(%d) %s\n", idx, name);
+ }
+ } else if (sh.sh_type == SHT_REL) {
+- int nr_reloc = obj->efile.nr_reloc;
+- void *reloc = obj->efile.reloc;
++ int nr_sects = obj->efile.nr_reloc_sects;
++ void *sects = obj->efile.reloc_sects;
+ int sec = sh.sh_info; /* points to other section */
+
+ /* Only do relo for section with exec instructions */
+@@ -1646,18 +1646,18 @@ static int bpf_object__elf_collect(struc
+ continue;
+ }
+
+- reloc = reallocarray(reloc, nr_reloc + 1,
+- sizeof(*obj->efile.reloc));
+- if (!reloc) {
+- pr_warning("realloc failed\n");
++ sects = reallocarray(sects, nr_sects + 1,
++ sizeof(*obj->efile.reloc_sects));
++ if (!sects) {
++ pr_warning("reloc_sects realloc failed\n");
+ return -ENOMEM;
+ }
+
+- obj->efile.reloc = reloc;
+- obj->efile.nr_reloc++;
++ obj->efile.reloc_sects = sects;
++ obj->efile.nr_reloc_sects++;
+
+- obj->efile.reloc[nr_reloc].shdr = sh;
+- obj->efile.reloc[nr_reloc].data = data;
++ obj->efile.reloc_sects[nr_sects].shdr = sh;
++ obj->efile.reloc_sects[nr_sects].data = data;
+ } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
+ obj->efile.bss = data;
+ obj->efile.bss_shndx = idx;
+@@ -1722,14 +1722,6 @@ static bool bpf_object__shndx_is_maps(co
+ shndx == obj->efile.btf_maps_shndx;
+ }
+
+-static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
+- int shndx)
+-{
+- return shndx == obj->efile.text_shndx ||
+- bpf_object__shndx_is_maps(obj, shndx) ||
+- bpf_object__shndx_is_data(obj, shndx);
+-}
+-
+ static enum libbpf_map_type
+ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
+ {
+@@ -1743,14 +1735,124 @@ bpf_object__section_to_libbpf_map_type(c
+ return LIBBPF_MAP_UNSPEC;
+ }
+
++static int bpf_program__record_reloc(struct bpf_program *prog,
++ struct reloc_desc *reloc_desc,
++ __u32 insn_idx, const char *name,
++ const GElf_Sym *sym, const GElf_Rel *rel)
++{
++ struct bpf_insn *insn = &prog->insns[insn_idx];
++ size_t map_idx, nr_maps = prog->obj->nr_maps;
++ struct bpf_object *obj = prog->obj;
++ __u32 shdr_idx = sym->st_shndx;
++ enum libbpf_map_type type;
++ struct bpf_map *map;
++
++ /* sub-program call relocation */
++ if (insn->code == (BPF_JMP | BPF_CALL)) {
++ if (insn->src_reg != BPF_PSEUDO_CALL) {
++ pr_warning("incorrect bpf_call opcode\n");
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ /* text_shndx can be 0, if no default "main" program exists */
++ if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
++ pr_warning("bad call relo against section %u\n", shdr_idx);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ if (sym->st_value % 8) {
++ pr_warning("bad call relo offset: %lu\n", sym->st_value);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ reloc_desc->type = RELO_CALL;
++ reloc_desc->insn_idx = insn_idx;
++ reloc_desc->text_off = sym->st_value / 8;
++ obj->has_pseudo_calls = true;
++ return 0;
++ }
++
++ if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
++ pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
++ insn_idx, insn->code);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
++ pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
++ name, shdr_idx, insn_idx, insn->code);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++
++ type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
++
++ /* generic map reference relocation */
++ if (type == LIBBPF_MAP_UNSPEC) {
++ if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
++ pr_warning("bad map relo against section %u\n",
++ shdr_idx);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
++ map = &obj->maps[map_idx];
++ if (map->libbpf_type != type ||
++ map->sec_idx != sym->st_shndx ||
++ map->sec_offset != sym->st_value)
++ continue;
++ pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
++ map_idx, map->name, map->sec_idx,
++ map->sec_offset, insn_idx);
++ break;
++ }
++ if (map_idx >= nr_maps) {
++ pr_warning("map relo failed to find map for sec %u, off %llu\n",
++ shdr_idx, (__u64)sym->st_value);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ reloc_desc->type = RELO_LD64;
++ reloc_desc->insn_idx = insn_idx;
++ reloc_desc->map_idx = map_idx;
++ return 0;
++ }
++
++ /* global data map relocation */
++ if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
++ pr_warning("bad data relo against section %u\n", shdr_idx);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ if (GELF_ST_BIND(sym->st_info) == STB_GLOBAL) {
++ pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
++ name, insn_idx, insn->code);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ if (!obj->caps.global_data) {
++ pr_warning("relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
++ name, insn_idx);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
++ map = &obj->maps[map_idx];
++ if (map->libbpf_type != type)
++ continue;
++ pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
++ map_idx, map->name, map->sec_idx, map->sec_offset,
++ insn_idx);
++ break;
++ }
++ if (map_idx >= nr_maps) {
++ pr_warning("data relo failed to find map for sec %u\n",
++ shdr_idx);
++ return -LIBBPF_ERRNO__RELOC;
++ }
++
++ reloc_desc->type = RELO_DATA;
++ reloc_desc->insn_idx = insn_idx;
++ reloc_desc->map_idx = map_idx;
++ return 0;
++}
++
+ static int
+ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+ Elf_Data *data, struct bpf_object *obj)
+ {
+ Elf_Data *symbols = obj->efile.symbols;
+- struct bpf_map *maps = obj->maps;
+- size_t nr_maps = obj->nr_maps;
+- int i, nrels;
++ int err, i, nrels;
+
+ pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
+ nrels = shdr->sh_size / shdr->sh_entsize;
+@@ -1763,12 +1865,8 @@ bpf_program__collect_reloc(struct bpf_pr
+ prog->nr_reloc = nrels;
+
+ for (i = 0; i < nrels; i++) {
+- struct bpf_insn *insns = prog->insns;
+- enum libbpf_map_type type;
+- unsigned int insn_idx;
+- unsigned int shdr_idx;
+ const char *name;
+- size_t map_idx;
++ __u32 insn_idx;
+ GElf_Sym sym;
+ GElf_Rel rel;
+
+@@ -1776,101 +1874,27 @@ bpf_program__collect_reloc(struct bpf_pr
+ pr_warning("relocation: failed to get %d reloc\n", i);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+-
+ if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+ pr_warning("relocation: symbol %"PRIx64" not found\n",
+ GELF_R_SYM(rel.r_info));
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+-
++ if (rel.r_offset % sizeof(struct bpf_insn))
++ return -LIBBPF_ERRNO__FORMAT;
++
++ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name) ? : "<?>";
++ pr_debug("relo for shdr %u, symb %llu, value %llu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
++ (__u32)sym.st_shndx, (__u64)GELF_R_SYM(rel.r_info),
++ (__u64)sym.st_value, GELF_ST_TYPE(sym.st_info),
++ GELF_ST_BIND(sym.st_info), sym.st_name, name,
++ insn_idx);
+
+- pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
+- (long long) (rel.r_info >> 32),
+- (long long) sym.st_value, sym.st_name, name);
+-
+- shdr_idx = sym.st_shndx;
+- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+- pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
+- insn_idx, shdr_idx);
+-
+- if (shdr_idx >= SHN_LORESERVE) {
+- pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
+- name, shdr_idx, insn_idx,
+- insns[insn_idx].code);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+- if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
+- pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
+- prog->section_name, shdr_idx);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+-
+- if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+- if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+- pr_warning("incorrect bpf_call opcode\n");
+- return -LIBBPF_ERRNO__RELOC;
+- }
+- if (sym.st_value % 8) {
+- pr_warning("bad call relo offset: %lu\n", sym.st_value);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+- prog->reloc_desc[i].type = RELO_CALL;
+- prog->reloc_desc[i].insn_idx = insn_idx;
+- prog->reloc_desc[i].text_off = sym.st_value / 8;
+- obj->has_pseudo_calls = true;
+- continue;
+- }
+-
+- if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
+- pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
+- insn_idx, insns[insn_idx].code);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+-
+- if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
+- bpf_object__shndx_is_data(obj, shdr_idx)) {
+- type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+- if (type != LIBBPF_MAP_UNSPEC) {
+- if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
+- pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
+- name, insn_idx, insns[insn_idx].code);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+- if (!obj->caps.global_data) {
+- pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
+- name, insn_idx);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+- }
+-
+- for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+- if (maps[map_idx].libbpf_type != type)
+- continue;
+- if (type != LIBBPF_MAP_UNSPEC ||
+- (maps[map_idx].sec_idx == sym.st_shndx &&
+- maps[map_idx].sec_offset == sym.st_value)) {
+- pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
+- map_idx, maps[map_idx].name,
+- maps[map_idx].sec_idx,
+- maps[map_idx].sec_offset,
+- insn_idx);
+- break;
+- }
+- }
+-
+- if (map_idx >= nr_maps) {
+- pr_warning("bpf relocation: map_idx %d larger than %d\n",
+- (int)map_idx, (int)nr_maps - 1);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+-
+- prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
+- RELO_DATA : RELO_LD64;
+- prog->reloc_desc[i].insn_idx = insn_idx;
+- prog->reloc_desc[i].map_idx = map_idx;
+- }
++ err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
++ insn_idx, name, &sym, &rel);
++ if (err)
++ return err;
+ }
+ return 0;
+ }
+@@ -3393,9 +3417,9 @@ static int bpf_object__collect_reloc(str
+ return -LIBBPF_ERRNO__INTERNAL;
+ }
+
+- for (i = 0; i < obj->efile.nr_reloc; i++) {
+- GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
+- Elf_Data *data = obj->efile.reloc[i].data;
++ for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
++ GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
++ Elf_Data *data = obj->efile.reloc_sects[i].data;
+ int idx = shdr->sh_info;
+ struct bpf_program *prog;
+
diff --git a/patches.suse/libbpf-Store-map-pin-path-and-status-in-struct-bpf_m.patch b/patches.suse/libbpf-Store-map-pin-path-and-status-in-struct-bpf_m.patch
new file mode 100644
index 0000000000..d0a8ba436a
--- /dev/null
+++ b/patches.suse/libbpf-Store-map-pin-path-and-status-in-struct-bpf_m.patch
@@ -0,0 +1,310 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 2 Nov 2019 12:09:38 +0100
+Subject: libbpf: Store map pin path and status in struct bpf_map
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: 4580b25fcee5347327aaffcec31c615ec28a889a
+References: bsc#1155518
+
+Support storing and setting a pin path in struct bpf_map, which can be used
+for automatic pinning. Also store the pin status so we can avoid attempts
+to re-pin a map that has already been pinned (or reused from a previous
+pinning).
+
+The behaviour of bpf_object__{un,}pin_maps() is changed so that if it is
+called with a NULL path argument (which was previously illegal), it will
+(un)pin only those maps that have a pin_path set.
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157269297876.394725.14782206533681896279.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 164 +++++++++++++++++++++++++++++++++++------------
+ tools/lib/bpf/libbpf.h | 8 ++
+ tools/lib/bpf/libbpf.map | 3
+ 3 files changed, 134 insertions(+), 41 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -227,6 +227,8 @@ struct bpf_map {
+ void *priv;
+ bpf_map_clear_priv_t clear_priv;
+ enum libbpf_map_type libbpf_type;
++ char *pin_path;
++ bool pinned;
+ };
+
+ struct bpf_secdata {
+@@ -4041,47 +4043,119 @@ int bpf_map__pin(struct bpf_map *map, co
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int err;
+
+- err = check_path(path);
+- if (err)
+- return err;
+-
+ if (map == NULL) {
+ pr_warning("invalid map pointer\n");
+ return -EINVAL;
+ }
+
+- if (bpf_obj_pin(map->fd, path)) {
+- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+- pr_warning("failed to pin map: %s\n", cp);
+- return -errno;
++ if (map->pin_path) {
++ if (path && strcmp(path, map->pin_path)) {
++ pr_warning("map '%s' already has pin path '%s' different from '%s'\n",
++ bpf_map__name(map), map->pin_path, path);
++ return -EINVAL;
++ } else if (map->pinned) {
++ pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
++ bpf_map__name(map), map->pin_path);
++ return 0;
++ }
++ } else {
++ if (!path) {
++ pr_warning("missing a path to pin map '%s' at\n",
++ bpf_map__name(map));
++ return -EINVAL;
++ } else if (map->pinned) {
++ pr_warning("map '%s' already pinned\n", bpf_map__name(map));
++ return -EEXIST;
++ }
++
++ map->pin_path = strdup(path);
++ if (!map->pin_path) {
++ err = -errno;
++ goto out_err;
++ }
+ }
+
+- pr_debug("pinned map '%s'\n", path);
++ err = check_path(map->pin_path);
++ if (err)
++ return err;
++
++ if (bpf_obj_pin(map->fd, map->pin_path)) {
++ err = -errno;
++ goto out_err;
++ }
++
++ map->pinned = true;
++ pr_debug("pinned map '%s'\n", map->pin_path);
+
+ return 0;
++
++out_err:
++ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
++ pr_warning("failed to pin map: %s\n", cp);
++ return err;
+ }
+
+ int bpf_map__unpin(struct bpf_map *map, const char *path)
+ {
+ int err;
+
+- err = check_path(path);
+- if (err)
+- return err;
+-
+ if (map == NULL) {
+ pr_warning("invalid map pointer\n");
+ return -EINVAL;
+ }
+
++ if (map->pin_path) {
++ if (path && strcmp(path, map->pin_path)) {
++ pr_warning("map '%s' already has pin path '%s' different from '%s'\n",
++ bpf_map__name(map), map->pin_path, path);
++ return -EINVAL;
++ }
++ path = map->pin_path;
++ } else if (!path) {
++ pr_warning("no path to unpin map '%s' from\n",
++ bpf_map__name(map));
++ return -EINVAL;
++ }
++
++ err = check_path(path);
++ if (err)
++ return err;
++
+ err = unlink(path);
+ if (err != 0)
+ return -errno;
+- pr_debug("unpinned map '%s'\n", path);
+
++ map->pinned = false;
++ pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
++
++ return 0;
++}
++
++int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
++{
++ char *new = NULL;
++
++ if (path) {
++ new = strdup(path);
++ if (!new)
++ return -errno;
++ }
++
++ free(map->pin_path);
++ map->pin_path = new;
+ return 0;
+ }
+
++const char *bpf_map__get_pin_path(const struct bpf_map *map)
++{
++ return map->pin_path;
++}
++
++bool bpf_map__is_pinned(const struct bpf_map *map)
++{
++ return map->pinned;
++}
++
+ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
+ {
+ struct bpf_map *map;
+@@ -4100,20 +4174,27 @@ int bpf_object__pin_maps(struct bpf_obje
+ return err;
+
+ bpf_object__for_each_map(map, obj) {
++ char *pin_path = NULL;
+ char buf[PATH_MAX];
+- int len;
+
+- len = snprintf(buf, PATH_MAX, "%s/%s", path,
+- bpf_map__name(map));
+- if (len < 0) {
+- err = -EINVAL;
+- goto err_unpin_maps;
+- } else if (len >= PATH_MAX) {
+- err = -ENAMETOOLONG;
+- goto err_unpin_maps;
++ if (path) {
++ int len;
++
++ len = snprintf(buf, PATH_MAX, "%s/%s", path,
++ bpf_map__name(map));
++ if (len < 0) {
++ err = -EINVAL;
++ goto err_unpin_maps;
++ } else if (len >= PATH_MAX) {
++ err = -ENAMETOOLONG;
++ goto err_unpin_maps;
++ }
++ pin_path = buf;
++ } else if (!map->pin_path) {
++ continue;
+ }
+
+- err = bpf_map__pin(map, buf);
++ err = bpf_map__pin(map, pin_path);
+ if (err)
+ goto err_unpin_maps;
+ }
+@@ -4122,17 +4203,10 @@ int bpf_object__pin_maps(struct bpf_obje
+
+ err_unpin_maps:
+ while ((map = bpf_map__prev(map, obj))) {
+- char buf[PATH_MAX];
+- int len;
+-
+- len = snprintf(buf, PATH_MAX, "%s/%s", path,
+- bpf_map__name(map));
+- if (len < 0)
+- continue;
+- else if (len >= PATH_MAX)
++ if (!map->pin_path)
+ continue;
+
+- bpf_map__unpin(map, buf);
++ bpf_map__unpin(map, NULL);
+ }
+
+ return err;
+@@ -4147,17 +4221,24 @@ int bpf_object__unpin_maps(struct bpf_ob
+ return -ENOENT;
+
+ bpf_object__for_each_map(map, obj) {
++ char *pin_path = NULL;
+ char buf[PATH_MAX];
+- int len;
+
+- len = snprintf(buf, PATH_MAX, "%s/%s", path,
+- bpf_map__name(map));
+- if (len < 0)
+- return -EINVAL;
+- else if (len >= PATH_MAX)
+- return -ENAMETOOLONG;
++ if (path) {
++ int len;
++
++ len = snprintf(buf, PATH_MAX, "%s/%s", path,
++ bpf_map__name(map));
++ if (len < 0)
++ return -EINVAL;
++ else if (len >= PATH_MAX)
++ return -ENAMETOOLONG;
++ pin_path = buf;
++ } else if (!map->pin_path) {
++ continue;
++ }
+
+- err = bpf_map__unpin(map, buf);
++ err = bpf_map__unpin(map, pin_path);
+ if (err)
+ return err;
+ }
+@@ -4282,6 +4363,7 @@ void bpf_object__close(struct bpf_object
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ zfree(&obj->maps[i].name);
++ zfree(&obj->maps[i].pin_path);
+ if (obj->maps[i].clear_priv)
+ obj->maps[i].clear_priv(&obj->maps[i],
+ obj->maps[i].priv);
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -124,6 +124,11 @@ int bpf_object__section_size(const struc
+ __u32 *size);
+ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+ __u32 *off);
++
++/* pin_maps and unpin_maps can both be called with a NULL path, in which case
++ * they will use the pin_path attribute of each map (and ignore all maps that
++ * don't have a pin_path set).
++ */
+ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
+ LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
+ const char *path);
+@@ -387,6 +392,9 @@ LIBBPF_API int bpf_map__resize(struct bp
+ LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+ LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
+ LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
++LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
++LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
++LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
+ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
+ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
+
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -193,6 +193,9 @@ LIBBPF_0.0.5 {
+
+ LIBBPF_0.0.6 {
+ global:
++ bpf_map__get_pin_path;
++ bpf_map__is_pinned;
++ bpf_map__set_pin_path;
+ bpf_object__open_file;
+ bpf_object__open_mem;
+ bpf_program__get_expected_attach_type;
diff --git a/patches.suse/libbpf-Support-initialized-global-variables.patch b/patches.suse/libbpf-Support-initialized-global-variables.patch
new file mode 100644
index 0000000000..2d27d235a1
--- /dev/null
+++ b/patches.suse/libbpf-Support-initialized-global-variables.patch
@@ -0,0 +1,257 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 20 Nov 2019 23:07:43 -0800
+Subject: libbpf: Support initialized global variables
+Patch-mainline: v5.5-rc1
+Git-commit: 393cdfbee809891dc6ba859a44cc6441fa8dce9e
+References: bsc#1155518
+
+Initialized global variables are no different in ELF from static variables,
+and don't require any extra support from libbpf. But they match the
+semantics of global data (backed by BPF maps) more closely, preventing
+LLVM/Clang from aggressively inlining constant values and removing the
+need for volatile incantations to prevent that. This patch enables global
+variables. It still disallows uninitialized variables, which would be put
+into the special COMMON ELF section, because BPF doesn't allow
+uninitialized data to be accessed.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191121070743.1309473-5-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 9 ++-------
+ tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_existence.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_ints.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_misc.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_mods.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c | 4 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_size.c | 4 ++--
+ 14 files changed, 28 insertions(+), 33 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1828,8 +1828,8 @@ static int bpf_program__record_reloc(str
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
+- pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
+- name, shdr_idx, insn_idx, insn->code);
++ pr_warning("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
++ name, shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+@@ -1869,11 +1869,6 @@ static int bpf_program__record_reloc(str
+ pr_warning("bad data relo against section %u\n", shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+- if (GELF_ST_BIND(sym->st_info) == STB_GLOBAL) {
+- pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
+- name, insn_idx, insn->code);
+- return -LIBBPF_ERRNO__RELOC;
+- }
+ if (!obj->caps.global_data) {
+ pr_warning("relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
+ name, insn_idx);
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_arrays_output {
+ int a2;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_bitfields {
+ /* unsigned bitfields */
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_bitfields {
+ /* unsigned bitfields */
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_existence_output {
+ int a_exists;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_flavors {
+ int a;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_ints {
+ uint8_t u8_field;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_kernel_output {
+ int valid[10];
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_misc_output {
+ int a, b, c;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_mods_output {
+ int a, b, c, d, e, f, g, h;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_nesting_substruct {
+ int a;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ enum core_reloc_primitives_enum {
+ A = 0,
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_ptr_as_arr {
+ int a;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+@@ -8,10 +8,10 @@
+
+ char _license[] SEC("license") = "GPL";
+
+-static volatile struct data {
++struct {
+ char in[256];
+ char out[256];
+-} data;
++} data = {};
+
+ struct core_reloc_size_output {
+ int int_sz;
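
A sketch of what the selftest changes above rely on: a plain initialized
global now works where a 'static volatile' workaround was needed before.
Variable and section names are illustrative:

    int my_flag = 0;	/* global, backed by a global-data map */

    SEC("tp/syscalls/sys_enter_openat")
    int handle(void *ctx)
    {
    	if (!my_flag)
    		return 0;
    	/* ... */
    	return 0;
    }
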
diff --git a/patches.suse/libbpf-Teach-bpf_object__open-to-guess-program-types.patch b/patches.suse/libbpf-Teach-bpf_object__open-to-guess-program-types.patch
new file mode 100644
index 0000000000..d4d8c56548
--- /dev/null
+++ b/patches.suse/libbpf-Teach-bpf_object__open-to-guess-program-types.patch
@@ -0,0 +1,146 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:38:59 -0700
+Subject: libbpf: Teach bpf_object__open to guess program types
+Patch-mainline: v5.5-rc1
+Git-commit: dd4436bb838338cfda253d7f012610a73e4078fd
+References: bsc#1155518
+
+Teach bpf_object__open how to guess program type and expected attach
+type from section names, similar to what bpf_prog_load() does. This
+seems like a really useful feature, and it was an oversight not to have
+done this during bpf_object__open(). To preserve the backwards-compatible
+behavior of bpf_prog_load(), its attr->prog_type is treated as an
+override of bpf_object__open() decisions, if attr->prog_type is not
+UNSPECIFIED.
+
+There is a slight difference in behavior for bpf_prog_load().
+Previously, if bpf_prog_load() was loading a BPF object with more than one
+program, the first program's guessed program type and expected attach type
+would determine the corresponding attributes of all the subsequent
+programs, even if their section names suggested otherwise. That seems like
+a rather dubious behavior and with this change it will behave more
+sanely: each program's type is determined individually, unless they are
+forced to uniformity through attr->prog_type.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-5-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 65 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 36 insertions(+), 29 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3616,6 +3616,7 @@ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+ {
++ struct bpf_program *prog;
+ struct bpf_object *obj;
+ const char *obj_name;
+ char tmp_name[64];
+@@ -3655,8 +3656,24 @@ __bpf_object__open(const char *path, con
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
+ CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
+ CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
+-
+ bpf_object__elf_finish(obj);
++
++ bpf_object__for_each_program(prog, obj) {
++ enum bpf_prog_type prog_type;
++ enum bpf_attach_type attach_type;
++
++ err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
++ &attach_type);
++ if (err == -ESRCH)
++ /* couldn't guess, but user might manually specify */
++ continue;
++ if (err)
++ goto out;
++
++ bpf_program__set_type(prog, prog_type);
++ bpf_program__set_expected_attach_type(prog, attach_type);
++ }
++
+ return obj;
+ out:
+ bpf_object__close(obj);
+@@ -4685,7 +4702,7 @@ int libbpf_prog_type_by_name(const char
+ free(type_names);
+ }
+
+- return -EINVAL;
++ return -ESRCH;
+ }
+
+ int libbpf_attach_type_by_name(const char *name,
+@@ -4715,15 +4732,6 @@ int libbpf_attach_type_by_name(const cha
+ return -EINVAL;
+ }
+
+-static int
+-bpf_program__identify_section(struct bpf_program *prog,
+- enum bpf_prog_type *prog_type,
+- enum bpf_attach_type *expected_attach_type)
+-{
+- return libbpf_prog_type_by_name(prog->section_name, prog_type,
+- expected_attach_type);
+-}
+-
+ int bpf_map__fd(const struct bpf_map *map)
+ {
+ return map ? map->fd : -EINVAL;
+@@ -4891,8 +4899,6 @@ int bpf_prog_load_xattr(const struct bpf
+ {
+ struct bpf_object_open_attr open_attr = {};
+ struct bpf_program *prog, *first_prog = NULL;
+- enum bpf_attach_type expected_attach_type;
+- enum bpf_prog_type prog_type;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int err;
+@@ -4910,26 +4916,27 @@ int bpf_prog_load_xattr(const struct bpf
+ return -ENOENT;
+
+ bpf_object__for_each_program(prog, obj) {
++ enum bpf_attach_type attach_type = attr->expected_attach_type;
+ /*
+- * If type is not specified, try to guess it based on
+- * section name.
++ * to preserve backwards compatibility, bpf_prog_load treats
++ * attr->prog_type, if specified, as an override to whatever
++ * bpf_object__open guessed
+ */
+- prog_type = attr->prog_type;
+- prog->prog_ifindex = attr->ifindex;
+- expected_attach_type = attr->expected_attach_type;
+- if (prog_type == BPF_PROG_TYPE_UNSPEC) {
+- err = bpf_program__identify_section(prog, &prog_type,
+- &expected_attach_type);
+- if (err < 0) {
+- bpf_object__close(obj);
+- return -EINVAL;
+- }
++ if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
++ bpf_program__set_type(prog, attr->prog_type);
++ bpf_program__set_expected_attach_type(prog,
++ attach_type);
++ }
++ if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
++ /*
++ * we haven't guessed from section name and user
++ * didn't provide a fallback type, too bad...
++ */
++ bpf_object__close(obj);
++ return -EINVAL;
+ }
+
+- bpf_program__set_type(prog, prog_type);
+- bpf_program__set_expected_attach_type(prog,
+- expected_attach_type);
+-
++ prog->prog_ifindex = attr->ifindex;
+ prog->log_level = attr->log_level;
+ prog->prog_flags = attr->prog_flags;
+ if (!first_prog)
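
A sketch of what this buys callers: the ELF section name alone is now
enough for bpf_object__open() to set the program type, with no explicit
bpf_program__set_type() call (program name is illustrative):

    SEC("xdp")			/* guessed as BPF_PROG_TYPE_XDP */
    int xdp_pass(struct xdp_md *ctx)
    {
    	return XDP_PASS;
    }
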
diff --git a/patches.suse/libbpf-Unpin-auto-pinned-maps-if-loading-fails.patch b/patches.suse/libbpf-Unpin-auto-pinned-maps-if-loading-fails.patch
new file mode 100644
index 0000000000..85b10f8be8
--- /dev/null
+++ b/patches.suse/libbpf-Unpin-auto-pinned-maps-if-loading-fails.patch
@@ -0,0 +1,68 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 9 Nov 2019 21:37:27 +0100
+Subject: libbpf: Unpin auto-pinned maps if loading fails
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: ec6d5f47bfe36f46aa0de707e5beb2f58d96b76d
+References: bsc#1155518
+
+Since the automatic map-pinning happens during load, it will leave pinned
+maps around if the load fails at a later stage. Fix this by unpinning any
+pinned maps on cleanup. To avoid unpinning pinned maps that were reused
+rather than newly pinned, add a new boolean property on struct bpf_map to
+keep track of whether that map was reused or not, and only unpin those maps
+that were not reused.
+
+Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/157333184731.88376.9992935027056165873.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -229,6 +229,7 @@ struct bpf_map {
+ enum libbpf_map_type libbpf_type;
+ char *pin_path;
+ bool pinned;
++ bool reused;
+ };
+
+ struct bpf_secdata {
+@@ -1997,6 +1998,7 @@ int bpf_map__reuse_fd(struct bpf_map *ma
+ map->def.map_flags = info.map_flags;
+ map->btf_key_type_id = info.btf_key_type_id;
+ map->btf_value_type_id = info.btf_value_type_id;
++ map->reused = true;
+
+ return 0;
+
+@@ -3916,7 +3918,7 @@ int bpf_object__unload(struct bpf_object
+ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
+ {
+ struct bpf_object *obj;
+- int err;
++ int err, i;
+
+ if (!attr)
+ return -EINVAL;
+@@ -3937,6 +3939,11 @@ int bpf_object__load_xattr(struct bpf_ob
+
+ return 0;
+ out:
++ /* unpin any maps that were auto-pinned during load */
++ for (i = 0; i < obj->nr_maps; i++)
++ if (obj->maps[i].pinned && !obj->maps[i].reused)
++ bpf_map__unpin(&obj->maps[i], NULL);
++
+ bpf_object__unload(obj);
+ pr_warning("failed to load object '%s'\n", obj->path);
+ return err;
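
A sketch of the resulting behavior (paths illustrative): when load fails
after auto-pinning, pins created by this attempt are removed again, while
pins reused from an earlier run are left alone:

    struct bpf_object *obj = bpf_object__open("prog.o");

    if (!libbpf_get_error(obj) && bpf_object__load(obj)) {
    	/* any map auto-pinned by this failed load has already been
    	 * unpinned; pre-existing pins under /sys/fs/bpf are untouched */
    	bpf_object__close(obj);
    }
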
diff --git a/patches.suse/libbpf-Update-BTF-reloc-support-to-latest-Clang-form.patch b/patches.suse/libbpf-Update-BTF-reloc-support-to-latest-Clang-form.patch
new file mode 100644
index 0000000000..6027b191a3
--- /dev/null
+++ b/patches.suse/libbpf-Update-BTF-reloc-support-to-latest-Clang-form.patch
@@ -0,0 +1,211 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 11:28:45 -0700
+Subject: libbpf: Update BTF reloc support to latest Clang format
+Patch-mainline: v5.5-rc1
+Git-commit: 511bb0085c6fe48353c35cd3d25f4f8720579a6d
+References: bsc#1155518
+
+BTF offset reloc was generalized in recent Clang into a field relocation,
+capturing an extra u32 field that specifies what aspect of the captured
+field needs to be relocated. This changes .BTF.ext's record size for this
+relocation from 12 bytes to 16 bytes. Given these format changes happened
+in Clang before an officially released version, it's OK to not support the
+outdated 12-byte record size without breaking ABI.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191015182849.3922287-2-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/btf.c | 16 ++++++++--------
+ tools/lib/bpf/btf.h | 4 ++--
+ tools/lib/bpf/libbpf.c | 24 ++++++++++++------------
+ tools/lib/bpf/libbpf_internal.h | 25 ++++++++++++++++++-------
+ 4 files changed, 40 insertions(+), 29 deletions(-)
+
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -888,14 +888,14 @@ static int btf_ext_setup_line_info(struc
+ return btf_ext_setup_info(btf_ext, &param);
+ }
+
+-static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
++static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
+ {
+ struct btf_ext_sec_setup_param param = {
+- .off = btf_ext->hdr->offset_reloc_off,
+- .len = btf_ext->hdr->offset_reloc_len,
+- .min_rec_size = sizeof(struct bpf_offset_reloc),
+- .ext_info = &btf_ext->offset_reloc_info,
+- .desc = "offset_reloc",
++ .off = btf_ext->hdr->field_reloc_off,
++ .len = btf_ext->hdr->field_reloc_len,
++ .min_rec_size = sizeof(struct bpf_field_reloc),
++ .ext_info = &btf_ext->field_reloc_info,
++ .desc = "field_reloc",
+ };
+
+ return btf_ext_setup_info(btf_ext, &param);
+@@ -975,9 +975,9 @@ struct btf_ext *btf_ext__new(__u8 *data,
+ goto done;
+
+ if (btf_ext->hdr->hdr_len <
+- offsetofend(struct btf_ext_header, offset_reloc_len))
++ offsetofend(struct btf_ext_header, field_reloc_len))
+ goto done;
+- err = btf_ext_setup_offset_reloc(btf_ext);
++ err = btf_ext_setup_field_reloc(btf_ext);
+ if (err)
+ goto done;
+
+--- a/tools/lib/bpf/btf.h
++++ b/tools/lib/bpf/btf.h
+@@ -60,8 +60,8 @@ struct btf_ext_header {
+ __u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+- __u32 offset_reloc_off;
+- __u32 offset_reloc_len;
++ __u32 field_reloc_off;
++ __u32 field_reloc_len;
+ };
+
+ LIBBPF_API void btf__free(struct btf *btf);
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -2326,7 +2326,7 @@ static bool str_is_empty(const char *s)
+ }
+
+ /*
+- * Turn bpf_offset_reloc into a low- and high-level spec representation,
++ * Turn bpf_field_reloc into a low- and high-level spec representation,
+ * validating correctness along the way, as well as calculating resulting
+ * field offset (in bytes), specified by accessor string. Low-level spec
+ * captures every single level of nestedness, including traversing anonymous
+@@ -2977,7 +2977,7 @@ static void *u32_as_hash_key(__u32 x)
+ * types should be compatible (see bpf_core_fields_are_compat for details).
+ * 3. It is supported and expected that there might be multiple flavors
+ * matching the spec. As long as all the specs resolve to the same set of
+- * offsets across all candidates, there is not error. If there is any
++ * offsets across all candidates, there is no error. If there is any
+ * ambiguity, CO-RE relocation will fail. This is necessary to accomodate
+ * imprefection of BTF deduplication, which can cause slight duplication of
+ * the same BTF type, if some directly or indirectly referenced (by
+@@ -2992,12 +2992,12 @@ static void *u32_as_hash_key(__u32 x)
+ * CPU-wise compared to prebuilding a map from all local type names to
+ * a list of candidate type names. It's also sped up by caching resolved
+ * list of matching candidates per each local "root" type ID, that has at
+- * least one bpf_offset_reloc associated with it. This list is shared
++ * least one bpf_field_reloc associated with it. This list is shared
+ * between multiple relocations for the same type ID and is updated as some
+ * of the candidates are pruned due to structural incompatibility.
+ */
+-static int bpf_core_reloc_offset(struct bpf_program *prog,
+- const struct bpf_offset_reloc *relo,
++static int bpf_core_reloc_field(struct bpf_program *prog,
++ const struct bpf_field_reloc *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ const struct btf *targ_btf,
+@@ -3106,10 +3106,10 @@ static int bpf_core_reloc_offset(struct
+ }
+
+ static int
+-bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
++bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
+ {
+ const struct btf_ext_info_sec *sec;
+- const struct bpf_offset_reloc *rec;
++ const struct bpf_field_reloc *rec;
+ const struct btf_ext_info *seg;
+ struct hashmap_entry *entry;
+ struct hashmap *cand_cache = NULL;
+@@ -3134,7 +3134,7 @@ bpf_core_reloc_offsets(struct bpf_object
+ goto out;
+ }
+
+- seg = &obj->btf_ext->offset_reloc_info;
++ seg = &obj->btf_ext->field_reloc_info;
+ for_each_btf_ext_sec(seg, sec) {
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name)) {
+@@ -3153,8 +3153,8 @@ bpf_core_reloc_offsets(struct bpf_object
+ sec_name, sec->num_info);
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+- err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
+- targ_btf, cand_cache);
++ err = bpf_core_reloc_field(prog, rec, i, obj->btf,
++ targ_btf, cand_cache);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
+ sec_name, i, err);
+@@ -3179,8 +3179,8 @@ bpf_object__relocate_core(struct bpf_obj
+ {
+ int err = 0;
+
+- if (obj->btf_ext->offset_reloc_info.len)
+- err = bpf_core_reloc_offsets(obj, targ_btf_path);
++ if (obj->btf_ext->field_reloc_info.len)
++ err = bpf_core_reloc_fields(obj, targ_btf_path);
+
+ return err;
+ }
+--- a/tools/lib/bpf/libbpf_internal.h
++++ b/tools/lib/bpf/libbpf_internal.h
+@@ -126,7 +126,7 @@ struct btf_ext {
+ };
+ struct btf_ext_info func_info;
+ struct btf_ext_info line_info;
+- struct btf_ext_info offset_reloc_info;
++ struct btf_ext_info field_reloc_info;
+ __u32 data_size;
+ };
+
+@@ -151,13 +151,23 @@ struct bpf_line_info_min {
+ __u32 line_col;
+ };
+
+-/* The minimum bpf_offset_reloc checked by the loader
++/* bpf_field_info_kind encodes which aspect of captured field has to be
++ * adjusted by relocations. Currently supported values are:
++ * - BPF_FIELD_BYTE_OFFSET: field offset (in bytes);
++ * - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise);
++ */
++enum bpf_field_info_kind {
++ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
++ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
++};
++
++/* The minimum bpf_field_reloc checked by the loader
+ *
+- * Offset relocation captures the following data:
++ * Field relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+- * its insn->imm field to be relocated with actual offset;
++ * its insn->imm field to be relocated with actual field info;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+- * offset;
++ * field;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ * itself encodes an accessed field using a sequence of field and array
+ * indicies, separated by colon (:). It's conceptually very close to LLVM's
+@@ -188,15 +198,16 @@ struct bpf_line_info_min {
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+- * In this case Clang will emit offset relocation recording necessary data to
++ * In this case Clang will emit field relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+-struct bpf_offset_reloc {
++struct bpf_field_reloc {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
++ enum bpf_field_info_kind kind;
+ };
+
+ #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/patches.suse/libbpf-add-bpf_object__open_-file-mem-w-extensible-o.patch b/patches.suse/libbpf-add-bpf_object__open_-file-mem-w-extensible-o.patch
new file mode 100644
index 0000000000..1d52cd2cf5
--- /dev/null
+++ b/patches.suse/libbpf-add-bpf_object__open_-file-mem-w-extensible-o.patch
@@ -0,0 +1,284 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 4 Oct 2019 15:40:35 -0700
+Subject: libbpf: add bpf_object__open_{file, mem} w/ extensible opts
+Patch-mainline: v5.5-rc1
+Git-commit: 2ce8450ef5a381e5ffeb4682c0093a3ab5d07008
+References: bsc#1155518
+
+Add a new set of bpf_object__open APIs using a new approach to optional
+parameter extensibility that allows for simpler ABI compatibility.
+
+This patch demonstrates an approach to implementing libbpf APIs that
+makes it easy to extend existing APIs with extra optional parameters in
+such a way that ABI compatibility is preserved, without having to do
+symbol versioning and generate lots of boilerplate code to handle it.
+To facilitate succinct code for working with options, add OPTS_VALID,
+OPTS_HAS, and OPTS_GET macros that hide all the NULL, size, and zero
+checks.
+
+Additionally, newly added libbpf APIs are encouraged to follow a similar
+pattern: all mandatory parameters are formal function parameters, and
+there is always an optional (NULL-able) xxx_opts struct, which must have
+the real struct size as its first field; the remaining fields are
+optional parameters, added over time, that tune the behavior of the
+existing API when specified by the user.
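+
+As a minimal caller-side sketch of the new API (the object file name and
+the stdio error handling are illustrative assumptions, not part of this
+patch):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+  #include <string.h>   /* for memset() used by LIBBPF_OPTS */
+  #include "libbpf.h"
+
+  int main(void)
+  {
+          struct bpf_object *obj;
+          long err;
+
+          /* LIBBPF_OPTS zero-fills the struct and sets .sz for us */
+          LIBBPF_OPTS(bpf_object_open_opts, opts,
+                  .object_name = "my_object",  /* override path-derived name */
+                  .relaxed_maps = true,        /* tolerate extra map attributes */
+          );
+
+          obj = bpf_object__open_file("prog.o", &opts);
+          err = libbpf_get_error(obj);
+          if (err) {
+                  fprintf(stderr, "failed to open: %ld\n", err);
+                  return 1;
+          }
+          bpf_object__close(obj);
+          return 0;
+  }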
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 87 +++++++++++++++++++++++++++++++---------
+ tools/lib/bpf/libbpf.h | 46 +++++++++++++++++++--
+ tools/lib/bpf/libbpf.map | 3 +
+ tools/lib/bpf/libbpf_internal.h | 32 ++++++++++++++
+ 4 files changed, 146 insertions(+), 22 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -505,7 +505,8 @@ static __u32 get_kernel_version(void)
+
+ static struct bpf_object *bpf_object__new(const char *path,
+ const void *obj_buf,
+- size_t obj_buf_sz)
++ size_t obj_buf_sz,
++ const char *obj_name)
+ {
+ struct bpf_object *obj;
+ char *end;
+@@ -517,11 +518,17 @@ static struct bpf_object *bpf_object__ne
+ }
+
+ strcpy(obj->path, path);
+- /* Using basename() GNU version which doesn't modify arg. */
+- strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
+- end = strchr(obj->name, '.');
+- if (end)
+- *end = 0;
++ if (obj_name) {
++ strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
++ obj->name[sizeof(obj->name) - 1] = 0;
++ } else {
++ /* Using basename() GNU version which doesn't modify arg. */
++ strncpy(obj->name, basename((void *)path),
++ sizeof(obj->name) - 1);
++ end = strchr(obj->name, '.');
++ if (end)
++ *end = 0;
++ }
+
+ obj->efile.fd = -1;
+ /*
+@@ -3547,7 +3554,7 @@ bpf_object__load_progs(struct bpf_object
+
+ static struct bpf_object *
+ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+- int flags)
++ const char *obj_name, int flags)
+ {
+ struct bpf_object *obj;
+ int err;
+@@ -3557,7 +3564,7 @@ __bpf_object__open(const char *path, con
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
+- obj = bpf_object__new(path, obj_buf, obj_buf_sz);
++ obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
+ if (IS_ERR(obj))
+ return obj;
+
+@@ -3583,7 +3590,7 @@ __bpf_object__open_xattr(struct bpf_obje
+
+ pr_debug("loading %s\n", attr->file);
+
+- return __bpf_object__open(attr->file, NULL, 0, flags);
++ return __bpf_object__open(attr->file, NULL, 0, NULL, flags);
+ }
+
+ struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
+@@ -3601,25 +3608,67 @@ struct bpf_object *bpf_object__open(cons
+ return bpf_object__open_xattr(&attr);
+ }
+
+-struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+- size_t obj_buf_sz,
+- const char *name)
++struct bpf_object *
++bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
++{
++ const char *obj_name;
++ bool relaxed_maps;
++
++ if (!OPTS_VALID(opts, bpf_object_open_opts))
++ return ERR_PTR(-EINVAL);
++ if (!path)
++ return ERR_PTR(-EINVAL);
++
++ pr_debug("loading %s\n", path);
++
++ obj_name = OPTS_GET(opts, object_name, path);
++ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
++ return __bpf_object__open(path, NULL, 0, obj_name,
++ relaxed_maps ? MAPS_RELAX_COMPAT : 0);
++}
++
++struct bpf_object *
++bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
++ struct bpf_object_open_opts *opts)
+ {
+ char tmp_name[64];
++ const char *obj_name;
++ bool relaxed_maps;
+
+- /* param validation */
+- if (!obj_buf || obj_buf_sz <= 0)
+- return NULL;
++ if (!OPTS_VALID(opts, bpf_object_open_opts))
++ return ERR_PTR(-EINVAL);
++ if (!obj_buf || obj_buf_sz == 0)
++ return ERR_PTR(-EINVAL);
+
+- if (!name) {
++ obj_name = OPTS_GET(opts, object_name, NULL);
++ if (!obj_name) {
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+ (unsigned long)obj_buf,
+ (unsigned long)obj_buf_sz);
+- name = tmp_name;
++ obj_name = tmp_name;
+ }
+- pr_debug("loading object '%s' from buffer\n", name);
++ pr_debug("loading object '%s' from buffer\n", obj_name);
++
++ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
++ return __bpf_object__open(obj_name, obj_buf, obj_buf_sz, obj_name,
++ relaxed_maps ? MAPS_RELAX_COMPAT : 0);
++}
++
++struct bpf_object *
++bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
++ const char *name)
++{
++ LIBBPF_OPTS(bpf_object_open_opts, opts,
++ .object_name = name,
++ /* wrong default, but backwards-compatible */
++ .relaxed_maps = true,
++ );
++
++ /* returning NULL is wrong, but backwards-compatible */
++ if (!obj_buf || obj_buf_sz == 0)
++ return NULL;
+
+- return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
++ return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
+ }
+
+ int bpf_object__unload(struct bpf_object *obj)
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -67,12 +67,52 @@ struct bpf_object_open_attr {
+ enum bpf_prog_type prog_type;
+ };
+
++/* Helper macro to declare and initialize libbpf options struct
++ *
++ * This dance with uninitialized declaration, followed by memset to zero,
++ * followed by assignment using compound literal syntax is done to preserve
++ * ability to use a nice struct field initialization syntax and **hopefully**
++ * have all the padding bytes initialized to zero. It's not guaranteed though,
++ * when copying literal, that compiler won't copy garbage in literal's padding
++ * bytes, but that's the best way I've found and it seems to work in practice.
++ */
++#define LIBBPF_OPTS(TYPE, NAME, ...) \
++ struct TYPE NAME; \
++ memset(&NAME, 0, sizeof(struct TYPE)); \
++ NAME = (struct TYPE) { \
++ .sz = sizeof(struct TYPE), \
++ __VA_ARGS__ \
++ }
++
++struct bpf_object_open_opts {
++ /* size of this struct, for forward/backward compatiblity */
++ size_t sz;
++ /* object name override, if provided:
++ * - for object open from file, this will override setting object
++ * name from file path's base name;
++ * - for object open from memory buffer, this will specify an object
++ * name and will override default "<addr>-<buf-size>" name;
++ */
++ const char *object_name;
++ /* parse map definitions non-strictly, allowing extra attributes/data */
++ bool relaxed_maps;
++};
++#define bpf_object_open_opts__last_field relaxed_maps
++
+ LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+ LIBBPF_API struct bpf_object *
++bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts);
++LIBBPF_API struct bpf_object *
++bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
++ struct bpf_object_open_opts *opts);
++
++/* deprecated bpf_object__open variants */
++LIBBPF_API struct bpf_object *
++bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
++ const char *name);
++LIBBPF_API struct bpf_object *
+ bpf_object__open_xattr(struct bpf_object_open_attr *attr);
+-LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+- size_t obj_buf_sz,
+- const char *name);
++
+ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+ __u32 *size);
+ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+--- a/tools/lib/bpf/libbpf.map
++++ b/tools/lib/bpf/libbpf.map
+@@ -192,4 +192,7 @@ LIBBPF_0.0.5 {
+ } LIBBPF_0.0.4;
+
+ LIBBPF_0.0.6 {
++ global:
++ bpf_object__open_file;
++ bpf_object__open_mem;
+ } LIBBPF_0.0.5;
+--- a/tools/lib/bpf/libbpf_internal.h
++++ b/tools/lib/bpf/libbpf_internal.h
+@@ -63,6 +63,38 @@ do { \
+ #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
+ #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+
++static inline bool libbpf_validate_opts(const char *opts,
++ size_t opts_sz, size_t user_sz,
++ const char *type_name)
++{
++ if (user_sz < sizeof(size_t)) {
++ pr_warning("%s size (%zu) is too small\n", type_name, user_sz);
++ return false;
++ }
++ if (user_sz > opts_sz) {
++ size_t i;
++
++ for (i = opts_sz; i < user_sz; i++) {
++ if (opts[i]) {
++ pr_warning("%s has non-zero extra bytes",
++ type_name);
++ return false;
++ }
++ }
++ }
++ return true;
++}
++
++#define OPTS_VALID(opts, type) \
++ (!(opts) || libbpf_validate_opts((const char *)opts, \
++ offsetofend(struct type, \
++ type##__last_field), \
++ (opts)->sz, #type))
++#define OPTS_HAS(opts, field) \
++ ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
++#define OPTS_GET(opts, field, fallback_value) \
++ (OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
++
+ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len);
+
diff --git a/patches.suse/libbpf-auto-generate-list-of-BPF-helper-definitions.patch b/patches.suse/libbpf-auto-generate-list-of-BPF-helper-definitions.patch
new file mode 100644
index 0000000000..e6573a2fa5
--- /dev/null
+++ b/patches.suse/libbpf-auto-generate-list-of-BPF-helper-definitions.patch
@@ -0,0 +1,346 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 6 Oct 2019 20:07:38 -0700
+Subject: libbpf: auto-generate list of BPF helper definitions
+Patch-mainline: v5.5-rc1
+Git-commit: 24f25763d6de229e8ada7616db76fd9ba83775e9
+References: bsc#1155518
+
+Get rid of the list of BPF helpers in bpf_helpers.h (irony...) and
+auto-generate it into bpf_helper_defs.h, which is now included from
+bpf_helpers.h.
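+
+For reference, each generated definition in bpf_helper_defs.h keeps the
+same shape as the hand-written ones it replaces, e.g. (illustrative
+sketch, not verbatim generator output):
+
+  /* ...description extracted from include/uapi/linux/bpf.h... */
+  static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
+          (void *) 1;  /* 1 == BPF_FUNC_map_lookup_elem */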
+
+Suggested-by: Alexei Starovoitov <ast@fb.com>
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/.gitignore | 1
+ tools/testing/selftests/bpf/Makefile | 8
+ tools/testing/selftests/bpf/bpf_helpers.h | 264 ------------------------------
+ 3 files changed, 9 insertions(+), 264 deletions(-)
+
+--- a/tools/testing/selftests/bpf/.gitignore
++++ b/tools/testing/selftests/bpf/.gitignore
+@@ -39,6 +39,7 @@ libbpf.so.*
+ test_hashmap
+ test_btf_dump
+ xdping
++/bpf_helper_defs.h
+ test_sockopt
+ test_sockopt_sk
+ test_sockopt_multi
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -91,6 +91,10 @@ include ../lib.mk
+ TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+ all: $(TEST_CUSTOM_PROGS)
+
++bpf_helper_defs.h: $(APIDIR)/linux/bpf.h
++ $(BPFDIR)/../../../scripts/bpf_helpers_doc.py --header \
++ --file $(APIDIR)/linux/bpf.h > bpf_helper_defs.h
++
+ $(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ $< -Wl,--build-id
+
+@@ -128,7 +132,7 @@ $(OUTPUT)/test_tcp_rtt: cgroup_helpers.c
+ # force a rebuild of BPFOBJ when its dependencies are updated
+ force:
+
+-$(BPFOBJ): force
++$(BPFOBJ): force bpf_helper_defs.h
+ $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
+
+ PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+@@ -324,4 +328,4 @@ $(VERIFIER_TESTS_H): $(VERIFIER_TEST_FIL
+
+ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) $(BPF_GCC_BUILD_DIR) \
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
+- feature
++ feature bpf_helper_defs.h
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ b/tools/testing/selftests/bpf/bpf_helpers.h
+@@ -2,6 +2,8 @@
+ #ifndef __BPF_HELPERS__
+ #define __BPF_HELPERS__
+
++#include "bpf_helper_defs.h"
++
+ #define __uint(name, val) int (*name)[val]
+ #define __type(name, val) typeof(val) *name
+
+@@ -21,219 +23,6 @@
+ */
+ #define SEC(NAME) __attribute__((section(NAME), used))
+
+-/* helper functions called from eBPF programs written in C */
+-static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
+- (void *) BPF_FUNC_map_lookup_elem;
+-static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_map_update_elem;
+-static int (*bpf_map_delete_elem)(void *map, const void *key) =
+- (void *) BPF_FUNC_map_delete_elem;
+-static int (*bpf_map_push_elem)(void *map, const void *value,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_map_push_elem;
+-static int (*bpf_map_pop_elem)(void *map, void *value) =
+- (void *) BPF_FUNC_map_pop_elem;
+-static int (*bpf_map_peek_elem)(void *map, void *value) =
+- (void *) BPF_FUNC_map_peek_elem;
+-static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
+- (void *) BPF_FUNC_probe_read;
+-static unsigned long long (*bpf_ktime_get_ns)(void) =
+- (void *) BPF_FUNC_ktime_get_ns;
+-static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
+- (void *) BPF_FUNC_trace_printk;
+-static void (*bpf_tail_call)(void *ctx, void *map, int index) =
+- (void *) BPF_FUNC_tail_call;
+-static unsigned long long (*bpf_get_smp_processor_id)(void) =
+- (void *) BPF_FUNC_get_smp_processor_id;
+-static unsigned long long (*bpf_get_current_pid_tgid)(void) =
+- (void *) BPF_FUNC_get_current_pid_tgid;
+-static unsigned long long (*bpf_get_current_uid_gid)(void) =
+- (void *) BPF_FUNC_get_current_uid_gid;
+-static int (*bpf_get_current_comm)(void *buf, int buf_size) =
+- (void *) BPF_FUNC_get_current_comm;
+-static unsigned long long (*bpf_perf_event_read)(void *map,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_perf_event_read;
+-static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
+- (void *) BPF_FUNC_clone_redirect;
+-static int (*bpf_redirect)(int ifindex, int flags) =
+- (void *) BPF_FUNC_redirect;
+-static int (*bpf_redirect_map)(void *map, int key, int flags) =
+- (void *) BPF_FUNC_redirect_map;
+-static int (*bpf_perf_event_output)(void *ctx, void *map,
+- unsigned long long flags, void *data,
+- int size) =
+- (void *) BPF_FUNC_perf_event_output;
+-static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
+- (void *) BPF_FUNC_get_stackid;
+-static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
+- (void *) BPF_FUNC_probe_write_user;
+-static int (*bpf_current_task_under_cgroup)(void *map, int index) =
+- (void *) BPF_FUNC_current_task_under_cgroup;
+-static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
+- (void *) BPF_FUNC_skb_get_tunnel_key;
+-static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
+- (void *) BPF_FUNC_skb_set_tunnel_key;
+-static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
+- (void *) BPF_FUNC_skb_get_tunnel_opt;
+-static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
+- (void *) BPF_FUNC_skb_set_tunnel_opt;
+-static unsigned long long (*bpf_get_prandom_u32)(void) =
+- (void *) BPF_FUNC_get_prandom_u32;
+-static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
+- (void *) BPF_FUNC_xdp_adjust_head;
+-static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
+- (void *) BPF_FUNC_xdp_adjust_meta;
+-static int (*bpf_get_socket_cookie)(void *ctx) =
+- (void *) BPF_FUNC_get_socket_cookie;
+-static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
+- int optlen) =
+- (void *) BPF_FUNC_setsockopt;
+-static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
+- int optlen) =
+- (void *) BPF_FUNC_getsockopt;
+-static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
+- (void *) BPF_FUNC_sock_ops_cb_flags_set;
+-static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
+- (void *) BPF_FUNC_sk_redirect_map;
+-static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
+- (void *) BPF_FUNC_sk_redirect_hash;
+-static int (*bpf_sock_map_update)(void *map, void *key, void *value,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_sock_map_update;
+-static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_sock_hash_update;
+-static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
+- void *buf, unsigned int buf_size) =
+- (void *) BPF_FUNC_perf_event_read_value;
+-static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
+- unsigned int buf_size) =
+- (void *) BPF_FUNC_perf_prog_read_value;
+-static int (*bpf_override_return)(void *ctx, unsigned long rc) =
+- (void *) BPF_FUNC_override_return;
+-static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
+- (void *) BPF_FUNC_msg_redirect_map;
+-static int (*bpf_msg_redirect_hash)(void *ctx,
+- void *map, void *key, int flags) =
+- (void *) BPF_FUNC_msg_redirect_hash;
+-static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
+- (void *) BPF_FUNC_msg_apply_bytes;
+-static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
+- (void *) BPF_FUNC_msg_cork_bytes;
+-static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
+- (void *) BPF_FUNC_msg_pull_data;
+-static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
+- (void *) BPF_FUNC_msg_push_data;
+-static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
+- (void *) BPF_FUNC_msg_pop_data;
+-static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
+- (void *) BPF_FUNC_bind;
+-static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
+- (void *) BPF_FUNC_xdp_adjust_tail;
+-static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
+- int size, int flags) =
+- (void *) BPF_FUNC_skb_get_xfrm_state;
+-static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
+- (void *) BPF_FUNC_sk_select_reuseport;
+-static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
+- (void *) BPF_FUNC_get_stack;
+-static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
+- int plen, __u32 flags) =
+- (void *) BPF_FUNC_fib_lookup;
+-static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
+- unsigned int len) =
+- (void *) BPF_FUNC_lwt_push_encap;
+-static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
+- void *from, unsigned int len) =
+- (void *) BPF_FUNC_lwt_seg6_store_bytes;
+-static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
+- unsigned int param_len) =
+- (void *) BPF_FUNC_lwt_seg6_action;
+-static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
+- unsigned int len) =
+- (void *) BPF_FUNC_lwt_seg6_adjust_srh;
+-static int (*bpf_rc_repeat)(void *ctx) =
+- (void *) BPF_FUNC_rc_repeat;
+-static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
+- unsigned long long scancode, unsigned int toggle) =
+- (void *) BPF_FUNC_rc_keydown;
+-static unsigned long long (*bpf_get_current_cgroup_id)(void) =
+- (void *) BPF_FUNC_get_current_cgroup_id;
+-static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
+- (void *) BPF_FUNC_get_local_storage;
+-static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
+- (void *) BPF_FUNC_skb_cgroup_id;
+-static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
+- (void *) BPF_FUNC_skb_ancestor_cgroup_id;
+-static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
+- struct bpf_sock_tuple *tuple,
+- int size, unsigned long long netns_id,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_sk_lookup_tcp;
+-static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
+- struct bpf_sock_tuple *tuple,
+- int size, unsigned long long netns_id,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_skc_lookup_tcp;
+-static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
+- struct bpf_sock_tuple *tuple,
+- int size, unsigned long long netns_id,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_sk_lookup_udp;
+-static int (*bpf_sk_release)(struct bpf_sock *sk) =
+- (void *) BPF_FUNC_sk_release;
+-static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
+- (void *) BPF_FUNC_skb_vlan_push;
+-static int (*bpf_skb_vlan_pop)(void *ctx) =
+- (void *) BPF_FUNC_skb_vlan_pop;
+-static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
+- (void *) BPF_FUNC_rc_pointer_rel;
+-static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
+- (void *) BPF_FUNC_spin_lock;
+-static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
+- (void *) BPF_FUNC_spin_unlock;
+-static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
+- (void *) BPF_FUNC_sk_fullsock;
+-static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
+- (void *) BPF_FUNC_tcp_sock;
+-static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+- (void *) BPF_FUNC_get_listener_sock;
+-static int (*bpf_skb_ecn_set_ce)(void *ctx) =
+- (void *) BPF_FUNC_skb_ecn_set_ce;
+-static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
+- void *ip, int ip_len, void *tcp, int tcp_len) =
+- (void *) BPF_FUNC_tcp_check_syncookie;
+-static int (*bpf_sysctl_get_name)(void *ctx, char *buf,
+- unsigned long long buf_len,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_sysctl_get_name;
+-static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf,
+- unsigned long long buf_len) =
+- (void *) BPF_FUNC_sysctl_get_current_value;
+-static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf,
+- unsigned long long buf_len) =
+- (void *) BPF_FUNC_sysctl_get_new_value;
+-static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf,
+- unsigned long long buf_len) =
+- (void *) BPF_FUNC_sysctl_set_new_value;
+-static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
+- unsigned long long flags, long *res) =
+- (void *) BPF_FUNC_strtol;
+-static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
+- unsigned long long flags, unsigned long *res) =
+- (void *) BPF_FUNC_strtoul;
+-static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
+- void *value, __u64 flags) =
+- (void *) BPF_FUNC_sk_storage_get;
+-static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
+- (void *)BPF_FUNC_sk_storage_delete;
+-static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
+-static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip,
+- int ip_len, void *tcp, int tcp_len) =
+- (void *) BPF_FUNC_tcp_gen_syncookie;
+-
+ /* llvm builtin functions that eBPF C program may use to
+ * emit BPF_LD_ABS and BPF_LD_IND instructions
+ */
+@@ -273,55 +62,6 @@ struct bpf_map_def {
+ __attribute__ ((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
+
+-static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
+- (void *) BPF_FUNC_skb_load_bytes;
+-static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
+- (void *) BPF_FUNC_skb_load_bytes_relative;
+-static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
+- (void *) BPF_FUNC_skb_store_bytes;
+-static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+- (void *) BPF_FUNC_l3_csum_replace;
+-static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+- (void *) BPF_FUNC_l4_csum_replace;
+-static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
+- (void *) BPF_FUNC_csum_diff;
+-static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
+- (void *) BPF_FUNC_skb_under_cgroup;
+-static int (*bpf_skb_change_head)(void *, int len, int flags) =
+- (void *) BPF_FUNC_skb_change_head;
+-static int (*bpf_skb_pull_data)(void *, int len) =
+- (void *) BPF_FUNC_skb_pull_data;
+-static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
+- (void *) BPF_FUNC_get_cgroup_classid;
+-static unsigned int (*bpf_get_route_realm)(void *ctx) =
+- (void *) BPF_FUNC_get_route_realm;
+-static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
+- (void *) BPF_FUNC_skb_change_proto;
+-static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
+- (void *) BPF_FUNC_skb_change_type;
+-static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
+- (void *) BPF_FUNC_get_hash_recalc;
+-static unsigned long long (*bpf_get_current_task)(void) =
+- (void *) BPF_FUNC_get_current_task;
+-static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
+- (void *) BPF_FUNC_skb_change_tail;
+-static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
+- (void *) BPF_FUNC_csum_update;
+-static void (*bpf_set_hash_invalid)(void *ctx) =
+- (void *) BPF_FUNC_set_hash_invalid;
+-static int (*bpf_get_numa_node_id)(void) =
+- (void *) BPF_FUNC_get_numa_node_id;
+-static int (*bpf_probe_read_str)(void *ctx, __u32 size,
+- const void *unsafe_ptr) =
+- (void *) BPF_FUNC_probe_read_str;
+-static unsigned int (*bpf_get_socket_uid)(void *ctx) =
+- (void *) BPF_FUNC_get_socket_uid;
+-static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
+- (void *) BPF_FUNC_set_hash;
+-static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
+- unsigned long long flags) =
+- (void *) BPF_FUNC_skb_adjust_room;
+-
+ /* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+ #if defined(__TARGET_ARCH_x86)
+ #define bpf_target_x86
diff --git a/patches.suse/libbpf-fix-bpf_object__name-to-actually-return-objec.patch b/patches.suse/libbpf-fix-bpf_object__name-to-actually-return-objec.patch
new file mode 100644
index 0000000000..b34f794357
--- /dev/null
+++ b/patches.suse/libbpf-fix-bpf_object__name-to-actually-return-objec.patch
@@ -0,0 +1,27 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 4 Oct 2019 15:40:36 -0700
+Subject: libbpf: fix bpf_object__name() to actually return object name
+Patch-mainline: v5.5-rc1
+Git-commit: c9e4c3010c8c98aa867fce386ee459a32c00a487
+References: bsc#1155518
+
+bpf_object__name() was returning the file path, not the object name. Fix this.
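+
+A quick illustration of the fixed behaviour (object file name is
+hypothetical):
+
+  struct bpf_object *obj = bpf_object__open("dir/prog.o");
+
+  /* before this fix: "dir/prog.o"; after: "prog" (path-derived name) */
+  const char *name = bpf_object__name(obj);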
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4231,7 +4231,7 @@ bpf_object__next(struct bpf_object *prev
+
+ const char *bpf_object__name(const struct bpf_object *obj)
+ {
+- return obj ? obj->path : ERR_PTR(-EINVAL);
++ return obj ? obj->name : ERR_PTR(-EINVAL);
+ }
+
+ unsigned int bpf_object__kversion(const struct bpf_object *obj)
diff --git a/patches.suse/libbpf-fix-sym-st_value-print-on-32-bit-arches.patch b/patches.suse/libbpf-fix-sym-st_value-print-on-32-bit-arches.patch
index 3e7ca90960..e432c815ff 100644
--- a/patches.suse/libbpf-fix-sym-st_value-print-on-32-bit-arches.patch
+++ b/patches.suse/libbpf-fix-sym-st_value-print-on-32-bit-arches.patch
@@ -18,20 +18,20 @@ Fix it with (__u64) cast.
Fixes: 1f8e2bcb2cd5 ("libbpf: Refactor relocation handling")
Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+
+NOTE (Gary Lin): Refresh this patch after applying 1f8e2bcb2cd5
---
- tools/lib/bpf/libbpf.c | 2 +-
+ tools/lib/bpf/libbpf.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index bae692831e14..3f09772192f1 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
-@@ -1792,7 +1792,7 @@
- return -LIBBPF_ERRNO__RELOC;
- }
- if (sym.st_value % 8) {
-- pr_warning("bad call relo offset: %lu\n", sym.st_value);
-+ pr_warning("bad call relo offset: %llu\n", (__u64)sym.st_value);
- return -LIBBPF_ERRNO__RELOC;
- }
- prog->reloc_desc[i].type = RELO_CALL;
+@@ -1813,7 +1813,7 @@ static int bpf_program__record_reloc(str
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (sym->st_value % 8) {
+- pr_warning("bad call relo offset: %lu\n", sym->st_value);
++ pr_warning("bad call relo offset: %llu\n", (__u64)sym->st_value);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ reloc_desc->type = RELO_CALL;
diff --git a/patches.suse/libbpf-relicense-bpf_helpers.h-and-bpf_endian.h.patch b/patches.suse/libbpf-relicense-bpf_helpers.h-and-bpf_endian.h.patch
new file mode 100644
index 0000000000..9d98a01bf1
--- /dev/null
+++ b/patches.suse/libbpf-relicense-bpf_helpers.h-and-bpf_endian.h.patch
@@ -0,0 +1,64 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Thu, 15 Aug 2019 22:45:43 -0700
+Subject: libbpf: relicense bpf_helpers.h and bpf_endian.h
+Patch-mainline: v5.4-rc1
+Git-commit: 929ffa6e9df0832fdf541ff2d9532e209153c0ec
+References: bsc#1155518
+
+bpf_helpers.h and bpf_endian.h contain useful macros and BPF helper
+definitions essential to almost every BPF program, which makes them
+useful not just for selftests. To be able to expose them as part of
+libbpf, though, we need them to be dual-licensed as LGPL-2.1 OR
+BSD-2-Clause. This patch updates the licensing of those two files.
+
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Hechao Li <hechaol@fb.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Acked-by: Andrey Ignatov <rdna@fb.com>
+Acked-by: Yonghong Song <yhs@fb.com>
+Acked-by: Lawrence Brakmo <brakmo@fb.com>
+Acked-by: Adam Barth <arb@fb.com>
+Acked-by: Roman Gushchin <guro@fb.com>
+Acked-by: Josef Bacik <jbacik@fb.com>
+Acked-by: Joe Stringer <joe@wand.net.nz>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Acked-by: David Ahern <dsahern@gmail.com>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Acked-by: Lorenz Bauer <lmb@cloudflare.com>
+Acked-by: Adrian Ratiu <adrian.ratiu@collabora.com>
+Acked-by: Nikita V. Shirokov <tehnerd@tehnerd.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Petar Penkov <ppenkov@google.com>
+Acked-by: Teng Qin <palmtenor@gmail.com>
+Cc: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Michal Rostecki <mrostecki@opensuse.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Sargun Dhillon <sargun@sargun.me>
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/bpf_endian.h | 2 +-
+ tools/testing/selftests/bpf/bpf_helpers.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/bpf_endian.h
++++ b/tools/testing/selftests/bpf/bpf_endian.h
+@@ -1,4 +1,4 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+ #ifndef __BPF_ENDIAN__
+ #define __BPF_ENDIAN__
+
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ b/tools/testing/selftests/bpf/bpf_helpers.h
+@@ -1,4 +1,4 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+ #ifndef __BPF_HELPERS_H
+ #define __BPF_HELPERS_H
+
diff --git a/patches.suse/libbpf-stop-enforcing-kern_version-populate-it-for-u.patch b/patches.suse/libbpf-stop-enforcing-kern_version-populate-it-for-u.patch
new file mode 100644
index 0000000000..c243de963c
--- /dev/null
+++ b/patches.suse/libbpf-stop-enforcing-kern_version-populate-it-for-u.patch
@@ -0,0 +1,258 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 4 Oct 2019 15:40:34 -0700
+Subject: libbpf: stop enforcing kern_version, populate it for users
+Patch-mainline: v5.5-rc1
+Git-commit: 5e61f27070292d4ad3af51dc68eebab6c1df69d3
+References: bsc#1155518
+
+Kernel version enforcement for kprobes/kretprobes was removed from the
+5.0 kernel in 6c4fc209fcf9 ("bpf: remove useless version check for prog load").
+Since then, BPF programs have been specifying SEC("version") just to
+please libbpf. We should stop enforcing this in libbpf if even the
+kernel doesn't care. Furthermore, libbpf will now pre-populate the
+current kernel version of the host system, in case we are still running
+on an old kernel.
+
+This patch also removes __bpf_object__open_xattr from libbpf.h, as
+nothing in libbpf relies on having it in that header. That function was
+never exported as LIBBPF_API, and even its name suggests an internal
+version. So it should be safe to remove, as doing so doesn't break ABI.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/libbpf.c | 100 +++------------
+ tools/lib/bpf/libbpf.h | 2
+ tools/testing/selftests/bpf/progs/test_attach_probe.c | 1
+ tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c | 1
+ tools/testing/selftests/bpf/progs/test_perf_buffer.c | 1
+ tools/testing/selftests/bpf/progs/test_stacktrace_map.c | 1
+ 6 files changed, 23 insertions(+), 83 deletions(-)
+
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -33,6 +33,7 @@
+ #include <linux/limits.h>
+ #include <linux/perf_event.h>
+ #include <linux/ring_buffer.h>
++#include <linux/version.h>
+ #include <sys/epoll.h>
+ #include <sys/ioctl.h>
+ #include <sys/mman.h>
+@@ -255,7 +256,7 @@ struct bpf_object {
+ */
+ struct {
+ int fd;
+- void *obj_buf;
++ const void *obj_buf;
+ size_t obj_buf_sz;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+@@ -491,8 +492,19 @@ bpf_object__init_prog_names(struct bpf_o
+ return 0;
+ }
+
++static __u32 get_kernel_version(void)
++{
++ __u32 major, minor, patch;
++ struct utsname info;
++
++ uname(&info);
++ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
++ return 0;
++ return KERNEL_VERSION(major, minor, patch);
++}
++
+ static struct bpf_object *bpf_object__new(const char *path,
+- void *obj_buf,
++ const void *obj_buf,
+ size_t obj_buf_sz)
+ {
+ struct bpf_object *obj;
+@@ -526,6 +538,7 @@ static struct bpf_object *bpf_object__ne
+ obj->efile.rodata_shndx = -1;
+ obj->efile.bss_shndx = -1;
+
++ obj->kern_version = get_kernel_version();
+ obj->loaded = false;
+
+ INIT_LIST_HEAD(&obj->list);
+@@ -569,7 +582,7 @@ static int bpf_object__elf_init(struct b
+ * obj_buf should have been validated by
+ * bpf_object__open_buffer().
+ */
+- obj->efile.elf = elf_memory(obj->efile.obj_buf,
++ obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
+ obj->efile.obj_buf_sz);
+ } else {
+ obj->efile.fd = open(obj->path, O_RDONLY);
+@@ -636,21 +649,6 @@ bpf_object__init_license(struct bpf_obje
+ return 0;
+ }
+
+-static int
+-bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
+-{
+- __u32 kver;
+-
+- if (size != sizeof(kver)) {
+- pr_warning("invalid kver section in %s\n", obj->path);
+- return -LIBBPF_ERRNO__FORMAT;
+- }
+- memcpy(&kver, data, sizeof(kver));
+- obj->kern_version = kver;
+- pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
+- return 0;
+-}
+-
+ static int compare_bpf_map(const void *_a, const void *_b)
+ {
+ const struct bpf_map *a = _a;
+@@ -1568,11 +1566,7 @@ static int bpf_object__elf_collect(struc
+ if (err)
+ return err;
+ } else if (strcmp(name, "version") == 0) {
+- err = bpf_object__init_kversion(obj,
+- data->d_buf,
+- data->d_size);
+- if (err)
+- return err;
++ /* skip, we don't need it anymore */
+ } else if (strcmp(name, "maps") == 0) {
+ obj->efile.maps_shndx = idx;
+ } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
+@@ -3551,54 +3545,9 @@ bpf_object__load_progs(struct bpf_object
+ return 0;
+ }
+
+-static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
+-{
+- switch (type) {
+- case BPF_PROG_TYPE_SOCKET_FILTER:
+- case BPF_PROG_TYPE_SCHED_CLS:
+- case BPF_PROG_TYPE_SCHED_ACT:
+- case BPF_PROG_TYPE_XDP:
+- case BPF_PROG_TYPE_CGROUP_SKB:
+- case BPF_PROG_TYPE_CGROUP_SOCK:
+- case BPF_PROG_TYPE_LWT_IN:
+- case BPF_PROG_TYPE_LWT_OUT:
+- case BPF_PROG_TYPE_LWT_XMIT:
+- case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+- case BPF_PROG_TYPE_SOCK_OPS:
+- case BPF_PROG_TYPE_SK_SKB:
+- case BPF_PROG_TYPE_CGROUP_DEVICE:
+- case BPF_PROG_TYPE_SK_MSG:
+- case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+- case BPF_PROG_TYPE_LIRC_MODE2:
+- case BPF_PROG_TYPE_SK_REUSEPORT:
+- case BPF_PROG_TYPE_FLOW_DISSECTOR:
+- case BPF_PROG_TYPE_UNSPEC:
+- case BPF_PROG_TYPE_TRACEPOINT:
+- case BPF_PROG_TYPE_RAW_TRACEPOINT:
+- case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+- case BPF_PROG_TYPE_PERF_EVENT:
+- case BPF_PROG_TYPE_CGROUP_SYSCTL:
+- case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+- return false;
+- case BPF_PROG_TYPE_KPROBE:
+- default:
+- return true;
+- }
+-}
+-
+-static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
+-{
+- if (needs_kver && obj->kern_version == 0) {
+- pr_warning("%s doesn't provide kernel version\n",
+- obj->path);
+- return -LIBBPF_ERRNO__KVERSION;
+- }
+- return 0;
+-}
+-
+ static struct bpf_object *
+-__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
+- bool needs_kver, int flags)
++__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
++ int flags)
+ {
+ struct bpf_object *obj;
+ int err;
+@@ -3617,7 +3566,6 @@ __bpf_object__open(const char *path, voi
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
+ CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
+ CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
+- CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
+
+ bpf_object__elf_finish(obj);
+ return obj;
+@@ -3626,8 +3574,8 @@ out:
+ return ERR_PTR(err);
+ }
+
+-struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
+- int flags)
++static struct bpf_object *
++__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
+ {
+ /* param validation */
+ if (!attr->file)
+@@ -3635,9 +3583,7 @@ struct bpf_object *__bpf_object__open_xa
+
+ pr_debug("loading %s\n", attr->file);
+
+- return __bpf_object__open(attr->file, NULL, 0,
+- bpf_prog_type__needs_kver(attr->prog_type),
+- flags);
++ return __bpf_object__open(attr->file, NULL, 0, flags);
+ }
+
+ struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
+@@ -3673,7 +3619,7 @@ struct bpf_object *bpf_object__open_buff
+ }
+ pr_debug("loading object '%s' from buffer\n", name);
+
+- return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
++ return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
+ }
+
+ int bpf_object__unload(struct bpf_object *obj)
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -70,8 +70,6 @@ struct bpf_object_open_attr {
+ LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+ LIBBPF_API struct bpf_object *
+ bpf_object__open_xattr(struct bpf_object_open_attr *attr);
+-struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
+- int flags);
+ LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+ size_t obj_buf_sz,
+ const char *name);
+--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
++++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
+@@ -49,4 +49,3 @@ int handle_uprobe_return(struct pt_regs
+ }
+
+ char _license[] SEC("license") = "GPL";
+-__u32 _version SEC("version") = 1;
+--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
++++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
+@@ -99,4 +99,3 @@ int bpf_prog1(void *ctx)
+ }
+
+ char _license[] SEC("license") = "GPL";
+-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
+--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
++++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+@@ -22,4 +22,3 @@ int handle_sys_nanosleep_entry(struct pt
+ }
+
+ char _license[] SEC("license") = "GPL";
+-__u32 _version SEC("version") = 1;
+--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
++++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+@@ -74,4 +74,3 @@ int oncpu(struct sched_switch_args *ctx)
+ }
+
+ char _license[] SEC("license") = "GPL";
+-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/patches.suse/net-ibmvnic-Fix-typo-in-retry-check.patch b/patches.suse/net-ibmvnic-Fix-typo-in-retry-check.patch
index 98e186fde5..7e076cd8b8 100644
--- a/patches.suse/net-ibmvnic-Fix-typo-in-retry-check.patch
+++ b/patches.suse/net-ibmvnic-Fix-typo-in-retry-check.patch
@@ -4,8 +4,7 @@ Date: Wed, 11 Dec 2019 09:38:39 -0600
Subject: [PATCH] net/ibmvnic: Fix typo in retry check
References: bsc#1155689 ltc#182047
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+Patch-mainline: v5.5-rc3
Git-commit: 8f9cc1ee296275d27770245cbd247a4952bbb2be
This conditional is missing a bang, with the intent
diff --git a/patches.suse/samples-bpf-Fix-broken-xdp_rxq_info-due-to-map-order.patch b/patches.suse/samples-bpf-Fix-broken-xdp_rxq_info-due-to-map-order.patch
new file mode 100644
index 0000000000..d3bc12faf0
--- /dev/null
+++ b/patches.suse/samples-bpf-Fix-broken-xdp_rxq_info-due-to-map-order.patch
@@ -0,0 +1,64 @@
+From: Jesper Dangaard Brouer <brouer@redhat.com>
+Date: Mon, 2 Dec 2019 13:37:31 +0100
+Subject: samples/bpf: Fix broken xdp_rxq_info due to map order assumptions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: edbca120a8cdfa5a5793707e33497aa5185875ca
+References: bsc#1155518
+
+In the days of using bpf_load.c, the order in which the 'maps' sections
+were defined in the BPF-side (*_kern.c) file was used by the userspace
+side to identify a map, using the map order as an index. In effect, the
+order-index is created based on the order in which the maps sections
+are stored in the ELF object file by the LLVM compiler.
+
+This has also carried over into libbpf via the API
+bpf_map__next(NULL, obj), which extracts maps in the order libbpf
+parsed the ELF object file.
+
+When BTF-based maps were introduced, a new section type ".maps" was
+created. I found that the LLVM compiler doesn't create the ".maps"
+sections in the order they are defined in the C file. The order in the
+ELF file is based on the order the map pointer is referenced in the code.
+
+This combination of changes led to xdp_rxq_info mixing up the map
+file descriptors in userspace, resulting in very broken behaviour, but
+without warning the user.
+
+This patch fixes the issue by instead using bpf_object__find_map_by_name()
+to find maps via their names. (Note: this is the ELF name, which can
+be longer than the name the kernel retains.)
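+
+The name-based lookup pattern used by the fix, for reference (the fd
+retrieval is an illustrative addition, not part of this patch):
+
+  struct bpf_map *map = bpf_object__find_map_by_name(obj, "config_map");
+  int fd = map ? bpf_map__fd(map) : -1;  /* fd for bpf_map_*_elem() calls */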
+
+Fixes: be5bca44aa6b ("samples: bpf: convert some XDP samples from bpf_load to libbpf")
+Fixes: 451d1dc886b5 ("samples: bpf: update map definition to new syntax BTF-defined map")
+Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157529025128.29832.5953245340679936909.stgit@firesoul
+Acked-by: Gary Lin <glin@suse.com>
+---
+ samples/bpf/xdp_rxq_info_user.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
+index 51e0d810e070..8fc3ad01de72 100644
+--- a/samples/bpf/xdp_rxq_info_user.c
++++ b/samples/bpf/xdp_rxq_info_user.c
+@@ -489,9 +489,9 @@ int main(int argc, char **argv)
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return EXIT_FAIL;
+
+- map = bpf_map__next(NULL, obj);
+- stats_global_map = bpf_map__next(map, obj);
+- rx_queue_index_map = bpf_map__next(stats_global_map, obj);
++ map = bpf_object__find_map_by_name(obj, "config_map");
++ stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
++ rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
+ if (!map || !stats_global_map || !rx_queue_index_map) {
+ printf("finding a map in obj file failed\n");
+ return EXIT_FAIL;
+--
+2.24.0
+
diff --git a/patches.suse/samples-bpf-convert-xdp_sample_pkts_user-to-perf_buf.patch b/patches.suse/samples-bpf-convert-xdp_sample_pkts_user-to-perf_buf.patch
new file mode 100644
index 0000000000..242a20ac85
--- /dev/null
+++ b/patches.suse/samples-bpf-convert-xdp_sample_pkts_user-to-perf_buf.patch
@@ -0,0 +1,154 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 23 Jul 2019 14:34:43 -0700
+Subject: samples/bpf: convert xdp_sample_pkts_user to perf_buffer API
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.4-rc1
+Git-commit: f58a4d51d8da7b248d8796e9981feb3d5a43d3d2
+References: bsc#1155518
+
+Convert xdp_sample_pkts_user to libbpf's perf_buffer API.
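+
+The perf_buffer pattern this conversion applies, in a nutshell
+(handle_event and map_fd are placeholder names):
+
+  static void handle_event(void *ctx, int cpu, void *data, __u32 size)
+  {
+          /* consume one sample delivered from the BPF program */
+  }
+
+  struct perf_buffer_opts pb_opts = { .sample_cb = handle_event };
+  struct perf_buffer *pb = perf_buffer__new(map_fd, 8 /* pages per CPU */,
+                                            &pb_opts);
+
+  if (libbpf_get_error(pb))
+          return 1;
+  while (perf_buffer__poll(pb, 1000 /* timeout, ms */) >= 0)
+          ;  /* sample_cb fires once per record */
+  perf_buffer__free(pb);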
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ samples/bpf/xdp_sample_pkts_user.c | 61 +++++++++---------------------
+ 1 file changed, 17 insertions(+), 44 deletions(-)
+
+diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
+index dc66345a929a..3002714e3cd5 100644
+--- a/samples/bpf/xdp_sample_pkts_user.c
++++ b/samples/bpf/xdp_sample_pkts_user.c
+@@ -17,14 +17,13 @@
+ #include <linux/if_link.h>
+
+ #include "perf-sys.h"
+-#include "trace_helpers.h"
+
+ #define MAX_CPUS 128
+-static int pmu_fds[MAX_CPUS], if_idx;
+-static struct perf_event_mmap_page *headers[MAX_CPUS];
++static int if_idx;
+ static char *if_name;
+ static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+ static __u32 prog_id;
++static struct perf_buffer *pb = NULL;
+
+ static int do_attach(int idx, int fd, const char *name)
+ {
+@@ -73,7 +72,7 @@ static int do_detach(int idx, const char *name)
+
+ #define SAMPLE_SIZE 64
+
+-static int print_bpf_output(void *data, int size)
++static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
+ {
+ struct {
+ __u16 cookie;
+@@ -83,45 +82,20 @@ static int print_bpf_output(void *data, int size)
+ int i;
+
+ if (e->cookie != 0xdead) {
+- printf("BUG cookie %x sized %d\n",
+- e->cookie, size);
+- return LIBBPF_PERF_EVENT_ERROR;
++ printf("BUG cookie %x sized %d\n", e->cookie, size);
++ return;
+ }
+
+ printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
+ for (i = 0; i < 14 && i < e->pkt_len; i++)
+ printf("%02x ", e->pkt_data[i]);
+ printf("\n");
+-
+- return LIBBPF_PERF_EVENT_CONT;
+-}
+-
+-static void test_bpf_perf_event(int map_fd, int num)
+-{
+- struct perf_event_attr attr = {
+- .sample_type = PERF_SAMPLE_RAW,
+- .type = PERF_TYPE_SOFTWARE,
+- .config = PERF_COUNT_SW_BPF_OUTPUT,
+- .wakeup_events = 1, /* get an fd notification for every event */
+- };
+- int i;
+-
+- for (i = 0; i < num; i++) {
+- int key = i;
+-
+- pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/,
+- -1/*group_fd*/, 0);
+-
+- assert(pmu_fds[i] >= 0);
+- assert(bpf_map_update_elem(map_fd, &key,
+- &pmu_fds[i], BPF_ANY) == 0);
+- ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0);
+- }
+ }
+
+ static void sig_handler(int signo)
+ {
+ do_detach(if_idx, if_name);
++ perf_buffer__free(pb);
+ exit(0);
+ }
+
+@@ -140,13 +114,13 @@ int main(int argc, char **argv)
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
++ struct perf_buffer_opts pb_opts = {};
+ const char *optstr = "F";
+ int prog_fd, map_fd, opt;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ char filename[256];
+- int ret, err, i;
+- int numcpus;
++ int ret, err;
+
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+ switch (opt) {
+@@ -169,10 +143,6 @@ int main(int argc, char **argv)
+ return 1;
+ }
+
+- numcpus = get_nprocs();
+- if (numcpus > MAX_CPUS)
+- numcpus = MAX_CPUS;
+-
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
+
+@@ -211,14 +181,17 @@ int main(int argc, char **argv)
+ return 1;
+ }
+
+- test_bpf_perf_event(map_fd, numcpus);
++ pb_opts.sample_cb = print_bpf_output;
++ pb = perf_buffer__new(map_fd, 8, &pb_opts);
++ err = libbpf_get_error(pb);
++ if (err) {
++ perror("perf_buffer setup failed");
++ return 1;
++ }
+
+- for (i = 0; i < numcpus; i++)
+- if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0)
+- return 1;
++ while ((ret = perf_buffer__poll(pb, 1000)) >= 0) {
++ }
+
+- ret = perf_event_poller_multi(pmu_fds, headers, numcpus,
+- print_bpf_output);
+ kill(0, SIGINT);
+ return ret;
+ }
+--
+2.24.0
+
diff --git a/patches.suse/samples-bpf-switch-trace_output-sample-to-perf_buffe.patch b/patches.suse/samples-bpf-switch-trace_output-sample-to-perf_buffe.patch
new file mode 100644
index 0000000000..f82318622b
--- /dev/null
+++ b/patches.suse/samples-bpf-switch-trace_output-sample-to-perf_buffe.patch
@@ -0,0 +1,117 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 23 Jul 2019 14:34:44 -0700
+Subject: samples/bpf: switch trace_output sample to perf_buffer API
+Patch-mainline: v5.4-rc1
+Git-commit: c17bec549c9dc969b4e725c56aa9ebb125378397
+References: bsc#1155518
+
+Convert trace_output sample to libbpf's perf_buffer API.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ samples/bpf/trace_output_user.c | 43 +++++++++++----------------------
+ 1 file changed, 14 insertions(+), 29 deletions(-)
+
+diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
+index 2dd1d39b152a..8ee47699a870 100644
+--- a/samples/bpf/trace_output_user.c
++++ b/samples/bpf/trace_output_user.c
+@@ -18,9 +18,6 @@
+ #include <libbpf.h>
+ #include "bpf_load.h"
+ #include "perf-sys.h"
+-#include "trace_helpers.h"
+-
+-static int pmu_fd;
+
+ static __u64 time_get_ns(void)
+ {
+@@ -31,12 +28,12 @@ static __u64 time_get_ns(void)
+ }
+
+ static __u64 start_time;
++static __u64 cnt;
+
+ #define MAX_CNT 100000ll
+
+-static int print_bpf_output(void *data, int size)
++static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
+ {
+- static __u64 cnt;
+ struct {
+ __u64 pid;
+ __u64 cookie;
+@@ -45,7 +42,7 @@ static int print_bpf_output(void *data, int size)
+ if (e->cookie != 0x12345678) {
+ printf("BUG pid %llx cookie %llx sized %d\n",
+ e->pid, e->cookie, size);
+- return LIBBPF_PERF_EVENT_ERROR;
++ return;
+ }
+
+ cnt++;
+@@ -53,30 +50,14 @@ static int print_bpf_output(void *data, int size)
+ if (cnt == MAX_CNT) {
+ printf("recv %lld events per sec\n",
+ MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+- return LIBBPF_PERF_EVENT_DONE;
++ return;
+ }
+-
+- return LIBBPF_PERF_EVENT_CONT;
+-}
+-
+-static void test_bpf_perf_event(void)
+-{
+- struct perf_event_attr attr = {
+- .sample_type = PERF_SAMPLE_RAW,
+- .type = PERF_TYPE_SOFTWARE,
+- .config = PERF_COUNT_SW_BPF_OUTPUT,
+- };
+- int key = 0;
+-
+- pmu_fd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+-
+- assert(pmu_fd >= 0);
+- assert(bpf_map_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+- ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ }
+
+ int main(int argc, char **argv)
+ {
++ struct perf_buffer_opts pb_opts = {};
++ struct perf_buffer *pb;
+ char filename[256];
+ FILE *f;
+ int ret;
+@@ -88,16 +69,20 @@ int main(int argc, char **argv)
+ return 1;
+ }
+
+- test_bpf_perf_event();
+-
+- if (perf_event_mmap(pmu_fd) < 0)
++ pb_opts.sample_cb = print_bpf_output;
++ pb = perf_buffer__new(map_fd[0], 8, &pb_opts);
++ ret = libbpf_get_error(pb);
++ if (ret) {
++ printf("failed to setup perf_buffer: %d\n", ret);
+ return 1;
++ }
+
+ f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+ (void) f;
+
+ start_time = time_get_ns();
+- ret = perf_event_poller(pmu_fd, print_bpf_output);
++ while ((ret = perf_buffer__poll(pb, 1000)) >= 0 && cnt < MAX_CNT) {
++ }
+ kill(0, SIGINT);
+ return ret;
+ }
+--
+2.24.0
+
diff --git a/patches.suse/scripts-bpf-Fix-xdp_md-forward-declaration-typo.patch b/patches.suse/scripts-bpf-Fix-xdp_md-forward-declaration-typo.patch
new file mode 100644
index 0000000000..5c6415ac98
--- /dev/null
+++ b/patches.suse/scripts-bpf-Fix-xdp_md-forward-declaration-typo.patch
@@ -0,0 +1,30 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 9 Oct 2019 21:25:34 -0700
+Subject: scripts/bpf: Fix xdp_md forward declaration typo
+Patch-mainline: v5.5-rc1
+Git-commit: e0b68fb186b251374adbd870f99b1ecea236e770
+References: bsc#1155518
+
+Fix a typo in struct xpd_md ("xpd" should be "xdp"), generated by
+bpf_helpers_doc.py, which is causing compilation warnings for programs
+using bpf_helpers.h.
+
+Fixes: 7a387bed47f7 ("scripts/bpf: teach bpf_helpers_doc.py to dump BPF helper definitions")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191010042534.290562-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ scripts/bpf_helpers_doc.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/bpf_helpers_doc.py
++++ b/scripts/bpf_helpers_doc.py
+@@ -418,7 +418,7 @@ class PrinterHelpers(Printer):
+
+ 'struct __sk_buff',
+ 'struct sk_msg_md',
+- 'struct xpd_md',
++ 'struct xdp_md',
+ ]
+ known_types = {
+ '...',
diff --git a/patches.suse/scripts-bpf-teach-bpf_helpers_doc.py-to-dump-BPF-hel.patch b/patches.suse/scripts-bpf-teach-bpf_helpers_doc.py-to-dump-BPF-hel.patch
new file mode 100644
index 0000000000..edb6e0c1bc
--- /dev/null
+++ b/patches.suse/scripts-bpf-teach-bpf_helpers_doc.py-to-dump-BPF-hel.patch
@@ -0,0 +1,193 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 6 Oct 2019 20:07:37 -0700
+Subject: scripts/bpf: teach bpf_helpers_doc.py to dump BPF helper definitions
+Patch-mainline: v5.5-rc1
+Git-commit: 7a387bed47f7e80e257d966cd64a3e92a63e26a1
+References: bsc#1155518
+
+Enhance scripts/bpf_helpers_doc.py to emit a C header with BPF helper
+definitions (to be included from libbpf's bpf_helpers.h).
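+
+Usage (paths relative to the kernel tree; this is how the selftests
+Makefile wires it up in a follow-up patch):
+
+  $ scripts/bpf_helpers_doc.py --header \
+        --file include/uapi/linux/bpf.h > bpf_helper_defs.h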
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Gary Lin <glin@suse.com>
+---
+ scripts/bpf_helpers_doc.py | 155 ++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 154 insertions(+), 1 deletion(-)
+
+--- a/scripts/bpf_helpers_doc.py
++++ b/scripts/bpf_helpers_doc.py
+@@ -391,6 +391,154 @@ SEE ALSO
+
+ print('')
+
++class PrinterHelpers(Printer):
++ """
++ A printer for dumping collected information about helpers as C header to
++ be included from BPF program.
++ @helpers: array of Helper objects to print to standard output
++ """
++
++ type_fwds = [
++ 'struct bpf_fib_lookup',
++ 'struct bpf_perf_event_data',
++ 'struct bpf_perf_event_value',
++ 'struct bpf_sock',
++ 'struct bpf_sock_addr',
++ 'struct bpf_sock_ops',
++ 'struct bpf_sock_tuple',
++ 'struct bpf_spin_lock',
++ 'struct bpf_sysctl',
++ 'struct bpf_tcp_sock',
++ 'struct bpf_tunnel_key',
++ 'struct bpf_xfrm_state',
++ 'struct pt_regs',
++ 'struct sk_reuseport_md',
++ 'struct sockaddr',
++ 'struct tcphdr',
++
++ 'struct __sk_buff',
++ 'struct sk_msg_md',
++ 'struct xpd_md',
++ ]
++ known_types = {
++ '...',
++ 'void',
++ 'const void',
++ 'char',
++ 'const char',
++ 'int',
++ 'long',
++ 'unsigned long',
++
++ '__be16',
++ '__be32',
++ '__wsum',
++
++ 'struct bpf_fib_lookup',
++ 'struct bpf_perf_event_data',
++ 'struct bpf_perf_event_value',
++ 'struct bpf_sock',
++ 'struct bpf_sock_addr',
++ 'struct bpf_sock_ops',
++ 'struct bpf_sock_tuple',
++ 'struct bpf_spin_lock',
++ 'struct bpf_sysctl',
++ 'struct bpf_tcp_sock',
++ 'struct bpf_tunnel_key',
++ 'struct bpf_xfrm_state',
++ 'struct pt_regs',
++ 'struct sk_reuseport_md',
++ 'struct sockaddr',
++ 'struct tcphdr',
++ }
++ mapped_types = {
++ 'u8': '__u8',
++ 'u16': '__u16',
++ 'u32': '__u32',
++ 'u64': '__u64',
++ 's8': '__s8',
++ 's16': '__s16',
++ 's32': '__s32',
++ 's64': '__s64',
++ 'size_t': 'unsigned long',
++ 'struct bpf_map': 'void',
++ 'struct sk_buff': 'struct __sk_buff',
++ 'const struct sk_buff': 'const struct __sk_buff',
++ 'struct sk_msg_buff': 'struct sk_msg_md',
++ 'struct xdp_buff': 'struct xdp_md',
++ }
++
++ def print_header(self):
++ header = '''\
++/* This is auto-generated file. See bpf_helpers_doc.py for details. */
++
++/* Forward declarations of BPF structs */'''
++
++ print(header)
++ for fwd in self.type_fwds:
++ print('%s;' % fwd)
++ print('')
++
++ def print_footer(self):
++ footer = ''
++ print(footer)
++
++ def map_type(self, t):
++ if t in self.known_types:
++ return t
++ if t in self.mapped_types:
++ return self.mapped_types[t]
++ print("")
++ print("Unrecognized type '%s', please add it to known types!" % t)
++ sys.exit(1)
++
++ seen_helpers = set()
++
++ def print_one(self, helper):
++ proto = helper.proto_break_down()
++
++ if proto['name'] in self.seen_helpers:
++ return
++ self.seen_helpers.add(proto['name'])
++
++ print('/*')
++ print(" * %s" % proto['name'])
++ print(" *")
++ if (helper.desc):
++ # Do not strip all newline characters: formatted code at the end of
++ # a section must be followed by a blank line.
++ for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
++ print(' *{}{}'.format(' \t' if line else '', line))
++
++ if (helper.ret):
++ print(' *')
++ print(' * Returns')
++ for line in helper.ret.rstrip().split('\n'):
++ print(' *{}{}'.format(' \t' if line else '', line))
++
++ print(' */')
++ print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
++ proto['ret_star'], proto['name']), end='')
++ comma = ''
++ for i, a in enumerate(proto['args']):
++ t = a['type']
++ n = a['name']
++ if proto['name'] == 'bpf_get_socket_cookie' and i == 0:
++ t = 'void'
++ n = 'ctx'
++ one_arg = '{}{}'.format(comma, self.map_type(t))
++ if n:
++ if a['star']:
++ one_arg += ' {}'.format(a['star'])
++ else:
++ one_arg += ' '
++ one_arg += '{}'.format(n)
++ comma = ', '
++ print(one_arg, end='')
++
++ print(') = (void *) %d;' % len(self.seen_helpers))
++ print('')
++
+ ###############################################################################
+
+ # If script is launched from scripts/ from kernel tree and can access
+@@ -405,6 +553,8 @@ Parse eBPF header file and generate docu
+ The RST-formatted output produced can be turned into a manual page with the
+ rst2man utility.
+ """)
++argParser.add_argument('--header', action='store_true',
++ help='generate C header file')
+ if (os.path.isfile(bpfh)):
+ argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
+ default=bpfh)
+@@ -417,5 +567,8 @@ headerParser = HeaderParser(args.filenam
+ headerParser.run()
+
+ # Print formatted output to standard output.
+-printer = PrinterRST(headerParser.helpers)
++if args.header:
++ printer = PrinterHelpers(headerParser.helpers)
++else:
++ printer = PrinterRST(headerParser.helpers)
+ printer.print_all()
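
For orientation, a sketch of the kind of definition the new --header mode
emits (illustrative, not verbatim output; helper ID 1 corresponds to
bpf_map_lookup_elem in the UAPI enum of this era):

    /* Each helper becomes a function pointer whose "address" is its helper
     * ID; the kernel resolves the call to the real helper at program load
     * time. Note the script deliberately maps 'struct bpf_map *' to 'void *'.
     */
    static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;

BPF programs then pull the generated file in through bpf_helpers.h and call
helpers as ordinary C functions.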
diff --git a/patches.suse/selftest-bpf-Add-relocatable-bitfield-reading-tests.patch b/patches.suse/selftest-bpf-Add-relocatable-bitfield-reading-tests.patch
new file mode 100644
index 0000000000..5c59302bc9
--- /dev/null
+++ b/patches.suse/selftest-bpf-Add-relocatable-bitfield-reading-tests.patch
@@ -0,0 +1,404 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 1 Nov 2019 15:28:09 -0700
+Subject: selftest/bpf: Add relocatable bitfield reading tests
+Patch-mainline: v5.5-rc1
+Git-commit: 8b1cb1c9601f835c025af5b3cf0e98c8048ad30b
+References: bsc#1155518
+
+Add a set of selftests verifying the correctness of libbpf's relocatable
+bitfield reading support. Both the bpf_probe_read()-based and the direct
+read-based bitfield macros are tested. The core_reloc.c test harness is
+extended to support both raw tracepoints and the new typed raw tracepoints
+as test BPF program types.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191101222810.1246166-5-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 84 +++++++++-
+ tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c | 3
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 72 ++++++++
+ tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c | 63 +++++++
+ tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c | 62 +++++++
+ 9 files changed, 294 insertions(+), 2 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -189,6 +189,42 @@
+ .fails = true, \
+ }
+
++#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
++ .case_name = test_name_prefix#name, \
++ .bpf_obj_file = objfile, \
++ .btf_src_file = "btf__core_reloc_" #name ".o"
++
++#define BITFIELDS_CASE(name, ...) { \
++ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
++ "direct:", name), \
++ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
++ .input_len = sizeof(struct core_reloc_##name), \
++ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
++ __VA_ARGS__, \
++ .output_len = sizeof(struct core_reloc_bitfields_output), \
++}, { \
++ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
++ "probed:", name), \
++ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
++ .input_len = sizeof(struct core_reloc_##name), \
++ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
++ __VA_ARGS__, \
++ .output_len = sizeof(struct core_reloc_bitfields_output), \
++ .direct_raw_tp = true, \
++}
++
++
++#define BITFIELDS_ERR_CASE(name) { \
++ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
++ "probed:", name), \
++ .fails = true, \
++}, { \
++ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
++ "direct:", name), \
++ .direct_raw_tp = true, \
++ .fails = true, \
++}
++
+ struct core_reloc_test_case {
+ const char *case_name;
+ const char *bpf_obj_file;
+@@ -199,6 +235,7 @@ struct core_reloc_test_case {
+ int output_len;
+ bool fails;
+ bool relaxed_core_relocs;
++ bool direct_raw_tp;
+ };
+
+ static struct core_reloc_test_case test_cases[] = {
+@@ -352,6 +389,40 @@ static struct core_reloc_test_case test_
+ EXISTENCE_ERR_CASE(existence__err_arr_kind),
+ EXISTENCE_ERR_CASE(existence__err_arr_value_type),
+ EXISTENCE_ERR_CASE(existence__err_struct_type),
++
++ /* bitfield relocation checks */
++ BITFIELDS_CASE(bitfields, {
++ .ub1 = 1,
++ .ub2 = 2,
++ .ub7 = 96,
++ .sb4 = -7,
++ .sb20 = -0x76543,
++ .u32 = 0x80000000,
++ .s32 = -0x76543210,
++ }),
++ BITFIELDS_CASE(bitfields___bit_sz_change, {
++ .ub1 = 6,
++ .ub2 = 0xABCDE,
++ .ub7 = 1,
++ .sb4 = -1,
++ .sb20 = -0x17654321,
++ .u32 = 0xBEEF,
++ .s32 = -0x3FEDCBA987654321,
++ }),
++ BITFIELDS_CASE(bitfields___bitfield_vs_int, {
++ .ub1 = 0xFEDCBA9876543210,
++ .ub2 = 0xA6,
++ .ub7 = -0x7EDCBA987654321,
++ .sb4 = -0x6123456789ABCDE,
++ .sb20 = 0xD00D,
++ .u32 = -0x76543,
++ .s32 = 0x0ADEADBEEFBADB0B,
++ }),
++ BITFIELDS_CASE(bitfields___just_big_enough, {
++ .ub1 = 0xF,
++ .ub2 = 0x0812345678FEDCBA,
++ }),
++ BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
+ };
+
+ struct data {
+@@ -361,9 +432,9 @@ struct data {
+
+ void test_core_reloc(void)
+ {
+- const char *probe_name = "raw_tracepoint/sys_enter";
+ struct bpf_object_load_attr load_attr = {};
+ struct core_reloc_test_case *test_case;
++ const char *tp_name, *probe_name;
+ int err, duration = 0, i, equal;
+ struct bpf_link *link = NULL;
+ struct bpf_map *data_map;
+@@ -387,6 +458,15 @@ void test_core_reloc(void)
+ test_case->bpf_obj_file, PTR_ERR(obj)))
+ continue;
+
++ /* for typed raw tracepoints, NULL should be specified */
++ if (test_case->direct_raw_tp) {
++ probe_name = "tp_btf/sys_enter";
++ tp_name = NULL;
++ } else {
++ probe_name = "raw_tracepoint/sys_enter";
++ tp_name = "sys_enter";
++ }
++
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK(!prog, "find_probe",
+ "prog '%s' not found\n", probe_name))
+@@ -407,7 +487,7 @@ void test_core_reloc(void)
+ goto cleanup;
+ }
+
+- link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
++ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link)))
+ goto cleanup;
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_bitfields x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_bitfields___bit_sz_change x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_bitfields___bitfield_vs_int x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_bitfields___err_too_big_bitfield x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_bitfields___just_big_enough x) {}
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -730,3 +730,75 @@ struct core_reloc_existence___err_wrong_
+ struct core_reloc_existence___err_wrong_struct_type {
+ int s;
+ };
++
++/*
++ * BITFIELDS
++ */
++/* bitfield read results, all as plain integers */
++struct core_reloc_bitfields_output {
++ int64_t ub1;
++ int64_t ub2;
++ int64_t ub7;
++ int64_t sb4;
++ int64_t sb20;
++ int64_t u32;
++ int64_t s32;
++};
++
++struct core_reloc_bitfields {
++ /* unsigned bitfields */
++ uint8_t ub1: 1;
++ uint8_t ub2: 2;
++ uint32_t ub7: 7;
++ /* signed bitfields */
++ int8_t sb4: 4;
++ int32_t sb20: 20;
++ /* non-bitfields */
++ uint32_t u32;
++ int32_t s32;
++};
++
++/* different bit sizes (both up and down) */
++struct core_reloc_bitfields___bit_sz_change {
++ /* unsigned bitfields */
++ uint16_t ub1: 3; /* 1 -> 3 */
++ uint32_t ub2: 20; /* 2 -> 20 */
++ uint8_t ub7: 1; /* 7 -> 1 */
++ /* signed bitfields */
++ int8_t sb4: 1; /* 4 -> 1 */
++ int32_t sb20: 30; /* 20 -> 30 */
++ /* non-bitfields */
++ uint16_t u32; /* 32 -> 16 */
++ int64_t s32; /* 32 -> 64 */
++};
++
++/* turn bitfield into non-bitfield and vice versa */
++struct core_reloc_bitfields___bitfield_vs_int {
++ uint64_t ub1; /* 3 -> 64 non-bitfield */
++ uint8_t ub2; /* 20 -> 8 non-bitfield */
++ int64_t ub7; /* 7 -> 64 non-bitfield signed */
++ int64_t sb4; /* 4 -> 64 non-bitfield signed */
++ uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
++ int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
++ uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
++};
++
++struct core_reloc_bitfields___just_big_enough {
++ uint64_t ub1: 4;
++ uint64_t ub2: 60; /* packed tightly */
++ uint32_t ub7;
++ uint32_t sb4;
++ uint32_t sb20;
++ uint32_t u32;
++ uint32_t s32;
++} __attribute__((packed));
++
++struct core_reloc_bitfields___err_too_big_bitfield {
++ uint64_t ub1: 4;
++ uint64_t ub2: 61; /* packed tightly */
++ uint32_t ub7;
++ uint32_t sb4;
++ uint32_t sb20;
++ uint32_t u32;
++ uint32_t s32;
++} __attribute__((packed));
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
+@@ -0,0 +1,63 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/bpf.h>
++#include <stdint.h>
++#include "bpf_helpers.h"
++#include "bpf_core_read.h"
++
++char _license[] SEC("license") = "GPL";
++
++static volatile struct data {
++ char in[256];
++ char out[256];
++} data;
++
++struct core_reloc_bitfields {
++ /* unsigned bitfields */
++ uint8_t ub1: 1;
++ uint8_t ub2: 2;
++ uint32_t ub7: 7;
++ /* signed bitfields */
++ int8_t sb4: 4;
++ int32_t sb20: 20;
++ /* non-bitfields */
++ uint32_t u32;
++ int32_t s32;
++};
++
++/* bitfield read results, all as plain integers */
++struct core_reloc_bitfields_output {
++ int64_t ub1;
++ int64_t ub2;
++ int64_t ub7;
++ int64_t sb4;
++ int64_t sb20;
++ int64_t u32;
++ int64_t s32;
++};
++
++struct pt_regs;
++
++struct trace_sys_enter {
++ struct pt_regs *regs;
++ long id;
++};
++
++SEC("tp_btf/sys_enter")
++int test_core_bitfields_direct(void *ctx)
++{
++ struct core_reloc_bitfields *in = (void *)&data.in;
++ struct core_reloc_bitfields_output *out = (void *)&data.out;
++
++ out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
++ out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
++ out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
++ out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
++ out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
++ out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
++ out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
++
++ return 0;
++}
++
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
+@@ -0,0 +1,62 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/bpf.h>
++#include <stdint.h>
++#include "bpf_helpers.h"
++#include "bpf_core_read.h"
++
++char _license[] SEC("license") = "GPL";
++
++static volatile struct data {
++ char in[256];
++ char out[256];
++} data;
++
++struct core_reloc_bitfields {
++ /* unsigned bitfields */
++ uint8_t ub1: 1;
++ uint8_t ub2: 2;
++ uint32_t ub7: 7;
++ /* signed bitfields */
++ int8_t sb4: 4;
++ int32_t sb20: 20;
++ /* non-bitfields */
++ uint32_t u32;
++ int32_t s32;
++};
++
++/* bitfield read results, all as plain integers */
++struct core_reloc_bitfields_output {
++ int64_t ub1;
++ int64_t ub2;
++ int64_t ub7;
++ int64_t sb4;
++ int64_t sb20;
++ int64_t u32;
++ int64_t s32;
++};
++
++#define TRANSFER_BITFIELD(in, out, field) \
++ if (BPF_CORE_READ_BITFIELD_PROBED(in, field, &res)) \
++ return 1; \
++ out->field = res
++
++SEC("raw_tracepoint/sys_enter")
++int test_core_bitfields(void *ctx)
++{
++ struct core_reloc_bitfields *in = (void *)&data.in;
++ struct core_reloc_bitfields_output *out = (void *)&data.out;
++ uint64_t res;
++
++ TRANSFER_BITFIELD(in, out, ub1);
++ TRANSFER_BITFIELD(in, out, ub2);
++ TRANSFER_BITFIELD(in, out, ub7);
++ TRANSFER_BITFIELD(in, out, sb4);
++ TRANSFER_BITFIELD(in, out, sb20);
++ TRANSFER_BITFIELD(in, out, u32);
++ TRANSFER_BITFIELD(in, out, s32);
++
++ return 0;
++}
++
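
As a rough illustration of what a relocated bitfield read boils down to,
here is a little-endian sketch; byte_off, byte_sz, lshift and rshift stand
for the values the CO-RE relocation computes against the running kernel's
actual struct layout (an illustrative helper, not the literal macro
expansion):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: extract a signed bitfield once CO-RE has supplied the
     * relocated load parameters for the target kernel's layout. */
    static int64_t read_bitfield(const void *s, int byte_off, int byte_sz,
                                 int lshift, int rshift)
    {
            uint64_t val = 0;

            /* load byte_sz bytes starting at byte_off (little-endian) */
            memcpy(&val, (const char *)s + byte_off, byte_sz);
            val <<= lshift;                 /* discard bits above the field */
            return (int64_t)val >> rshift;  /* arithmetic shift sign-extends;
                                               use a logical shift for
                                               unsigned fields */
    }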
diff --git a/patches.suse/selftest-bpf-Get-rid-of-a-bunch-of-explicit-BPF-prog.patch b/patches.suse/selftest-bpf-Get-rid-of-a-bunch-of-explicit-BPF-prog.patch
new file mode 100644
index 0000000000..6d87c7ddb2
--- /dev/null
+++ b/patches.suse/selftest-bpf-Get-rid-of-a-bunch-of-explicit-BPF-prog.patch
@@ -0,0 +1,79 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:39:02 -0700
+Subject: selftest/bpf: Get rid of a bunch of explicit BPF program type setting
+Patch-mainline: v5.5-rc1
+Git-commit: 1678e33c21b705e9e5d26385aa1611aabe5482dc
+References: bsc#1155518
+
+Now that libbpf can correctly guess BPF program types from section
+names, remove a bunch of explicit bpf_program__set_type() calls
+throughout tests.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-8-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/attach_probe.c | 5 -----
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 1 -
+ tools/testing/selftests/bpf/prog_tests/rdonly_maps.c | 4 ----
+ tools/testing/selftests/bpf/test_maps.c | 4 ----
+ 4 files changed, 14 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
++++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+@@ -99,11 +99,6 @@ void test_attach_probe(void)
+ "prog '%s' not found\n", uretprobe_name))
+ goto cleanup;
+
+- bpf_program__set_kprobe(kprobe_prog);
+- bpf_program__set_kprobe(kretprobe_prog);
+- bpf_program__set_kprobe(uprobe_prog);
+- bpf_program__set_kprobe(uretprobe_prog);
+-
+ /* create maps && load programs */
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -319,7 +319,6 @@ void test_core_reloc(void)
+ if (CHECK(!prog, "find_probe",
+ "prog '%s' not found\n", probe_name))
+ goto cleanup;
+- bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
+
+ load_attr.obj = obj;
+ load_attr.log_level = 0;
+--- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
++++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+@@ -36,10 +36,6 @@ void test_rdonly_maps(void)
+ if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+- bpf_object__for_each_program(prog, obj) {
+- bpf_program__set_raw_tracepoint(prog);
+- }
+-
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+--- a/tools/testing/selftests/bpf/test_maps.c
++++ b/tools/testing/selftests/bpf/test_maps.c
+@@ -1142,7 +1142,6 @@ out_sockmap:
+ #define MAPINMAP_PROG "./test_map_in_map.o"
+ static void test_map_in_map(void)
+ {
+- struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int mim_fd, fd, err;
+@@ -1179,9 +1178,6 @@ static void test_map_in_map(void)
+ goto out_map_in_map;
+ }
+
+- bpf_object__for_each_program(prog, obj) {
+- bpf_program__set_xdp(prog);
+- }
+ bpf_object__load(obj);
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
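
A minimal sketch of what the section-name guessing enables on the BPF side
(hypothetical program; the SEC() prefix alone now determines
BPF_PROG_TYPE_KPROBE, so userspace no longer needs bpf_program__set_kprobe()):

    #include <linux/ptrace.h>
    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    /* The "kprobe/" section prefix is enough for libbpf to infer the
     * program type during bpf_object__load(). */
    SEC("kprobe/__set_task_comm")
    int handle_set_comm(struct pt_regs *ctx)
    {
            return 0;
    }

    char _license[] SEC("license") = "GPL";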
diff --git a/patches.suse/selftest-bpf-Simple-test-for-fentry-fexit.patch b/patches.suse/selftest-bpf-Simple-test-for-fentry-fexit.patch
new file mode 100644
index 0000000000..fa6f961449
--- /dev/null
+++ b/patches.suse/selftest-bpf-Simple-test-for-fentry-fexit.patch
@@ -0,0 +1,164 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:07 -0800
+Subject: selftest/bpf: Simple test for fentry/fexit
+Patch-mainline: v5.5-rc1
+Git-commit: e41074d39d71aa62a6ec557af09cd42ca0928e05
+References: bsc#1155518
+
+Add a simple test for fentry and fexit programs around eth_type_trans().
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-8-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 39 ++++++++++++++-
+ tools/testing/selftests/bpf/progs/kfree_skb.c | 52 +++++++++++++++++++++
+ 2 files changed, 88 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+@@ -60,15 +60,17 @@ void test_kfree_skb(void)
+ .file = "./kfree_skb.o",
+ };
+
++ struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL;
++ struct bpf_map *perf_buf_map, *global_data;
++ struct bpf_program *prog, *fentry, *fexit;
+ struct bpf_object *obj, *obj2 = NULL;
+ struct perf_buffer_opts pb_opts = {};
+ struct perf_buffer *pb = NULL;
+- struct bpf_link *link = NULL;
+- struct bpf_map *perf_buf_map;
+- struct bpf_program *prog;
+ int err, kfree_skb_fd;
+ bool passed = false;
+ __u32 duration = 0;
++ const int zero = 0;
++ bool test_ok[2];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &tattr.prog_fd);
+@@ -82,9 +84,28 @@ void test_kfree_skb(void)
+ prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb");
+ if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n"))
+ goto close_prog;
++ fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans");
++ if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n"))
++ goto close_prog;
++ fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans");
++ if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n"))
++ goto close_prog;
++
++ global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss");
++ if (CHECK(!global_data, "find global data", "not found\n"))
++ goto close_prog;
++
+ link = bpf_program__attach_raw_tracepoint(prog, NULL);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ goto close_prog;
++ link_fentry = bpf_program__attach_trace(fentry);
++ if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n",
++ PTR_ERR(link_fentry)))
++ goto close_prog;
++ link_fexit = bpf_program__attach_trace(fexit);
++ if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n",
++ PTR_ERR(link_fexit)))
++ goto close_prog;
+
+ perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map");
+ if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
+@@ -108,14 +129,26 @@ void test_kfree_skb(void)
+ err = perf_buffer__poll(pb, 100);
+ if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
+ goto close_prog;
++
+ /* make sure kfree_skb program was triggered
+ * and it sent expected skb into ring buffer
+ */
+ CHECK_FAIL(!passed);
++
++ err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ CHECK_FAIL(!test_ok[0] || !test_ok[1]);
+ close_prog:
+ perf_buffer__free(pb);
+ if (!IS_ERR_OR_NULL(link))
+ bpf_link__destroy(link);
++ if (!IS_ERR_OR_NULL(link_fentry))
++ bpf_link__destroy(link_fentry);
++ if (!IS_ERR_OR_NULL(link_fexit))
++ bpf_link__destroy(link_fexit);
+ bpf_object__close(obj);
+ bpf_object__close(obj2);
+ }
+--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
++++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ // Copyright (c) 2019 Facebook
+ #include <linux/bpf.h>
++#include <stdbool.h>
+ #include "bpf_helpers.h"
+ #include "bpf_endian.h"
+
+@@ -116,3 +117,54 @@ int trace_kfree_skb(struct trace_kfree_s
+ &meta, sizeof(meta));
+ return 0;
+ }
++
++static volatile struct {
++ bool fentry_test_ok;
++ bool fexit_test_ok;
++} result;
++
++struct eth_type_trans_args {
++ struct sk_buff *skb;
++ struct net_device *dev;
++ unsigned short protocol; /* return value available to fexit progs */
++};
++
++SEC("fentry/eth_type_trans")
++int fentry_eth_type_trans(struct eth_type_trans_args *ctx)
++{
++ struct sk_buff *skb = ctx->skb;
++ struct net_device *dev = ctx->dev;
++ int len, ifindex;
++
++ __builtin_preserve_access_index(({
++ len = skb->len;
++ ifindex = dev->ifindex;
++ }));
++
++ /* fentry sees full packet including L2 header */
++ if (len != 74 || ifindex != 1)
++ return 0;
++ result.fentry_test_ok = true;
++ return 0;
++}
++
++SEC("fexit/eth_type_trans")
++int fexit_eth_type_trans(struct eth_type_trans_args *ctx)
++{
++ struct sk_buff *skb = ctx->skb;
++ struct net_device *dev = ctx->dev;
++ int len, ifindex;
++
++ __builtin_preserve_access_index(({
++ len = skb->len;
++ ifindex = dev->ifindex;
++ }));
++
++ /* fexit sees packet without L2 header that eth_type_trans should have
++ * consumed.
++ */
++ if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1)
++ return 0;
++ result.fexit_test_ok = true;
++ return 0;
++}
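
Two constants above deserve a note: the fexit check expects len == 60
because 74 - 14 = 60 after eth_type_trans() pulls the 14-byte Ethernet
header, and 0x86dd is ETH_P_IPV6. More generally, for fexit programs the
traced function's return value appears as the final member of the
hand-written context struct, after the arguments. A sketch for a
hypothetical tracee 'long bar(int x)' (assuming the usual bpf_helpers.h
includes for SEC()):

    /* Sketch: context layout seen by an fexit program attached to a
     * hypothetical 'long bar(int x)': arguments first, return value last. */
    struct bar_args {
            int x;     /* first argument */
            long ret;  /* return value, meaningful only in fexit */
    };

    SEC("fexit/bar")
    int fexit_bar(struct bar_args *ctx)
    {
            if (ctx->ret < 0)
                    return 0;  /* e.g. skip failed calls */
            /* ... inspect ctx->x here ... */
            return 0;
    }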
diff --git a/patches.suse/selftests-Add-tests-for-automatic-map-pinning.patch b/patches.suse/selftests-Add-tests-for-automatic-map-pinning.patch
new file mode 100644
index 0000000000..001af55d65
--- /dev/null
+++ b/patches.suse/selftests-Add-tests-for-automatic-map-pinning.patch
@@ -0,0 +1,293 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 2 Nov 2019 12:09:42 +0100
+Subject: selftests: Add tests for automatic map pinning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: 2f4a32cc83a584ac3669d44fa2aa1d7f115d44c1
+References: bsc#1155518
+
+This adds a new BPF selftest to exercise the new automatic map pinning
+code.
+
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/157269298209.394725.15420085139296213182.stgit@toke.dk
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/pinning.c | 210 +++++++++++++++
+ tools/testing/selftests/bpf/progs/test_pinning.c | 31 ++
+ tools/testing/selftests/bpf/progs/test_pinning_invalid.c | 16 +
+ 3 files changed, 257 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/pinning.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_pinning.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_pinning_invalid.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
+@@ -0,0 +1,210 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <unistd.h>
++#include <test_progs.h>
++
++__u32 get_map_id(struct bpf_object *obj, const char *name)
++{
++ struct bpf_map_info map_info = {};
++ __u32 map_info_len, duration = 0;
++ struct bpf_map *map;
++ int err;
++
++ map_info_len = sizeof(map_info);
++
++ map = bpf_object__find_map_by_name(obj, name);
++ if (CHECK(!map, "find map", "NULL map"))
++ return 0;
++
++ err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
++ &map_info, &map_info_len);
++ CHECK(err, "get map info", "err %d errno %d", err, errno);
++ return map_info.id;
++}
++
++void test_pinning(void)
++{
++ const char *file_invalid = "./test_pinning_invalid.o";
++ const char *custpinpath = "/sys/fs/bpf/custom/pinmap";
++ const char *nopinpath = "/sys/fs/bpf/nopinmap";
++ const char *nopinpath2 = "/sys/fs/bpf/nopinmap2";
++ const char *custpath = "/sys/fs/bpf/custom";
++ const char *pinpath = "/sys/fs/bpf/pinmap";
++ const char *file = "./test_pinning.o";
++ __u32 map_id, map_id2, duration = 0;
++ struct stat statbuf = {};
++ struct bpf_object *obj;
++ struct bpf_map *map;
++ int err;
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
++ .pin_root_path = custpath,
++ );
++
++ /* check that opening fails with invalid pinning value in map def */
++ obj = bpf_object__open_file(file_invalid, NULL);
++ err = libbpf_get_error(obj);
++ if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) {
++ obj = NULL;
++ goto out;
++ }
++
++ /* open the valid object file */
++ obj = bpf_object__open_file(file, NULL);
++ err = libbpf_get_error(obj);
++ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
++ obj = NULL;
++ goto out;
++ }
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that pinmap was pinned */
++ err = stat(pinpath, &statbuf);
++ if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that nopinmap was *not* pinned */
++ err = stat(nopinpath, &statbuf);
++ if (CHECK(!err || errno != ENOENT, "stat nopinpath",
++ "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that nopinmap2 was *not* pinned */
++ err = stat(nopinpath2, &statbuf);
++ if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
++ "err %d errno %d\n", err, errno))
++ goto out;
++
++ map_id = get_map_id(obj, "pinmap");
++ if (!map_id)
++ goto out;
++
++ bpf_object__close(obj);
++
++ obj = bpf_object__open_file(file, NULL);
++ if (CHECK_FAIL(libbpf_get_error(obj))) {
++ obj = NULL;
++ goto out;
++ }
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that same map ID was reused for second load */
++ map_id2 = get_map_id(obj, "pinmap");
++ if (CHECK(map_id != map_id2, "check reuse",
++ "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))
++ goto out;
++
++ /* should be no-op to re-pin same map */
++ map = bpf_object__find_map_by_name(obj, "pinmap");
++ if (CHECK(!map, "find map", "NULL map"))
++ goto out;
++
++ err = bpf_map__pin(map, NULL);
++ if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* but error to pin at different location */
++ err = bpf_map__pin(map, "/sys/fs/bpf/other");
++ if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* unpin maps with a pin_path set */
++ err = bpf_object__unpin_maps(obj, NULL);
++ if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* and re-pin them... */
++ err = bpf_object__pin_maps(obj, NULL);
++ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* set pinning path of other map and re-pin all */
++ map = bpf_object__find_map_by_name(obj, "nopinmap");
++ if (CHECK(!map, "find map", "NULL map"))
++ goto out;
++
++ err = bpf_map__set_pin_path(map, custpinpath);
++ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* should only pin the one unpinned map */
++ err = bpf_object__pin_maps(obj, NULL);
++ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that nopinmap was pinned at the custom path */
++ err = stat(custpinpath, &statbuf);
++ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* remove the custom pin path to re-test it with auto-pinning below */
++ err = unlink(custpinpath);
++ if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
++ goto out;
++
++ err = rmdir(custpath);
++ if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
++ goto out;
++
++ bpf_object__close(obj);
++
++ /* open the valid object file again */
++ obj = bpf_object__open_file(file, NULL);
++ err = libbpf_get_error(obj);
++ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
++ obj = NULL;
++ goto out;
++ }
++
++ /* swap pin paths of the two maps */
++ bpf_object__for_each_map(map, obj) {
++ if (!strcmp(bpf_map__name(map), "nopinmap"))
++ err = bpf_map__set_pin_path(map, pinpath);
++ else if (!strcmp(bpf_map__name(map), "pinmap"))
++ err = bpf_map__set_pin_path(map, NULL);
++ else
++ continue;
++
++ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
++ goto out;
++ }
++
++ /* should fail because of map parameter mismatch */
++ err = bpf_object__load(obj);
++ if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno))
++ goto out;
++
++ bpf_object__close(obj);
++
++ /* test auto-pinning at custom path with open opt */
++ obj = bpf_object__open_file(file, &opts);
++ if (CHECK_FAIL(libbpf_get_error(obj))) {
++ obj = NULL;
++ goto out;
++ }
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
++ goto out;
++
++ /* check that pinmap was pinned at the custom path */
++ err = stat(custpinpath, &statbuf);
++ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
++ goto out;
++
++out:
++ unlink(pinpath);
++ unlink(nopinpath);
++ unlink(nopinpath2);
++ unlink(custpinpath);
++ rmdir(custpath);
++ if (obj)
++ bpf_object__close(obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_pinning.c
+@@ -0,0 +1,31 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++int _version SEC("version") = 1;
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __type(value, __u64);
++ __uint(pinning, LIBBPF_PIN_BY_NAME);
++} pinmap SEC(".maps");
++
++struct {
++ __uint(type, BPF_MAP_TYPE_HASH);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __type(value, __u64);
++} nopinmap SEC(".maps");
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __type(value, __u64);
++ __uint(pinning, LIBBPF_PIN_NONE);
++} nopinmap2 SEC(".maps");
++
++char _license[] SEC("license") = "GPL";
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
+@@ -0,0 +1,16 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++int _version SEC("version") = 1;
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __type(value, __u64);
++ __uint(pinning, 2); /* invalid */
++} nopinmap3 SEC(".maps");
++
++char _license[] SEC("license") = "GPL";
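
For reference, a condensed sketch of the two halves of auto-pinning
exercised above (map name and pin path are illustrative):

    /* BPF side: request pin-by-name for this map */
    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, __u64);
            __uint(pinning, LIBBPF_PIN_BY_NAME);
    } my_map SEC(".maps");

    /* userspace side, inside the loader's setup code: optionally override
     * the default /sys/fs/bpf pin root */
    DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
            .pin_root_path = "/sys/fs/bpf/myapp",  /* hypothetical path */
    );
    struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
    /* bpf_object__load(obj) then pins my_map at /sys/fs/bpf/myapp/my_map,
     * or transparently reuses a compatible map already pinned there */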
diff --git a/patches.suse/selftests-bpf-Add-BPF-trampoline-performance-test.patch b/patches.suse/selftests-bpf-Add-BPF-trampoline-performance-test.patch
new file mode 100644
index 0000000000..a36ef816d6
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-BPF-trampoline-performance-test.patch
@@ -0,0 +1,242 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 21 Nov 2019 17:15:15 -0800
+Subject: selftests/bpf: Add BPF trampoline performance test
+Patch-mainline: v5.5-rc1
+Git-commit: c4781e37c6a22c39cb4a57411d14f42aca124f04
+References: bsc#1155518
+
+Add a test that benchmarks different ways of attaching a BPF program to a
+kernel function. Here are the results for a 2.4GHz x86 CPU on a kernel
+without mitigations:
+$ ./test_progs -n 49 -v|grep events
+task_rename base 2743K events per sec
+task_rename kprobe 2419K events per sec
+task_rename kretprobe 1876K events per sec
+task_rename raw_tp 2578K events per sec
+task_rename fentry 2710K events per sec
+task_rename fexit 2685K events per sec
+
+On a kernel with retpoline:
+$ ./test_progs -n 49 -v|grep events
+task_rename base 2401K events per sec
+task_rename kprobe 1930K events per sec
+task_rename kretprobe 1485K events per sec
+task_rename raw_tp 2053K events per sec
+task_rename fentry 2351K events per sec
+task_rename fexit 2185K events per sec
+
+All 5 approaches:
+- kprobe/kretprobe in __set_task_comm()
+- raw tracepoint in trace_task_rename()
+- fentry/fexit in __set_task_comm()
+are roughly equivalent.
+
+__set_task_comm() by itself is quite fast, so any extra instructions add up.
+Until the BPF trampoline was introduced, the fastest mechanism was the raw
+tracepoint. kprobe via ftrace was second best. kretprobe is slow due to the
+trap. The new fentry/fexit methods via the BPF trampoline are clearly the
+fastest, and the difference is more pronounced with retpoline on, since the
+BPF trampoline doesn't use indirect jumps.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20191122011515.255371-1-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/test_overhead.c | 142 +++++++++++++++++
+ tools/testing/selftests/bpf/progs/test_overhead.c | 43 +++++
+ 2 files changed, 185 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/test_overhead.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_overhead.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+@@ -0,0 +1,142 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2019 Facebook */
++#define _GNU_SOURCE
++#include <sched.h>
++#include <test_progs.h>
++
++#define MAX_CNT 100000
++
++static __u64 time_get_ns(void)
++{
++ struct timespec ts;
++
++ clock_gettime(CLOCK_MONOTONIC, &ts);
++ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
++}
++
++static int test_task_rename(const char *prog)
++{
++ int i, fd, duration = 0, err;
++ char buf[] = "test\n";
++ __u64 start_time;
++
++ fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
++ if (CHECK(fd < 0, "open /proc", "err %d", errno))
++ return -1;
++ start_time = time_get_ns();
++ for (i = 0; i < MAX_CNT; i++) {
++ err = write(fd, buf, sizeof(buf));
++ if (err < 0) {
++ CHECK(err < 0, "task rename", "err %d", errno);
++ close(fd);
++ return -1;
++ }
++ }
++ printf("task_rename %s\t%lluK events per sec\n", prog,
++ MAX_CNT * 1000000ll / (time_get_ns() - start_time));
++ close(fd);
++ return 0;
++}
++
++static void test_run(const char *prog)
++{
++ test_task_rename(prog);
++}
++
++static void setaffinity(void)
++{
++ cpu_set_t cpuset;
++ int cpu = 0;
++
++ CPU_ZERO(&cpuset);
++ CPU_SET(cpu, &cpuset);
++ sched_setaffinity(0, sizeof(cpuset), &cpuset);
++}
++
++void test_test_overhead(void)
++{
++ const char *kprobe_name = "kprobe/__set_task_comm";
++ const char *kretprobe_name = "kretprobe/__set_task_comm";
++ const char *raw_tp_name = "raw_tp/task_rename";
++ const char *fentry_name = "fentry/__set_task_comm";
++ const char *fexit_name = "fexit/__set_task_comm";
++ const char *kprobe_func = "__set_task_comm";
++ struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
++ struct bpf_program *fentry_prog, *fexit_prog;
++ struct bpf_object *obj;
++ struct bpf_link *link;
++ int err, duration = 0;
++
++ obj = bpf_object__open_file("./test_overhead.o", NULL);
++ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
++ return;
++
++ kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
++ if (CHECK(!kprobe_prog, "find_probe",
++ "prog '%s' not found\n", kprobe_name))
++ goto cleanup;
++ kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
++ if (CHECK(!kretprobe_prog, "find_probe",
++ "prog '%s' not found\n", kretprobe_name))
++ goto cleanup;
++ raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name);
++ if (CHECK(!raw_tp_prog, "find_probe",
++ "prog '%s' not found\n", raw_tp_name))
++ goto cleanup;
++ fentry_prog = bpf_object__find_program_by_title(obj, fentry_name);
++ if (CHECK(!fentry_prog, "find_probe",
++ "prog '%s' not found\n", fentry_name))
++ goto cleanup;
++ fexit_prog = bpf_object__find_program_by_title(obj, fexit_name);
++ if (CHECK(!fexit_prog, "find_probe",
++ "prog '%s' not found\n", fexit_name))
++ goto cleanup;
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "obj_load", "err %d\n", err))
++ goto cleanup;
++
++ setaffinity();
++
++ /* base line run */
++ test_run("base");
++
++ /* attach kprobe */
++ link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
++ kprobe_func);
++ if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++ test_run("kprobe");
++ bpf_link__destroy(link);
++
++ /* attach kretprobe */
++ link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
++ kprobe_func);
++ if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++ test_run("kretprobe");
++ bpf_link__destroy(link);
++
++ /* attach raw_tp */
++ link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
++ if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++ test_run("raw_tp");
++ bpf_link__destroy(link);
++
++ /* attach fentry */
++ link = bpf_program__attach_trace(fentry_prog);
++ if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++ test_run("fentry");
++ bpf_link__destroy(link);
++
++ /* attach fexit */
++ link = bpf_program__attach_trace(fexit_prog);
++ if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++ test_run("fexit");
++ bpf_link__destroy(link);
++cleanup:
++ bpf_object__close(obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_overhead.c
+@@ -0,0 +1,43 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++#include "bpf_tracing.h"
++
++SEC("kprobe/__set_task_comm")
++int prog1(struct pt_regs *ctx)
++{
++ return 0;
++}
++
++SEC("kretprobe/__set_task_comm")
++int prog2(struct pt_regs *ctx)
++{
++ return 0;
++}
++
++SEC("raw_tp/task_rename")
++int prog3(struct bpf_raw_tracepoint_args *ctx)
++{
++ return 0;
++}
++
++struct __set_task_comm_args {
++ struct task_struct *tsk;
++ const char *buf;
++ ku8 exec;
++};
++
++SEC("fentry/__set_task_comm")
++int prog4(struct __set_task_comm_args *ctx)
++{
++ return 0;
++}
++
++SEC("fexit/__set_task_comm")
++int prog5(struct __set_task_comm_args *ctx)
++{
++ return 0;
++}
++
++char _license[] SEC("license") = "GPL";
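
The reported figures follow from straightforward arithmetic; a sketch of
what the test's printf computes:

    /* MAX_CNT renames over delta_ns nanoseconds, reported in thousands/sec */
    __u64 delta_ns = time_get_ns() - start_time;
    __u64 k_events = MAX_CNT * 1000000ull / delta_ns;
    /* e.g. 100000 renames in ~36.5 ms yields roughly the 2743K "base" row */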
diff --git a/patches.suse/selftests-bpf-Add-BPF_CORE_READ-and-BPF_CORE_READ_ST.patch b/patches.suse/selftests-bpf-Add-BPF_CORE_READ-and-BPF_CORE_READ_ST.patch
new file mode 100644
index 0000000000..6e69188788
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-BPF_CORE_READ-and-BPF_CORE_READ_ST.patch
@@ -0,0 +1,140 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 8 Oct 2019 10:59:42 -0700
+Subject: selftests/bpf: Add BPF_CORE_READ and BPF_CORE_READ_STR_INTO macro
+ tests
+Patch-mainline: v5.5-rc1
+Git-commit: ee2eb063d330dc8dbe71041a1dae3cea889fdcb5
+References: bsc#1155518
+
+Validate BPF_CORE_READ correctness and its handling of up to 9 levels of
+nesting using cyclic task->(group_leader->)*->tgid chains.
+
+Also add a test of the maximum-depth BPF_CORE_READ_STR_INTO() macro.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191008175942.1769476-8-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 8 +
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 9 ++
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 54 ++++++++++++-
+ 3 files changed, 68 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -193,8 +193,12 @@ static struct core_reloc_test_case test_
+ .btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
+ .input = "",
+ .input_len = 0,
+- .output = "\1", /* true */
+- .output_len = 1,
++ .output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
++ .valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
++ .comm = "test_progs\0\0\0\0\0",
++ .comm_len = 11,
++ },
++ .output_len = sizeof(struct core_reloc_kernel_output),
+ },
+
+ /* validate BPF program can use multiple flavors to match against
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -1,5 +1,14 @@
+ #include <stdint.h>
+ #include <stdbool.h>
++/*
++ * KERNEL
++ */
++
++struct core_reloc_kernel_output {
++ int valid[10];
++ char comm[16];
++ int comm_len;
++};
+
+ /*
+ * FLAVORS
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -13,9 +13,17 @@ static volatile struct data {
+ char out[256];
+ } data;
+
++struct core_reloc_kernel_output {
++ int valid[10];
++ char comm[16];
++ int comm_len;
++};
++
+ struct task_struct {
+ int pid;
+ int tgid;
++ char comm[16];
++ struct task_struct *group_leader;
+ };
+
+ #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+@@ -24,7 +32,9 @@ SEC("raw_tracepoint/sys_enter")
+ int test_core_kernel(void *ctx)
+ {
+ struct task_struct *task = (void *)bpf_get_current_task();
++ struct core_reloc_kernel_output *out = (void *)&data.out;
+ uint64_t pid_tgid = bpf_get_current_pid_tgid();
++ uint32_t real_tgid = (uint32_t)pid_tgid;
+ int pid, tgid;
+
+ if (CORE_READ(&pid, &task->pid) ||
+@@ -32,7 +42,49 @@ int test_core_kernel(void *ctx)
+ return 1;
+
+ /* validate pid + tgid matches */
+- data.out[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
++ out->valid[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
++
++ /* test variadic BPF_CORE_READ macros */
++ out->valid[1] = BPF_CORE_READ(task,
++ tgid) == real_tgid;
++ out->valid[2] = BPF_CORE_READ(task,
++ group_leader,
++ tgid) == real_tgid;
++ out->valid[3] = BPF_CORE_READ(task,
++ group_leader, group_leader,
++ tgid) == real_tgid;
++ out->valid[4] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ tgid) == real_tgid;
++ out->valid[5] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ group_leader,
++ tgid) == real_tgid;
++ out->valid[6] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ group_leader, group_leader,
++ tgid) == real_tgid;
++ out->valid[7] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ group_leader, group_leader, group_leader,
++ tgid) == real_tgid;
++ out->valid[8] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ group_leader, group_leader, group_leader,
++ group_leader,
++ tgid) == real_tgid;
++ out->valid[9] = BPF_CORE_READ(task,
++ group_leader, group_leader, group_leader,
++ group_leader, group_leader, group_leader,
++ group_leader, group_leader,
++ tgid) == real_tgid;
++
++ /* test BPF_CORE_READ_STR_INTO() returns correct code and contents */
++ out->comm_len = BPF_CORE_READ_STR_INTO(
++ &out->comm, task,
++ group_leader, group_leader, group_leader, group_leader,
++ group_leader, group_leader, group_leader, group_leader,
++ comm);
+
+ return 0;
+ }
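
A sketch of what a two-level BPF_CORE_READ() conceptually performs
(helper-level pseudocode, not the literal macro expansion; each step's
field offset is relocated against the running kernel's BTF):

    /* BPF_CORE_READ(task, group_leader, tgid) is roughly: */
    struct task_struct *leader;
    int tgid;

    bpf_core_read(&leader, sizeof(leader), &task->group_leader);
    bpf_core_read(&tgid, sizeof(tgid), &leader->tgid);

Chaining nine group_leader hops, as the test does, simply repeats the middle
step; on a group-leader task the chain is cyclic, so tgid must match at
every depth.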
diff --git a/patches.suse/selftests-bpf-Add-BPF_TYPE_MAP_ARRAY-mmap-tests.patch b/patches.suse/selftests-bpf-Add-BPF_TYPE_MAP_ARRAY-mmap-tests.patch
new file mode 100644
index 0000000000..f3a5e488b5
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-BPF_TYPE_MAP_ARRAY-mmap-tests.patch
@@ -0,0 +1,397 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 17 Nov 2019 09:28:06 -0800
+Subject: selftests/bpf: Add BPF_TYPE_MAP_ARRAY mmap() tests
+Patch-mainline: v5.5-rc1
+Git-commit: 5051b384523be92925d13694fabbc6bedf2f907b
+References: bsc#1155518
+
+Add selftests validating mmap()-ing BPF array maps: both single-element and
+multi-element ones. Check that plain bpf_map_update_elem() and
+bpf_map_lookup_elem() work correctly with a memory-mapped array. Also convert
+the CO-RE relocation tests to use memory-mapped views of global data.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191117172806.2195367-6-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 45 ++--
+ tools/testing/selftests/bpf/prog_tests/mmap.c | 220 ++++++++++++++++++++
+ tools/testing/selftests/bpf/progs/test_mmap.c | 45 ++++
+ 3 files changed, 292 insertions(+), 18 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/mmap.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_mmap.c
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <test_progs.h>
+ #include "progs/core_reloc_types.h"
++#include <sys/mman.h>
+
+ #define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
+
+@@ -453,8 +454,15 @@ struct data {
+ char out[256];
+ };
+
++static size_t roundup_page(size_t sz)
++{
++ long page_size = sysconf(_SC_PAGE_SIZE);
++ return (sz + page_size - 1) / page_size * page_size;
++}
++
+ void test_core_reloc(void)
+ {
++ const size_t mmap_sz = roundup_page(sizeof(struct data));
+ struct bpf_object_load_attr load_attr = {};
+ struct core_reloc_test_case *test_case;
+ const char *tp_name, *probe_name;
+@@ -463,8 +471,8 @@ void test_core_reloc(void)
+ struct bpf_map *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+- const int zero = 0;
+- struct data data;
++ struct data *data;
++ void *mmap_data = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ test_case = &test_cases[i];
+@@ -476,8 +484,7 @@ void test_core_reloc(void)
+ );
+
+ obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
+- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+- "failed to open '%s': %ld\n",
++ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
+ test_case->bpf_obj_file, PTR_ERR(obj)))
+ continue;
+
+@@ -519,24 +526,22 @@ void test_core_reloc(void)
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto cleanup;
+
+- memset(&data, 0, sizeof(data));
+- memcpy(data.in, test_case->input, test_case->input_len);
+-
+- err = bpf_map_update_elem(bpf_map__fd(data_map),
+- &zero, &data, 0);
+- if (CHECK(err, "update_data_map",
+- "failed to update .data map: %d\n", err))
++ mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
++ MAP_SHARED, bpf_map__fd(data_map), 0);
++ if (CHECK(mmap_data == MAP_FAILED, "mmap",
++ ".bss mmap failed: %d", errno)) {
++ mmap_data = NULL;
+ goto cleanup;
++ }
++ data = mmap_data;
++
++ memset(mmap_data, 0, sizeof(*data));
++ memcpy(data->in, test_case->input, test_case->input_len);
+
+ /* trigger test run */
+ usleep(1);
+
+- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data);
+- if (CHECK(err, "get_result",
+- "failed to get output data: %d\n", err))
+- goto cleanup;
+-
+- equal = memcmp(data.out, test_case->output,
++ equal = memcmp(data->out, test_case->output,
+ test_case->output_len) == 0;
+ if (CHECK(!equal, "check_result",
+ "input/output data don't match\n")) {
+@@ -548,12 +553,16 @@ void test_core_reloc(void)
+ }
+ for (j = 0; j < test_case->output_len; j++) {
+ printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
+- j, test_case->output[j], data.out[j]);
++ j, test_case->output[j], data->out[j]);
+ }
+ goto cleanup;
+ }
+
+ cleanup:
++ if (mmap_data) {
++ CHECK_FAIL(munmap(mmap_data, mmap_sz));
++ mmap_data = NULL;
++ }
+ if (!IS_ERR_OR_NULL(link)) {
+ bpf_link__destroy(link);
+ link = NULL;
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
+@@ -0,0 +1,220 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <test_progs.h>
++#include <sys/mman.h>
++
++struct map_data {
++ __u64 val[512 * 4];
++};
++
++struct bss_data {
++ __u64 in_val;
++ __u64 out_val;
++};
++
++static size_t roundup_page(size_t sz)
++{
++ long page_size = sysconf(_SC_PAGE_SIZE);
++ return (sz + page_size - 1) / page_size * page_size;
++}
++
++void test_mmap(void)
++{
++ const char *file = "test_mmap.o";
++ const char *probe_name = "raw_tracepoint/sys_enter";
++ const char *tp_name = "sys_enter";
++ const size_t bss_sz = roundup_page(sizeof(struct bss_data));
++ const size_t map_sz = roundup_page(sizeof(struct map_data));
++ const int zero = 0, one = 1, two = 2, far = 1500;
++ const long page_size = sysconf(_SC_PAGE_SIZE);
++ int err, duration = 0, i, data_map_fd;
++ struct bpf_program *prog;
++ struct bpf_object *obj;
++ struct bpf_link *link = NULL;
++ struct bpf_map *data_map, *bss_map;
++ void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
++ volatile struct bss_data *bss_data;
++ volatile struct map_data *map_data;
++ __u64 val = 0;
++
++ obj = bpf_object__open_file("test_mmap.o", NULL);
++ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
++ file, PTR_ERR(obj)))
++ return;
++ prog = bpf_object__find_program_by_title(obj, probe_name);
++ if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name))
++ goto cleanup;
++ err = bpf_object__load(obj);
++ if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n",
++ probe_name, err))
++ goto cleanup;
++
++ bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss");
++ if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n"))
++ goto cleanup;
++ data_map = bpf_object__find_map_by_name(obj, "data_map");
++ if (CHECK(!data_map, "find_data_map", "data_map map not found\n"))
++ goto cleanup;
++ data_map_fd = bpf_map__fd(data_map);
++
++ bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
++ bpf_map__fd(bss_map), 0);
++ if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
++ ".bss mmap failed: %d\n", errno)) {
++ bss_mmaped = NULL;
++ goto cleanup;
++ }
++ /* map as R/W first */
++ map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
++ data_map_fd, 0);
++ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
++ "data_map mmap failed: %d\n", errno)) {
++ map_mmaped = NULL;
++ goto cleanup;
++ }
++
++ bss_data = bss_mmaped;
++ map_data = map_mmaped;
++
++ CHECK_FAIL(bss_data->in_val);
++ CHECK_FAIL(bss_data->out_val);
++ CHECK_FAIL(map_data->val[0]);
++ CHECK_FAIL(map_data->val[1]);
++ CHECK_FAIL(map_data->val[2]);
++ CHECK_FAIL(map_data->val[far]);
++
++ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
++ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
++ goto cleanup;
++
++ bss_data->in_val = 123;
++ val = 111;
++ CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
++
++ usleep(1);
++
++ CHECK_FAIL(bss_data->in_val != 123);
++ CHECK_FAIL(bss_data->out_val != 123);
++ CHECK_FAIL(map_data->val[0] != 111);
++ CHECK_FAIL(map_data->val[1] != 222);
++ CHECK_FAIL(map_data->val[2] != 123);
++ CHECK_FAIL(map_data->val[far] != 3 * 123);
++
++ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
++ CHECK_FAIL(val != 111);
++ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
++ CHECK_FAIL(val != 222);
++ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
++ CHECK_FAIL(val != 123);
++ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
++ CHECK_FAIL(val != 3 * 123);
++
++ /* data_map freeze should fail due to R/W mmap() */
++ err = bpf_map_freeze(data_map_fd);
++ if (CHECK(!err || errno != EBUSY, "no_freeze",
++ "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
++ goto cleanup;
++
++ /* unmap R/W mapping */
++ err = munmap(map_mmaped, map_sz);
++ map_mmaped = NULL;
++ if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
++ goto cleanup;
++
++ /* re-map as R/O now */
++ map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
++ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
++ "data_map R/O mmap failed: %d\n", errno)) {
++ map_mmaped = NULL;
++ goto cleanup;
++ }
++ map_data = map_mmaped;
++
++ /* map/unmap in a loop to test ref counting */
++ for (i = 0; i < 10; i++) {
++ int flags = i % 2 ? PROT_READ : PROT_WRITE;
++ void *p;
++
++ p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
++ if (CHECK_FAIL(p == MAP_FAILED))
++ goto cleanup;
++ err = munmap(p, map_sz);
++ if (CHECK_FAIL(err))
++ goto cleanup;
++ }
++
++ /* data_map freeze should now succeed due to no R/W mapping */
++ err = bpf_map_freeze(data_map_fd);
++ if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
++ err, errno))
++ goto cleanup;
++
++ /* mapping as R/W now should fail */
++ tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
++ data_map_fd, 0);
++ if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
++ munmap(tmp1, map_sz);
++ goto cleanup;
++ }
++
++ bss_data->in_val = 321;
++ usleep(1);
++ CHECK_FAIL(bss_data->in_val != 321);
++ CHECK_FAIL(bss_data->out_val != 321);
++ CHECK_FAIL(map_data->val[0] != 111);
++ CHECK_FAIL(map_data->val[1] != 222);
++ CHECK_FAIL(map_data->val[2] != 321);
++ CHECK_FAIL(map_data->val[far] != 3 * 321);
++
++ /* check some more advanced mmap() manipulations */
++
++ /* map all but last page: pages 1-3 mapped */
++ tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
++ data_map_fd, 0);
++ if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
++ goto cleanup;
++
++ /* unmap second page: pages 1, 3 mapped */
++ err = munmap(tmp1 + page_size, page_size);
++ if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
++ munmap(tmp1, map_sz);
++ goto cleanup;
++ }
++
++ /* map page 2 back */
++ tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
++ MAP_SHARED | MAP_FIXED, data_map_fd, 0);
++ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
++ munmap(tmp1, page_size);
++ munmap(tmp1 + 2*page_size, page_size);
++ goto cleanup;
++ }
++ CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
++ "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
++
++ /* re-map all 4 pages */
++ tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
++ data_map_fd, 0);
++ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
++ munmap(tmp1, 3 * page_size); /* unmap page 1 */
++ goto cleanup;
++ }
++ CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
++
++ map_data = tmp2;
++ CHECK_FAIL(bss_data->in_val != 321);
++ CHECK_FAIL(bss_data->out_val != 321);
++ CHECK_FAIL(map_data->val[0] != 111);
++ CHECK_FAIL(map_data->val[1] != 222);
++ CHECK_FAIL(map_data->val[2] != 321);
++ CHECK_FAIL(map_data->val[far] != 3 * 321);
++
++ munmap(tmp2, 4 * page_size);
++cleanup:
++ if (bss_mmaped)
++ CHECK_FAIL(munmap(bss_mmaped, bss_sz));
++ if (map_mmaped)
++ CHECK_FAIL(munmap(map_mmaped, map_sz));
++ if (!IS_ERR_OR_NULL(link))
++ bpf_link__destroy(link);
++ bpf_object__close(obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_mmap.c
+@@ -0,0 +1,45 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/bpf.h>
++#include <stdint.h>
++#include "bpf_helpers.h"
++
++char _license[] SEC("license") = "GPL";
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY);
++ __uint(max_entries, 512 * 4); /* at least 4 pages of data */
++ __uint(map_flags, BPF_F_MMAPABLE);
++ __type(key, __u32);
++ __type(value, __u64);
++} data_map SEC(".maps");
++
++static volatile __u64 in_val;
++static volatile __u64 out_val;
++
++SEC("raw_tracepoint/sys_enter")
++int test_mmap(void *ctx)
++{
++ int zero = 0, one = 1, two = 2, far = 1500;
++ __u64 val, *p;
++
++ out_val = in_val;
++
++ /* data_map[2] = in_val; */
++ bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0);
++
++ /* data_map[1] = data_map[0] * 2; */
++ p = bpf_map_lookup_elem(&data_map, &zero);
++ if (p) {
++ val = (*p) * 2;
++ bpf_map_update_elem(&data_map, &one, &val, 0);
++ }
++
++ /* data_map[far] = in_val * 3; */
++ val = in_val * 3;
++ bpf_map_update_elem(&data_map, &far, &val, 0);
++
++ return 0;
++}
++
diff --git a/patches.suse/selftests-bpf-Add-a-test-for-attaching-BPF-prog-to-a.patch b/patches.suse/selftests-bpf-Add-a-test-for-attaching-BPF-prog-to-a.patch
new file mode 100644
index 0000000000..69209ee04e
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-a-test-for-attaching-BPF-prog-to-a.patch
@@ -0,0 +1,198 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:20 -0800
+Subject: selftests/bpf: Add a test for attaching BPF prog to another BPF prog
+ and subprog
+Patch-mainline: v5.5-rc1
+Git-commit: d6f39601ec5e708fb666a2ad437c7bef4cfab39b
+References: bsc#1155518
+
+Add a test that attaches one FEXIT program to the main sched_cls
+networking program and two other FEXIT programs to its subprograms. All
+three tracing programs access the return values and skb->len of the
+networking program and its subprograms.
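+
+A minimal sketch of the attach flow, for orientation only (the complete
+version is in the test added below; pkt_fd is assumed to be the FD of
+the already loaded target program):
+
+  DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+          .attach_prog_fd = pkt_fd,  /* BPF program to attach to */
+  );
+  obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
+  err = bpf_object__load(obj);
+  /* resolve each tracing program by section title, then attach */
+  prog = bpf_object__find_program_by_title(obj, "fexit/test_pkt_access");
+  link = bpf_program__attach_trace(prog);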
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-21-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 76 ++++++++++++++
+ tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 91 +++++++++++++++++
+ 2 files changed, 167 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+ create mode 100644 tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+@@ -0,0 +1,76 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <test_progs.h>
++
++#define PROG_CNT 3
++
++void test_fexit_bpf2bpf(void)
++{
++ const char *prog_name[PROG_CNT] = {
++ "fexit/test_pkt_access",
++ "fexit/test_pkt_access_subprog1",
++ "fexit/test_pkt_access_subprog2",
++ };
++ struct bpf_object *obj = NULL, *pkt_obj;
++ int err, pkt_fd, i;
++ struct bpf_link *link[PROG_CNT] = {};
++ struct bpf_program *prog[PROG_CNT];
++ __u32 duration, retval;
++ struct bpf_map *data_map;
++ const int zero = 0;
++ u64 result[PROG_CNT];
++
++ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
++ &pkt_obj, &pkt_fd);
++ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
++ return;
++ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
++ .attach_prog_fd = pkt_fd,
++ );
++
++ obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
++ if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
++ "failed to open fexit_bpf2bpf: %ld\n",
++ PTR_ERR(obj)))
++ goto close_prog;
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "obj_load", "err %d\n", err))
++ goto close_prog;
++
++ for (i = 0; i < PROG_CNT; i++) {
++ prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
++ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
++ goto close_prog;
++ link[i] = bpf_program__attach_trace(prog[i]);
++ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
++ goto close_prog;
++ }
++ data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
++ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
++ goto close_prog;
++
++ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
++ NULL, NULL, &retval, &duration);
++ CHECK(err || retval, "ipv6",
++ "err %d errno %d retval %d duration %d\n",
++ err, errno, retval, duration);
++
++ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ for (i = 0; i < PROG_CNT; i++)
++ if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
++ result[i]))
++ goto close_prog;
++
++close_prog:
++ for (i = 0; i < PROG_CNT; i++)
++ if (!IS_ERR_OR_NULL(link[i]))
++ bpf_link__destroy(link[i]);
++ if (!IS_ERR_OR_NULL(obj))
++ bpf_object__close(obj);
++ bpf_object__close(pkt_obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+@@ -0,0 +1,91 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++struct sk_buff {
++ unsigned int len;
++};
++
++struct args {
++ struct sk_buff *skb;
++ ks32 ret;
++};
++static volatile __u64 test_result;
++SEC("fexit/test_pkt_access")
++int test_main(struct args *ctx)
++{
++ struct sk_buff *skb = ctx->skb;
++ int len;
++
++ __builtin_preserve_access_index(({
++ len = skb->len;
++ }));
++ if (len != 74 || ctx->ret != 0)
++ return 0;
++ test_result = 1;
++ return 0;
++}
++
++struct args_subprog1 {
++ struct sk_buff *skb;
++ ks32 ret;
++};
++static volatile __u64 test_result_subprog1;
++SEC("fexit/test_pkt_access_subprog1")
++int test_subprog1(struct args_subprog1 *ctx)
++{
++ struct sk_buff *skb = ctx->skb;
++ int len;
++
++ __builtin_preserve_access_index(({
++ len = skb->len;
++ }));
++ if (len != 74 || ctx->ret != 148)
++ return 0;
++ test_result_subprog1 = 1;
++ return 0;
++}
++
++/* Though test_pkt_access_subprog2() is defined in C as:
++ * static __attribute__ ((noinline))
++ * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
++ * {
++ * return skb->len * val;
++ * }
++ * LLVM optimizations remove the 'int val' argument and generate BPF assembly:
++ * r0 = *(u32 *)(r1 + 0)
++ * w0 <<= 1
++ * exit
++ * In such a case the verifier falls back to conservative mode and the
++ * tracing program can access arguments and the return value as u64
++ * instead of accurate types.
++ */
++struct args_subprog2 {
++ ku64 args[5];
++ ku64 ret;
++};
++static volatile __u64 test_result_subprog2;
++SEC("fexit/test_pkt_access_subprog2")
++int test_subprog2(struct args_subprog2 *ctx)
++{
++ struct sk_buff *skb = (void *)ctx->args[0];
++ __u64 ret;
++ int len;
++
++ bpf_probe_read_kernel(&len, sizeof(len),
++ __builtin_preserve_access_index(&skb->len));
++
++ ret = ctx->ret;
++ /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
++ * which randomizes upper 32 bits after BPF_ALU32 insns.
++ * Hence after 'w0 <<= 1' upper bits of $rax are random.
++ * That is expected and correct. Trim them.
++ */
++ ret = (__u32) ret;
++ if (len != 74 || ret != 148)
++ return 0;
++ test_result_subprog2 = 1;
++ return 0;
++}
++char _license[] SEC("license") = "GPL";
diff --git a/patches.suse/selftests-bpf-Add-combined-fentry-fexit-test.patch b/patches.suse/selftests-bpf-Add-combined-fentry-fexit-test.patch
new file mode 100644
index 0000000000..64313325c1
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-combined-fentry-fexit-test.patch
@@ -0,0 +1,112 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:11 -0800
+Subject: selftests/bpf: Add combined fentry/fexit test
+Patch-mainline: v5.5-rc1
+Git-commit: 510312882c4b583fcd4fdf972d00e9ce631ed188
+References: bsc#1155518
+
+Add a combined fentry/fexit test.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-12-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/fentry_fexit.c | 90 ++++++++++++++++++
+ 1 file changed, 90 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+@@ -0,0 +1,90 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <test_progs.h>
++
++void test_fentry_fexit(void)
++{
++ struct bpf_prog_load_attr attr_fentry = {
++ .file = "./fentry_test.o",
++ };
++ struct bpf_prog_load_attr attr_fexit = {
++ .file = "./fexit_test.o",
++ };
++
++ struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj;
++ struct bpf_map *data_map_fentry, *data_map_fexit;
++ char fentry_name[] = "fentry/bpf_fentry_testX";
++ char fexit_name[] = "fexit/bpf_fentry_testX";
++ int err, pkt_fd, kfree_skb_fd, i;
++ struct bpf_link *link[12] = {};
++ struct bpf_program *prog[12];
++ __u32 duration, retval;
++ const int zero = 0;
++ u64 result[12];
++
++ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
++ &pkt_obj, &pkt_fd);
++ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
++ return;
++ err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd);
++ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
++ goto close_prog;
++ err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd);
++ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
++ goto close_prog;
++
++ for (i = 0; i < 6; i++) {
++ fentry_name[sizeof(fentry_name) - 2] = '1' + i;
++ prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name);
++ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name))
++ goto close_prog;
++ link[i] = bpf_program__attach_trace(prog[i]);
++ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
++ goto close_prog;
++ }
++ data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss");
++ if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n"))
++ goto close_prog;
++
++ for (i = 6; i < 12; i++) {
++ fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6;
++ prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name);
++ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name))
++ goto close_prog;
++ link[i] = bpf_program__attach_trace(prog[i]);
++ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
++ goto close_prog;
++ }
++ data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss");
++ if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n"))
++ goto close_prog;
++
++ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
++ NULL, NULL, &retval, &duration);
++ CHECK(err || retval, "ipv6",
++ "err %d errno %d retval %d duration %d\n",
++ err, errno, retval, duration);
++
++ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ for (i = 0; i < 12; i++)
++ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
++ i % 6 + 1, result[i]))
++ goto close_prog;
++
++close_prog:
++ for (i = 0; i < 12; i++)
++ if (!IS_ERR_OR_NULL(link[i]))
++ bpf_link__destroy(link[i]);
++ bpf_object__close(obj_fentry);
++ bpf_object__close(obj_fexit);
++ bpf_object__close(pkt_obj);
++}
diff --git a/patches.suse/selftests-bpf-Add-fexit-tests-for-BPF-trampoline.patch b/patches.suse/selftests-bpf-Add-fexit-tests-for-BPF-trampoline.patch
new file mode 100644
index 0000000000..f0f8256ce0
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-fexit-tests-for-BPF-trampoline.patch
@@ -0,0 +1,190 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:10 -0800
+Subject: selftests/bpf: Add fexit tests for BPF trampoline
+Patch-mainline: v5.5-rc1
+Git-commit: d3b0856e5959fbb50a2f2f15a5614e20e51cb522
+References: bsc#1155518
+
+Add fexit tests for the BPF trampoline that check kernel functions
+with up to 6 arguments of different sizes and their return values.
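+
+For reference, a fexit program sees the traced function's arguments and
+return value as consecutive 8-byte-aligned fields. A sketch for
+int bpf_fentry_test2(int a, u64 b), using the aligned typedefs from
+bpf_helpers.h:
+
+  struct test2 {
+          ks32 a;   /* first argument, widened to 8 bytes */
+          ku64 b;   /* second argument */
+          ks32 ret; /* return value, also in an 8-byte slot */
+  };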
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-11-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/fexit_test.c | 64 +++++++++++++
+ tools/testing/selftests/bpf/progs/fexit_test.c | 98 ++++++++++++++++++++
+ 2 files changed, 162 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/fexit_test.c
+ create mode 100644 tools/testing/selftests/bpf/progs/fexit_test.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+@@ -0,0 +1,64 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <test_progs.h>
++
++void test_fexit_test(void)
++{
++ struct bpf_prog_load_attr attr = {
++ .file = "./fexit_test.o",
++ };
++
++ char prog_name[] = "fexit/bpf_fentry_testX";
++ struct bpf_object *obj = NULL, *pkt_obj;
++ int err, pkt_fd, kfree_skb_fd, i;
++ struct bpf_link *link[6] = {};
++ struct bpf_program *prog[6];
++ __u32 duration, retval;
++ struct bpf_map *data_map;
++ const int zero = 0;
++ u64 result[6];
++
++ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
++ &pkt_obj, &pkt_fd);
++ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
++ return;
++ err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
++ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
++ goto close_prog;
++
++ for (i = 0; i < 6; i++) {
++ prog_name[sizeof(prog_name) - 2] = '1' + i;
++ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
++ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
++ goto close_prog;
++ link[i] = bpf_program__attach_trace(prog[i]);
++ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
++ goto close_prog;
++ }
++ data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss");
++ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
++ goto close_prog;
++
++ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
++ NULL, NULL, &retval, &duration);
++ CHECK(err || retval, "ipv6",
++ "err %d errno %d retval %d duration %d\n",
++ err, errno, retval, duration);
++
++ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ for (i = 0; i < 6; i++)
++ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
++ i + 1, result[i]))
++ goto close_prog;
++
++close_prog:
++ for (i = 0; i < 6; i++)
++ if (!IS_ERR_OR_NULL(link[i]))
++ bpf_link__destroy(link[i]);
++ bpf_object__close(obj);
++ bpf_object__close(pkt_obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/fexit_test.c
+@@ -0,0 +1,98 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++char _license[] SEC("license") = "GPL";
++
++struct test1 {
++ ks32 a;
++ ks32 ret;
++};
++static volatile __u64 test1_result;
++SEC("fexit/bpf_fentry_test1")
++int test1(struct test1 *ctx)
++{
++ test1_result = ctx->a == 1 && ctx->ret == 2;
++ return 0;
++}
++
++struct test2 {
++ ks32 a;
++ ku64 b;
++ ks32 ret;
++};
++static volatile __u64 test2_result;
++SEC("fexit/bpf_fentry_test2")
++int test2(struct test2 *ctx)
++{
++ test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5;
++ return 0;
++}
++
++struct test3 {
++ ks8 a;
++ ks32 b;
++ ku64 c;
++ ks32 ret;
++};
++static volatile __u64 test3_result;
++SEC("fexit/bpf_fentry_test3")
++int test3(struct test3 *ctx)
++{
++ test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 &&
++ ctx->ret == 15;
++ return 0;
++}
++
++struct test4 {
++ void *a;
++ ks8 b;
++ ks32 c;
++ ku64 d;
++ ks32 ret;
++};
++static volatile __u64 test4_result;
++SEC("fexit/bpf_fentry_test4")
++int test4(struct test4 *ctx)
++{
++ test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
++ ctx->d == 10 && ctx->ret == 34;
++ return 0;
++}
++
++struct test5 {
++ ku64 a;
++ void *b;
++ ks16 c;
++ ks32 d;
++ ku64 e;
++ ks32 ret;
++};
++static volatile __u64 test5_result;
++SEC("fexit/bpf_fentry_test5")
++int test5(struct test5 *ctx)
++{
++ test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
++ ctx->d == 14 && ctx->e == 15 && ctx->ret == 65;
++ return 0;
++}
++
++struct test6 {
++ ku64 a;
++ void *b;
++ ks16 c;
++ ks32 d;
++ void *e;
++ ks64 f;
++ ks32 ret;
++};
++static volatile __u64 test6_result;
++SEC("fexit/bpf_fentry_test6")
++int test6(struct test6 *ctx)
++{
++ test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
++ ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 &&
++ ctx->ret == 111;
++ return 0;
++}
diff --git a/patches.suse/selftests-bpf-Add-field-existence-CO-RE-relocs-tests.patch b/patches.suse/selftests-bpf-Add-field-existence-CO-RE-relocs-tests.patch
new file mode 100644
index 0000000000..6600afc09b
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-field-existence-CO-RE-relocs-tests.patch
@@ -0,0 +1,341 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 11:28:49 -0700
+Subject: selftests/bpf: Add field existence CO-RE relocs tests
+Patch-mainline: v5.5-rc1
+Git-commit: c7566a69695cd3d8fe876c0da38a03a7472d3f56
+References: bsc#1155518
+
+Add a bunch of tests validating that CO-RE handles field existence
+relocations. Relaxed CO-RE relocation mode is activated for these new
+tests so that libbpf does not reject the BPF object over a no-match
+relocation when a field is missing, given that the test BPF program is
+not going to use that relocation anyway.
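+
+A minimal sketch of the pattern under test (both helpers below are the
+ones exercised by the new programs):
+
+  LIBBPF_OPTS(bpf_object_open_opts, opts,
+          .relaxed_core_relocs = true,  /* tolerate no-match relocs */
+  );
+  obj = bpf_object__open_file("test_core_reloc_existence.o", &opts);
+
+  /* in BPF code: guard the access so a missing field is never read */
+  if (bpf_core_field_exists(in->a))
+          out->a_value = BPF_CORE_READ(in, a);
+  else
+          out->a_value = 0xff000001u;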
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191015182849.3922287-6-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 76 +++++++++
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c | 3
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 56 +++++++
+ tools/testing/selftests/bpf/progs/test_core_reloc_existence.c | 79 ++++++++++
+ 11 files changed, 233 insertions(+), 2 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -174,6 +174,21 @@
+ .fails = true, \
+ }
+
++#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
++ .a = 42, \
++}
++
++#define EXISTENCE_CASE_COMMON(name) \
++ .case_name = #name, \
++ .bpf_obj_file = "test_core_reloc_existence.o", \
++ .btf_src_file = "btf__core_reloc_" #name ".o", \
++ .relaxed_core_relocs = true \
++
++#define EXISTENCE_ERR_CASE(name) { \
++ EXISTENCE_CASE_COMMON(name), \
++ .fails = true, \
++}
++
+ struct core_reloc_test_case {
+ const char *case_name;
+ const char *bpf_obj_file;
+@@ -183,6 +198,7 @@ struct core_reloc_test_case {
+ const char *output;
+ int output_len;
+ bool fails;
++ bool relaxed_core_relocs;
+ };
+
+ static struct core_reloc_test_case test_cases[] = {
+@@ -283,6 +299,59 @@ static struct core_reloc_test_case test_
+ },
+ .output_len = sizeof(struct core_reloc_misc_output),
+ },
++
++ /* validate field existence checks */
++ {
++ EXISTENCE_CASE_COMMON(existence),
++ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
++ .a = 1,
++ .b = 2,
++ .c = 3,
++ .arr = { 4 },
++ .s = { .x = 5 },
++ },
++ .input_len = sizeof(struct core_reloc_existence),
++ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
++ .a_exists = 1,
++ .b_exists = 1,
++ .c_exists = 1,
++ .arr_exists = 1,
++ .s_exists = 1,
++ .a_value = 1,
++ .b_value = 2,
++ .c_value = 3,
++ .arr_value = 4,
++ .s_value = 5,
++ },
++ .output_len = sizeof(struct core_reloc_existence_output),
++ },
++ {
++ EXISTENCE_CASE_COMMON(existence___minimal),
++ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
++ .a = 42,
++ },
++ .input_len = sizeof(struct core_reloc_existence),
++ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
++ .a_exists = 1,
++ .b_exists = 0,
++ .c_exists = 0,
++ .arr_exists = 0,
++ .s_exists = 0,
++ .a_value = 42,
++ .b_value = 0xff000002u,
++ .c_value = 0xff000003u,
++ .arr_value = 0xff000004u,
++ .s_value = 0xff000005u,
++ },
++ .output_len = sizeof(struct core_reloc_existence_output),
++ },
++
++ EXISTENCE_ERR_CASE(existence__err_int_sz),
++ EXISTENCE_ERR_CASE(existence__err_int_type),
++ EXISTENCE_ERR_CASE(existence__err_int_kind),
++ EXISTENCE_ERR_CASE(existence__err_arr_kind),
++ EXISTENCE_ERR_CASE(existence__err_arr_value_type),
++ EXISTENCE_ERR_CASE(existence__err_struct_type),
+ };
+
+ struct data {
+@@ -305,11 +374,14 @@ void test_core_reloc(void)
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ test_case = &test_cases[i];
+-
+ if (!test__start_subtest(test_case->case_name))
+ continue;
+
+- obj = bpf_object__open(test_case->bpf_obj_file);
++ LIBBPF_OPTS(bpf_object_open_opts, opts,
++ .relaxed_core_relocs = test_case->relaxed_core_relocs,
++ );
++
++ obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
+ if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+ "failed to open '%s': %ld\n",
+ test_case->bpf_obj_file, PTR_ERR(obj)))
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_int_kind x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_int_sz x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_int_type x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___err_wrong_struct_type x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___minimal x) {}
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -674,3 +674,59 @@ struct core_reloc_misc_extensible {
+ int c;
+ int d;
+ };
++
++/*
++ * EXISTENCE
++ */
++struct core_reloc_existence_output {
++ int a_exists;
++ int a_value;
++ int b_exists;
++ int b_value;
++ int c_exists;
++ int c_value;
++ int arr_exists;
++ int arr_value;
++ int s_exists;
++ int s_value;
++};
++
++struct core_reloc_existence {
++ int a;
++ struct {
++ int b;
++ };
++ int c;
++ int arr[1];
++ struct {
++ int x;
++ } s;
++};
++
++struct core_reloc_existence___minimal {
++ int a;
++};
++
++struct core_reloc_existence___err_wrong_int_sz {
++ short a;
++};
++
++struct core_reloc_existence___err_wrong_int_type {
++ int b[1];
++};
++
++struct core_reloc_existence___err_wrong_int_kind {
++ struct{ int x; } c;
++};
++
++struct core_reloc_existence___err_wrong_arr_kind {
++ int arr;
++};
++
++struct core_reloc_existence___err_wrong_arr_value_type {
++ short arr[1];
++};
++
++struct core_reloc_existence___err_wrong_struct_type {
++ int s;
++};
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+@@ -0,0 +1,79 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/bpf.h>
++#include <stdint.h>
++#include "bpf_helpers.h"
++#include "bpf_core_read.h"
++
++char _license[] SEC("license") = "GPL";
++
++static volatile struct data {
++ char in[256];
++ char out[256];
++} data;
++
++struct core_reloc_existence_output {
++ int a_exists;
++ int a_value;
++ int b_exists;
++ int b_value;
++ int c_exists;
++ int c_value;
++ int arr_exists;
++ int arr_value;
++ int s_exists;
++ int s_value;
++};
++
++struct core_reloc_existence {
++ struct {
++ int x;
++ } s;
++ int arr[1];
++ int a;
++ struct {
++ int b;
++ };
++ int c;
++};
++
++SEC("raw_tracepoint/sys_enter")
++int test_core_existence(void *ctx)
++{
++ struct core_reloc_existence *in = (void *)&data.in;
++ struct core_reloc_existence_output *out = (void *)&data.out;
++
++ out->a_exists = bpf_core_field_exists(in->a);
++ if (bpf_core_field_exists(in->a))
++ out->a_value = BPF_CORE_READ(in, a);
++ else
++ out->a_value = 0xff000001u;
++
++ out->b_exists = bpf_core_field_exists(in->b);
++ if (bpf_core_field_exists(in->b))
++ out->b_value = BPF_CORE_READ(in, b);
++ else
++ out->b_value = 0xff000002u;
++
++ out->c_exists = bpf_core_field_exists(in->c);
++ if (bpf_core_field_exists(in->c))
++ out->c_value = BPF_CORE_READ(in, c);
++ else
++ out->c_value = 0xff000003u;
++
++ out->arr_exists = bpf_core_field_exists(in->arr);
++ if (bpf_core_field_exists(in->arr))
++ out->arr_value = BPF_CORE_READ(in, arr[0]);
++ else
++ out->arr_value = 0xff000004u;
++
++ out->s_exists = bpf_core_field_exists(in->s);
++ if (bpf_core_field_exists(in->s))
++ out->s_value = BPF_CORE_READ(in, s.x);
++ else
++ out->s_value = 0xff000005u;
++
++ return 0;
++}
++
diff --git a/patches.suse/selftests-bpf-Add-field-size-relocation-tests.patch b/patches.suse/selftests-bpf-Add-field-size-relocation-tests.patch
new file mode 100644
index 0000000000..9b978dbe64
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-field-size-relocation-tests.patch
@@ -0,0 +1,194 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 1 Nov 2019 15:28:10 -0700
+Subject: selftests/bpf: Add field size relocation tests
+Patch-mainline: v5.5-rc1
+Git-commit: 0b163565b918fd5ad1cf8ab7a92cffa06c13b204
+References: bsc#1155518
+
+Add a test verifying the correctness and logic of field size relocation
+support in libbpf.
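+
+The relocation is exercised through the bpf_core_field_size() helper; a
+one-line sketch of what each output field records:
+
+  /* size of the field as present in the target BTF, not the local one */
+  out->arr_sz = bpf_core_field_size(in->arr_field);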
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191101222810.1246166-6-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 39 ++++++-
+ tools/testing/selftests/bpf/progs/btf__core_reloc_size.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c | 3
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 31 ++++++
+ tools/testing/selftests/bpf/progs/test_core_reloc_size.c | 51 ++++++++++
+ 5 files changed, 122 insertions(+), 5 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -174,15 +174,11 @@
+ .fails = true, \
+ }
+
+-#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+- .a = 42, \
+-}
+-
+ #define EXISTENCE_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_existence.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o", \
+- .relaxed_core_relocs = true \
++ .relaxed_core_relocs = true
+
+ #define EXISTENCE_ERR_CASE(name) { \
+ EXISTENCE_CASE_COMMON(name), \
+@@ -225,6 +221,35 @@
+ .fails = true, \
+ }
+
++#define SIZE_CASE_COMMON(name) \
++ .case_name = #name, \
++ .bpf_obj_file = "test_core_reloc_size.o", \
++ .btf_src_file = "btf__core_reloc_" #name ".o", \
++ .relaxed_core_relocs = true
++
++#define SIZE_OUTPUT_DATA(type) \
++ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
++ .int_sz = sizeof(((type *)0)->int_field), \
++ .struct_sz = sizeof(((type *)0)->struct_field), \
++ .union_sz = sizeof(((type *)0)->union_field), \
++ .arr_sz = sizeof(((type *)0)->arr_field), \
++ .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
++ .ptr_sz = sizeof(((type *)0)->ptr_field), \
++ .enum_sz = sizeof(((type *)0)->enum_field), \
++ }
++
++#define SIZE_CASE(name) { \
++ SIZE_CASE_COMMON(name), \
++ .input_len = 0, \
++ .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \
++ .output_len = sizeof(struct core_reloc_size_output), \
++}
++
++#define SIZE_ERR_CASE(name) { \
++ SIZE_CASE_COMMON(name), \
++ .fails = true, \
++}
++
+ struct core_reloc_test_case {
+ const char *case_name;
+ const char *bpf_obj_file;
+@@ -423,6 +448,10 @@ static struct core_reloc_test_case test_
+ .ub2 = 0x0812345678FEDCBA,
+ }),
+ BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
++
++ /* size relocation checks */
++ SIZE_CASE(size),
++ SIZE_CASE(size___diff_sz),
+ };
+
+ struct data {
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_size x) {}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_size___diff_sz x) {}
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -802,3 +802,34 @@ struct core_reloc_bitfields___err_too_bi
+ uint32_t u32;
+ uint32_t s32;
+ } __attribute__((packed)) ;
++
++/*
++ * SIZE
++ */
++struct core_reloc_size_output {
++ int int_sz;
++ int struct_sz;
++ int union_sz;
++ int arr_sz;
++ int arr_elem_sz;
++ int ptr_sz;
++ int enum_sz;
++};
++
++struct core_reloc_size {
++ int int_field;
++ struct { int x; } struct_field;
++ union { int x; } union_field;
++ int arr_field[4];
++ void *ptr_field;
++ enum { VALUE = 123 } enum_field;
++};
++
++struct core_reloc_size___diff_sz {
++ uint64_t int_field;
++ struct { int x; int y; int z; } struct_field;
++ union { int x; char bla[123]; } union_field;
++ char arr_field[10];
++ void *ptr_field;
++ enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
++};
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+@@ -0,0 +1,51 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/bpf.h>
++#include <stdint.h>
++#include "bpf_helpers.h"
++#include "bpf_core_read.h"
++
++char _license[] SEC("license") = "GPL";
++
++static volatile struct data {
++ char in[256];
++ char out[256];
++} data;
++
++struct core_reloc_size_output {
++ int int_sz;
++ int struct_sz;
++ int union_sz;
++ int arr_sz;
++ int arr_elem_sz;
++ int ptr_sz;
++ int enum_sz;
++};
++
++struct core_reloc_size {
++ int int_field;
++ struct { int x; } struct_field;
++ union { int x; } union_field;
++ int arr_field[4];
++ void *ptr_field;
++ enum { VALUE = 123 } enum_field;
++};
++
++SEC("raw_tracepoint/sys_enter")
++int test_core_size(void *ctx)
++{
++ struct core_reloc_size *in = (void *)&data.in;
++ struct core_reloc_size_output *out = (void *)&data.out;
++
++ out->int_sz = bpf_core_field_size(in->int_field);
++ out->struct_sz = bpf_core_field_size(in->struct_field);
++ out->union_sz = bpf_core_field_size(in->union_field);
++ out->arr_sz = bpf_core_field_size(in->arr_field);
++ out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
++ out->ptr_sz = bpf_core_field_size(in->ptr_field);
++ out->enum_sz = bpf_core_field_size(in->enum_field);
++
++ return 0;
++}
++
diff --git a/patches.suse/selftests-bpf-Add-read-only-map-values-propagation-t.patch b/patches.suse/selftests-bpf-Add-read-only-map-values-propagation-t.patch
new file mode 100644
index 0000000000..0110fce9e6
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-read-only-map-values-propagation-t.patch
@@ -0,0 +1,212 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Wed, 9 Oct 2019 13:14:58 -0700
+Subject: selftests/bpf: Add read-only map values propagation tests
+Patch-mainline: v5.5-rc1
+Git-commit: 666b2c10ee9d51f14d04c416a14b1cb6fd0846e4
+References: bsc#1155518
+
+Add tests checking that the verifier does proper constant propagation for
+read-only maps. If constant propagation didn't work, the skip_loop and
+part_loop BPF programs would be rejected, because the BPF verifier would
+otherwise not be able to prove they ever complete. With constant
+propagation, though, they are successfully validated as properly
+terminating loops.
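+
+A sketch of the shape the verifier relies on (the array contents become
+known constants once the read-only map is frozen at load time):
+
+  static volatile const struct {
+          unsigned a[4];
+  } rdonly_values = { .a = {2, 3, 4, 5} };
+
+  unsigned * volatile p = (void *)&rdonly_values.a;
+
+  /* bounded: the verifier knows every *p is one of 2, 3, 4, 5 */
+  while (*p < 5) {
+          sum += *p;
+          p++;
+  }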
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191009201458.2679171-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/rdonly_maps.c | 99 +++++++++++++++++++
+ tools/testing/selftests/bpf/progs/test_rdonly_maps.c | 83 +++++++++++++++
+ 2 files changed, 182 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+ create mode 100644 tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <test_progs.h>
++
++struct bss {
++ unsigned did_run;
++ unsigned iters;
++ unsigned sum;
++};
++
++struct rdonly_map_subtest {
++ const char *subtest_name;
++ const char *prog_name;
++ unsigned exp_iters;
++ unsigned exp_sum;
++};
++
++void test_rdonly_maps(void)
++{
++ const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
++ const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
++ const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
++ const char *file = "test_rdonly_maps.o";
++ struct rdonly_map_subtest subtests[] = {
++ { "skip loop", prog_name_skip_loop, 0, 0 },
++ { "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
++ { "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
++ };
++ int i, err, zero = 0, duration = 0;
++ struct bpf_link *link = NULL;
++ struct bpf_program *prog;
++ struct bpf_map *bss_map;
++ struct bpf_object *obj;
++ struct bss bss;
++
++ obj = bpf_object__open_file(file, NULL);
++ if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
++ return;
++
++ bpf_object__for_each_program(prog, obj) {
++ bpf_program__set_raw_tracepoint(prog);
++ }
++
++ err = bpf_object__load(obj);
++ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
++ goto cleanup;
++
++ bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
++ if (CHECK(!bss_map, "find_bss_map", "failed\n"))
++ goto cleanup;
++
++ for (i = 0; i < ARRAY_SIZE(subtests); i++) {
++ const struct rdonly_map_subtest *t = &subtests[i];
++
++ if (!test__start_subtest(t->subtest_name))
++ continue;
++
++ prog = bpf_object__find_program_by_title(obj, t->prog_name);
++ if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
++ t->prog_name))
++ goto cleanup;
++
++ memset(&bss, 0, sizeof(bss));
++ err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
++ if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
++ goto cleanup;
++
++ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
++ if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
++ t->prog_name, PTR_ERR(link))) {
++ link = NULL;
++ goto cleanup;
++ }
++
++ /* trigger probe */
++ usleep(1);
++
++ bpf_link__destroy(link);
++ link = NULL;
++
++ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
++ if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
++ goto cleanup;
++ if (CHECK(bss.did_run == 0, "check_run",
++ "prog '%s' didn't run?\n", t->prog_name))
++ goto cleanup;
++ if (CHECK(bss.iters != t->exp_iters, "check_iters",
++ "prog '%s' iters: %d, expected: %d\n",
++ t->prog_name, bss.iters, t->exp_iters))
++ goto cleanup;
++ if (CHECK(bss.sum != t->exp_sum, "check_sum",
++ "prog '%s' sum: %d, expected: %d\n",
++ t->prog_name, bss.sum, t->exp_sum))
++ goto cleanup;
++ }
++
++cleanup:
++ bpf_link__destroy(link);
++ bpf_object__close(obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+@@ -0,0 +1,83 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2019 Facebook
++
++#include <linux/ptrace.h>
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++static volatile const struct {
++ unsigned a[4];
++ /*
++ * if the struct's size is a multiple of 16, the compiler will put it
++ * into the .rodata.cst16 section, which is not recognized by libbpf;
++ * work around this by ensuring the struct is not 16-byte aligned
++ */
++ char _y;
++} rdonly_values = { .a = {2, 3, 4, 5} };
++
++static volatile struct {
++ unsigned did_run;
++ unsigned iters;
++ unsigned sum;
++} res;
++
++SEC("raw_tracepoint/sys_enter:skip_loop")
++int skip_loop(struct pt_regs *ctx)
++{
++ /* prevent the compiler from optimizing everything out */
++ unsigned * volatile p = (void *)&rdonly_values.a;
++ unsigned iters = 0, sum = 0;
++
++ /* we should never enter this loop */
++ while (*p & 1) {
++ iters++;
++ sum += *p;
++ p++;
++ }
++ res.did_run = 1;
++ res.iters = iters;
++ res.sum = sum;
++ return 0;
++}
++
++SEC("raw_tracepoint/sys_enter:part_loop")
++int part_loop(struct pt_regs *ctx)
++{
++ /* prevent the compiler from optimizing everything out */
++ unsigned * volatile p = (void *)&rdonly_values.a;
++ unsigned iters = 0, sum = 0;
++
++ /* validate verifier can derive loop termination */
++ while (*p < 5) {
++ iters++;
++ sum += *p;
++ p++;
++ }
++ res.did_run = 1;
++ res.iters = iters;
++ res.sum = sum;
++ return 0;
++}
++
++SEC("raw_tracepoint/sys_enter:full_loop")
++int full_loop(struct pt_regs *ctx)
++{
++ /* prevent the compiler from optimizing everything out */
++ unsigned * volatile p = (void *)&rdonly_values.a;
++ int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
++ unsigned iters = 0, sum = 0;
++
++ /* validate verifier can allow full loop as well */
++ while (i > 0) {
++ iters++;
++ sum += *p;
++ p++;
++ i--;
++ }
++ res.did_run = 1;
++ res.iters = iters;
++ res.sum = sum;
++ return 0;
++}
++
++char _license[] SEC("license") = "GPL";
diff --git a/patches.suse/selftests-bpf-Add-static-to-enable_all_controllers.patch b/patches.suse/selftests-bpf-Add-static-to-enable_all_controllers.patch
new file mode 100644
index 0000000000..59646ba874
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-static-to-enable_all_controllers.patch
@@ -0,0 +1,43 @@
+From: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
+Date: Wed, 2 Oct 2019 15:04:03 +0300
+Subject: selftests/bpf: Add static to enable_all_controllers()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.5-rc1
+Git-commit: fb27dcd2909d32e2219b54636ea212dbde45f985
+References: bsc#1155518
+
+Add static to enable_all_controllers() to get rid of an annoying warning
+during the samples/bpf build:
+
+samples/bpf/../../tools/testing/selftests/bpf/cgroup_helpers.c:44:5:
+warning: no previous prototype for ‘enable_all_controllers’
+[-Wmissing-prototypes]
+ int enable_all_controllers(char *cgroup_path)
+
+Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191002120404.26962-2-ivan.khoronzhuk@linaro.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/cgroup_helpers.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
+index e95c33e333a4..4d74f3c4619b 100644
+--- a/tools/testing/selftests/bpf/cgroup_helpers.c
++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
+@@ -41,7 +41,7 @@
+ *
+ * If successful, 0 is returned.
+ */
+-int enable_all_controllers(char *cgroup_path)
++static int enable_all_controllers(char *cgroup_path)
+ {
+ char path[PATH_MAX + 1];
+ char buf[PATH_MAX];
+--
+2.24.0
+
diff --git a/patches.suse/selftests-bpf-Add-stress-test-for-maximum-number-of-.patch b/patches.suse/selftests-bpf-Add-stress-test-for-maximum-number-of-.patch
new file mode 100644
index 0000000000..7fc73cf882
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-stress-test-for-maximum-number-of-.patch
@@ -0,0 +1,98 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:12 -0800
+Subject: selftests/bpf: Add stress test for maximum number of progs
+Patch-mainline: v5.5-rc1
+Git-commit: e76d776e9ca1fe266b3a7f8091eee5d1e635a545
+References: bsc#1155518
+
+Add a stress test for the maximum number of attached BPF programs per
+BPF trampoline.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-13-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/fexit_stress.c | 76 ++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+@@ -0,0 +1,76 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <test_progs.h>
++
++/* x86-64 fits 55 JITed and 43 interpreted progs into half page */
++#define CNT 40
++
++void test_fexit_stress(void)
++{
++ char test_skb[128] = {};
++ int fexit_fd[CNT] = {};
++ int link_fd[CNT] = {};
++ __u32 duration = 0;
++ char error[4096];
++ __u32 prog_ret;
++ int err, i, filter_fd;
++
++ const struct bpf_insn trace_program[] = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ };
++
++ struct bpf_load_program_attr load_attr = {
++ .prog_type = BPF_PROG_TYPE_TRACING,
++ .license = "GPL",
++ .insns = trace_program,
++ .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
++ .expected_attach_type = BPF_TRACE_FEXIT,
++ };
++
++ const struct bpf_insn skb_program[] = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ };
++
++ struct bpf_load_program_attr skb_load_attr = {
++ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
++ .license = "GPL",
++ .insns = skb_program,
++ .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
++ };
++
++ err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
++ load_attr.expected_attach_type);
++ if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
++ goto out;
++ load_attr.attach_btf_id = err;
++
++ for (i = 0; i < CNT; i++) {
++ fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
++ if (CHECK(fexit_fd[i] < 0, "fexit loaded",
++ "failed: %d errno %d\n", fexit_fd[i], errno))
++ goto out;
++ link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
++ if (CHECK(link_fd[i] < 0, "fexit attach failed",
++ "prog %d failed: %d err %d\n", i, link_fd[i], errno))
++ goto out;
++ }
++
++ filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
++ if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
++ filter_fd, errno))
++ goto out;
++
++ err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
++ 0, &prog_ret, 0);
++ close(filter_fd);
++ CHECK_FAIL(err);
++out:
++ for (i = 0; i < CNT; i++) {
++ if (link_fd[i])
++ close(link_fd[i]);
++ if (fexit_fd[i])
++ close(fexit_fd[i]);
++ }
++}
diff --git a/patches.suse/selftests-bpf-Add-test-for-BPF-trampoline.patch b/patches.suse/selftests-bpf-Add-test-for-BPF-trampoline.patch
new file mode 100644
index 0000000000..5f174503b1
--- /dev/null
+++ b/patches.suse/selftests-bpf-Add-test-for-BPF-trampoline.patch
@@ -0,0 +1,203 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:09 -0800
+Subject: selftests/bpf: Add test for BPF trampoline
+Patch-mainline: v5.5-rc1
+Git-commit: 11d1e2eefffe86339b3b0b773bd31ef3b88faf7d
+References: bsc#1155518
+
+Add a sanity test for the BPF trampoline that checks kernel functions
+with up to 6 arguments of different sizes.
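+
+The trampoline passes every argument in an 8-byte slot, so the test adds
+8-byte-aligned typedefs to bpf_helpers.h and builds per-test context
+structs from them; e.g. for int bpf_fentry_test1(int a):
+
+  typedef int __attribute__((aligned(8))) ks32;
+
+  struct test1 { ks32 a; };  /* fentry ctx: one widened argument */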
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-10-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/lib/bpf/bpf_helpers.h | 13 ++
+ tools/testing/selftests/bpf/prog_tests/fentry_test.c | 64 +++++++++++++
+ tools/testing/selftests/bpf/progs/fentry_test.c | 90 +++++++++++++++++++
+ 3 files changed, 167 insertions(+)
+ create mode 100644 tools/testing/selftests/bpf/prog_tests/fentry_test.c
+ create mode 100644 tools/testing/selftests/bpf/progs/fentry_test.c
+
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -44,4 +44,17 @@ enum libbpf_pin_type {
+ LIBBPF_PIN_BY_NAME,
+ };
+
++/* The following types should be used by BPF_PROG_TYPE_TRACING program to
++ * access kernel function arguments. BPF trampoline and raw tracepoints
++ * typecast arguments to 'unsigned long long'.
++ */
++typedef int __attribute__((aligned(8))) ks32;
++typedef char __attribute__((aligned(8))) ks8;
++typedef short __attribute__((aligned(8))) ks16;
++typedef long long __attribute__((aligned(8))) ks64;
++typedef unsigned int __attribute__((aligned(8))) ku32;
++typedef unsigned char __attribute__((aligned(8))) ku8;
++typedef unsigned short __attribute__((aligned(8))) ku16;
++typedef unsigned long long __attribute__((aligned(8))) ku64;
++
+ #endif
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+@@ -0,0 +1,64 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <test_progs.h>
++
++void test_fentry_test(void)
++{
++ struct bpf_prog_load_attr attr = {
++ .file = "./fentry_test.o",
++ };
++
++ char prog_name[] = "fentry/bpf_fentry_testX";
++ struct bpf_object *obj = NULL, *pkt_obj;
++ int err, pkt_fd, kfree_skb_fd, i;
++ struct bpf_link *link[6] = {};
++ struct bpf_program *prog[6];
++ __u32 duration, retval;
++ struct bpf_map *data_map;
++ const int zero = 0;
++ u64 result[6];
++
++ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
++ &pkt_obj, &pkt_fd);
++ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
++ return;
++ err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
++ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
++ goto close_prog;
++
++ for (i = 0; i < 6; i++) {
++ prog_name[sizeof(prog_name) - 2] = '1' + i;
++ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
++ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
++ goto close_prog;
++ link[i] = bpf_program__attach_trace(prog[i]);
++ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
++ goto close_prog;
++ }
++ data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss");
++ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
++ goto close_prog;
++
++ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
++ NULL, NULL, &retval, &duration);
++ CHECK(err || retval, "ipv6",
++ "err %d errno %d retval %d duration %d\n",
++ err, errno, retval, duration);
++
++ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
++ if (CHECK(err, "get_result",
++ "failed to get output data: %d\n", err))
++ goto close_prog;
++
++ for (i = 0; i < 6; i++)
++ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
++ i + 1, result[i]))
++ goto close_prog;
++
++close_prog:
++ for (i = 0; i < 6; i++)
++ if (!IS_ERR_OR_NULL(link[i]))
++ bpf_link__destroy(link[i]);
++ bpf_object__close(obj);
++ bpf_object__close(pkt_obj);
++}
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/fentry_test.c
+@@ -0,0 +1,90 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2019 Facebook */
++#include <linux/bpf.h>
++#include "bpf_helpers.h"
++
++char _license[] SEC("license") = "GPL";
++
++struct test1 {
++ ks32 a;
++};
++static volatile __u64 test1_result;
++SEC("fentry/bpf_fentry_test1")
++int test1(struct test1 *ctx)
++{
++ test1_result = ctx->a == 1;
++ return 0;
++}
++
++struct test2 {
++ ks32 a;
++ ku64 b;
++};
++static volatile __u64 test2_result;
++SEC("fentry/bpf_fentry_test2")
++int test2(struct test2 *ctx)
++{
++ test2_result = ctx->a == 2 && ctx->b == 3;
++ return 0;
++}
++
++struct test3 {
++ ks8 a;
++ ks32 b;
++ ku64 c;
++};
++static volatile __u64 test3_result;
++SEC("fentry/bpf_fentry_test3")
++int test3(struct test3 *ctx)
++{
++ test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6;
++ return 0;
++}
++
++struct test4 {
++ void *a;
++ ks8 b;
++ ks32 c;
++ ku64 d;
++};
++static volatile __u64 test4_result;
++SEC("fentry/bpf_fentry_test4")
++int test4(struct test4 *ctx)
++{
++ test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
++ ctx->d == 10;
++ return 0;
++}
++
++struct test5 {
++ ku64 a;
++ void *b;
++ ks16 c;
++ ks32 d;
++ ku64 e;
++};
++static volatile __u64 test5_result;
++SEC("fentry/bpf_fentry_test5")
++int test5(struct test5 *ctx)
++{
++ test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 &&
++ ctx->d == 14 && ctx->e == 15;
++ return 0;
++}
++
++struct test6 {
++ ku64 a;
++ void *b;
++ ks16 c;
++ ks32 d;
++ void *e;
++ ks64 f;
++};
++static volatile __u64 test6_result;
++SEC("fentry/bpf_fentry_test6")
++int test6(struct test6 *ctx)
++{
++ test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 &&
++ ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21;
++ return 0;
++}
diff --git a/patches.suse/selftests-bpf-Adjust-CO-RE-reloc-tests-for-new-bpf_c.patch b/patches.suse/selftests-bpf-Adjust-CO-RE-reloc-tests-for-new-bpf_c.patch
new file mode 100644
index 0000000000..d38d7af7d4
--- /dev/null
+++ b/patches.suse/selftests-bpf-Adjust-CO-RE-reloc-tests-for-new-bpf_c.patch
@@ -0,0 +1,300 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 8 Oct 2019 10:59:38 -0700
+Subject: selftests/bpf: Adjust CO-RE reloc tests for new bpf_core_read() macro
+Patch-mainline: v5.5-rc1
+Git-commit: 694731e8ea7f6bbcf0c57763ed4f24faa14bf056
+References: bsc#1155518
+
+To allow adding a variadic BPF_CORE_READ macro with slightly different
+syntax and semantics, define CORE_READ in the CO-RE reloc tests as a
+thin wrapper around the low-level bpf_core_read() macro, which in turn
+is just a wrapper around bpf_probe_read().
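+
+For reference, the wrapper each converted test defines is one line:
+
+  #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
+so call sites change from BPF_CORE_READ(&out->a, &in->a) to
+CORE_READ(&out->a, &in->a) with identical semantics.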
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191008175942.1769476-4-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/bpf_helpers.h | 8 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c | 10 +++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c | 8 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_ints.c | 18 +++++-----
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 6 ++-
+ tools/testing/selftests/bpf/progs/test_core_reloc_misc.c | 8 ++--
+ tools/testing/selftests/bpf/progs/test_core_reloc_mods.c | 18 +++++-----
+ tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c | 6 ++-
+ tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c | 12 +++---
+ tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c | 4 +-
+ 10 files changed, 58 insertions(+), 40 deletions(-)
+
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ b/tools/testing/selftests/bpf/bpf_helpers.h
+@@ -223,7 +223,7 @@ struct pt_regs;
+ #endif
+
+ /*
+- * BPF_CORE_READ abstracts away bpf_probe_read() call and captures offset
++ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+ *
+@@ -238,8 +238,8 @@ struct pt_regs;
+ * actual field offset, based on target kernel BTF type that matches original
+ * (local) BTF, used to record relocation.
+ */
+-#define BPF_CORE_READ(dst, src) \
+- bpf_probe_read((dst), sizeof(*(src)), \
+- __builtin_preserve_access_index(src))
++#define bpf_core_read(dst, sz, src) \
++ bpf_probe_read(dst, sz, \
++ (const void *)__builtin_preserve_access_index(src))
+
+ #endif
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+@@ -31,6 +31,8 @@ struct core_reloc_arrays {
+ struct core_reloc_arrays_substruct d[1][2];
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_arrays(void *ctx)
+ {
+@@ -38,16 +40,16 @@ int test_core_arrays(void *ctx)
+ struct core_reloc_arrays_output *out = (void *)&data.out;
+
+ /* in->a[2] */
+- if (BPF_CORE_READ(&out->a2, &in->a[2]))
++ if (CORE_READ(&out->a2, &in->a[2]))
+ return 1;
+ /* in->b[1][2][3] */
+- if (BPF_CORE_READ(&out->b123, &in->b[1][2][3]))
++ if (CORE_READ(&out->b123, &in->b[1][2][3]))
+ return 1;
+ /* in->c[1].c */
+- if (BPF_CORE_READ(&out->c1c, &in->c[1].c))
++ if (CORE_READ(&out->c1c, &in->c[1].c))
+ return 1;
+ /* in->d[0][0].d */
+- if (BPF_CORE_READ(&out->d00d, &in->d[0][0].d))
++ if (CORE_READ(&out->d00d, &in->d[0][0].d))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
+@@ -39,6 +39,8 @@ struct core_reloc_flavors___weird {
+ };
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_flavors(void *ctx)
+ {
+@@ -48,13 +50,13 @@ int test_core_flavors(void *ctx)
+ struct core_reloc_flavors *out = (void *)&data.out;
+
+ /* read a using weird layout */
+- if (BPF_CORE_READ(&out->a, &in_weird->a))
++ if (CORE_READ(&out->a, &in_weird->a))
+ return 1;
+ /* read b using reversed layout */
+- if (BPF_CORE_READ(&out->b, &in_rev->b))
++ if (CORE_READ(&out->b, &in_rev->b))
+ return 1;
+ /* read c using original layout */
+- if (BPF_CORE_READ(&out->c, &in_orig->c))
++ if (CORE_READ(&out->c, &in_orig->c))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
+@@ -23,20 +23,22 @@ struct core_reloc_ints {
+ int64_t s64_field;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_ints(void *ctx)
+ {
+ struct core_reloc_ints *in = (void *)&data.in;
+ struct core_reloc_ints *out = (void *)&data.out;
+
+- if (BPF_CORE_READ(&out->u8_field, &in->u8_field) ||
+- BPF_CORE_READ(&out->s8_field, &in->s8_field) ||
+- BPF_CORE_READ(&out->u16_field, &in->u16_field) ||
+- BPF_CORE_READ(&out->s16_field, &in->s16_field) ||
+- BPF_CORE_READ(&out->u32_field, &in->u32_field) ||
+- BPF_CORE_READ(&out->s32_field, &in->s32_field) ||
+- BPF_CORE_READ(&out->u64_field, &in->u64_field) ||
+- BPF_CORE_READ(&out->s64_field, &in->s64_field))
++ if (CORE_READ(&out->u8_field, &in->u8_field) ||
++ CORE_READ(&out->s8_field, &in->s8_field) ||
++ CORE_READ(&out->u16_field, &in->u16_field) ||
++ CORE_READ(&out->s16_field, &in->s16_field) ||
++ CORE_READ(&out->u32_field, &in->u32_field) ||
++ CORE_READ(&out->s32_field, &in->s32_field) ||
++ CORE_READ(&out->u64_field, &in->u64_field) ||
++ CORE_READ(&out->s64_field, &in->s64_field))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -17,6 +17,8 @@ struct task_struct {
+ int tgid;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_kernel(void *ctx)
+ {
+@@ -24,8 +26,8 @@ int test_core_kernel(void *ctx)
+ uint64_t pid_tgid = bpf_get_current_pid_tgid();
+ int pid, tgid;
+
+- if (BPF_CORE_READ(&pid, &task->pid) ||
+- BPF_CORE_READ(&tgid, &task->tgid))
++ if (CORE_READ(&pid, &task->pid) ||
++ CORE_READ(&tgid, &task->tgid))
+ return 1;
+
+ /* validate pid + tgid matches */
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
+@@ -32,6 +32,8 @@ struct core_reloc_misc_extensible {
+ int b;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_misc(void *ctx)
+ {
+@@ -41,15 +43,15 @@ int test_core_misc(void *ctx)
+ struct core_reloc_misc_output *out = (void *)&data.out;
+
+ /* record two different relocations with the same accessor string */
+- if (BPF_CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
+- BPF_CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
++ if (CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
++ CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
+ return 1;
+
+ /* Validate relocations capture array-only accesses for structs with
+ * fixed header, but with potentially extendable tail. This will read
+ * first 4 bytes of 2nd element of in_ext array of potentially
+ * variably sized struct core_reloc_misc_extensible. */
+- if (BPF_CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
++ if (CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+@@ -41,20 +41,22 @@ struct core_reloc_mods {
+ core_reloc_mods_substruct_t h;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_mods(void *ctx)
+ {
+ struct core_reloc_mods *in = (void *)&data.in;
+ struct core_reloc_mods_output *out = (void *)&data.out;
+
+- if (BPF_CORE_READ(&out->a, &in->a) ||
+- BPF_CORE_READ(&out->b, &in->b) ||
+- BPF_CORE_READ(&out->c, &in->c) ||
+- BPF_CORE_READ(&out->d, &in->d) ||
+- BPF_CORE_READ(&out->e, &in->e[2]) ||
+- BPF_CORE_READ(&out->f, &in->f[1]) ||
+- BPF_CORE_READ(&out->g, &in->g.x) ||
+- BPF_CORE_READ(&out->h, &in->h.y))
++ if (CORE_READ(&out->a, &in->a) ||
++ CORE_READ(&out->b, &in->b) ||
++ CORE_READ(&out->c, &in->c) ||
++ CORE_READ(&out->d, &in->d) ||
++ CORE_READ(&out->e, &in->e[2]) ||
++ CORE_READ(&out->f, &in->f[1]) ||
++ CORE_READ(&out->g, &in->g.x) ||
++ CORE_READ(&out->h, &in->h.y))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
+@@ -30,15 +30,17 @@ struct core_reloc_nesting {
+ } b;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_nesting(void *ctx)
+ {
+ struct core_reloc_nesting *in = (void *)&data.in;
+ struct core_reloc_nesting *out = (void *)&data.out;
+
+- if (BPF_CORE_READ(&out->a.a.a, &in->a.a.a))
++ if (CORE_READ(&out->a.a.a, &in->a.a.a))
+ return 1;
+- if (BPF_CORE_READ(&out->b.b.b, &in->b.b.b))
++ if (CORE_READ(&out->b.b.b, &in->b.b.b))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
+@@ -25,17 +25,19 @@ struct core_reloc_primitives {
+ int (*f)(const char *);
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_primitives(void *ctx)
+ {
+ struct core_reloc_primitives *in = (void *)&data.in;
+ struct core_reloc_primitives *out = (void *)&data.out;
+
+- if (BPF_CORE_READ(&out->a, &in->a) ||
+- BPF_CORE_READ(&out->b, &in->b) ||
+- BPF_CORE_READ(&out->c, &in->c) ||
+- BPF_CORE_READ(&out->d, &in->d) ||
+- BPF_CORE_READ(&out->f, &in->f))
++ if (CORE_READ(&out->a, &in->a) ||
++ CORE_READ(&out->b, &in->b) ||
++ CORE_READ(&out->c, &in->c) ||
++ CORE_READ(&out->d, &in->d) ||
++ CORE_READ(&out->f, &in->f))
+ return 1;
+
+ return 0;
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
+@@ -16,13 +16,15 @@ struct core_reloc_ptr_as_arr {
+ int a;
+ };
+
++#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
++
+ SEC("raw_tracepoint/sys_enter")
+ int test_core_ptr_as_arr(void *ctx)
+ {
+ struct core_reloc_ptr_as_arr *in = (void *)&data.in;
+ struct core_reloc_ptr_as_arr *out = (void *)&data.out;
+
+- if (BPF_CORE_READ(&out->a, &in[2].a))
++ if (CORE_READ(&out->a, &in[2].a))
+ return 1;
+
+ return 0;
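+
The hunks above replace the old two-argument BPF_CORE_READ with an
explicit-size bpf_core_read(), plus a per-file CORE_READ wrapper that derives
the size from the destination. A compact sketch of the resulting pattern,
outside any particular selftest (struct and function names here are
illustrative only):

    #include "bpf_helpers.h"

    /* explicit size + relocated source address, as in the new helper */
    #define bpf_core_read(dst, sz, src)                                    \
            bpf_probe_read(dst, sz,                                        \
                           (const void *)__builtin_preserve_access_index(src))

    /* call sites stay as short as the old two-argument BPF_CORE_READ */
    #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)

    struct local_view { int field; };  /* local BTF "view" of a kernel type */

    static __always_inline int read_field(struct local_view *in, int *out)
    {
            /* Clang records a CO-RE relocation for &in->field; libbpf
             * rewrites the offset against the target kernel's BTF at load.
             */
            return CORE_READ(out, &in->field);
    }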
diff --git a/patches.suse/selftests-bpf-Enforce-libbpf-build-before-BPF-progra.patch b/patches.suse/selftests-bpf-Enforce-libbpf-build-before-BPF-progra.patch
new file mode 100644
index 0000000000..3c273fd58f
--- /dev/null
+++ b/patches.suse/selftests-bpf-Enforce-libbpf-build-before-BPF-progra.patch
@@ -0,0 +1,32 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 11 Oct 2019 15:01:45 -0700
+Subject: selftests/bpf: Enforce libbpf build before BPF programs are built
+Patch-mainline: v5.5-rc1
+Git-commit: 3fbe31ae7ec4ec284a908cef7218f19e951ee55b
+References: bsc#1155518
+
+Since BPF programs rely on libbpf's bpf_helper_defs.h, which is
+auto-generated during the libbpf build, libbpf has to be built before
+we attempt to build progs/*.c. Enforce this as an order-only dependency.
+
+Fixes: 24f25763d6de ("libbpf: auto-generate list of BPF helper definitions")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191011220146.3798961-2-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/Makefile | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -261,7 +261,8 @@ ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+ endif
+
+-$(OUTPUT)/%.o: progs/%.c
++# libbpf has to be built before BPF programs due to bpf_helper_defs.h
++$(OUTPUT)/%.o: progs/%.c | $(BPFOBJ)
+ ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \
+ -c $< -o - || echo "clang failed") | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
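+
For readers unfamiliar with the "| $(BPFOBJ)" syntax above: everything to the
right of the pipe in a prerequisite list is an order-only prerequisite. Make
guarantees it is built before the recipe runs, but a newer timestamp on it
does not, by itself, force the target to be rebuilt. A minimal sketch of the
mechanism (paths are illustrative):

    # lib.a must exist before any obj/%.o is compiled, but a rebuilt
    # lib.a does not trigger recompilation of the objects.
    obj/%.o: src/%.c | lib.a
            $(CC) -c $< -o $@

    lib.a:
            $(MAKE) -C libsrc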
diff --git a/patches.suse/selftests-bpf-Ensure-core_reloc_kernel-is-reading-te.patch b/patches.suse/selftests-bpf-Ensure-core_reloc_kernel-is-reading-te.patch
new file mode 100644
index 0000000000..ab0dc0aacf
--- /dev/null
+++ b/patches.suse/selftests-bpf-Ensure-core_reloc_kernel-is-reading-te.patch
@@ -0,0 +1,100 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Thu, 21 Nov 2019 09:59:00 -0800
+Subject: selftests/bpf: Ensure core_reloc_kernel is reading test_progs's data
+ only
+Patch-mainline: v5.5-rc1
+Git-commit: 6147a140c99f1ded2b519dfbed17e781e5861bf3
+References: bsc#1155518
+
+The test_core_reloc_kernel.c selftest is the only CO-RE test that reads the
+calling thread's information (pid, tgid, comm) and returns it for validation.
+Thus it has to make sure that only test_progs' own invocations are honored.
+
+Fixes: df36e621418b ("selftests/bpf: add CO-RE relocs testing setup")
+Reported-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20191121175900.3486133-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 16 ++++++++-----
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 4 +++
+ 2 files changed, 15 insertions(+), 5 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -2,6 +2,7 @@
+ #include <test_progs.h>
+ #include "progs/core_reloc_types.h"
+ #include <sys/mman.h>
++#include <sys/syscall.h>
+
+ #define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
+
+@@ -452,6 +453,7 @@ static struct core_reloc_test_case test_
+ struct data {
+ char in[256];
+ char out[256];
++ uint64_t my_pid_tgid;
+ };
+
+ static size_t roundup_page(size_t sz)
+@@ -471,9 +473,12 @@ void test_core_reloc(void)
+ struct bpf_map *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
++ uint64_t my_pid_tgid;
+ struct data *data;
+ void *mmap_data = NULL;
+
++ my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
++
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ test_case = &test_cases[i];
+ if (!test__start_subtest(test_case->case_name))
+@@ -517,11 +522,6 @@ void test_core_reloc(void)
+ goto cleanup;
+ }
+
+- link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+- PTR_ERR(link)))
+- goto cleanup;
+-
+ data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto cleanup;
+@@ -537,6 +537,12 @@ void test_core_reloc(void)
+
+ memset(mmap_data, 0, sizeof(*data));
+ memcpy(data->in, test_case->input, test_case->input_len);
++ data->my_pid_tgid = my_pid_tgid;
++
++ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
++ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
++ PTR_ERR(link)))
++ goto cleanup;
+
+ /* trigger test run */
+ usleep(1);
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -11,6 +11,7 @@ char _license[] SEC("license") = "GPL";
+ struct {
+ char in[256];
+ char out[256];
++ uint64_t my_pid_tgid;
+ } data = {};
+
+ struct core_reloc_kernel_output {
+@@ -38,6 +39,9 @@ int test_core_kernel(void *ctx)
+ uint32_t real_tgid = (uint32_t)pid_tgid;
+ int pid, tgid;
+
++ if (data.my_pid_tgid != pid_tgid)
++ return 0;
++
+ if (CORE_READ(&pid, &task->pid) ||
+ CORE_READ(&tgid, &task->tgid))
+ return 1;
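+
Two fixes are combined above: user space publishes its own pid/tgid where the
program can read it, and the raw tracepoint is now attached only after that
value (and the test input) is in place, so no unfiltered invocation can race
in. A sketch of the filtering half follows; bpf_get_current_pid_tgid() packs
the tgid into the upper 32 bits and the calling thread's id into the lower
32, and test_progs is single-threaded here, so both halves carry the same
value:

    #include <stdint.h>
    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    static volatile struct {
            uint64_t my_pid_tgid;          /* written by user space */
    } cfg;

    SEC("raw_tracepoint/sys_enter")
    int only_my_process(void *ctx)
    {
            /* sys_enter fires for every process on the system; bail out
             * unless the caller is the test binary itself
             */
            if (bpf_get_current_pid_tgid() != cfg.my_pid_tgid)
                    return 0;

            /* ... actual test logic ... */
            return 0;
    }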
diff --git a/patches.suse/selftests-bpf-Extend-test_pkt_access-test.patch b/patches.suse/selftests-bpf-Extend-test_pkt_access-test.patch
new file mode 100644
index 0000000000..226cce6d88
--- /dev/null
+++ b/patches.suse/selftests-bpf-Extend-test_pkt_access-test.patch
@@ -0,0 +1,75 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 14 Nov 2019 10:57:19 -0800
+Subject: selftests/bpf: Extend test_pkt_access test
+Patch-mainline: v5.5-rc1
+Git-commit: 4c0963243c5f56bffe8eaba6acc5b076d51797f4
+References: bsc#1155518
+
+test_pkt_access.o is used by multiple tests. Fix its section name so that
+the program type can be detected automatically by libbpf, and make it call
+other subprograms with an skb argument.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20191114185720.1641606-20-ast@kernel.org
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/progs/test_pkt_access.c | 38 ++++++++++++++++++--
+ 1 file changed, 36 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
++++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
+@@ -17,8 +17,38 @@
+ #define barrier() __asm__ __volatile__("": : :"memory")
+ int _version SEC("version") = 1;
+
+-SEC("test1")
+-int process(struct __sk_buff *skb)
++/* llvm will optimize both subprograms into exactly the same BPF assembly
++ *
++ * Disassembly of section .text:
++ *
++ * 0000000000000000 test_pkt_access_subprog1:
++ * ; return skb->len * 2;
++ * 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
++ * 1: 64 00 00 00 01 00 00 00 w0 <<= 1
++ * 2: 95 00 00 00 00 00 00 00 exit
++ *
++ * 0000000000000018 test_pkt_access_subprog2:
++ * ; return skb->len * val;
++ * 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
++ * 4: 64 00 00 00 01 00 00 00 w0 <<= 1
++ * 5: 95 00 00 00 00 00 00 00 exit
++ *
++ * Which makes it an interesting test for BTF-enabled verifier.
++ */
++static __attribute__ ((noinline))
++int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
++{
++ return skb->len * 2;
++}
++
++static __attribute__ ((noinline))
++int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
++{
++ return skb->len * val;
++}
++
++SEC("classifier/test_pkt_access")
++int test_pkt_access(struct __sk_buff *skb)
+ {
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+@@ -48,6 +78,10 @@ int process(struct __sk_buff *skb)
+ tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
+ }
+
++ if (test_pkt_access_subprog1(skb) != skb->len * 2)
++ return TC_ACT_SHOT;
++ if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
++ return TC_ACT_SHOT;
+ if (tcp) {
+ if (((void *)(tcp) + 20) > data_end || proto != 6)
+ return TC_ACT_SHOT;
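+
The "static __attribute__((noinline))" annotation and the "volatile" context
pointer are what keep LLVM from folding the helpers back into the caller, so
the object file really contains BPF-to-BPF calls passing the skb, which is
the case the BTF-enabled verifier needs to handle. The "classifier/..."
section name additionally lets libbpf infer BPF_PROG_TYPE_SCHED_CLS on its
own. A stripped-down sketch of the same shape (names illustrative):

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include "bpf_helpers.h"

    static __attribute__((noinline))
    int subprog_double(volatile struct __sk_buff *skb)
    {
            return skb->len * 2;   /* a real call: verifier tracks skb as ctx */
    }

    SEC("classifier/demo")         /* prefix => BPF_PROG_TYPE_SCHED_CLS */
    int demo(struct __sk_buff *skb)
    {
            return subprog_double(skb) == skb->len * 2 ? TC_ACT_OK
                                                       : TC_ACT_SHOT;
    }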
diff --git a/patches.suse/selftests-bpf-Fix-dependency-ordering-for-attach_pro.patch b/patches.suse/selftests-bpf-Fix-dependency-ordering-for-attach_pro.patch
new file mode 100644
index 0000000000..3379fbed51
--- /dev/null
+++ b/patches.suse/selftests-bpf-Fix-dependency-ordering-for-attach_pro.patch
@@ -0,0 +1,45 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Mon, 7 Oct 2019 13:41:49 -0700
+Subject: selftests/bpf: Fix dependency ordering for attach_probe test
+Patch-mainline: v5.5-rc1
+Git-commit: dcb5f40054b1c64ed608a7eecdcf67044e189e30
+References: bsc#1155518
+
+The current Makefile dependency chain is not strict enough: it allows
+test_progs's prog_tests/attach_probe.o to be built before the
+test_attach_probe.o BPF object is built, which leads to the assembler
+complaining about a missing included binary.
+
+This patch is a minimal fix for that issue: it enforces that
+test_attach_probe.o (the BPF object file) is built before
+prog_tests/attach_probe.c is compiled.
+
+Fixes: 928ca75e59d7 ("selftests/bpf: switch tests to new bpf_object__open_{file, mem}() APIs")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191007204149.1575990-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -169,7 +169,7 @@ $(OUTPUT)/test_queue_map.o: test_queue_s
+ $(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+
+ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+-$(OUTPUT)/test_progs.o: flow_dissector_load.h $(OUTPUT)/test_attach_probe.o
++$(OUTPUT)/test_progs.o: flow_dissector_load.h
+
+ BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
+ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
+@@ -280,7 +280,7 @@ PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.
+ PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
+ test_progs.c: $(PROG_TESTS_H)
+ $(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
+-$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(PROG_TESTS_H)
++$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(OUTPUT)/test_attach_probe.o $(PROG_TESTS_H)
+ $(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR)
+ $(shell ( cd prog_tests/; \
+ echo '/* Generated header, do not edit */'; \
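+
The "missing included binary" comes from the way the test embeds the compiled
BPF object directly into the test binary at assembly time: if
test_attach_probe.o does not exist yet, the assembler aborts. A rough sketch
of that embedding technique (the actual macro in the selftests differs; the
names below are hypothetical):

    /* pull the object file into .rodata at assembly time */
    asm (
    "	.pushsection \".rodata\", \"a\", @progbits\n"
    "	.global probe_obj_data\n"
    "probe_obj_data:\n"
    "	.incbin \"test_attach_probe.o\"\n"
    "	.popsection\n"
    );

    extern char probe_obj_data[];   /* start of the embedded object */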
diff --git a/patches.suse/selftests-bpf-Integrate-verbose-verifier-log-into-te.patch b/patches.suse/selftests-bpf-Integrate-verbose-verifier-log-into-te.patch
new file mode 100644
index 0000000000..4b99e279d5
--- /dev/null
+++ b/patches.suse/selftests-bpf-Integrate-verbose-verifier-log-into-te.patch
@@ -0,0 +1,173 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 19 Nov 2019 16:35:48 -0800
+Subject: selftests/bpf: Integrate verbose verifier log into test_progs
+Patch-mainline: v5.5-rc1
+Git-commit: a8fdaad5cfd250b9effcec942b3bf7bc5a6c8b17
+References: bsc#1155518
+
+Add an extra level of verbosity, activated by the -vvv argument. When -vv is
+specified, the verbose libbpf and verifier log (level 1) is output, even for
+successful tests. With -vvv, the verifier log goes to level 2.
+
+This is extremely useful for debugging verifier failures, as well as for just
+seeing the state and flow of verification. Before this, you'd have to modify
+load_program()'s source code inside libbpf to specify extra log_level flags,
+which is suboptimal to say the least.
+
+Currently, -vv and -vvv trigger verifier output in test_stub's bpf_prog_load
+as well as in the bpf_verif_scale.c tests.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191120003548.4159797-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c | 4 ++-
+ tools/testing/selftests/bpf/test_progs.c | 18 ++++++++++-----
+ tools/testing/selftests/bpf/test_progs.h | 10 ++++++--
+ tools/testing/selftests/bpf/test_stub.c | 4 +++
+ 4 files changed, 27 insertions(+), 9 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+@@ -15,6 +15,8 @@ static int libbpf_debug_print(enum libbp
+ return 0;
+ }
+
++extern int extra_prog_load_log_flags;
++
+ static int check_load(const char *file, enum bpf_prog_type type)
+ {
+ struct bpf_prog_load_attr attr;
+@@ -24,7 +26,7 @@ static int check_load(const char *file,
+ memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+ attr.file = file;
+ attr.prog_type = type;
+- attr.log_level = 4;
++ attr.log_level = 4 | extra_prog_load_log_flags;
+ attr.prog_flags = BPF_F_TEST_RND_HI32;
+ err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ bpf_object__close(obj);
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -43,7 +43,7 @@ static void dump_test_log(const struct p
+
+ fflush(stdout); /* exports env.log_buf & env.log_cnt */
+
+- if (env.verbose || test->force_log || failed) {
++ if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
+ if (env.log_cnt) {
+ env.log_buf[env.log_cnt] = '\0';
+ fprintf(env.stdout, "%s", env.log_buf);
+@@ -304,14 +304,14 @@ static const struct argp_option opts[] =
+ { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
+ "Output verifier statistics", },
+ { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
+- "Verbose output (use -vv for extra verbose output)" },
++ "Verbose output (use -vv or -vvv for progressively verbose output)" },
+ {},
+ };
+
+ static int libbpf_print_fn(enum libbpf_print_level level,
+ const char *format, va_list args)
+ {
+- if (!env.very_verbose && level == LIBBPF_DEBUG)
++ if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
+ return 0;
+ vprintf(format, args);
+ return 0;
+@@ -377,6 +377,8 @@ int parse_num_list(const char *s, struct
+ return 0;
+ }
+
++extern int extra_prog_load_log_flags;
++
+ static error_t parse_arg(int key, char *arg, struct argp_state *state)
+ {
+ struct test_env *env = state->input;
+@@ -418,9 +420,14 @@ static error_t parse_arg(int key, char *
+ env->verifier_stats = true;
+ break;
+ case ARG_VERBOSE:
++ env->verbosity = VERBOSE_NORMAL;
+ if (arg) {
+ if (strcmp(arg, "v") == 0) {
+- env->very_verbose = true;
++ env->verbosity = VERBOSE_VERY;
++ extra_prog_load_log_flags = 1;
++ } else if (strcmp(arg, "vv") == 0) {
++ env->verbosity = VERBOSE_SUPER;
++ extra_prog_load_log_flags = 2;
+ } else {
+ fprintf(stderr,
+ "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
+@@ -428,7 +435,6 @@ static error_t parse_arg(int key, char *
+ return -EINVAL;
+ }
+ }
+- env->verbose = true;
+ break;
+ case ARGP_KEY_ARG:
+ argp_usage(state);
+@@ -447,7 +453,7 @@ static void stdio_hijack(void)
+ env.stdout = stdout;
+ env.stderr = stderr;
+
+- if (env.verbose) {
++ if (env.verbosity > VERBOSE_NONE) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+--- a/tools/testing/selftests/bpf/test_progs.h
++++ b/tools/testing/selftests/bpf/test_progs.h
+@@ -38,6 +38,13 @@ typedef __u16 __sum16;
+ #include "trace_helpers.h"
+ #include "flow_dissector_load.h"
+
++enum verbosity {
++ VERBOSE_NONE,
++ VERBOSE_NORMAL,
++ VERBOSE_VERY,
++ VERBOSE_SUPER,
++};
++
+ struct test_selector {
+ const char *name;
+ bool *num_set;
+@@ -48,8 +55,7 @@ struct test_env {
+ struct test_selector test_selector;
+ struct test_selector subtest_selector;
+ bool verifier_stats;
+- bool verbose;
+- bool very_verbose;
++ enum verbosity verbosity;
+
+ bool jit_enabled;
+
+--- a/tools/testing/selftests/bpf/test_stub.c
++++ b/tools/testing/selftests/bpf/test_stub.c
+@@ -5,6 +5,8 @@
+ #include <bpf/libbpf.h>
+ #include <string.h>
+
++int extra_prog_load_log_flags = 0;
++
+ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd)
+ {
+@@ -15,6 +17,7 @@ int bpf_prog_test_load(const char *file,
+ attr.prog_type = type;
+ attr.expected_attach_type = 0;
+ attr.prog_flags = BPF_F_TEST_RND_HI32;
++ attr.log_level = extra_prog_load_log_flags;
+
+ return bpf_prog_load_xattr(&attr, pobj, prog_fd);
+ }
+@@ -35,6 +38,7 @@ int bpf_test_load_program(enum bpf_prog_
+ load_attr.license = license;
+ load_attr.kern_version = kern_version;
+ load_attr.prog_flags = BPF_F_TEST_RND_HI32;
++ load_attr.log_level = extra_prog_load_log_flags;
+
+ return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
+ }
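+
log_level is the bitmask handed to the BPF_PROG_LOAD command: bit 1 requests
the basic verifier log, bit 2 the very verbose per-instruction state log, and
bit 4 verifier statistics; the patch simply ORs in 1 or 2 depending on
-vv/-vvv. A sketch of requesting the verbose log directly through the same
era's libbpf API (the object file name is hypothetical):

    #include <bpf/libbpf.h>

    int load_verbose(void)
    {
            struct bpf_prog_load_attr attr = {};
            struct bpf_object *obj;
            int prog_fd;

            attr.file = "my_prog.o";
            attr.log_level = 1 | 2;   /* full verifier state log */

            /* the log itself arrives via libbpf's print callback */
            return bpf_prog_load_xattr(&attr, &obj, &prog_fd);
    }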
diff --git a/patches.suse/selftests-bpf-Make-CO-RE-reloc-test-impartial-to-tes.patch b/patches.suse/selftests-bpf-Make-CO-RE-reloc-test-impartial-to-tes.patch
new file mode 100644
index 0000000000..05e40533bf
--- /dev/null
+++ b/patches.suse/selftests-bpf-Make-CO-RE-reloc-test-impartial-to-tes.patch
@@ -0,0 +1,59 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 15 Oct 2019 23:00:46 -0700
+Subject: selftests/bpf: Make CO-RE reloc test impartial to test_progs flavor
+Patch-mainline: v5.5-rc1
+Git-commit: d25c5e23552d54ebb9eea0de0d8cf9b7a7c5535c
+References: bsc#1155518
+
+The test_core_reloc_kernel test captures its own process name and validates
+it as part of the test. Given the extra "flavors" of test_progs, this breaks
+for anything but the default test_progs binary. Fix the test to cut out the
+flavor part of the process name.
+
+Fixes: ee2eb063d330 ("selftests/bpf: Add BPF_CORE_READ and BPF_CORE_READ_STR_INTO macro tests")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191016060051.2024182-3-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/core_reloc.c | 4 ++--
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 2 +-
+ tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 3 ++-
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -195,8 +195,8 @@ static struct core_reloc_test_case test_
+ .input_len = 0,
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
+ .valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+- .comm = "test_progs\0\0\0\0\0",
+- .comm_len = 11,
++ .comm = "test_progs",
++ .comm_len = sizeof("test_progs"),
+ },
+ .output_len = sizeof(struct core_reloc_kernel_output),
+ },
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -6,7 +6,7 @@
+
+ struct core_reloc_kernel_output {
+ int valid[10];
+- char comm[16];
++ char comm[sizeof("test_progs")];
+ int comm_len;
+ };
+
+--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
++++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+@@ -15,7 +15,8 @@ static volatile struct data {
+
+ struct core_reloc_kernel_output {
+ int valid[10];
+- char comm[16];
++ /* we have test_progs[-flavor], so cut flavor part */
++ char comm[sizeof("test_progs")];
+ int comm_len;
+ };
+
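+
The fix relies on BPF's string-reading helpers truncating to the destination
buffer and always NUL-terminating, so an 11-byte buffer maps both
"test_progs" and "test_progs-<flavor>" onto identical bytes. Roughly (a
fragment, assuming task points at the current task_struct):

    char comm[sizeof("test_progs")];   /* 11 bytes, including the NUL */

    /* copies at most sizeof(comm) - 1 characters and NUL-terminates,
     * so "test_progs-no_alu32" comes back as exactly "test_progs"
     */
    bpf_probe_read_str(comm, sizeof(comm), task->comm);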
diff --git a/patches.suse/selftests-bpf-Make-a-copy-of-subtest-name.patch b/patches.suse/selftests-bpf-Make-a-copy-of-subtest-name.patch
new file mode 100644
index 0000000000..adbab24ad8
--- /dev/null
+++ b/patches.suse/selftests-bpf-Make-a-copy-of-subtest-name.patch
@@ -0,0 +1,67 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:39:00 -0700
+Subject: selftests/bpf: Make a copy of subtest name
+Patch-mainline: v5.5-rc1
+Git-commit: f90415e9600c5227131531c0ed11514a2d3bbe62
+References: bsc#1155518
+
+test_progs never created a copy of the subtest name; it just stored a
+pointer to whatever string the test provided. This is bad, as that string
+might be freed or modified by the end of the subtest. Fix this by creating
+a copy of the given subtest name when the subtest starts.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-6-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/test_progs.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -18,7 +18,7 @@ struct prog_test_def {
+ int skip_cnt;
+ bool tested;
+
+- const char *subtest_name;
++ char *subtest_name;
+ int subtest_num;
+
+ /* store counts before subtest started */
+@@ -79,16 +79,17 @@ void test__end_subtest()
+ fprintf(env.stdout, "#%d/%d %s:%s\n",
+ test->test_num, test->subtest_num,
+ test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
++
++ free(test->subtest_name);
++ test->subtest_name = NULL;
+ }
+
+ bool test__start_subtest(const char *name)
+ {
+ struct prog_test_def *test = env.test;
+
+- if (test->subtest_name) {
++ if (test->subtest_name)
+ test__end_subtest();
+- test->subtest_name = NULL;
+- }
+
+ test->subtest_num++;
+
+@@ -102,7 +103,13 @@ bool test__start_subtest(const char *nam
+ if (!should_run(&env.subtest_selector, test->subtest_num, name))
+ return false;
+
+- test->subtest_name = name;
++ test->subtest_name = strdup(name);
++ if (!test->subtest_name) {
++ fprintf(env.stderr,
++ "Subtest #%d: failed to copy subtest name!\n",
++ test->subtest_num);
++ return false;
++ }
+ env.test->old_error_cnt = env.test->error_cnt;
+
+ return true;
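+
The bug class being fixed is easy to trigger: tests often format subtest
names into a reusable buffer, so a stored pointer silently changes meaning on
the next iteration. For instance (illustrative):

    char name[32];
    int i;

    for (i = 0; i < n; i++) {
            snprintf(name, sizeof(name), "case_%d", i);
            if (!test__start_subtest(name))
                    continue;
            /* before this patch, the stored pointer would read
             * "case_<i+1>" once the loop reformats the buffer; the
             * strdup()'d copy keeps the name that was actually started
             */
            run_case(i);   /* hypothetical */
    }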
diff --git a/patches.suse/selftests-bpf-Make-reference_tracking-test-use-subte.patch b/patches.suse/selftests-bpf-Make-reference_tracking-test-use-subte.patch
new file mode 100644
index 0000000000..592f30e1bc
--- /dev/null
+++ b/patches.suse/selftests-bpf-Make-reference_tracking-test-use-subte.patch
@@ -0,0 +1,117 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Sun, 20 Oct 2019 20:39:01 -0700
+Subject: selftests/bpf: Make reference_tracking test use subtests
+Patch-mainline: v5.5-rc1
+Git-commit: 8af1c8b8d6223c31fada6148fd870257407952d1
+References: bsc#1155518
+
+reference_tracking is actually a set of 9 sub-tests. Make that explicit.
+
+Also, add an explicit "classifier/" prefix to the BPF program section names
+to let libbpf correctly guess the program type, which makes the explicit
+bpf_program__set_type() call unnecessary; remove it.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191021033902.3856966-7-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/prog_tests/reference_tracking.c | 3 +-
+ tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c | 18 ++++++------
+ 2 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
++++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+@@ -31,7 +31,8 @@ void test_reference_tracking(void)
+ if (strstr(title, ".text") != NULL)
+ continue;
+
+- bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
++ if (!test__start_subtest(title))
++ continue;
+
+ /* Expect verifier failure if test name has 'fail' */
+ if (strstr(title, "fail") != NULL) {
+--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+@@ -53,7 +53,7 @@ static struct bpf_sock_tuple *get_tuple(
+ return result;
+ }
+
+-SEC("sk_lookup_success")
++SEC("classifier/sk_lookup_success")
+ int bpf_sk_lookup_test0(struct __sk_buff *skb)
+ {
+ void *data_end = (void *)(long)skb->data_end;
+@@ -78,7 +78,7 @@ int bpf_sk_lookup_test0(struct __sk_buff
+ return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
+ }
+
+-SEC("sk_lookup_success_simple")
++SEC("classifier/sk_lookup_success_simple")
+ int bpf_sk_lookup_test1(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -90,7 +90,7 @@ int bpf_sk_lookup_test1(struct __sk_buff
+ return 0;
+ }
+
+-SEC("fail_use_after_free")
++SEC("classifier/fail_use_after_free")
+ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -105,7 +105,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *
+ return family;
+ }
+
+-SEC("fail_modify_sk_pointer")
++SEC("classifier/fail_modify_sk_pointer")
+ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -120,7 +120,7 @@ int bpf_sk_lookup_modptr(struct __sk_buf
+ return 0;
+ }
+
+-SEC("fail_modify_sk_or_null_pointer")
++SEC("classifier/fail_modify_sk_or_null_pointer")
+ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -134,7 +134,7 @@ int bpf_sk_lookup_modptr_or_null(struct
+ return 0;
+ }
+
+-SEC("fail_no_release")
++SEC("classifier/fail_no_release")
+ int bpf_sk_lookup_test2(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -143,7 +143,7 @@ int bpf_sk_lookup_test2(struct __sk_buff
+ return 0;
+ }
+
+-SEC("fail_release_twice")
++SEC("classifier/fail_release_twice")
+ int bpf_sk_lookup_test3(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -155,7 +155,7 @@ int bpf_sk_lookup_test3(struct __sk_buff
+ return 0;
+ }
+
+-SEC("fail_release_unchecked")
++SEC("classifier/fail_release_unchecked")
+ int bpf_sk_lookup_test4(struct __sk_buff *skb)
+ {
+ struct bpf_sock_tuple tuple = {};
+@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff
+ bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
+ }
+
+-SEC("fail_no_release_subcall")
++SEC("classifier/fail_no_release_subcall")
+ int bpf_sk_lookup_test5(struct __sk_buff *skb)
+ {
+ lookup_no_release(skb);
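+
With the "classifier/" prefix in place, libbpf's section-name table resolves
the program type while the object is opened, which is exactly what makes the
removed bpf_program__set_type() call redundant. The lookup the loader
performs boils down to this public API (a sketch):

    #include <bpf/libbpf.h>

    int guess_type(void)
    {
            enum bpf_prog_type type;
            enum bpf_attach_type attach;

            /* resolved from the known "classifier" prefix */
            if (libbpf_prog_type_by_name("classifier/fail_no_release",
                                         &type, &attach))
                    return -1;

            return type == BPF_PROG_TYPE_SCHED_CLS ? 0 : -1;
    }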
diff --git a/patches.suse/selftests-bpf-Move-test_section_names-into-test_prog.patch b/patches.suse/selftests-bpf-Move-test_section_names-into-test_prog.patch
new file mode 100644
index 0000000000..d8a8fb11f7
--- /dev/null
+++ b/patches.suse/selftests-bpf-Move-test_section_names-into-test_prog.patch
@@ -0,0 +1,476 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 22 Oct 2019 23:09:13 -0700
+Subject: selftests/bpf: Move test_section_names into test_progs and fix it
+Patch-mainline: v5.5-rc1
+Git-commit: 9bc6384b364407381cc24c2150d13dd29f5bfdd2
+References: bsc#1155518
+
+Make test_section_names into a test_progs test. Also fix the expected results
+for unknown section names (-EINVAL becomes -ESRCH), and add uprobe/uretprobe
+and tp/raw_tp test cases.
+
+Fixes: dd4436bb8383 ("libbpf: Teach bpf_object__open to guess program types")
+Reported-by: kernel test robot <rong.a.chen@intel.com>
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20191023060913.1713817-1-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/Makefile | 2
+ tools/testing/selftests/bpf/prog_tests/section_names.c | 203 ++++++++++++++
+ tools/testing/selftests/bpf/test_section_names.c | 233 -----------------
+ 3 files changed, 204 insertions(+), 234 deletions(-)
+ rename tools/testing/selftests/bpf/{test_section_names.c => prog_tests/section_names.c} (73%)
+
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -27,7 +27,7 @@ LDLIBS += -lcap -lelf -lrt -lpthread
+ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
+ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
+ test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
+- test_cgroup_storage test_select_reuseport test_section_names \
++ test_cgroup_storage test_select_reuseport \
+ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
+ test_btf_dump test_cgroup_attach xdping test_sockopt test_sockopt_sk \
+ test_sockopt_multi test_tcp_rtt
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/section_names.c
+@@ -0,0 +1,203 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2018 Facebook
++#include <test_progs.h>
++
++static int duration = 0;
++
++struct sec_name_test {
++ const char sec_name[32];
++ struct {
++ int rc;
++ enum bpf_prog_type prog_type;
++ enum bpf_attach_type expected_attach_type;
++ } expected_load;
++ struct {
++ int rc;
++ enum bpf_attach_type attach_type;
++ } expected_attach;
++};
++
++static struct sec_name_test tests[] = {
++ {"InvAliD", {-ESRCH, 0, 0}, {-EINVAL, 0} },
++ {"cgroup", {-ESRCH, 0, 0}, {-EINVAL, 0} },
++ {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
++ {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
++ {"uprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
++ {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
++ {"uretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
++ {"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
++ {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
++ {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
++ {"tp/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
++ {
++ "raw_tracepoint/",
++ {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
++ {-EINVAL, 0},
++ },
++ {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} },
++ {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
++ {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
++ {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
++ {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },
++ {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} },
++ {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
++ {
++ "cgroup_skb/ingress",
++ {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
++ {0, BPF_CGROUP_INET_INGRESS},
++ },
++ {
++ "cgroup_skb/egress",
++ {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
++ {0, BPF_CGROUP_INET_EGRESS},
++ },
++ {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
++ {
++ "cgroup/sock",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
++ {0, BPF_CGROUP_INET_SOCK_CREATE},
++ },
++ {
++ "cgroup/post_bind4",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
++ {0, BPF_CGROUP_INET4_POST_BIND},
++ },
++ {
++ "cgroup/post_bind6",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
++ {0, BPF_CGROUP_INET6_POST_BIND},
++ },
++ {
++ "cgroup/dev",
++ {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
++ {0, BPF_CGROUP_DEVICE},
++ },
++ {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
++ {
++ "sk_skb/stream_parser",
++ {0, BPF_PROG_TYPE_SK_SKB, 0},
++ {0, BPF_SK_SKB_STREAM_PARSER},
++ },
++ {
++ "sk_skb/stream_verdict",
++ {0, BPF_PROG_TYPE_SK_SKB, 0},
++ {0, BPF_SK_SKB_STREAM_VERDICT},
++ },
++ {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
++ {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
++ {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
++ {
++ "flow_dissector",
++ {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
++ {0, BPF_FLOW_DISSECTOR},
++ },
++ {
++ "cgroup/bind4",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
++ {0, BPF_CGROUP_INET4_BIND},
++ },
++ {
++ "cgroup/bind6",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
++ {0, BPF_CGROUP_INET6_BIND},
++ },
++ {
++ "cgroup/connect4",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
++ {0, BPF_CGROUP_INET4_CONNECT},
++ },
++ {
++ "cgroup/connect6",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
++ {0, BPF_CGROUP_INET6_CONNECT},
++ },
++ {
++ "cgroup/sendmsg4",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
++ {0, BPF_CGROUP_UDP4_SENDMSG},
++ },
++ {
++ "cgroup/sendmsg6",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
++ {0, BPF_CGROUP_UDP6_SENDMSG},
++ },
++ {
++ "cgroup/recvmsg4",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
++ {0, BPF_CGROUP_UDP4_RECVMSG},
++ },
++ {
++ "cgroup/recvmsg6",
++ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
++ {0, BPF_CGROUP_UDP6_RECVMSG},
++ },
++ {
++ "cgroup/sysctl",
++ {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
++ {0, BPF_CGROUP_SYSCTL},
++ },
++ {
++ "cgroup/getsockopt",
++ {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT},
++ {0, BPF_CGROUP_GETSOCKOPT},
++ },
++ {
++ "cgroup/setsockopt",
++ {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
++ {0, BPF_CGROUP_SETSOCKOPT},
++ },
++};
++
++static void test_prog_type_by_name(const struct sec_name_test *test)
++{
++ enum bpf_attach_type expected_attach_type;
++ enum bpf_prog_type prog_type;
++ int rc;
++
++ rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
++ &expected_attach_type);
++
++ CHECK(rc != test->expected_load.rc, "check_code",
++ "prog: unexpected rc=%d for %s", rc, test->sec_name);
++
++ if (rc)
++ return;
++
++ CHECK(prog_type != test->expected_load.prog_type, "check_prog_type",
++ "prog: unexpected prog_type=%d for %s",
++ prog_type, test->sec_name);
++
++ CHECK(expected_attach_type != test->expected_load.expected_attach_type,
++ "check_attach_type", "prog: unexpected expected_attach_type=%d for %s",
++ expected_attach_type, test->sec_name);
++}
++
++static void test_attach_type_by_name(const struct sec_name_test *test)
++{
++ enum bpf_attach_type attach_type;
++ int rc;
++
++ rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
++
++ CHECK(rc != test->expected_attach.rc, "check_ret",
++ "attach: unexpected rc=%d for %s", rc, test->sec_name);
++
++ if (rc)
++ return;
++
++ CHECK(attach_type != test->expected_attach.attach_type,
++ "check_attach_type", "attach: unexpected attach_type=%d for %s",
++ attach_type, test->sec_name);
++}
++
++void test_section_names(void)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
++ struct sec_name_test *test = &tests[i];
++
++ test_prog_type_by_name(test);
++ test_attach_type_by_name(test);
++ }
++}
+--- a/tools/testing/selftests/bpf/test_section_names.c
++++ /dev/null
+@@ -1,233 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-// Copyright (c) 2018 Facebook
+-
+-#include <err.h>
+-#include <bpf/libbpf.h>
+-
+-#include "bpf_util.h"
+-
+-struct sec_name_test {
+- const char sec_name[32];
+- struct {
+- int rc;
+- enum bpf_prog_type prog_type;
+- enum bpf_attach_type expected_attach_type;
+- } expected_load;
+- struct {
+- int rc;
+- enum bpf_attach_type attach_type;
+- } expected_attach;
+-};
+-
+-static struct sec_name_test tests[] = {
+- {"InvAliD", {-EINVAL, 0, 0}, {-EINVAL, 0} },
+- {"cgroup", {-EINVAL, 0, 0}, {-EINVAL, 0} },
+- {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
+- {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+- {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+- {"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
+- {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
+- {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
+- {
+- "raw_tracepoint/",
+- {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
+- {-EINVAL, 0},
+- },
+- {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
+- {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
+- {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
+- {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },
+- {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} },
+- {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
+- {
+- "cgroup_skb/ingress",
+- {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+- {0, BPF_CGROUP_INET_INGRESS},
+- },
+- {
+- "cgroup_skb/egress",
+- {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+- {0, BPF_CGROUP_INET_EGRESS},
+- },
+- {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
+- {
+- "cgroup/sock",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
+- {0, BPF_CGROUP_INET_SOCK_CREATE},
+- },
+- {
+- "cgroup/post_bind4",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
+- {0, BPF_CGROUP_INET4_POST_BIND},
+- },
+- {
+- "cgroup/post_bind6",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
+- {0, BPF_CGROUP_INET6_POST_BIND},
+- },
+- {
+- "cgroup/dev",
+- {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
+- {0, BPF_CGROUP_DEVICE},
+- },
+- {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
+- {
+- "sk_skb/stream_parser",
+- {0, BPF_PROG_TYPE_SK_SKB, 0},
+- {0, BPF_SK_SKB_STREAM_PARSER},
+- },
+- {
+- "sk_skb/stream_verdict",
+- {0, BPF_PROG_TYPE_SK_SKB, 0},
+- {0, BPF_SK_SKB_STREAM_VERDICT},
+- },
+- {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
+- {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
+- {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
+- {
+- "flow_dissector",
+- {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
+- {0, BPF_FLOW_DISSECTOR},
+- },
+- {
+- "cgroup/bind4",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
+- {0, BPF_CGROUP_INET4_BIND},
+- },
+- {
+- "cgroup/bind6",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
+- {0, BPF_CGROUP_INET6_BIND},
+- },
+- {
+- "cgroup/connect4",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
+- {0, BPF_CGROUP_INET4_CONNECT},
+- },
+- {
+- "cgroup/connect6",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
+- {0, BPF_CGROUP_INET6_CONNECT},
+- },
+- {
+- "cgroup/sendmsg4",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
+- {0, BPF_CGROUP_UDP4_SENDMSG},
+- },
+- {
+- "cgroup/sendmsg6",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
+- {0, BPF_CGROUP_UDP6_SENDMSG},
+- },
+- {
+- "cgroup/recvmsg4",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
+- {0, BPF_CGROUP_UDP4_RECVMSG},
+- },
+- {
+- "cgroup/recvmsg6",
+- {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
+- {0, BPF_CGROUP_UDP6_RECVMSG},
+- },
+- {
+- "cgroup/sysctl",
+- {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
+- {0, BPF_CGROUP_SYSCTL},
+- },
+- {
+- "cgroup/getsockopt",
+- {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT},
+- {0, BPF_CGROUP_GETSOCKOPT},
+- },
+- {
+- "cgroup/setsockopt",
+- {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
+- {0, BPF_CGROUP_SETSOCKOPT},
+- },
+-};
+-
+-static int test_prog_type_by_name(const struct sec_name_test *test)
+-{
+- enum bpf_attach_type expected_attach_type;
+- enum bpf_prog_type prog_type;
+- int rc;
+-
+- rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
+- &expected_attach_type);
+-
+- if (rc != test->expected_load.rc) {
+- warnx("prog: unexpected rc=%d for %s", rc, test->sec_name);
+- return -1;
+- }
+-
+- if (rc)
+- return 0;
+-
+- if (prog_type != test->expected_load.prog_type) {
+- warnx("prog: unexpected prog_type=%d for %s", prog_type,
+- test->sec_name);
+- return -1;
+- }
+-
+- if (expected_attach_type != test->expected_load.expected_attach_type) {
+- warnx("prog: unexpected expected_attach_type=%d for %s",
+- expected_attach_type, test->sec_name);
+- return -1;
+- }
+-
+- return 0;
+-}
+-
+-static int test_attach_type_by_name(const struct sec_name_test *test)
+-{
+- enum bpf_attach_type attach_type;
+- int rc;
+-
+- rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
+-
+- if (rc != test->expected_attach.rc) {
+- warnx("attach: unexpected rc=%d for %s", rc, test->sec_name);
+- return -1;
+- }
+-
+- if (rc)
+- return 0;
+-
+- if (attach_type != test->expected_attach.attach_type) {
+- warnx("attach: unexpected attach_type=%d for %s", attach_type,
+- test->sec_name);
+- return -1;
+- }
+-
+- return 0;
+-}
+-
+-static int run_test_case(const struct sec_name_test *test)
+-{
+- if (test_prog_type_by_name(test))
+- return -1;
+- if (test_attach_type_by_name(test))
+- return -1;
+- return 0;
+-}
+-
+-static int run_tests(void)
+-{
+- int passes = 0;
+- int fails = 0;
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+- if (run_test_case(&tests[i]))
+- ++fails;
+- else
+- ++passes;
+- }
+- printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
+- return fails ? -1 : 0;
+-}
+-
+-int main(int argc, char **argv)
+-{
+- return run_tests();
+-}
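+
The move above is the standard recipe for folding a standalone selftest
binary into test_progs: drop main() and the hand-rolled pass/fail counting,
expose a void test_<name>(void) entry point, and report failures through
CHECK(), which expects a duration variable in scope. Schematically (the
exercised function is hypothetical):

    #include <test_progs.h>

    static int duration;   /* required by the CHECK() macro */

    void test_my_feature(void)
    {
            int rc = exercise_feature();   /* hypothetical */

            /* replaces warnx() plus manual pass/fail bookkeeping */
            if (CHECK(rc, "exercise", "unexpected rc=%d\n", rc))
                    return;
    }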
diff --git a/patches.suse/selftests-bpf-Remove-too-strict-field-offset-relo-te.patch b/patches.suse/selftests-bpf-Remove-too-strict-field-offset-relo-te.patch
new file mode 100644
index 0000000000..0a7d0c61a9
--- /dev/null
+++ b/patches.suse/selftests-bpf-Remove-too-strict-field-offset-relo-te.patch
@@ -0,0 +1,169 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Fri, 1 Nov 2019 15:28:06 -0700
+Subject: selftests/bpf: Remove too strict field offset relo test cases
+Patch-mainline: v5.5-rc1
+Git-commit: 42765ede5c54ca915de5bfeab83be97207e46f68
+References: bsc#1155518
+
+As libbpf is going to gain support for more field relocations, including
+field size, some restrictions requiring an exact size match are going to be
+lifted. Remove the test cases that explicitly test such failures.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20191101222810.1246166-2-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c | 3
+ tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c | 3
+ tools/testing/selftests/bpf/progs/core_reloc_types.h | 70 ----------
+ 9 files changed, 4 insertions(+), 90 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
+ delete mode 100644 tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
+
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_arrays___err_wrong_val_type x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_arrays___err_wrong_val_type1 x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_arrays___err_wrong_val_type2 x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_ints___err_bitfield x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_ints___err_wrong_sz_16 x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_ints___err_wrong_sz_32 x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_ints___err_wrong_sz_64 x) {}
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_ints___err_wrong_sz_8 x) {}
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -386,14 +386,7 @@ struct core_reloc_arrays___err_non_array
+ struct core_reloc_arrays_substruct d[1][2];
+ };
+
+-struct core_reloc_arrays___err_wrong_val_type1 {
+- char a[5]; /* char instead of int */
+- char b[2][3][4];
+- struct core_reloc_arrays_substruct c[3];
+- struct core_reloc_arrays_substruct d[1][2];
+-};
+-
+-struct core_reloc_arrays___err_wrong_val_type2 {
++struct core_reloc_arrays___err_wrong_val_type {
+ int a[5];
+ char b[2][3][4];
+ int c[3]; /* value is not a struct */
+@@ -589,67 +582,6 @@ struct core_reloc_ints___bool {
+ int64_t s64_field;
+ };
+
+-struct core_reloc_ints___err_bitfield {
+- uint8_t u8_field;
+- int8_t s8_field;
+- uint16_t u16_field;
+- int16_t s16_field;
+- uint32_t u32_field: 32; /* bitfields are not supported */
+- int32_t s32_field;
+- uint64_t u64_field;
+- int64_t s64_field;
+-};
+-
+-struct core_reloc_ints___err_wrong_sz_8 {
+- uint16_t u8_field; /* not 8-bit anymore */
+- int16_t s8_field; /* not 8-bit anymore */
+-
+- uint16_t u16_field;
+- int16_t s16_field;
+- uint32_t u32_field;
+- int32_t s32_field;
+- uint64_t u64_field;
+- int64_t s64_field;
+-};
+-
+-struct core_reloc_ints___err_wrong_sz_16 {
+- uint8_t u8_field;
+- int8_t s8_field;
+-
+- uint32_t u16_field; /* not 16-bit anymore */
+- int32_t s16_field; /* not 16-bit anymore */
+-
+- uint32_t u32_field;
+- int32_t s32_field;
+- uint64_t u64_field;
+- int64_t s64_field;
+-};
+-
+-struct core_reloc_ints___err_wrong_sz_32 {
+- uint8_t u8_field;
+- int8_t s8_field;
+- uint16_t u16_field;
+- int16_t s16_field;
+-
+- uint64_t u32_field; /* not 32-bit anymore */
+- int64_t s32_field; /* not 32-bit anymore */
+-
+- uint64_t u64_field;
+- int64_t s64_field;
+-};
+-
+-struct core_reloc_ints___err_wrong_sz_64 {
+- uint8_t u8_field;
+- int8_t s8_field;
+- uint16_t u16_field;
+- int16_t s16_field;
+- uint32_t u32_field;
+- int32_t s32_field;
+-
+- uint32_t u64_field; /* not 64-bit anymore */
+- int32_t s64_field; /* not 64-bit anymore */
+-};
+-
+ /*
+ * MISC
+ */
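+
These "wrong size" cases stop being failures because of the field-size
relocation support pulled in elsewhere in this series (see
patches.suse/libbpf-Add-support-for-field-size-relocations.patch): instead of
assuming the locally declared width, a program can ask for the target
kernel's actual field size. A sketch of the underlying Clang builtin (treat
the exact kind constant as an assumption from that series):

    /* sz relocates to the field's width on the *target* kernel, even if
     * the local definition declared a different width
     */
    unsigned sz = __builtin_preserve_field_info(s->u32_field,
                                                BPF_FIELD_BYTE_SIZE);
    unsigned long long val = 0;

    /* little-endian assumption: a narrow read lands in the low bytes */
    bpf_probe_read(&val, sz,
                   (const void *)__builtin_preserve_access_index(&s->u32_field));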
diff --git a/patches.suse/selftests-bpf-Split-off-tracing-only-helpers-into-bp.patch b/patches.suse/selftests-bpf-Split-off-tracing-only-helpers-into-bp.patch
new file mode 100644
index 0000000000..d7b104e20e
--- /dev/null
+++ b/patches.suse/selftests-bpf-Split-off-tracing-only-helpers-into-bp.patch
@@ -0,0 +1,595 @@
+From: Andrii Nakryiko <andriin@fb.com>
+Date: Tue, 8 Oct 2019 10:59:39 -0700
+Subject: selftests/bpf: Split off tracing-only helpers into bpf_tracing.h
+Patch-mainline: v5.5-rc1
+Git-commit: 3ac4dbe3dd8943450e0366f8174fbfc286ea8f19
+References: bsc#1155518
+
+Split off the PT_REGS-related helpers into a bpf_tracing.h header. Adjust
+selftests and samples to include it where necessary.
+
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Link: https://lore.kernel.org/bpf/20191008175942.1769476-5-andriin@fb.com
+Acked-by: Gary Lin <glin@suse.com>
+---
+ samples/bpf/map_perf_test_kern.c | 1
+ samples/bpf/offwaketime_kern.c | 1
+ samples/bpf/sampleip_kern.c | 1
+ samples/bpf/spintest_kern.c | 1
+ samples/bpf/test_map_in_map_kern.c | 1
+ samples/bpf/test_overhead_kprobe_kern.c | 1
+ samples/bpf/test_probe_write_user_kern.c | 1
+ samples/bpf/trace_event_kern.c | 1
+ samples/bpf/tracex1_kern.c | 1
+ samples/bpf/tracex2_kern.c | 1
+ samples/bpf/tracex3_kern.c | 1
+ samples/bpf/tracex4_kern.c | 1
+ samples/bpf/tracex5_kern.c | 1
+ tools/testing/selftests/bpf/bpf_helpers.h | 190 -----------------------------
+ tools/testing/selftests/bpf/bpf_tracing.h | 195 ++++++++++++++++++++++++++++++
+ tools/testing/selftests/bpf/progs/loop1.c | 1
+ tools/testing/selftests/bpf/progs/loop2.c | 1
+ tools/testing/selftests/bpf/progs/loop3.c | 1
+ 18 files changed, 211 insertions(+), 190 deletions(-)
+ create mode 100644 tools/testing/selftests/bpf/bpf_tracing.h
+
+--- a/samples/bpf/map_perf_test_kern.c
++++ b/samples/bpf/map_perf_test_kern.c
+@@ -10,6 +10,7 @@
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
+ #include "bpf_legacy.h"
++#include "bpf_tracing.h"
+
+ #define MAX_ENTRIES 1000
+ #define MAX_NR_CPUS 1024
+--- a/samples/bpf/offwaketime_kern.c
++++ b/samples/bpf/offwaketime_kern.c
+@@ -6,6 +6,7 @@
+ */
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+ #include <uapi/linux/ptrace.h>
+ #include <uapi/linux/perf_event.h>
+ #include <linux/version.h>
+--- a/samples/bpf/sampleip_kern.c
++++ b/samples/bpf/sampleip_kern.c
+@@ -9,6 +9,7 @@
+ #include <uapi/linux/bpf.h>
+ #include <uapi/linux/bpf_perf_event.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ #define MAX_IPS 8192
+
+--- a/samples/bpf/spintest_kern.c
++++ b/samples/bpf/spintest_kern.c
+@@ -10,6 +10,7 @@
+ #include <uapi/linux/bpf.h>
+ #include <uapi/linux/perf_event.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+--- a/samples/bpf/test_map_in_map_kern.c
++++ b/samples/bpf/test_map_in_map_kern.c
+@@ -12,6 +12,7 @@
+ #include <uapi/linux/in6.h>
+ #include "bpf_helpers.h"
+ #include "bpf_legacy.h"
++#include "bpf_tracing.h"
+
+ #define MAX_NR_PORTS 65536
+
+--- a/samples/bpf/test_overhead_kprobe_kern.c
++++ b/samples/bpf/test_overhead_kprobe_kern.c
+@@ -8,6 +8,7 @@
+ #include <linux/ptrace.h>
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ #define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+
+--- a/samples/bpf/test_probe_write_user_kern.c
++++ b/samples/bpf/test_probe_write_user_kern.c
+@@ -9,6 +9,7 @@
+ #include <uapi/linux/bpf.h>
+ #include <linux/version.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct bpf_map_def SEC("maps") dnat_map = {
+ .type = BPF_MAP_TYPE_HASH,
+--- a/samples/bpf/trace_event_kern.c
++++ b/samples/bpf/trace_event_kern.c
+@@ -10,6 +10,7 @@
+ #include <uapi/linux/bpf_perf_event.h>
+ #include <uapi/linux/perf_event.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct key_t {
+ char comm[TASK_COMM_LEN];
+--- a/samples/bpf/tracex1_kern.c
++++ b/samples/bpf/tracex1_kern.c
+@@ -9,6 +9,7 @@
+ #include <uapi/linux/bpf.h>
+ #include <linux/version.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ #define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+
+--- a/samples/bpf/tracex2_kern.c
++++ b/samples/bpf/tracex2_kern.c
+@@ -9,6 +9,7 @@
+ #include <linux/version.h>
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+--- a/samples/bpf/tracex3_kern.c
++++ b/samples/bpf/tracex3_kern.c
+@@ -9,6 +9,7 @@
+ #include <linux/version.h>
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+--- a/samples/bpf/tracex4_kern.c
++++ b/samples/bpf/tracex4_kern.c
+@@ -8,6 +8,7 @@
+ #include <linux/version.h>
+ #include <uapi/linux/bpf.h>
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ struct pair {
+ u64 val;
+--- a/samples/bpf/tracex5_kern.c
++++ b/samples/bpf/tracex5_kern.c
+@@ -11,6 +11,7 @@
+ #include <uapi/linux/unistd.h>
+ #include "syscall_nrs.h"
+ #include "bpf_helpers.h"
++#include "bpf_tracing.h"
+
+ #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
+
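[Editorial note on the tracex5 context above: PROG(F) stringifies F into the kprobe section name and pastes it into the handler symbol, so handlers declared through it also depend on the new include for their pt_regs accessors. A sketch of the expansion, with sys_read chosen arbitrarily:

	/* PROG(sys_read) expands to:
	 *
	 *   SEC("kprobe/sys_read") int bpf_func_sys_read
	 *
	 * so a handler is written as:
	 */
	PROG(sys_read)(struct pt_regs *ctx)
	{
		long count = PT_REGS_PARM3(ctx);	/* needs bpf_tracing.h */
		return 0;
	}
]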
+--- a/tools/testing/selftests/bpf/bpf_helpers.h
++++ b/tools/testing/selftests/bpf/bpf_helpers.h
+@@ -32,196 +32,6 @@ struct bpf_map_def {
+ unsigned int map_flags;
+ };
+
+-/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+-#if defined(__TARGET_ARCH_x86)
+- #define bpf_target_x86
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_s390)
+- #define bpf_target_s390
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_arm)
+- #define bpf_target_arm
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_arm64)
+- #define bpf_target_arm64
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_mips)
+- #define bpf_target_mips
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_powerpc)
+- #define bpf_target_powerpc
+- #define bpf_target_defined
+-#elif defined(__TARGET_ARCH_sparc)
+- #define bpf_target_sparc
+- #define bpf_target_defined
+-#else
+- #undef bpf_target_defined
+-#endif
+-
+-/* Fall back to what the compiler says */
+-#ifndef bpf_target_defined
+-#if defined(__x86_64__)
+- #define bpf_target_x86
+-#elif defined(__s390__)
+- #define bpf_target_s390
+-#elif defined(__arm__)
+- #define bpf_target_arm
+-#elif defined(__aarch64__)
+- #define bpf_target_arm64
+-#elif defined(__mips__)
+- #define bpf_target_mips
+-#elif defined(__powerpc__)
+- #define bpf_target_powerpc
+-#elif defined(__sparc__)
+- #define bpf_target_sparc
+-#endif
+-#endif
+-
+-#if defined(bpf_target_x86)
+-
+-#ifdef __KERNEL__
+-#define PT_REGS_PARM1(x) ((x)->di)
+-#define PT_REGS_PARM2(x) ((x)->si)
+-#define PT_REGS_PARM3(x) ((x)->dx)
+-#define PT_REGS_PARM4(x) ((x)->cx)
+-#define PT_REGS_PARM5(x) ((x)->r8)
+-#define PT_REGS_RET(x) ((x)->sp)
+-#define PT_REGS_FP(x) ((x)->bp)
+-#define PT_REGS_RC(x) ((x)->ax)
+-#define PT_REGS_SP(x) ((x)->sp)
+-#define PT_REGS_IP(x) ((x)->ip)
+-#else
+-#ifdef __i386__
+-/* i386 kernel is built with -mregparm=3 */
+-#define PT_REGS_PARM1(x) ((x)->eax)
+-#define PT_REGS_PARM2(x) ((x)->edx)
+-#define PT_REGS_PARM3(x) ((x)->ecx)
+-#define PT_REGS_PARM4(x) 0
+-#define PT_REGS_PARM5(x) 0
+-#define PT_REGS_RET(x) ((x)->esp)
+-#define PT_REGS_FP(x) ((x)->ebp)
+-#define PT_REGS_RC(x) ((x)->eax)
+-#define PT_REGS_SP(x) ((x)->esp)
+-#define PT_REGS_IP(x) ((x)->eip)
+-#else
+-#define PT_REGS_PARM1(x) ((x)->rdi)
+-#define PT_REGS_PARM2(x) ((x)->rsi)
+-#define PT_REGS_PARM3(x) ((x)->rdx)
+-#define PT_REGS_PARM4(x) ((x)->rcx)
+-#define PT_REGS_PARM5(x) ((x)->r8)
+-#define PT_REGS_RET(x) ((x)->rsp)
+-#define PT_REGS_FP(x) ((x)->rbp)
+-#define PT_REGS_RC(x) ((x)->rax)
+-#define PT_REGS_SP(x) ((x)->rsp)
+-#define PT_REGS_IP(x) ((x)->rip)
+-#endif
+-#endif
+-
+-#elif defined(bpf_target_s390)
+-
+-/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+-struct pt_regs;
+-#define PT_REGS_S390 const volatile user_pt_regs
+-#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
+-#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
+-#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
+-#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
+-#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
+-#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
+-/* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
+-#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
+-#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
+-#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+-
+-#elif defined(bpf_target_arm)
+-
+-#define PT_REGS_PARM1(x) ((x)->uregs[0])
+-#define PT_REGS_PARM2(x) ((x)->uregs[1])
+-#define PT_REGS_PARM3(x) ((x)->uregs[2])
+-#define PT_REGS_PARM4(x) ((x)->uregs[3])
+-#define PT_REGS_PARM5(x) ((x)->uregs[4])
+-#define PT_REGS_RET(x) ((x)->uregs[14])
+-#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_RC(x) ((x)->uregs[0])
+-#define PT_REGS_SP(x) ((x)->uregs[13])
+-#define PT_REGS_IP(x) ((x)->uregs[12])
+-
+-#elif defined(bpf_target_arm64)
+-
+-/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+-struct pt_regs;
+-#define PT_REGS_ARM64 const volatile struct user_pt_regs
+-#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+-#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
+-#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
+-#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
+-#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
+-#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
+-/* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
+-#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+-#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
+-#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+-
+-#elif defined(bpf_target_mips)
+-
+-#define PT_REGS_PARM1(x) ((x)->regs[4])
+-#define PT_REGS_PARM2(x) ((x)->regs[5])
+-#define PT_REGS_PARM3(x) ((x)->regs[6])
+-#define PT_REGS_PARM4(x) ((x)->regs[7])
+-#define PT_REGS_PARM5(x) ((x)->regs[8])
+-#define PT_REGS_RET(x) ((x)->regs[31])
+-#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
+-#define PT_REGS_RC(x) ((x)->regs[1])
+-#define PT_REGS_SP(x) ((x)->regs[29])
+-#define PT_REGS_IP(x) ((x)->cp0_epc)
+-
+-#elif defined(bpf_target_powerpc)
+-
+-#define PT_REGS_PARM1(x) ((x)->gpr[3])
+-#define PT_REGS_PARM2(x) ((x)->gpr[4])
+-#define PT_REGS_PARM3(x) ((x)->gpr[5])
+-#define PT_REGS_PARM4(x) ((x)->gpr[6])
+-#define PT_REGS_PARM5(x) ((x)->gpr[7])
+-#define PT_REGS_RC(x) ((x)->gpr[3])
+-#define PT_REGS_SP(x) ((x)->sp)
+-#define PT_REGS_IP(x) ((x)->nip)
+-
+-#elif defined(bpf_target_sparc)
+-
+-#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
+-#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
+-#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
+-#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
+-#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
+-#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
+-#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
+-#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+-
+-/* Should this also be a bpf_target check for the sparc case? */
+-#if defined(__arch64__)
+-#define PT_REGS_IP(x) ((x)->tpc)
+-#else
+-#define PT_REGS_IP(x) ((x)->pc)
+-#endif
+-
+-#endif
+-
+-#if defined(bpf_target_powerpc)
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
+-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+-#elif defined(bpf_target_sparc)
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
+-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+-#else
+-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
+- bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+-#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
+- bpf_probe_read(&(ip), sizeof(ip), \
+- (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+-#endif
+-
+ /*
+ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
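[Editorial note: the block removed above ends with the kprobe return-IP helpers, which move to bpf_tracing.h unchanged. A hedged sketch of their use in a kretprobe — the probe target is hypothetical, bpf_printk() is the convenience macro from the selftests bpf_helpers.h, and note that the generic variant reads relative to the frame pointer, so it is only reliable with CONFIG_FRAME_POINTER, as the comments in the header itself warn:

	#include <uapi/linux/bpf.h>
	#include <linux/ptrace.h>
	#include "bpf_helpers.h"
	#include "bpf_tracing.h"

	SEC("kretprobe/kfree")
	int trace_kfree_ret(struct pt_regs *ctx)
	{
		unsigned long ip = 0;

		/* Expands per arch: ip = ctx->link on powerpc,
		 * ip = PT_REGS_RET(ctx) on sparc, and a
		 * bpf_probe_read() of FP + sizeof(ip) elsewhere.
		 */
		BPF_KRETPROBE_READ_RET_IP(ip, ctx);
		bpf_printk("kfree returned to %lx\n", ip);
		return 0;
	}

	char _license[] SEC("license") = "GPL";
]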
+--- /dev/null
++++ b/tools/testing/selftests/bpf/bpf_tracing.h
+@@ -0,0 +1,195 @@
++/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
++#ifndef __BPF_TRACING_H__
++#define __BPF_TRACING_H__
++
++/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
++#if defined(__TARGET_ARCH_x86)
++ #define bpf_target_x86
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_s390)
++ #define bpf_target_s390
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_arm)
++ #define bpf_target_arm
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_arm64)
++ #define bpf_target_arm64
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_mips)
++ #define bpf_target_mips
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_powerpc)
++ #define bpf_target_powerpc
++ #define bpf_target_defined
++#elif defined(__TARGET_ARCH_sparc)
++ #define bpf_target_sparc
++ #define bpf_target_defined
++#else
++ #undef bpf_target_defined
++#endif
++
++/* Fall back to what the compiler says */
++#ifndef bpf_target_defined
++#if defined(__x86_64__)
++ #define bpf_target_x86
++#elif defined(__s390__)
++ #define bpf_target_s390
++#elif defined(__arm__)
++ #define bpf_target_arm
++#elif defined(__aarch64__)
++ #define bpf_target_arm64
++#elif defined(__mips__)
++ #define bpf_target_mips
++#elif defined(__powerpc__)
++ #define bpf_target_powerpc
++#elif defined(__sparc__)
++ #define bpf_target_sparc
++#endif
++#endif
++
++#if defined(bpf_target_x86)
++
++#ifdef __KERNEL__
++#define PT_REGS_PARM1(x) ((x)->di)
++#define PT_REGS_PARM2(x) ((x)->si)
++#define PT_REGS_PARM3(x) ((x)->dx)
++#define PT_REGS_PARM4(x) ((x)->cx)
++#define PT_REGS_PARM5(x) ((x)->r8)
++#define PT_REGS_RET(x) ((x)->sp)
++#define PT_REGS_FP(x) ((x)->bp)
++#define PT_REGS_RC(x) ((x)->ax)
++#define PT_REGS_SP(x) ((x)->sp)
++#define PT_REGS_IP(x) ((x)->ip)
++#else
++#ifdef __i386__
++/* i386 kernel is built with -mregparm=3 */
++#define PT_REGS_PARM1(x) ((x)->eax)
++#define PT_REGS_PARM2(x) ((x)->edx)
++#define PT_REGS_PARM3(x) ((x)->ecx)
++#define PT_REGS_PARM4(x) 0
++#define PT_REGS_PARM5(x) 0
++#define PT_REGS_RET(x) ((x)->esp)
++#define PT_REGS_FP(x) ((x)->ebp)
++#define PT_REGS_RC(x) ((x)->eax)
++#define PT_REGS_SP(x) ((x)->esp)
++#define PT_REGS_IP(x) ((x)->eip)
++#else
++#define PT_REGS_PARM1(x) ((x)->rdi)
++#define PT_REGS_PARM2(x) ((x)->rsi)
++#define PT_REGS_PARM3(x) ((x)->rdx)
++#define PT_REGS_PARM4(x) ((x)->rcx)
++#define PT_REGS_PARM5(x) ((x)->r8)
++#define PT_REGS_RET(x) ((x)->rsp)
++#define PT_REGS_FP(x) ((x)->rbp)
++#define PT_REGS_RC(x) ((x)->rax)
++#define PT_REGS_SP(x) ((x)->rsp)
++#define PT_REGS_IP(x) ((x)->rip)
++#endif
++#endif
++
++#elif defined(bpf_target_s390)
++
++/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
++struct pt_regs;
++#define PT_REGS_S390 const volatile user_pt_regs
++#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
++#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
++#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
++#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
++#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
++#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
++/* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
++#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
++#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
++#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
++
++#elif defined(bpf_target_arm)
++
++#define PT_REGS_PARM1(x) ((x)->uregs[0])
++#define PT_REGS_PARM2(x) ((x)->uregs[1])
++#define PT_REGS_PARM3(x) ((x)->uregs[2])
++#define PT_REGS_PARM4(x) ((x)->uregs[3])
++#define PT_REGS_PARM5(x) ((x)->uregs[4])
++#define PT_REGS_RET(x) ((x)->uregs[14])
++#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_RC(x) ((x)->uregs[0])
++#define PT_REGS_SP(x) ((x)->uregs[13])
++#define PT_REGS_IP(x) ((x)->uregs[12])
++
++#elif defined(bpf_target_arm64)
++
++/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
++struct pt_regs;
++#define PT_REGS_ARM64 const volatile struct user_pt_regs
++#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
++#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
++#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
++#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
++#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
++#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
++/* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
++#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
++#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
++#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
++
++#elif defined(bpf_target_mips)
++
++#define PT_REGS_PARM1(x) ((x)->regs[4])
++#define PT_REGS_PARM2(x) ((x)->regs[5])
++#define PT_REGS_PARM3(x) ((x)->regs[6])
++#define PT_REGS_PARM4(x) ((x)->regs[7])
++#define PT_REGS_PARM5(x) ((x)->regs[8])
++#define PT_REGS_RET(x) ((x)->regs[31])
++#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
++#define PT_REGS_RC(x) ((x)->regs[1])
++#define PT_REGS_SP(x) ((x)->regs[29])
++#define PT_REGS_IP(x) ((x)->cp0_epc)
++
++#elif defined(bpf_target_powerpc)
++
++#define PT_REGS_PARM1(x) ((x)->gpr[3])
++#define PT_REGS_PARM2(x) ((x)->gpr[4])
++#define PT_REGS_PARM3(x) ((x)->gpr[5])
++#define PT_REGS_PARM4(x) ((x)->gpr[6])
++#define PT_REGS_PARM5(x) ((x)->gpr[7])
++#define PT_REGS_RC(x) ((x)->gpr[3])
++#define PT_REGS_SP(x) ((x)->sp)
++#define PT_REGS_IP(x) ((x)->nip)
++
++#elif defined(bpf_target_sparc)
++
++#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
++#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
++#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
++#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
++#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])