author    Miroslav Benes <mbenes@suse.cz>  2018-11-01 14:18:41 +0100
committer Miroslav Benes <mbenes@suse.cz>  2018-11-01 14:18:41 +0100
commit    da9b4ebd289ef9cc1bce2d81964fade8eb5b9098 (patch)
tree      dd10346ebb92474dc73edeb57b070ad20920de57
parent    ec5c23f2f0d1b7670687d685975321c2e60787e6 (diff)
parent    0c445bc20c89ad864e1d62311a481efdca8f048d (diff)
Merge branch 'bsc#1103098_15' into SLE15_Update_1
-rw-r--r--  bsc1103098/livepatch_bsc1103098.c                    66
-rw-r--r--  bsc1103098/livepatch_bsc1103098.h                    10
-rw-r--r--  bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.c    548
-rw-r--r--  bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.h     12
-rw-r--r--  bsc1103098/livepatch_bsc1103098_vmlinux.c           1120
-rw-r--r--  bsc1103098/livepatch_bsc1103098_vmlinux.h             20
-rw-r--r--  bsc1103098/patched_funcs.csv                           4
7 files changed, 1780 insertions, 0 deletions
diff --git a/bsc1103098/livepatch_bsc1103098.c b/bsc1103098/livepatch_bsc1103098.c
new file mode 100644
index 0000000..bfa36f6
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098.c
@@ -0,0 +1,66 @@
+/*
+ * livepatch_bsc1103098
+ *
+ * Fix for CVE-2018-5391, bsc#1103098
+ *
+ * Upstream commits:
+ * 56e2c94f055d ("inet: frag: enforce memory limits earlier")
+ * 4672694bd4f1 ("ipv4: frags: handle possible skb truesize change")
+ * 0ed4229b08c1 ("ipv6: defrag: drop non-last frags smaller than min mtu")
+ * 7969e5c40dfd ("ip: discard IPv4 datagrams with overlapping segments")
+ * 385114dec8a4 ("net: modify skb_rbtree_purge to return the truesize of all
+ * purged skbs.")
+ * fa0f527358bd ("ip: use rb trees for IP frag queue.")
+ * 70837ffe3085 ("ipv4: frags: precedence bug in ip_expire()")
+ *
+ * SLE12(-SP1) commits:
+ * none yet
+ *
+ * SLE12-SP2 and -SP3 commits:
+ * 8bde2925ce206adce28b6238d3c3ab6b7df60e5b
+ * 5cfee656d3eb7fb372f04e01c893d5184dc3db2c
+ * 5b8256ec0665fca693c418bf51e6e8dfec842fee
+ *
+ * SLE15 commits:
+ * bba51ae6043bea301aecddab13ce060bed5f1606
+ * d31209ff15ea37c73a99fb8a37f7310a071917d3
+ * 232ab4ae2a67c64e987f790bdee0ffb7c3ddabc1
+ * c5cd0491c10810d5e76a1f33139446482dc51538
+ *
+ *
+ * Copyright (c) 2018 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * Based on the original Linux kernel code. Other copyrights apply.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "livepatch_bsc1103098.h"
+
+int livepatch_bsc1103098_init(void)
+{
+ int r;
+
+ r = livepatch_bsc1103098_vmlinux_init();
+ if (r)
+ return r;
+
+ return livepatch_bsc1103098_nf_defrag_ipv6_init();
+}
+
+void livepatch_bsc1103098_cleanup(void)
+{
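+ /*
+ * The vmlinux part only resolves symbol addresses via
+ * __klp_resolve_kallsyms_relocs() and registers nothing persistent,
+ * so only the nf_defrag_ipv6 module notifier needs tearing down here.
+ */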
+ livepatch_bsc1103098_nf_defrag_ipv6_cleanup();
+}
diff --git a/bsc1103098/livepatch_bsc1103098.h b/bsc1103098/livepatch_bsc1103098.h
new file mode 100644
index 0000000..c6c9a5f
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098.h
@@ -0,0 +1,10 @@
+#ifndef _LIVEPATCH_BSC1103098_H
+#define _LIVEPATCH_BSC1103098_H
+
+#include "livepatch_bsc1103098_vmlinux.h"
+#include "livepatch_bsc1103098_nf_defrag_ipv6.h"
+
+int livepatch_bsc1103098_init(void);
+void livepatch_bsc1103098_cleanup(void);
+
+#endif /* _LIVEPATCH_BSC1103098_H */
diff --git a/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.c b/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.c
new file mode 100644
index 0000000..ed131e8
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.c
@@ -0,0 +1,548 @@
+/*
+ * livepatch_bsc1103098_nf_defrag_ipv6
+ *
+ * Fix for CVE-2018-5391, bsc#1103098 (nf_defrag_ipv6.ko part)
+ *
+ * Copyright (c) 2018 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * Based on the original Linux kernel code. Other copyrights apply.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/inet_frag.h>
+#include <net/ipv6.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include "livepatch_bsc1103098_vmlinux.h"
+#include "livepatch_bsc1103098_nf_defrag_ipv6.h"
+#include "kallsyms_relocs.h"
+
+#if !IS_MODULE(CONFIG_NF_DEFRAG_IPV6)
+#error "Live patch supports only CONFIG_NF_DEFRAG_IPV6=m"
+#endif
+
+#define KLP_PATCHED_MODULE "nf_defrag_ipv6"
+
+
+static struct inet_frags *klp_nf_frags;
+
+static unsigned int (*klp_nf_hash_frag)(__be32 id, const struct in6_addr *saddr,
+ const struct in6_addr *daddr);
+
+static struct klp_kallsyms_reloc klp_funcs[] = {
+ { "nf_frags", (void *)&klp_nf_frags, "nf_defrag_ipv6" },
+ { "nf_hash_frag", (void *)&klp_nf_hash_frag, "nf_defrag_ipv6" },
+};
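[Editor's note, not part of the commit: the klp_funcs table above pairs each unexported symbol with the static pointer it should land in, plus the owning module. The resolver lives in kallsyms_relocs.h, which this diff does not include. A minimal sketch of what resolution could look like — the struct layout is assumed from the three-field initializers above, and the kallsyms_on_each_symbol() iteration mirrors what the upstream livepatch core of this kernel generation does:

	#include <linux/kallsyms.h>
	#include <linux/module.h>
	#include <linux/string.h>

	struct klp_kallsyms_reloc {
		const char *symname;	/* symbol to resolve */
		void **addr;		/* where to store its address */
		const char *objname;	/* owning module, NULL for vmlinux */
	};

	struct klp_find_arg {
		const struct klp_kallsyms_reloc *reloc;
		unsigned long addr;
	};

	static int klp_find_cb(void *data, const char *name,
			       struct module *mod, unsigned long addr)
	{
		struct klp_find_arg *arg = data;
		const char *objname = arg->reloc->objname;

		/* Require the symbol to come from the right object. */
		if (objname ? (!mod || strcmp(mod->name, objname))
			    : (mod != NULL))
			return 0;
		if (strcmp(name, arg->reloc->symname))
			return 0;

		arg->addr = addr;
		return 1;	/* non-zero return stops the iteration */
	}

	static int __klp_resolve_kallsyms_relocs(struct klp_kallsyms_reloc *relocs,
						 unsigned long count)
	{
		unsigned long i;

		for (i = 0; i < count; i++) {
			struct klp_find_arg arg = { .reloc = &relocs[i], };

			if (!kallsyms_on_each_symbol(klp_find_cb, &arg))
				return -ENOENT;
			*relocs[i].addr = (void *)arg.addr;
		}
		return 0;
	}

End of editor's note.]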
+
+
+/* from net/ipv6/netfilter/nf_conntrack_reasm.c */
+#define klp_ipv6_nf_pr_fmt(fmt) "IPv6-nf: " fmt
+
+#undef pr_fmt
+#define pr_fmt(fmt) klp_ipv6_nf_pr_fmt(fmt)
+
+struct nf_ct_frag6_skb_cb
+{
+ struct inet6_skb_parm h;
+ int offset;
+};
+
+#define KLP_NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
+
+/* inlined */
+static inline u8 klp_ip6_frag_ecn(const struct ipv6hdr *ipv6h)
+{
+ return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
+}
+
+/* inlined */
+static inline struct frag_queue *klp_fq_find(struct net *net, __be32 id,
+ u32 user, struct in6_addr *src,
+ struct in6_addr *dst, int iif,
+ u8 ecn)
+{
+ struct inet_frag_queue *q;
+ struct ip6_create_arg arg;
+ unsigned int hash;
+
+ arg.id = id;
+ arg.user = user;
+ arg.src = src;
+ arg.dst = dst;
+ arg.iif = iif;
+ arg.ecn = ecn;
+
+ local_bh_disable();
+ hash = klp_nf_hash_frag(id, src, dst);
+
+ q = klp_inet_frag_find(&net->nf_frag.frags, klp_nf_frags, &arg, hash);
+ local_bh_enable();
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+ }
+ return container_of(q, struct frag_queue, q);
+}
+
+/* inlined */
+static int klp_nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+ const struct frag_hdr *fhdr, int nhoff)
+{
+ struct sk_buff *prev, *next;
+ unsigned int payload_len;
+ int offset, end;
+ u8 ecn;
+
+ if (fq->q.flags & INET_FRAG_COMPLETE) {
+ pr_debug("Already completed\n");
+ goto err;
+ }
+
+ payload_len = ntohs(ipv6_hdr(skb)->payload_len);
+
+ offset = ntohs(fhdr->frag_off) & ~0x7;
+ end = offset + (payload_len -
+ ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
+
+ if ((unsigned int)end > IPV6_MAXPLEN) {
+ pr_debug("offset is too large.\n");
+ return -1;
+ }
+
+ ecn = klp_ip6_frag_ecn(ipv6_hdr(skb));
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ const unsigned char *nh = skb_network_header(skb);
+ skb->csum = csum_sub(skb->csum,
+ csum_partial(nh, (u8 *)(fhdr + 1) - nh,
+ 0));
+ }
+
+ /* Is this the final fragment? */
+ if (!(fhdr->frag_off & htons(IP6_MF))) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < fq->q.len ||
+ ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
+ pr_debug("already received last fragment\n");
+ goto err;
+ }
+ fq->q.flags |= INET_FRAG_LAST_IN;
+ fq->q.len = end;
+ } else {
+ /* Check if the fragment is rounded to 8 bytes.
+ * Required by the RFC.
+ */
+ if (end & 0x7) {
+ /* RFC2460 says always send parameter problem in
+ * this case. -DaveM
+ */
+ pr_debug("end of fragment not rounded to 8 bytes.\n");
+ return -1;
+ }
+ if (end > fq->q.len) {
+ /* Some bits beyond end -> corruption. */
+ if (fq->q.flags & INET_FRAG_LAST_IN) {
+ pr_debug("last packet already reached.\n");
+ goto err;
+ }
+ fq->q.len = end;
+ }
+ }
+
+ if (end == offset)
+ goto err;
+
+ /* Point into the IP datagram 'data' part. */
+ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
+ pr_debug("queue: message is too short.\n");
+ goto err;
+ }
+ if (pskb_trim_rcsum(skb, end - offset)) {
+ pr_debug("Can't trim\n");
+ goto err;
+ }
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = fq->q.fragments_tail;
+ if (!prev || KLP_NFCT_FRAG6_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = fq->q.fragments; next != NULL; next = next->next) {
+ if (KLP_NFCT_FRAG6_CB(next)->offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+found:
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
+ */
+
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (KLP_NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
+ goto discard_fq;
+
+ /* Look for overlap with succeeding segment. */
+ if (next && KLP_NFCT_FRAG6_CB(next)->offset < end)
+ goto discard_fq;
+
+ KLP_NFCT_FRAG6_CB(skb)->offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (!next)
+ fq->q.fragments_tail = skb;
+ if (prev)
+ prev->next = skb;
+ else
+ fq->q.fragments = skb;
+
+ if (skb->dev) {
+ fq->iif = skb->dev->ifindex;
+ skb->dev = NULL;
+ }
+ fq->q.stamp = skb->tstamp;
+ fq->q.meat += skb->len;
+ fq->ecn |= ecn;
+ if (payload_len > fq->q.max_size)
+ fq->q.max_size = payload_len;
+ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+ */
+ if (offset == 0) {
+ fq->nhoffset = nhoff;
+ fq->q.flags |= INET_FRAG_FIRST_IN;
+ }
+
+ return 0;
+
+discard_fq:
+ inet_frag_kill(&fq->q, klp_nf_frags);
+err:
+ return -1;
+}
+
+/* inlined */
+static bool
+klp_nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
+{
+ struct sk_buff *fp, *head = fq->q.fragments;
+ int payload_len;
+ u8 ecn;
+
+ inet_frag_kill(&fq->q, klp_nf_frags);
+
+ WARN_ON(head == NULL);
+ WARN_ON(KLP_NFCT_FRAG6_CB(head)->offset != 0);
+
+ ecn = ip_frag_ecn_table[fq->ecn];
+ if (unlikely(ecn == 0xff))
+ return false;
+
+ /* Unfragmented part is taken from the first segment. */
+ payload_len = ((head->data - skb_network_header(head)) -
+ sizeof(struct ipv6hdr) + fq->q.len -
+ sizeof(struct frag_hdr));
+ if (payload_len > IPV6_MAXPLEN) {
+ net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+ payload_len);
+ return false;
+ }
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ return false;
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments. */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (clone == NULL)
+ return false;
+
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+
+ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* morph head into last received skb: prev.
+ *
+ * This allows callers of ipv6 conntrack defrag to continue
+ * to use the last skb(frag) passed into the reasm engine.
+ * The last skb frag 'silently' turns into the full reassembled skb.
+ *
+ * Since prev is also part of q->fragments we have to clone it first.
+ */
+ if (head != prev) {
+ struct sk_buff *iter;
+
+ fp = skb_clone(prev, GFP_ATOMIC);
+ if (!fp)
+ return false;
+
+ fp->next = prev->next;
+
+ iter = head;
+ while (iter) {
+ if (iter->next == prev) {
+ iter->next = fp;
+ break;
+ }
+ iter = iter->next;
+ }
+
+ skb_morph(prev, head);
+ prev->next = head->next;
+ consume_skb(head);
+ head = prev;
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+ * header in order to calculate ICV correctly. */
+ skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
+ memmove(head->head + sizeof(struct frag_hdr), head->head,
+ (head->data - head->head) - sizeof(struct frag_hdr));
+ head->mac_header += sizeof(struct frag_hdr);
+ head->network_header += sizeof(struct frag_hdr);
+
+ skb_shinfo(head)->frag_list = head->next;
+ skb_reset_transport_header(head);
+ skb_push(head, head->data - skb_network_header(head));
+
+ for (fp = head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ }
+ sub_frag_mem_limit(fq->q.net, head->truesize);
+
+ head->ignore_df = 1;
+ head->next = NULL;
+ head->dev = dev;
+ head->tstamp = fq->q.stamp;
+ ipv6_hdr(head)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
+ IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+
+ /* Yes, and fold redundant checksum back. 8) */
+ if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_partial(skb_network_header(head),
+ skb_network_header_len(head),
+ head->csum);
+
+ fq->q.fragments = NULL;
+ fq->q.fragments_tail = NULL;
+
+ return true;
+}
+
+/* inlined */
+static int
+klp_find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
+{
+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ const int netoff = skb_network_offset(skb);
+ u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
+ int start = netoff + sizeof(struct ipv6hdr);
+ int len = skb->len - start;
+ u8 prevhdr = NEXTHDR_IPV6;
+
+ while (nexthdr != NEXTHDR_FRAGMENT) {
+ struct ipv6_opt_hdr hdr;
+ int hdrlen;
+
+ if (!ipv6_ext_hdr(nexthdr)) {
+ return -1;
+ }
+ if (nexthdr == NEXTHDR_NONE) {
+ pr_debug("next header is none\n");
+ return -1;
+ }
+ if (len < (int)sizeof(struct ipv6_opt_hdr)) {
+ pr_debug("too short\n");
+ return -1;
+ }
+ if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+ BUG();
+ if (nexthdr == NEXTHDR_AUTH)
+ hdrlen = (hdr.hdrlen+2)<<2;
+ else
+ hdrlen = ipv6_optlen(&hdr);
+
+ prevhdr = nexthdr;
+ prev_nhoff = start;
+
+ nexthdr = hdr.nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+
+ if (len < 0)
+ return -1;
+
+ *prevhdrp = prevhdr;
+ *prevhoff = prev_nhoff;
+ *fhoff = start;
+
+ return 0;
+}
+
+
+
+/* patched */
+int klp_nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+{
+ struct net_device *dev = skb->dev;
+ int fhoff, nhoff, ret;
+ struct frag_hdr *fhdr;
+ struct frag_queue *fq;
+ struct ipv6hdr *hdr;
+ u8 prevhdr;
+
+ /* Jumbo payload inhibits frag. header */
+ if (ipv6_hdr(skb)->payload_len == 0) {
+ pr_debug("payload len = 0\n");
+ return 0;
+ }
+
+ if (klp_find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
+ return 0;
+
+ if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
+ return -ENOMEM;
+
+ skb_set_transport_header(skb, fhoff);
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
+ /*
+ * Fix CVE-2018-5391
+ * +4 lines
+ */
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ return -EINVAL;
+
+ skb_orphan(skb);
+ fq = klp_fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, klp_ip6_frag_ecn(hdr));
+ if (fq == NULL) {
+ pr_debug("Can't find and can't create new queue\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&fq->q.lock);
+
+ if (klp_nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
+ * must be returned.
+ */
+ ret = -EINPROGRESS;
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len &&
+ klp_nf_ct_frag6_reasm(fq, skb, dev))
+ ret = 0;
+
+out_unlock:
+ spin_unlock_bh(&fq->q.lock);
+ inet_frag_put(&fq->q, klp_nf_frags);
+ return ret;
+}
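[Editor's note, not part of the commit: an illustration of the "+4 lines" check added above (upstream 0ed4229b08c1). skb->len - skb_network_offset(skb) is the size of the whole IPv6 packet carrying the fragment, and IPv6 guarantees a minimum link MTU of 1280 bytes, so a conforming sender never emits a non-last fragment smaller than that. Anything below the limit with IP6_MF set is rejected before it can occupy the reassembly queue — which is exactly the tiny-fragment flood that CVE-2018-5391 ("FragmentSmack") relies on. In isolation:

	#include <stdbool.h>

	#define EXAMPLE_IPV6_MIN_MTU 1280	/* IPV6_MIN_MTU in <net/ipv6.h> */

	/* pkt_len: IPv6 header plus payload; more_frags: the IP6_MF bit. */
	static bool example_drop_tiny_nonlast_frag(unsigned int pkt_len,
						   bool more_frags)
	{
		/* A 100-byte fragment with IP6_MF set is rejected; a
		 * 100-byte *last* fragment is still accepted. */
		return more_frags && pkt_len < EXAMPLE_IPV6_MIN_MTU;
	}

End of editor's note.]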
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+
+static int livepatch_bsc1103098_nf_defrag_ipv6_module_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct module *mod = data;
+ int ret;
+
+ if (action != MODULE_STATE_COMING || strcmp(mod->name, KLP_PATCHED_MODULE))
+ return 0;
+
+ ret = __klp_resolve_kallsyms_relocs(klp_funcs, ARRAY_SIZE(klp_funcs));
+ WARN(ret, "livepatch: delayed kallsyms lookup failed. System is broken and can crash.\n");
+
+ return ret;
+}
+
+static struct notifier_block livepatch_bsc1103098_nf_defrag_ipv6_module_nb = {
+ .notifier_call = livepatch_bsc1103098_nf_defrag_ipv6_module_notify,
+ .priority = INT_MIN+1,
+};
+
+int livepatch_bsc1103098_nf_defrag_ipv6_init(void)
+{
+ int ret;
+
+ mutex_lock(&module_mutex);
+ if (find_module(KLP_PATCHED_MODULE)) {
+ ret = __klp_resolve_kallsyms_relocs(klp_funcs,
+ ARRAY_SIZE(klp_funcs));
+ if (ret)
+ goto out;
+ }
+
+ ret = register_module_notifier(&livepatch_bsc1103098_nf_defrag_ipv6_module_nb);
+out:
+ mutex_unlock(&module_mutex);
+ return ret;
+}
+
+void livepatch_bsc1103098_nf_defrag_ipv6_cleanup(void)
+{
+ unregister_module_notifier(&livepatch_bsc1103098_nf_defrag_ipv6_module_nb);
+}
diff --git a/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.h b/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.h
new file mode 100644
index 0000000..4112ab0
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098_nf_defrag_ipv6.h
@@ -0,0 +1,12 @@
+#ifndef _LIVEPATCH_BSC1103098_NF_DEFRAG_IPV6_H
+#define _LIVEPATCH_BSC1103098_NF_DEFRAG_IPV6_H
+
+#include <linux/types.h>
+
+int livepatch_bsc1103098_nf_defrag_ipv6_init(void);
+void livepatch_bsc1103098_nf_defrag_ipv6_cleanup(void);
+
+int klp_nf_ct_frag6_gather(struct net *net, struct sk_buff *skb,
+ u32 user);
+
+#endif /* _LIVEPATCH_BSC1103098_NF_DEFRAG_IPV6_H */
diff --git a/bsc1103098/livepatch_bsc1103098_vmlinux.c b/bsc1103098/livepatch_bsc1103098_vmlinux.c
new file mode 100644
index 0000000..5b28621
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098_vmlinux.c
@@ -0,0 +1,1120 @@
+/*
+ * livepatch_bsc1103098_vmlinux
+ *
+ * Fix for CVE-2018-5391, bsc#1103098 (vmlinux part)
+ *
+ * Copyright (c) 2018 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * Based on the original Linux kernel code. Other copyrights apply.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/inet_frag.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/inet_ecn.h>
+#include "livepatch_bsc1103098_vmlinux.h"
+#include "kallsyms_relocs.h"
+
+static struct inet_frags *klp_ip4_frags;
+static struct inet_frags *klp_ip6_frags;
+
+static struct inet_frag_bucket *
+(*klp_get_frag_bucket_locked)(struct inet_frag_queue *fq, struct inet_frags *f);
+static unsigned int (*klp_ipqhashfn)(__be16 id, __be32 saddr, __be32 daddr,
+ u8 prot);
+static unsigned int (*klp_inet6_hash_frag)(__be32 id,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr);
+static void (*klp_icmpv6_param_prob)(struct sk_buff *skb, u8 code, int pos);
+
+static struct klp_kallsyms_reloc klp_funcs[] = {
+ { "ip4_frags", (void *)&klp_ip4_frags },
+ { "ip6_frags", (void *)&klp_ip6_frags },
+ { "get_frag_bucket_locked", (void *)&klp_get_frag_bucket_locked },
+ { "ipqhashfn", (void *)&klp_ipqhashfn },
+ { "inet6_hash_frag", (void *)&klp_inet6_hash_frag },
+ { "icmpv6_param_prob", (void *)&klp_icmpv6_param_prob },
+};
+
+
+/* from net/ipv4/inet_fragment.c */
+#define KLP_INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
+/* inlined */
+static bool klp_inet_frag_may_rebuild(struct inet_frags *f)
+{
+ return time_after(jiffies,
+ f->last_rebuild_jiffies + KLP_INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+/* optimized */
+static void klp_inet_frag_schedule_worker(struct inet_frags *f)
+{
+ if (unlikely(!work_pending(&f->frags_work)))
+ schedule_work(&f->frags_work);
+}
+
+/* inlined */
+static struct inet_frag_queue *
+klp_inet_frag_intern(struct netns_frags *nf,
+ struct inet_frag_queue *qp_in,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_bucket *hb = klp_get_frag_bucket_locked(qp_in, f);
+ struct inet_frag_queue *qp;
+
+#ifdef CONFIG_SMP
+ /* With SMP race we have to recheck hash table, because
+ * such entry could have been created on other cpu before
+ * we acquired hash bucket lock.
+ */
+ hlist_for_each_entry(qp, &hb->chain, list) {
+ if (qp->net == nf && f->match(qp, arg)) {
+ atomic_inc(&qp->refcnt);
+ spin_unlock(&hb->chain_lock);
+ qp_in->flags |= INET_FRAG_COMPLETE;
+ inet_frag_put(qp_in, f);
+ return qp;
+ }
+ }
+#endif
+ qp = qp_in;
+ if (!mod_timer(&qp->timer, jiffies + nf->timeout))
+ atomic_inc(&qp->refcnt);
+
+ atomic_inc(&qp->refcnt);
+ hlist_add_head(&qp->list, &hb->chain);
+
+ spin_unlock(&hb->chain_lock);
+
+ return qp;
+}
+
+
+/* from net/ipv4/ip_fragment.c */
+#define klp_ipv4_pr_fmt(fmt) "IPv4: " fmt
+
+#undef pr_fmt
+#define pr_fmt(fmt) klp_ipv4_pr_fmt(fmt)
+
+struct ipfrag_skb_cb
+{
+ struct inet_skb_parm h;
+ int offset;
+};
+
+#define KLP_FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+struct ipq {
+ struct inet_frag_queue q;
+
+ u32 user;
+ __be32 saddr;
+ __be32 daddr;
+ __be16 id;
+ u8 protocol;
+ u8 ecn; /* RFC3168 support */
+ u16 max_df_size; /* largest frag with DF set seen */
+ int iif;
+ int vif; /* L3 master device index */
+ unsigned int rid;
+ struct inet_peer *peer;
+};
+
+/* inlined */
+static u8 klp_ip4_frag_ecn(u8 tos)
+{
+ return 1 << (tos & INET_ECN_MASK);
+}
+
+struct ip4_create_arg {
+ struct iphdr *iph;
+ u32 user;
+ int vif;
+};
+
+/* inlined */
+static void klp_ipq_put(struct ipq *ipq)
+{
+ inet_frag_put(&ipq->q, klp_ip4_frags);
+}
+
+/* inlined */
+static void klp_ipq_kill(struct ipq *ipq)
+{
+ inet_frag_kill(&ipq->q, klp_ip4_frags);
+}
+
+/* inlined */
+static struct ipq *klp_ip_find(struct net *net, struct iphdr *iph,
+ u32 user, int vif)
+{
+ struct inet_frag_queue *q;
+ struct ip4_create_arg arg;
+ unsigned int hash;
+
+ arg.iph = iph;
+ arg.user = user;
+ arg.vif = vif;
+
+ hash = klp_ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+
+ q = klp_inet_frag_find(&net->ipv4.frags, klp_ip4_frags, &arg, hash);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, klp_ipv4_pr_fmt());
+ return NULL;
+ }
+ return container_of(q, struct ipq, q);
+}
+
+/* inlined */
+static int klp_ip_frag_too_far(struct ipq *qp)
+{
+ struct inet_peer *peer = qp->peer;
+ unsigned int max = qp->q.net->max_dist;
+ unsigned int start, end;
+
+ int rc;
+
+ if (!peer || !max)
+ return 0;
+
+ start = qp->rid;
+ end = atomic_inc_return(&peer->rid);
+ qp->rid = end;
+
+ rc = qp->q.fragments && (end - start) > max;
+
+ if (rc) {
+ struct net *net;
+
+ net = container_of(qp->q.net, struct net, ipv4.frags);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ }
+
+ return rc;
+}
+
+/* inlined */
+static int klp_ip_frag_reinit(struct ipq *qp)
+{
+ struct sk_buff *fp;
+ unsigned int sum_truesize = 0;
+
+ if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
+ atomic_inc(&qp->q.refcnt);
+ return -ETIMEDOUT;
+ }
+
+ fp = qp->q.fragments;
+ do {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp->q.flags = 0;
+ qp->q.len = 0;
+ qp->q.meat = 0;
+ qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
+ qp->iif = 0;
+ qp->ecn = 0;
+
+ return 0;
+}
+
+/* inlined */
+static int klp_ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ struct net_device *dev)
+{
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct iphdr *iph;
+ struct sk_buff *fp, *head = qp->q.fragments;
+ int len;
+ int ihlen;
+ int err;
+ u8 ecn;
+
+ klp_ipq_kill(qp);
+
+ ecn = ip_frag_ecn_table[qp->ecn];
+ if (unlikely(ecn == 0xff)) {
+ err = -EINVAL;
+ goto out_fail;
+ }
+ /* Make the one we just received the head. */
+ if (prev) {
+ head = prev->next;
+ fp = skb_clone(head, GFP_ATOMIC);
+ if (!fp)
+ goto out_nomem;
+
+ fp->next = head->next;
+ if (!fp->next)
+ qp->q.fragments_tail = fp;
+ prev->next = fp;
+
+ skb_morph(head, qp->q.fragments);
+ head->next = qp->q.fragments->next;
+
+ consume_skb(qp->q.fragments);
+ qp->q.fragments = head;
+ }
+
+ WARN_ON(!head);
+ WARN_ON(KLP_FRAG_CB(head)->offset != 0);
+
+ /* Allocate a new buffer for the datagram. */
+ ihlen = ip_hdrlen(head);
+ len = ihlen + qp->q.len;
+
+ err = -E2BIG;
+ if (len > 65535)
+ goto out_oversize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ goto out_nomem;
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments. */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ goto out_nomem;
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(qp->q.net, clone->truesize);
+ }
+
+ skb_shinfo(head)->frag_list = head->next;
+ skb_push(head, head->data - skb_network_header(head));
+
+ for (fp=head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ }
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+ head->next = NULL;
+ head->dev = dev;
+ head->tstamp = qp->q.stamp;
+ IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
+
+ iph = ip_hdr(head);
+ iph->tot_len = htons(len);
+ iph->tos |= ecn;
+
+ /* When we set IP_DF on a refragmented skb we must also force a
+ * call to ip_fragment to avoid forwarding a DF-skb of size s while
+ * original sender only sent fragments of size f (where f < s).
+ *
+ * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
+ * frag seen to avoid sending tiny DF-fragments in case skb was built
+ * from one very small df-fragment and one large non-df frag.
+ */
+ if (qp->max_df_size == qp->q.max_size) {
+ IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ iph->frag_off = htons(IP_DF);
+ } else {
+ iph->frag_off = 0;
+ }
+
+ ip_send_check(iph);
+
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
+ qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
+ return 0;
+
+out_nomem:
+ net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
+ err = -ENOMEM;
+ goto out_fail;
+out_oversize:
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+out_fail:
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ return err;
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+
+/* from net/ipv6/reassembly.c */
+#define klp_ipv6_pr_fmt(fmt) "IPv6: " fmt
+
+#undef pr_fmt
+#define pr_fmt(fmt) klp_ipv6_pr_fmt(fmt)
+
+struct ip6frag_skb_cb {
+ struct inet6_skb_parm h;
+ int offset;
+};
+
+#define KLP_FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
+
+/* inlined */
+static u8 klp_ip6_frag_ecn(const struct ipv6hdr *ipv6h)
+{
+ return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
+}
+
+/* inlined */
+static struct frag_queue *
+klp_fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+ const struct in6_addr *dst, int iif, u8 ecn)
+{
+ struct inet_frag_queue *q;
+ struct ip6_create_arg arg;
+ unsigned int hash;
+
+ arg.id = id;
+ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ arg.src = src;
+ arg.dst = dst;
+ arg.iif = iif;
+ arg.ecn = ecn;
+
+ hash = klp_inet6_hash_frag(id, src, dst);
+
+ q = klp_inet_frag_find(&net->ipv6.frags, klp_ip6_frags, &arg, hash);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, klp_ipv6_pr_fmt());
+ return NULL;
+ }
+ return container_of(q, struct frag_queue, q);
+}
+
+/* inlined */
+static int klp_ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ struct net_device *dev)
+{
+ struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
+ struct sk_buff *fp, *head = fq->q.fragments;
+ int payload_len;
+ unsigned int nhoff;
+ int sum_truesize;
+ u8 ecn;
+
+ inet_frag_kill(&fq->q, klp_ip6_frags);
+
+ ecn = ip_frag_ecn_table[fq->ecn];
+ if (unlikely(ecn == 0xff))
+ goto out_fail;
+
+ /* Make the one we just received the head. */
+ if (prev) {
+ head = prev->next;
+ fp = skb_clone(head, GFP_ATOMIC);
+
+ if (!fp)
+ goto out_oom;
+
+ fp->next = head->next;
+ if (!fp->next)
+ fq->q.fragments_tail = fp;
+ prev->next = fp;
+
+ skb_morph(head, fq->q.fragments);
+ head->next = fq->q.fragments->next;
+
+ consume_skb(fq->q.fragments);
+ fq->q.fragments = head;
+ }
+
+ WARN_ON(head == NULL);
+ WARN_ON(KLP_FRAG6_CB(head)->offset != 0);
+
+ /* Unfragmented part is taken from the first segment. */
+ payload_len = ((head->data - skb_network_header(head)) -
+ sizeof(struct ipv6hdr) + fq->q.len -
+ sizeof(struct frag_hdr));
+ if (payload_len > IPV6_MAXPLEN)
+ goto out_oversize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ goto out_oom;
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments. */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ goto out_oom;
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+ * header in order to calculate ICV correctly. */
+ nhoff = fq->nhoffset;
+ skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
+ memmove(head->head + sizeof(struct frag_hdr), head->head,
+ (head->data - head->head) - sizeof(struct frag_hdr));
+ if (skb_mac_header_was_set(head))
+ head->mac_header += sizeof(struct frag_hdr);
+ head->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(head);
+ skb_push(head, head->data - skb_network_header(head));
+
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
+ }
+ sub_frag_mem_limit(fq->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
+ head->tstamp = fq->q.stamp;
+ ipv6_hdr(head)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
+ IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(head)->frag_max_size = fq->q.max_size;
+
+ /* Yes, and fold redundant checksum back. 8) */
+ skb_postpush_rcsum(head, skb_network_header(head),
+ skb_network_header_len(head));
+
+ rcu_read_lock();
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+ rcu_read_unlock();
+ fq->q.fragments = NULL;
+ fq->q.fragments_tail = NULL;
+ return 1;
+
+out_oversize:
+ net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
+ goto out_fail;
+out_oom:
+ net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
+out_fail:
+ rcu_read_lock();
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ rcu_read_unlock();
+ return -1;
+}
+
+/* inlined */
+static int klp_ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
+ struct frag_hdr *fhdr, int nhoff)
+{
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ int offset, end, fragsize;
+ struct net *net = dev_net(skb_dst(skb)->dev);
+ u8 ecn;
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto err;
+
+ offset = ntohs(fhdr->frag_off) & ~0x7;
+ end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
+ ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
+
+ if ((unsigned int)end > IPV6_MAXPLEN) {
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_INHDRERRORS);
+ klp_icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+ ((u8 *)&fhdr->frag_off -
+ skb_network_header(skb)));
+ return -1;
+ }
+
+ ecn = klp_ip6_frag_ecn(ipv6_hdr(skb));
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ const unsigned char *nh = skb_network_header(skb);
+ skb->csum = csum_sub(skb->csum,
+ csum_partial(nh, (u8 *)(fhdr + 1) - nh,
+ 0));
+ }
+
+ /* Is this the final fragment? */
+ if (!(fhdr->frag_off & htons(IP6_MF))) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < fq->q.len ||
+ ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
+ goto err;
+ fq->q.flags |= INET_FRAG_LAST_IN;
+ fq->q.len = end;
+ } else {
+ /* Check if the fragment is rounded to 8 bytes.
+ * Required by the RFC.
+ */
+ if (end & 0x7) {
+ /* RFC2460 says always send parameter problem in
+ * this case. -DaveM
+ */
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_INHDRERRORS);
+ klp_icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+ offsetof(struct ipv6hdr, payload_len));
+ return -1;
+ }
+ if (end > fq->q.len) {
+ /* Some bits beyond end -> corruption. */
+ if (fq->q.flags & INET_FRAG_LAST_IN)
+ goto err;
+ fq->q.len = end;
+ }
+ }
+
+ if (end == offset)
+ goto err;
+
+ /* Point into the IP datagram 'data' part. */
+ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
+ goto err;
+
+ if (pskb_trim_rcsum(skb, end - offset))
+ goto err;
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = fq->q.fragments_tail;
+ if (!prev || KLP_FRAG6_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = fq->q.fragments; next != NULL; next = next->next) {
+ if (KLP_FRAG6_CB(next)->offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+found:
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ */
+
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (KLP_FRAG6_CB(prev)->offset + prev->len) > offset)
+ goto discard_fq;
+
+ /* Look for overlap with succeeding segment. */
+ if (next && KLP_FRAG6_CB(next)->offset < end)
+ goto discard_fq;
+
+ KLP_FRAG6_CB(skb)->offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (!next)
+ fq->q.fragments_tail = skb;
+ if (prev)
+ prev->next = skb;
+ else
+ fq->q.fragments = skb;
+
+ dev = skb->dev;
+ if (dev) {
+ fq->iif = dev->ifindex;
+ skb->dev = NULL;
+ }
+ fq->q.stamp = skb->tstamp;
+ fq->q.meat += skb->len;
+ fq->ecn |= ecn;
+ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ fragsize = -skb_network_offset(skb) + skb->len;
+ if (fragsize > fq->q.max_size)
+ fq->q.max_size = fragsize;
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+ */
+ if (offset == 0) {
+ fq->nhoffset = nhoff;
+ fq->q.flags |= INET_FRAG_FIRST_IN;
+ }
+
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+ int res;
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ res = klp_ip6_frag_reasm(fq, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return res;
+ }
+
+ skb_dst_drop(skb);
+ return -1;
+
+discard_fq:
+ inet_frag_kill(&fq->q, klp_ip6_frags);
+err:
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASMFAILS);
+ kfree_skb(skb);
+ return -1;
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+
+
+#undef pr_fmt
+#define pr_fmt(fmt) klp_ipv4_pr_fmt(fmt)
+
+/* patched, inlined */
+static struct inet_frag_queue *klp_inet_frag_alloc(struct netns_frags *nf,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_queue *q;
+
+ /*
+ * Fix CVE-2018-5391
+ * -5 lines
+ */
+ q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+ if (!q)
+ return NULL;
+
+ q->net = nf;
+ f->constructor(q, arg);
+ add_frag_mem_limit(nf, f->qsize);
+
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
+ atomic_set(&q->refcnt, 1);
+
+ return q;
+}
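[Editor's note, not part of the commit: for reference, the "-5 lines" dropped above are, per upstream commit 56e2c94f055d ("inet: frag: enforce memory limits earlier", listed in the patch header), the memory-limit gate that used to sit in front of the allocation:

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

The "+4 lines" in klp_inet_frag_find() below re-add the same gate at lookup time, so the limit is enforced before an attacker can even probe the hash. End of editor's note.]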
+
+/* patched, inlined, calls inlined inet_frag_alloc() */
+static struct inet_frag_queue *klp_inet_frag_create(struct netns_frags *nf,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_queue *q;
+
+ q = klp_inet_frag_alloc(nf, f, arg);
+ if (!q)
+ return NULL;
+
+ return klp_inet_frag_intern(nf, q, f, arg);
+}
+
+/* patched by itself, also calls inlined inet_frag_create() */
+struct inet_frag_queue *klp_inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key,
+ unsigned int hash)
+{
+ struct inet_frag_bucket *hb;
+ struct inet_frag_queue *q;
+ int depth = 0;
+
+ /*
+ * Fix CVE-2018-5391
+ * +4 lines
+ */
+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
+ klp_inet_frag_schedule_worker(f);
+ return NULL;
+ }
+ if (frag_mem_limit(nf) > nf->low_thresh)
+ klp_inet_frag_schedule_worker(f);
+
+ hash &= (INETFRAGS_HASHSZ - 1);
+ hb = &f->hash[hash];
+
+ spin_lock(&hb->chain_lock);
+ hlist_for_each_entry(q, &hb->chain, list) {
+ if (q->net == nf && f->match(q, key)) {
+ atomic_inc(&q->refcnt);
+ spin_unlock(&hb->chain_lock);
+ return q;
+ }
+ depth++;
+ }
+ spin_unlock(&hb->chain_lock);
+
+ if (depth <= INETFRAGS_MAXDEPTH)
+ return klp_inet_frag_create(nf, f, key);
+
+ if (klp_inet_frag_may_rebuild(f)) {
+ if (!f->rebuild)
+ f->rebuild = true;
+ klp_inet_frag_schedule_worker(f);
+ }
+
+ return ERR_PTR(-ENOBUFS);
+}
+
+
+/* patched, inlined */
+static int klp_ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+{
+ /*
+ * Fix CVE-2018-5391
+ * +1 line
+ */
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ unsigned int fragsize;
+ int flags, offset;
+ int ihl, end;
+ int err = -ENOENT;
+ u8 ecn;
+
+ if (qp->q.flags & INET_FRAG_COMPLETE)
+ goto err;
+
+ if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
+ unlikely(klp_ip_frag_too_far(qp)) &&
+ unlikely(err = klp_ip_frag_reinit(qp))) {
+ klp_ipq_kill(qp);
+ goto err;
+ }
+
+ ecn = klp_ip4_frag_ecn(ip_hdr(skb)->tos);
+ offset = ntohs(ip_hdr(skb)->frag_off);
+ flags = offset & ~IP_OFFSET;
+ offset &= IP_OFFSET;
+ offset <<= 3; /* offset is in 8-byte chunks */
+ ihl = ip_hdrlen(skb);
+
+ /* Determine the position of this fragment. */
+ end = offset + skb->len - skb_network_offset(skb) - ihl;
+ err = -EINVAL;
+
+ /* Is this the final fragment? */
+ if ((flags & IP_MF) == 0) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < qp->q.len ||
+ ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
+ goto err;
+ qp->q.flags |= INET_FRAG_LAST_IN;
+ qp->q.len = end;
+ } else {
+ if (end&7) {
+ end &= ~7;
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ if (end > qp->q.len) {
+ /* Some bits beyond end -> corruption. */
+ if (qp->q.flags & INET_FRAG_LAST_IN)
+ goto err;
+ qp->q.len = end;
+ }
+ }
+ if (end == offset)
+ goto err;
+
+ err = -ENOMEM;
+ if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ goto err;
+
+ err = pskb_trim_rcsum(skb, end - offset);
+ if (err)
+ goto err;
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = qp->q.fragments_tail;
+ if (!prev || KLP_FRAG_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = qp->q.fragments; next != NULL; next = next->next) {
+ if (KLP_FRAG_CB(next)->offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+found:
+ /*
+ * Fix CVE-2018-5391
+ * -54 lines, +17 lines
+ */
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * We do the same here for IPv4.
+ */
+
+ /* Is there an overlap with the previous fragment? */
+ if (prev &&
+ (KLP_FRAG_CB(prev)->offset + prev->len) > offset)
+ goto discard_qp;
+
+ /* Is there an overlap with the next fragment? */
+ if (next && KLP_FRAG_CB(next)->offset < end)
+ goto discard_qp;
+
+ KLP_FRAG_CB(skb)->offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (!next)
+ qp->q.fragments_tail = skb;
+ if (prev)
+ prev->next = skb;
+ else
+ qp->q.fragments = skb;
+
+ dev = skb->dev;
+ if (dev) {
+ qp->iif = dev->ifindex;
+ skb->dev = NULL;
+ }
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+ add_frag_mem_limit(qp->q.net, skb->truesize);
+ if (offset == 0)
+ qp->q.flags |= INET_FRAG_FIRST_IN;
+
+ fragsize = skb->len + ihl;
+
+ if (fragsize > qp->q.max_size)
+ qp->q.max_size = fragsize;
+
+ if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
+ fragsize > qp->max_df_size)
+ qp->max_df_size = fragsize;
+
+ if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ qp->q.meat == qp->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ err = klp_ip_frag_reasm(qp, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
+ /*
+ * Fix CVE-2018-5391
+ * +4 lines
+ */
+discard_qp:
+ inet_frag_kill(&qp->q, klp_ip4_frags);
+ err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+err:
+ kfree_skb(skb);
+ return err;
+}
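[Editor's note, not part of the commit: what the "-54 lines, +17 lines" above changes in behavior (upstream 7969e5c40dfd). The 54 removed lines were the old IPv4 logic that trimmed or dropped individual overlapping fragments and kept reassembling; the patched code treats any overlap as fatal for the whole queue, mirroring the RFC 5722 rule already used for IPv6. For example, with prev covering bytes [0, 1208), a new fragment at offset 1200 satisfies 0 + 1208 > 1200 and the queue is killed instead of trimmed. The two overlap tests, in isolation:

	#include <stdbool.h>

	/* Overlap with the preceding fragment in the chain. */
	static bool example_overlaps_prev(int prev_offset, int prev_len,
					  int new_offset)
	{
		return prev_offset + prev_len > new_offset;
	}

	/* Overlap with the succeeding fragment in the chain. */
	static bool example_overlaps_next(int next_offset, int new_end)
	{
		return next_offset < new_end;
	}

End of editor's note.]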
+
+/* patched, calls inlined ip_frag_queue() */
+int klp_ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+{
+ struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+ int vif = l3mdev_master_ifindex_rcu(dev);
+ struct ipq *qp;
+
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
+ skb_orphan(skb);
+
+ /* Lookup (or create) queue header */
+ qp = klp_ip_find(net, ip_hdr(skb), user, vif);
+ if (qp) {
+ int ret;
+
+ spin_lock(&qp->q.lock);
+
+ ret = klp_ip_frag_queue(qp, skb);
+
+ spin_unlock(&qp->q.lock);
+ klp_ipq_put(qp);
+ return ret;
+ }
+
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ kfree_skb(skb);
+ return -ENOMEM;
+}
+
+
+#undef pr_fmt
+#define pr_fmt(fmt) klp_ipv6_pr_fmt(fmt)
+
+/* patched */
+int klp_ipv6_frag_rcv(struct sk_buff *skb)
+{
+ struct frag_hdr *fhdr;
+ struct frag_queue *fq;
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct net *net = dev_net(skb_dst(skb)->dev);
+
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+
+ /* Jumbo payload inhibits frag. header */
+ if (hdr->payload_len == 0)
+ goto fail_hdr;
+
+ if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
+ sizeof(struct frag_hdr))))
+ goto fail_hdr;
+
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
+ if (!(fhdr->frag_off & htons(0xFFF9))) {
+ /* It is not a fragmented frame */
+ skb->transport_header += sizeof(struct frag_hdr);
+ __IP6_INC_STATS(net,
+ ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
+
+ IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ return 1;
+ }
+
+ /*
+ * Fix CVE-2018-5391
+ * +4 lines
+ */
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ goto fail_hdr;
+
+ fq = klp_fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, klp_ip6_frag_ecn(hdr));
+ if (fq) {
+ int ret;
+
+ spin_lock(&fq->q.lock);
+
+ ret = klp_ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+
+ spin_unlock(&fq->q.lock);
+ inet_frag_put(&fq->q, klp_ip6_frags);
+ return ret;
+ }
+
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
+ kfree_skb(skb);
+ return -1;
+
+fail_hdr:
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_INHDRERRORS);
+ klp_icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
+ return -1;
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+
+
+int livepatch_bsc1103098_vmlinux_init(void)
+{
+ return __klp_resolve_kallsyms_relocs(klp_funcs, ARRAY_SIZE(klp_funcs));
+}
diff --git a/bsc1103098/livepatch_bsc1103098_vmlinux.h b/bsc1103098/livepatch_bsc1103098_vmlinux.h
new file mode 100644
index 0000000..890090b
--- /dev/null
+++ b/bsc1103098/livepatch_bsc1103098_vmlinux.h
@@ -0,0 +1,20 @@
+#ifndef _LIVEPATCH_BSC1103098_VMLINUX_H
+#define _LIVEPATCH_BSC1103098_VMLINUX_H
+
+#include <linux/types.h>
+
+int livepatch_bsc1103098_vmlinux_init(void);
+
+
+struct netns_frags;
+struct inet_frags;
+struct net;
+struct sk_buff;
+
+struct inet_frag_queue *klp_inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key,
+ unsigned int hash);
+int klp_ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
+int klp_ipv6_frag_rcv(struct sk_buff *skb);
+
+#endif /* _LIVEPATCH_BSC1103098_VMLINUX_H */
diff --git a/bsc1103098/patched_funcs.csv b/bsc1103098/patched_funcs.csv
new file mode 100644
index 0000000..514665f
--- /dev/null
+++ b/bsc1103098/patched_funcs.csv
@@ -0,0 +1,4 @@
+vmlinux inet_frag_find klp_inet_frag_find
+vmlinux ip_defrag klp_ip_defrag
+vmlinux ipv6_frag_rcv klp_ipv6_frag_rcv
+nf_defrag_ipv6 nf_ct_frag6_gather klp_nf_ct_frag6_gather
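[Editor's note, not part of the commit: patched_funcs.csv maps each patched object and function to its klp_ replacement. The module glue that consumes it is generated at package build time and is not in this diff. A rough sketch of what that glue could look like against the 4.12-era upstream livepatch API (klp_register_patch()/klp_enable_patch(); field names per <linux/livepatch.h> of that era):

	#include <linux/livepatch.h>
	#include <linux/module.h>
	#include "livepatch_bsc1103098.h"

	static struct klp_func vmlinux_funcs[] = {
		{ .old_name = "inet_frag_find", .new_func = klp_inet_frag_find, },
		{ .old_name = "ip_defrag", .new_func = klp_ip_defrag, },
		{ .old_name = "ipv6_frag_rcv", .new_func = klp_ipv6_frag_rcv, },
		{ }
	};

	static struct klp_func nf_defrag_ipv6_funcs[] = {
		{ .old_name = "nf_ct_frag6_gather",
		  .new_func = klp_nf_ct_frag6_gather, },
		{ }
	};

	static struct klp_object objs[] = {
		{ .funcs = vmlinux_funcs, },	/* .name == NULL means vmlinux */
		{ .name = "nf_defrag_ipv6", .funcs = nf_defrag_ipv6_funcs, },
		{ }
	};

	static struct klp_patch patch = {
		.mod = THIS_MODULE,
		.objs = objs,
	};

	static int __init lp_init(void)
	{
		int ret = livepatch_bsc1103098_init();

		if (ret)
			return ret;

		ret = klp_register_patch(&patch);
		if (ret)
			goto err_cleanup;

		ret = klp_enable_patch(&patch);
		if (ret) {
			WARN_ON(klp_unregister_patch(&patch));
			goto err_cleanup;
		}
		return 0;

	err_cleanup:
		livepatch_bsc1103098_cleanup();
		return ret;
	}

	static void __exit lp_exit(void)
	{
		/* Assumes the patch was disabled via sysfs before unload. */
		livepatch_bsc1103098_cleanup();
		WARN_ON(klp_unregister_patch(&patch));
	}

	module_init(lp_init);
	module_exit(lp_exit);
	MODULE_LICENSE("GPL");
	MODULE_INFO(livepatch, "Y");

End of editor's note.]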