Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2019-05-03 14:02:41 +0200
committerTakashi Iwai <tiwai@suse.de>2019-05-03 14:02:41 +0200
commit91c4171237bd0a1dd5bcda147905080eae60d113 (patch)
tree6e546bafe1a60dc1de40364a1ba6f35fd4c1c8ea
parent6e7841b930f100ee638c8e341358f5ae64106681 (diff)
parent8bbd07af4f8833bcb8ed743f4d894e04fc6fbb55 (diff)
Merge branch 'users/msuchanek/SLE15/for-next' into SLE15
Pull powerpc fixes from Michal Suchanek
-rw-r--r--patches.arch/powerpc-64s-Fix-page-table-fragment-refcount-race-vs.patch105
-rw-r--r--patches.arch/powerpc-mm-Fix-page-table-dump-to-work-on-Radix.patch60
-rw-r--r--patches.arch/powerpc-mm-radix-Display-if-mappings-are-exec-or-not.patch95
-rw-r--r--patches.arch/powerpc-mm-radix-Prettify-mapped-memory-range-print-.patch61
-rw-r--r--patches.arch/powerpc-mm-radix-Split-linear-mapping-on-hot-unplug.patch22
-rw-r--r--patches.kabi/Fix-struct-page-kABI-after-adding-atomic-for-ppc.patch30
-rw-r--r--series.conf5
7 files changed, 367 insertions, 11 deletions
diff --git a/patches.arch/powerpc-64s-Fix-page-table-fragment-refcount-race-vs.patch b/patches.arch/powerpc-64s-Fix-page-table-fragment-refcount-race-vs.patch
new file mode 100644
index 0000000000..766501537f
--- /dev/null
+++ b/patches.arch/powerpc-64s-Fix-page-table-fragment-refcount-race-vs.patch
@@ -0,0 +1,105 @@
+From 2f6d444f6b57b11a777897f35004007e3bd4fc6d Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 27 Jul 2018 21:48:17 +1000
+Subject: [PATCH] powerpc/64s: Fix page table fragment refcount race vs
+ speculative references
+
+References: bsc#1131326, bsc#1108937
+Patch-mainline: v4.19-rc1
+Git-commit: 4231aba000f5a4583dd9f67057aadb68c3eca99d
+
+The page table fragment allocator uses the main page refcount racily
+with respect to speculative references. A customer observed a BUG due
+to page table page refcount underflow in the fragment allocator. This
+can be caused by the fragment allocator set_page_count stomping on a
+speculative reference, and then the speculative failure handler
+decrements the new reference, and the underflow eventually pops when
+the page tables are freed.
+
+Fix this by using a dedicated field in the struct page for the page
+table fragment allocator.
+
+Fixes: 5c1f6ee9a31c ("powerpc: Reduce PTE table memory wastage")
+Cc: stable@vger.kernel.org # v3.10+
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Backported-by: Gustavo Walbon <gwalbon@linux.ibm.com>
+[
+struct page uses another offset in the struct to store the frag count
+]
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/mm/mmu_context_book3s64.c | 4 ++--
+ arch/powerpc/mm/pgtable_64.c | 10 +++++++---
+ include/linux/mm_types.h | 1 +
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index 3f980baade4c..0aff84f8e08d 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -201,9 +201,9 @@ static void destroy_pagetable_page(struct mm_struct *mm)
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+- if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
++ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+ pgtable_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 8faaa44bea34..ac990323ecfb 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -364,6 +364,8 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
+ return NULL;
+ }
+
++ atomic_set(&page->pt_frag_refcount, 1);
++
+ ret = page_address(page);
+ spin_lock(&mm->page_table_lock);
+ /*
+@@ -372,7 +374,7 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
+ * count.
+ */
+ if (likely(!mm->context.pte_frag)) {
+- set_page_count(page, PTE_FRAG_NR);
++ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+ mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+@@ -395,10 +397,12 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel
+ void pte_fragment_free(unsigned long *table, int kernel)
+ {
+ struct page *page = virt_to_page(table);
+- if (put_page_testzero(page)) {
++
++ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
++ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 0aacdfa6dc2e..2b9589f02601 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -57,6 +57,7 @@ struct page {
+
+ /* Second double word */
+ union {
++ atomic_t pt_frag_refcount; /* powerpc */
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+ /* page_deferred_list().prev -- second tail page */
+--
+2.20.1
+
diff --git a/patches.arch/powerpc-mm-Fix-page-table-dump-to-work-on-Radix.patch b/patches.arch/powerpc-mm-Fix-page-table-dump-to-work-on-Radix.patch
new file mode 100644
index 0000000000..f683210903
--- /dev/null
+++ b/patches.arch/powerpc-mm-Fix-page-table-dump-to-work-on-Radix.patch
@@ -0,0 +1,60 @@
+From 0d923962ab69c27cca664a2d535e90ef655110ca Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 15 Aug 2018 21:29:45 +1000
+Subject: [PATCH] powerpc/mm: Fix page table dump to work on Radix
+
+References: bsc#1055186, fate#323286, git-fixes
+Patch-mainline: v4.20-rc1
+Git-commit: 0d923962ab69c27cca664a2d535e90ef655110ca
+
+When we're running on Book3S with the Radix MMU enabled the page table
+dump currently prints the wrong addresses because it uses the wrong
+start address.
+
+Fix it to use PAGE_OFFSET rather than KERN_VIRT_START.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/mm/dump_linuxpagetables.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
+index e60aa6d7456d..2b74f8adf4d0 100644
+--- a/arch/powerpc/mm/dump_linuxpagetables.c
++++ b/arch/powerpc/mm/dump_linuxpagetables.c
+@@ -267,12 +267,13 @@ static void walk_pagetables(struct pg_state *st)
+ unsigned int i;
+ unsigned long addr;
+
++ addr = st->start_address;
++
+ /*
+ * Traverse the linux pagetable structure and dump pages that are in
+ * the hash pagetable.
+ */
+- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+- addr = KERN_VIRT_START + i * PGDIR_SIZE;
++ for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
+ if (!pgd_none(*pgd) && !pgd_huge(*pgd))
+ /* pgd exists */
+ walk_pud(st, pgd, addr);
+@@ -321,9 +322,14 @@ static int ptdump_show(struct seq_file *m, void *v)
+ {
+ struct pg_state st = {
+ .seq = m,
+- .start_address = KERN_VIRT_START,
+ .marker = address_markers,
+ };
++
++ if (radix_enabled())
++ st.start_address = PAGE_OFFSET;
++ else
++ st.start_address = KERN_VIRT_START;
++
+ /* Traverse kernel page tables */
+ walk_pagetables(&st);
+ note_page(&st, 0, 0, 0);
+--
+2.20.1
+
diff --git a/patches.arch/powerpc-mm-radix-Display-if-mappings-are-exec-or-not.patch b/patches.arch/powerpc-mm-radix-Display-if-mappings-are-exec-or-not.patch
new file mode 100644
index 0000000000..c22d4bc88c
--- /dev/null
+++ b/patches.arch/powerpc-mm-radix-Display-if-mappings-are-exec-or-not.patch
@@ -0,0 +1,95 @@
+From afb6d0647fd250a068efd985987b5ff2c0d1b853 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 17 Oct 2018 23:53:38 +1100
+Subject: [PATCH] powerpc/mm/radix: Display if mappings are exec or not
+
+References: bsc#1055186, fate#323286, git-fixes
+Patch-mainline: v4.20-rc1
+Git-commit: afb6d0647fd250a068efd985987b5ff2c0d1b853
+
+At boot we print the ranges we've mapped for the linear mapping and
+what page size we've used. Also track whether the range is mapped
+executable or not and display that as well.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/mm/pgtable-radix.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -180,9 +180,8 @@ void radix__mark_rodata_ro(void)
+ }
+ #endif /* CONFIG_STRICT_KERNEL_RWX */
+
+-static inline void __meminit print_mapping(unsigned long start,
+- unsigned long end,
+- unsigned long size)
++static inline void __meminit
++print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
+ {
+ char buf[10];
+
+@@ -191,13 +190,15 @@ static inline void __meminit print_mappi
+
+ string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
+
+- pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
++ pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
++ exec ? " (exec)" : "");
+ }
+
+ static int __meminit create_physical_mapping(unsigned long start,
+ unsigned long end)
+ {
+ unsigned long vaddr, addr, mapping_size = 0;
++ bool prev_exec, exec = false;
+ pgprot_t prot;
+ unsigned long max_mapping_size;
+ #ifdef CONFIG_STRICT_KERNEL_RWX
+@@ -213,6 +214,7 @@ static int __meminit create_physical_map
+
+ gap = end - addr;
+ previous_size = mapping_size;
++ prev_exec = exec;
+ max_mapping_size = PUD_SIZE;
+
+ retry:
+@@ -238,25 +240,28 @@ retry:
+ (addr + mapping_size) >= __pa_symbol(_stext))
+ mapping_size = PAGE_SIZE;
+
+- if (mapping_size != previous_size) {
+- print_mapping(start, addr, previous_size);
+- start = addr;
+- }
+-
+ vaddr = (unsigned long)__va(addr);
+
+ if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
+- overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
++ overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
+ prot = PAGE_KERNEL_X;
+- else
++ exec = true;
++ } else {
+ prot = PAGE_KERNEL;
++ exec = false;
++ }
++
++ if (mapping_size != previous_size || exec != prev_exec) {
++ print_mapping(start, addr, previous_size, prev_exec);
++ start = addr;
++ }
+
+ rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
+ if (rc)
+ return rc;
+ }
+
+- print_mapping(start, addr, mapping_size);
++ print_mapping(start, addr, mapping_size, exec);
+ return 0;
+ }
+
diff --git a/patches.arch/powerpc-mm-radix-Prettify-mapped-memory-range-print-.patch b/patches.arch/powerpc-mm-radix-Prettify-mapped-memory-range-print-.patch
new file mode 100644
index 0000000000..38dd184d2b
--- /dev/null
+++ b/patches.arch/powerpc-mm-radix-Prettify-mapped-memory-range-print-.patch
@@ -0,0 +1,61 @@
+From 6deb6b474bda4d2d3fbee066f20561d3dc7f8b30 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 30 Aug 2017 17:41:17 +1000
+Subject: [PATCH] powerpc/mm/radix: Prettify mapped memory range print out
+
+References: bsc#1055186, fate#323286, git-fixes
+Patch-mainline: v4.14-rc1
+Git-commit: 6deb6b474bda4d2d3fbee066f20561d3dc7f8b30
+
+When we map memory at boot we print out the ranges of real addresses
+that we mapped and the page size that was used.
+
+Currently it's a bit ugly:
+
+ Mapped range 0x0 - 0x2000000000 with 0x40000000
+ Mapped range 0x200000000000 - 0x202000000000 with 0x40000000
+
+Pad the addresses so they line up, and print the page size using
+actual units, eg:
+
+ Mapped 0x0000000000000000-0x0000000001200000 with 64.0 KiB pages
+ Mapped 0x0000000001200000-0x0000000040000000 with 2.00 MiB pages
+ Mapped 0x0000000040000000-0x0000000100000000 with 1.00 GiB pages
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/mm/pgtable-radix.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index 5156d2627779..39c252b54d16 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -16,6 +16,7 @@
+ #include <linux/memblock.h>
+ #include <linux/of_fdt.h>
+ #include <linux/mm.h>
++#include <linux/string_helpers.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -187,10 +188,14 @@ static inline void __meminit print_mapping(unsigned long start,
+ unsigned long end,
+ unsigned long size)
+ {
++ char buf[10];
++
+ if (end <= start)
+ return;
+
+- pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
++ string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
++
++ pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
+ }
+
+ static int __meminit create_physical_mapping(unsigned long start,
+--
+2.20.1
+
diff --git a/patches.arch/powerpc-mm-radix-Split-linear-mapping-on-hot-unplug.patch b/patches.arch/powerpc-mm-radix-Split-linear-mapping-on-hot-unplug.patch
index cc58c45112..c484f937f2 100644
--- a/patches.arch/powerpc-mm-radix-Split-linear-mapping-on-hot-unplug.patch
+++ b/patches.arch/powerpc-mm-radix-Split-linear-mapping-on-hot-unplug.patch
@@ -1,4 +1,4 @@
-From 59b742aed38ea51a742bf5ce0220dba1db969819 Mon Sep 17 00:00:00 2001
+From 4dd5f8a99e791a8c6500e3592f3ce81ae7edcde1 Mon Sep 17 00:00:00 2001
From: Balbir Singh <bsingharora@gmail.com>
Date: Wed, 7 Feb 2018 17:35:51 +1100
Subject: [PATCH] powerpc/mm/radix: Split linear mapping on hot-unplug
@@ -28,22 +28,22 @@ Signed-off-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
- arch/powerpc/mm/pgtable-radix.c | 95 ++++++++++++++++++++++++++++++++---------
+ arch/powerpc/mm/pgtable-radix.c | 95 +++++++++++++++++++++++++--------
1 file changed, 74 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
-index d51d5dbec290..ff30f341e546 100644
+index 96e07d1f673d..328ff9abc333 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
-@@ -12,6 +12,7 @@
- #include <linux/memblock.h>
+@@ -17,6 +17,7 @@
#include <linux/of_fdt.h>
#include <linux/mm.h>
+ #include <linux/string_helpers.h>
+#include <linux/stop_machine.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
-@@ -676,6 +677,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+@@ -685,6 +686,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
pud_clear(pud);
}
@@ -74,7 +74,7 @@ index d51d5dbec290..ff30f341e546 100644
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
unsigned long end)
{
-@@ -704,6 +729,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+@@ -713,6 +738,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
}
}
@@ -127,7 +127,7 @@ index d51d5dbec290..ff30f341e546 100644
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
unsigned long end)
{
-@@ -719,13 +790,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+@@ -728,13 +799,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
continue;
if (pmd_huge(*pmd)) {
@@ -142,7 +142,7 @@ index d51d5dbec290..ff30f341e546 100644
continue;
}
-@@ -750,13 +815,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
+@@ -759,13 +824,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
continue;
if (pud_huge(*pud)) {
@@ -157,7 +157,7 @@ index d51d5dbec290..ff30f341e546 100644
continue;
}
-@@ -782,13 +841,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
+@@ -791,13 +850,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
continue;
if (pgd_huge(*pgd)) {
@@ -173,5 +173,5 @@ index d51d5dbec290..ff30f341e546 100644
}
--
-2.13.6
+2.20.1
diff --git a/patches.kabi/Fix-struct-page-kABI-after-adding-atomic-for-ppc.patch b/patches.kabi/Fix-struct-page-kABI-after-adding-atomic-for-ppc.patch
new file mode 100644
index 0000000000..f44391f360
--- /dev/null
+++ b/patches.kabi/Fix-struct-page-kABI-after-adding-atomic-for-ppc.patch
@@ -0,0 +1,30 @@
+From e6a2e77e022f47a68fa81fe271d8054de30f0f74 Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Wed, 3 Apr 2019 14:45:00 +0200
+Subject: [PATCH] Fix struct page kABI after adding atomic for ppc
+
+References: bsc#1131326, bsc#1108937
+Patch-mainline: no, kabi
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+---
+ include/linux/mm_types.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 2b9589f02601..772fcb7e176f 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -57,7 +57,9 @@ struct page {
+
+ /* Second double word */
+ union {
++#ifndef __GENKSYMS__
+ atomic_t pt_frag_refcount; /* powerpc */
++#endif
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+ /* page_deferred_list().prev -- second tail page */
+--
+2.20.1
+
diff --git a/series.conf b/series.conf
index 297acb2892..47ca9974cc 100644
--- a/series.conf
+++ b/series.conf
@@ -6240,6 +6240,7 @@
patches.arch/powerpc-Machine-check-interrupt-is-a-non-maskable-in.patch
patches.arch/powerpc-xmon-Fix-display-of-SPRs.patch
patches.arch/powerpc-kernel-Change-retrieval-of-pci_dn.patch
+ patches.arch/powerpc-mm-radix-Prettify-mapped-memory-range-print-.patch
patches.arch/powerpc-xmon-Add-ISA-v3.0-SPRs-to-SPR-dump.patch
patches.arch/powerpc-conditionally-compile-platform-specific-serial-drivers.patch
patches.arch/cxl-Fix-driver-use-count.patch
@@ -18454,6 +18455,7 @@
patches.arch/powerpc-64s-Add-support-for-software-count-cache-flu.patch
patches.arch/powerpc-pseries-Query-hypervisor-for-count-cache-flu.patch
patches.arch/powerpc-powernv-Query-firmware-for-count-cache-flush.patch
+ patches.arch/powerpc-64s-Fix-page-table-fragment-refcount-race-vs.patch
patches.arch/powerpc-perf-Remove-sched_task-function-defined-for-.patch
patches.fixes/4.4.156-022-powerpc-Fix-size-calculation-using-resource_s.patch
patches.arch/powerpc-fadump-handle-crash-memory-ranges-array-inde.patch
@@ -19631,6 +19633,8 @@
patches.arch/powerpc-cacheinfo-Report-the-correct-shared_cpu_map-.patch
patches.arch/powerpc-rtas-Fix-a-potential-race-between-CPU-Offlin.patch
patches.arch/powerpc-mm-Add-missing-tracepoint-for-tlbie.patch
+ patches.arch/powerpc-mm-radix-Display-if-mappings-are-exec-or-not.patch
+ patches.arch/powerpc-mm-Fix-page-table-dump-to-work-on-Radix.patch
patches.arch/powerpc-traps-restore-recoverability-of-machine_chec.patch
patches.suse/net-udp-fix-handling-of-CHECKSUM_COMPLETE-packets.patch
patches.suse/ipv6-ndisc-Preserve-IPv6-control-buffer-if-protocol-.patch
@@ -22258,6 +22262,7 @@
patches.kabi/KABI-powerpc-export-__find_linux_pte-as-__find_linux.patch
patches.kabi/KABI-hide-new-member-in-struct-iommu_table-from-genk.patch
patches.kabi/KABI-powerpc-Revert-npu-callback-signature-change.patch
+ patches.kabi/Fix-struct-page-kABI-after-adding-atomic-for-ppc.patch
patches.kabi/perf-x86-fix-data-source-decoding-for-skylake-kabi.patch
patches.kabi/kabi-protect-struct-nf_conn.patch