author    Mel Gorman <mgorman@suse.de>    2013-06-25 15:30:44 +0100
committer Jiri Kosina <jkosina@suse.cz>   2013-06-25 20:32:49 +0200
commit    25740f886f2c6106cf7e0089f15b28805eaba5e0 (tag: rpm-3.0.80-0.7)
tree      64a3d2c6e3337e08419a52b1b2bfd147af751a30
parent    f5a0e1f2fc85b4add7542fb720cfb3465bc912e6

- patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch:
  mm: compaction: Scan PFN caching KABI workaround
  (Fix KABI breakage (bnc#825657)).
 kernel-source.changes                                       |   7 +
 patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch | 237 +
 series.conf                                                 |   1 +
 3 files changed, 245 insertions(+), 0 deletions(-)
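For readers unfamiliar with the technique: genksyms computes the KABI
checksum from the structure layout it sees with __GENKSYMS__ defined, so a
reserved void * field can be re-typed to a pointer of identical size without
changing either the checksum or the layout. A minimal sketch of the pattern
this commit uses, with illustrative names (this is not code from the patch):

    /* New state lives out-of-line so the public struct keeps its layout. */
    struct ext_fields {
            unsigned long cached_free_pfn;
            unsigned long cached_migrate_pfn;
    };

    struct stable_abi {
            long existing_field;
    #ifdef __GENKSYMS__
            void *reserved_padding;         /* what the checksum tool sees */
    #else
            struct ext_fields *ext;         /* same size, new meaning */
    #endif
    };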
diff --git a/kernel-source.changes b/kernel-source.changes
index d0bc8010d1..67403a0e5a 100644
--- a/kernel-source.changes
+++ b/kernel-source.changes
@@ -1,4 +1,11 @@
-------------------------------------------------------------------
+Tue Jun 25 16:30:30 CEST 2013 - mgorman@suse.de
+
+- patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch:
+ mm: compaction: Scan PFN caching KABI workaround
+ (Fix KABI breakage (bnc#825657)).
+
+-------------------------------------------------------------------
Fri May 31 22:04:26 CEST 2013 - jeffm@suse.com

- patches.fixes/reiserfs-fix-spurious-multiple-fill-in-reiserfs_readdir_dentry:
diff --git a/patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch b/patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch
new file mode 100644
index 0000000000..abce1b5e6d
--- /dev/null
+++ b/patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch
@@ -0,0 +1,237 @@
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue, 25 Jun 2013 14:03:55 +0100
+Subject: [PATCH] mm: compaction: Scan PFN caching KABI workaround
+
+References: Fix KABI breakage (bnc#825657)
+Patch-mainline: No, unnecessary
+
+The patch "mm: compaction: Scan PFN caching KABI workaround"
+(patches.fixes/mm-compaction-scan-pfn-caching-KABI.patch) made a fatal
+mistake. When it was merged, it was only checked that the hole was a
+suitable size on 32-bit and 64-bit configurations, neglecting that
+spinlock_t and seqlock_t can be any size and that the number of zones is
+not fixed, both of which alter the size of the available holes. This patch
+moves the new fields out of struct zone and uses the existing kabi padding
+pointer to reach them. The structure is initialised at the same time as the
+zone and exists for the lifetime of the system, which is safe as nodes
+cannot be hot-removed in SLES 11 SP2.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ include/linux/mmzone.h | 36 +++++++++++++++++++++++-------------
+ mm/compaction.c | 32 ++++++++++++++++++--------------
+ mm/internal.h | 2 ++
+ mm/page_alloc.c | 19 ++++++++++++++++++-
+ 4 files changed, 61 insertions(+), 28 deletions(-)
+
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index c70b89f..22e8755 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -292,6 +292,19 @@ struct zone_reclaim_stat {
+ unsigned long recent_scanned[2];
+ };
+
++struct compact_cached {
++ /* Set to true when the PG_migrate_skip bits should be cleared */
++ bool blockskip_flush;
++
++ /* pfns where compaction scanners should start */
++ unsigned long free_pfn;
++ unsigned long migrate_pfn;
++};
++
++struct zone_kabi_workaround {
++ struct compact_cached compact_cached;
++};
++
+ struct zone {
+ /* Fields commonly accessed by the page allocator */
+
+@@ -375,19 +388,6 @@ struct zone {
+ */
+ unsigned int inactive_ratio;
+
+-#ifdef __GENKSYMS__
+- /* This is a hole in struct zone that the compaction fields fits in */
+-#else
+-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+- /* Set to true when the PG_migrate_skip bits should be cleared */
+- bool compact_blockskip_flush;
+-
+- /* pfns where compaction scanners should start */
+- unsigned long compact_cached_free_pfn;
+- unsigned long compact_cached_migrate_pfn;
+-#endif
+-#endif /* __GENKSYMS__ */
+-
+ ZONE_PADDING(_pad2_)
+ /* Rarely used or read-mostly fields */
+
+@@ -443,7 +443,17 @@ struct zone {
+ * rarely used fields:
+ */
+ const char *name;
++#ifdef __GENKSYMS__
+ void *suse_kabi_padding;
++#else
++ /*
++ * kabi padding is now in use for bnc#816451. Take special care when
++ * expanding the structure to ensure that the layout of struct zone
++ * does not change.
++ */
++ struct zone_kabi_workaround *kabi_workaround;
++#endif
++
+ } ____cacheline_internodealigned_in_smp;
+
+ typedef enum {
+diff --git a/mm/compaction.c b/mm/compaction.c
+index e923d22..0e805c1 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -93,9 +93,9 @@ static void __reset_isolation_suitable(struct zone *zone)
+ unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ unsigned long pfn;
+
+- zone->compact_cached_migrate_pfn = start_pfn;
+- zone->compact_cached_free_pfn = end_pfn;
+- zone->compact_blockskip_flush = false;
++ zone->kabi_workaround->compact_cached.migrate_pfn = start_pfn;
++ zone->kabi_workaround->compact_cached.free_pfn = end_pfn;
++ zone->kabi_workaround->compact_cached.blockskip_flush = false;
+
+ /* Walk the zone and mark every pageblock as suitable for isolation */
+ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+@@ -120,11 +120,11 @@ void reset_isolation_suitable(pg_data_t *pgdat)
+
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+- if (!populated_zone(zone))
++ if (!populated_zone(zone) || !zone->kabi_workaround)
+ continue;
+
+ /* Only flush if a full compaction finished recently */
+- if (zone->compact_blockskip_flush)
++ if (zone->kabi_workaround->compact_cached.blockskip_flush)
+ __reset_isolation_suitable(zone);
+ }
+ }
+@@ -148,12 +148,12 @@ static void update_pageblock_skip(struct compact_control *cc,
+ /* Update where compaction should restart */
+ if (migrate_scanner) {
+ if (!cc->finished_update_migrate &&
+- pfn > zone->compact_cached_migrate_pfn)
+- zone->compact_cached_migrate_pfn = pfn;
++ pfn > zone->kabi_workaround->compact_cached.migrate_pfn)
++ zone->kabi_workaround->compact_cached.migrate_pfn = pfn;
+ } else {
+ if (!cc->finished_update_free &&
+- pfn < zone->compact_cached_free_pfn)
+- zone->compact_cached_free_pfn = pfn;
++ pfn < zone->kabi_workaround->compact_cached.free_pfn)
++ zone->kabi_workaround->compact_cached.free_pfn = pfn;
+ }
+ }
+ }
+@@ -686,7 +686,7 @@ static int compact_finished(struct zone *zone,
+ * based on an allocation request.
+ */
+ if (!current_is_kswapd())
+- zone->compact_blockskip_flush = true;
++ zone->kabi_workaround->compact_cached.blockskip_flush = true;
+
+ return COMPACT_COMPLETE;
+ }
+@@ -775,6 +775,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
++ if (WARN_ON_ONCE(!zone->kabi_workaround))
++ return COMPACT_SKIPPED;
++
+ ret = compaction_suitable(zone, cc->order);
+ switch (ret) {
+ case COMPACT_PARTIAL:
+@@ -791,15 +794,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ * information on where the scanners should start but check that it
+ * is initialised by ensuring the values are within zone boundaries.
+ */
+- cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+- cc->free_pfn = zone->compact_cached_free_pfn;
++ cc->migrate_pfn = zone->kabi_workaround->compact_cached.migrate_pfn;
++ cc->free_pfn = zone->kabi_workaround->compact_cached.free_pfn;
+ if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
+ cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+- zone->compact_cached_free_pfn = cc->free_pfn;
++ zone->kabi_workaround->compact_cached.free_pfn = cc->free_pfn;
+ }
+ if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+ cc->migrate_pfn = start_pfn;
+- zone->compact_cached_migrate_pfn = cc->migrate_pfn;
++ zone->kabi_workaround->compact_cached.migrate_pfn = cc->migrate_pfn;
+ }
+
+ /*
+@@ -924,6 +927,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
+ nodemask) {
+ int status;
+
++ zone_init_compact_cached(zone, gfp_mask);
+ status = compact_zone_order(zone, order, gfp_mask, sync,
+ contended);
+ rc = max(status, rc);
+diff --git a/mm/internal.h b/mm/internal.h
+index 2189af4..134e621 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
+ extern u64 hwpoison_filter_flags_value;
+ extern u64 hwpoison_filter_memcg;
+ extern u32 hwpoison_filter_enable;
++
++void zone_init_compact_cached(struct zone *zone, gfp_t gfp_mask);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 752f3fc..876b237 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1927,6 +1927,22 @@ out:
+ return page;
+ }
+
++void __init_refok zone_init_compact_cached(struct zone *zone, gfp_t gfp_mask)
++{
++ pg_data_t *pgdat;
++
++ if (likely(zone->kabi_workaround))
++ return;
++
++ pgdat = zone->zone_pgdat;
++ if (slab_is_available())
++ zone->kabi_workaround = kzalloc_node(sizeof(struct zone_kabi_workaround),
++ gfp_mask, pgdat->node_id);
++ else
++ zone->kabi_workaround = alloc_bootmem_node(pgdat,
++ sizeof(struct zone_kabi_workaround));
++}
++
+ #ifdef CONFIG_COMPACTION
+ /* Try memory compaction for high-order allocations before reclaim */
+ static struct page *
+@@ -1963,7 +1979,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ alloc_flags, preferred_zone,
+ migratetype);
+ if (page) {
+- preferred_zone->compact_blockskip_flush = false;
++ preferred_zone->kabi_workaround->compact_cached.blockskip_flush = false;
+ preferred_zone->compact_considered = 0;
+ preferred_zone->compact_defer_shift = 0;
+ count_vm_event(COMPACTSUCCESS);
+@@ -4506,6 +4522,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+
+ set_pageblock_order(pageblock_default_order());
+ setup_usemap(pgdat, zone, zone_start_pfn, size);
++ zone_init_compact_cached(zone, GFP_KERNEL);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+ BUG_ON(ret);
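zone_init_compact_cached() above has to work both during early boot, where
free_area_init_core() runs before the slab allocator is up, and again from
try_to_compact_pages(), which retries in case the early allocation failed.
Reduced to its core, the pattern is (a sketch using the same kernel APIs as
the patch; the helper name is illustrative):

    #include <linux/bootmem.h>
    #include <linux/mmzone.h>
    #include <linux/slab.h>

    /* Allocate zeroed, node-local memory before or after slab is up. */
    static void *node_local_zalloc(pg_data_t *pgdat, size_t size,
                                   gfp_t gfp_mask)
    {
            if (slab_is_available())
                    return kzalloc_node(size, gfp_mask, pgdat->node_id);
            /* Early boot: bootmem allocations are already zeroed. */
            return alloc_bootmem_node(pgdat, size);
    }

Because the structure is never freed, a zone whose allocation failed simply
keeps a NULL pointer and retries on the next compaction attempt, which is
why the WARN_ON_ONCE() path in compact_zone() can safely bail out with
COMPACT_SKIPPED.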
diff --git a/series.conf b/series.conf
index f9c0c4ede7..73270eba3d 100644
--- a/series.conf
+++ b/series.conf
@@ -822,6 +822,7 @@
patches.fixes/mm-compaction-Restart-compaction-from-near-where-it-left-off.patch
patches.fixes/mm-compaction-clear-PG_migrate_skip-based-on-compaction-and-reclaim-activity.patch
patches.fixes/mm-compaction-scan-pfn-caching-KABI.patch
+ patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch
patches.fixes/mm-page-allocat-remove-thp-guard.patch
# CVE-2012-5517, bnc#789235
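The layout-preservation property the whole fix rests on, namely that swapping
void *suse_kabi_padding for a struct zone_kabi_workaround * cannot move any
field, can be checked in isolation with a throwaway userspace program
(hypothetical, not part of the commit); build it with and without
-DOLD_LAYOUT and the output is identical:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ext { unsigned long a, b; };

    struct zone_like {
            const char *name;
    #ifdef OLD_LAYOUT
            void *suse_kabi_padding;        /* original reserved field */
    #else
            struct ext *kabi_workaround;    /* repurposed field */
    #endif
            unsigned long next_field;
    };

    int main(void)
    {
            /* All object pointers share size and alignment on Linux ABIs. */
            assert(sizeof(void *) == sizeof(struct ext *));
            printf("sizeof=%zu offset(next_field)=%zu\n",
                   sizeof(struct zone_like),
                   offsetof(struct zone_like, next_field));
            return 0;
    }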