author      Mel Gorman <mgorman@suse.de>      2013-06-25 15:30:44 +0100
committer   Jiri Kosina <jkosina@suse.cz>     2013-06-25 20:32:49 +0200
commit      3c68328680bc679091d196f4a6b9f2d55048dae2 (patch)
tree        edd612cc8a0ce829e63e82781dfe888ce52ea84a
parent      7670497f0bfb885ed1641d18a306cf034ff4f122 (diff)

- patches.fixes/mm-compaction-scan-pfn-caching-KABI-fix.patch: rpm-3.0.80-0.7
  mm: compaction: Scan PFN caching KABI workaround (Fix KABI breakage (bnc#825657)).
  suse-commit: 25740f886f2c6106cf7e0089f15b28805eaba5e0
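The patch below keeps the exported layout of struct zone stable by moving the compaction scan-PFN cache out of line: the new fields live in a separately allocated struct zone_kabi_workaround reached through the already-reserved suse_kabi_padding pointer slot, so the structure size and field offsets seen by out-of-tree modules do not change. A rough, self-contained sketch of that pattern follows; the names (ext_state, fixed_abi_zone, zone_ext) are illustrative stand-ins, not the kernel code in this commit.

/*
 * Illustrative sketch only: park new state behind a pointer that reuses
 * an existing padding slot, so the ABI-visible struct layout is unchanged.
 * All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ext_state {			/* new fields live out of line */
	bool blockskip_flush;
	unsigned long free_pfn;
	unsigned long migrate_pfn;
};

struct fixed_abi_zone {			/* layout must not change */
	const char *name;
	void *padding;			/* pre-reserved slot, now repurposed */
};

/* Lazily attach the extension; callers must tolerate a NULL result. */
static struct ext_state *zone_ext(struct fixed_abi_zone *z)
{
	if (!z->padding)
		z->padding = calloc(1, sizeof(struct ext_state));
	return z->padding;
}

int main(void)
{
	struct fixed_abi_zone zone = { .name = "Normal" };
	struct ext_state *ext = zone_ext(&zone);

	if (!ext)			/* mirrors the WARN_ON_ONCE()-style guard */
		return 1;

	ext->free_pfn = 4096;
	printf("%s: cached free_pfn=%lu\n", zone.name, ext->free_pfn);
	free(ext);
	return 0;
}

Because only the pre-existing padding pointer is repurposed, sizeof(struct fixed_abi_zone) and every field offset stay the same, which is exactly the property the KABI check cares about.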
-rw-r--r--   include/linux/mmzone.h   36
-rw-r--r--   mm/compaction.c          32
-rw-r--r--   mm/internal.h             2
-rw-r--r--   mm/page_alloc.c          19
4 files changed, 61 insertions(+), 28 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c70b89f0d827..22e875576b71 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -292,6 +292,19 @@ struct zone_reclaim_stat {
unsigned long recent_scanned[2];
};
+struct compact_cached {
+ /* Set to true when the PG_migrate_skip bits should be cleared */
+ bool blockskip_flush;
+
+ /* pfns where compaction scanners should start */
+ unsigned long free_pfn;
+ unsigned long migrate_pfn;
+};
+
+struct zone_kabi_workaround {
+ struct compact_cached compact_cached;
+};
+
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -375,19 +388,6 @@ struct zone {
*/
unsigned int inactive_ratio;
-#ifdef __GENKSYMS__
- /* This is a hole in struct zone that the compaction fields fits in */
-#else
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
- /* Set to true when the PG_migrate_skip bits should be cleared */
- bool compact_blockskip_flush;
-
- /* pfns where compaction scanners should start */
- unsigned long compact_cached_free_pfn;
- unsigned long compact_cached_migrate_pfn;
-#endif
-#endif /* __GENKSYMS__ */
-
ZONE_PADDING(_pad2_)
/* Rarely used or read-mostly fields */
@@ -443,7 +443,17 @@ struct zone {
* rarely used fields:
*/
const char *name;
+#ifdef __GENKSYMS__
void *suse_kabi_padding;
+#else
+ /*
+ * kabi padding is now in use for bnc#816451. Take special care if
+ * expanding the structure that the layout of struct zone does not
+ * change.
+ */
+ struct zone_kabi_workaround *kabi_workaround;
+#endif
+
} ____cacheline_internodealigned_in_smp;
typedef enum {
diff --git a/mm/compaction.c b/mm/compaction.c
index e923d224e2cb..0e805c12bd67 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -93,9 +93,9 @@ static void __reset_isolation_suitable(struct zone *zone)
unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
unsigned long pfn;
- zone->compact_cached_migrate_pfn = start_pfn;
- zone->compact_cached_free_pfn = end_pfn;
- zone->compact_blockskip_flush = false;
+ zone->kabi_workaround->compact_cached.migrate_pfn = start_pfn;
+ zone->kabi_workaround->compact_cached.free_pfn = end_pfn;
+ zone->kabi_workaround->compact_cached.blockskip_flush = false;
/* Walk the zone and mark every pageblock as suitable for isolation */
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -120,11 +120,11 @@ void reset_isolation_suitable(pg_data_t *pgdat)
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
struct zone *zone = &pgdat->node_zones[zoneid];
- if (!populated_zone(zone))
+ if (!populated_zone(zone) || !zone->kabi_workaround)
continue;
/* Only flush if a full compaction finished recently */
- if (zone->compact_blockskip_flush)
+ if (zone->kabi_workaround->compact_cached.blockskip_flush)
__reset_isolation_suitable(zone);
}
}
@@ -148,12 +148,12 @@ static void update_pageblock_skip(struct compact_control *cc,
/* Update where compaction should restart */
if (migrate_scanner) {
if (!cc->finished_update_migrate &&
- pfn > zone->compact_cached_migrate_pfn)
- zone->compact_cached_migrate_pfn = pfn;
+ pfn > zone->kabi_workaround->compact_cached.migrate_pfn)
+ zone->kabi_workaround->compact_cached.migrate_pfn = pfn;
} else {
if (!cc->finished_update_free &&
- pfn < zone->compact_cached_free_pfn)
- zone->compact_cached_free_pfn = pfn;
+ pfn < zone->kabi_workaround->compact_cached.free_pfn)
+ zone->kabi_workaround->compact_cached.free_pfn = pfn;
}
}
}
@@ -686,7 +686,7 @@ static int compact_finished(struct zone *zone,
* based on an allocation request.
*/
if (!current_is_kswapd())
- zone->compact_blockskip_flush = true;
+ zone->kabi_workaround->compact_cached.blockskip_flush = true;
return COMPACT_COMPLETE;
}
@@ -775,6 +775,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ if (WARN_ON_ONCE(!zone->kabi_workaround))
+ return COMPACT_SKIPPED;
+
ret = compaction_suitable(zone, cc->order);
switch (ret) {
case COMPACT_PARTIAL:
@@ -791,15 +794,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
* information on where the scanners should start but check that it
* is initialised by ensuring the values are within zone boundaries.
*/
- cc->migrate_pfn = zone->compact_cached_migrate_pfn;
- cc->free_pfn = zone->compact_cached_free_pfn;
+ cc->migrate_pfn = zone->kabi_workaround->compact_cached.migrate_pfn;
+ cc->free_pfn = zone->kabi_workaround->compact_cached.free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
- zone->compact_cached_free_pfn = cc->free_pfn;
+ zone->kabi_workaround->compact_cached.free_pfn = cc->free_pfn;
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
cc->migrate_pfn = start_pfn;
- zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+ zone->kabi_workaround->compact_cached.migrate_pfn = cc->migrate_pfn;
}
/*
@@ -924,6 +927,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;
+ zone_init_compact_cached(zone, gfp_mask);
status = compact_zone_order(zone, order, gfp_mask, sync,
contended);
rc = max(status, rc);
diff --git a/mm/internal.h b/mm/internal.h
index 2189af491783..134e621f341b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
+
+void zone_init_compact_cached(struct zone *zone, gfp_t gfp_mask);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 752f3fc3a97d..876b2372eb8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1927,6 +1927,22 @@ out:
return page;
}
+void __init_refok zone_init_compact_cached(struct zone *zone, gfp_t gfp_mask)
+{
+ pg_data_t *pgdat;
+
+ if (likely(zone->kabi_workaround))
+ return;
+
+ pgdat = zone->zone_pgdat;
+ if (slab_is_available())
+ zone->kabi_workaround = kzalloc_node(sizeof(struct zone_kabi_workaround),
+ gfp_mask, pgdat->node_id);
+ else
+ zone->kabi_workaround = alloc_bootmem_node(pgdat,
+ sizeof(struct zone_kabi_workaround));
+}
+
#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
@@ -1963,7 +1979,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
alloc_flags, preferred_zone,
migratetype);
if (page) {
- preferred_zone->compact_blockskip_flush = false;
+ preferred_zone->kabi_workaround->compact_cached.blockskip_flush = false;
preferred_zone->compact_considered = 0;
preferred_zone->compact_defer_shift = 0;
count_vm_event(COMPACTSUCCESS);
@@ -4506,6 +4522,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
set_pageblock_order(pageblock_default_order());
setup_usemap(pgdat, zone, zone_start_pfn, size);
+ zone_init_compact_cached(zone, GFP_KERNEL);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
BUG_ON(ret);
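
The zone_init_compact_cached() hunk above follows a common lazy-attach idiom: allocate the out-of-line state on first use, preferring the slab allocator once it is available and falling back to the boot-time allocator otherwise, and leave callers to tolerate a NULL pointer (the !zone->kabi_workaround and WARN_ON_ONCE checks). A minimal user-space analogue of that idiom is sketched here; the allocators and names (main_allocator_ready, early_alloc, ensure_cached_state) are stand-ins for slab_is_available(), alloc_bootmem_node() and the kernel helper, not the actual code.

/*
 * Sketch of the lazy-attach idiom: idempotent first-use allocation with
 * an early-allocator fallback.  Names and allocators are hypothetical.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct cached_state { unsigned long free_pfn, migrate_pfn; };

static bool main_allocator_ready;	/* stands in for slab_is_available() */
static unsigned char early_pool[64];	/* stands in for bootmem */
static size_t early_used;

static void *early_alloc(size_t sz)
{
	void *p;

	if (early_used + sz > sizeof(early_pool))
		return NULL;
	p = early_pool + early_used;
	early_used += sz;
	memset(p, 0, sz);
	return p;
}

/* Idempotent: safe to call from both the boot path and the runtime path. */
static void ensure_cached_state(struct cached_state **slot)
{
	if (*slot)
		return;
	*slot = main_allocator_ready ? calloc(1, sizeof(**slot))
				     : early_alloc(sizeof(**slot));
}

int main(void)
{
	struct cached_state *state = NULL;

	ensure_cached_state(&state);	/* boot path: early allocator */
	main_allocator_ready = true;
	ensure_cached_state(&state);	/* runtime path: no-op, already attached */

	return state ? 0 : 1;		/* callers must tolerate NULL */
}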