author    Jiri Kosina <jkosina@suse.cz>  2017-10-05 16:03:59 +0200
committer Jiri Kosina <jkosina@suse.cz>  2017-10-05 16:03:59 +0200
commit    a8d3999dc7eca26e0a45360414d726a4049c6b3f (patch)
tree      9494f9c087d90a7200a456edd9c268a39bb12670
parent    d543c2bdc30ebc002e99f4c63b64eefc438b7d10 (diff)
parent    b0b53e213df10581a8a3867cdfa1dfeacfa64507 (diff)
Merge remote-tracking branch 'origin/users/vbabka/SLE15/for-next' into SLE15
-rw-r--r--  mm/page_alloc.c  |  3
-rw-r--r--  mm/swap.c        |  4
-rw-r--r--  mm/swap_state.c  | 13
3 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 136c43670911..792f26daa8b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4028,10 +4028,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
- gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
+ gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
gfp_mask &= gfp_allowed_mask;
+ alloc_mask = gfp_mask;
if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
return NULL;
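The page_alloc.c hunk fixes an ordering bug: alloc_mask was initialized from gfp_mask before gfp_mask was filtered through gfp_allowed_mask, so disallowed flags could leak into the mask actually used for the allocation. A minimal standalone sketch of the same copy-before-filter pattern; the type and macros below are illustrative stand-ins, not the kernel's gfp machinery:

#include <stdio.h>

/* Hypothetical stand-ins, for illustration only. */
typedef unsigned int gfp_t;
#define GFP_FORBIDDEN     0x4u              /* a flag runtime policy disallows */
#define GFP_ALLOWED_MASK  (~GFP_FORBIDDEN)  /* plays the role of gfp_allowed_mask */

static gfp_t pick_alloc_mask(gfp_t gfp_mask)
{
    /* Buggy ordering (what the removed line did):
     *     gfp_t alloc_mask = gfp_mask;    // copies the unfiltered flags
     *     gfp_mask &= GFP_ALLOWED_MASK;   // too late for alloc_mask
     * Fixed ordering (what the hunk does): filter first, copy second. */
    gfp_t alloc_mask;

    gfp_mask &= GFP_ALLOWED_MASK;
    alloc_mask = gfp_mask;
    return alloc_mask;
}

int main(void)
{
    /* The request includes the forbidden bit; the fixed ordering drops it. */
    printf("alloc_mask = 0x%x\n", pick_alloc_mask(0x7u)); /* prints 0x3 */
    return 0;
}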
diff --git a/mm/swap.c b/mm/swap.c
index 98d08b4579fa..1efa5a132d5c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
del_page_from_lru_list(page, lruvec,
@@ -664,7 +664,7 @@ void deactivate_file_page(struct page *page)
void mark_page_lazyfree(struct page *page)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
get_page(page);
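Both swap.c hunks tighten the same predicate: a page that is already in the swap cache is no longer eligible for the lazyfree path, since it may still need writeback. A small sketch of the combined check; the struct and field names below are illustrative stand-ins, whereas the kernel tests these bits via PageLRU(), PageAnon(), and friends on struct page:

#include <stdbool.h>

/* Illustrative flag set mirroring the conditions in the hunks above. */
struct toy_page {
    bool lru, anon, swap_backed, swap_cache, unevictable;
};

/* Only an evictable, swap-backed anonymous LRU page that is NOT
 * already in the swap cache may be treated as lazyfree. */
static bool may_lazyfree(const struct toy_page *page)
{
    return page->lru && page->anon && page->swap_backed &&
           !page->swap_cache && !page->unevictable;
}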
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 539b8885e3d1..4297afabb659 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -208,6 +208,19 @@ int add_to_swap(struct page *page, struct list_head *list)
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
if (!err) {
+ /*
+ * Normally the page will be dirtied in unmap because its pte
+ * should be dirty. A special case is an MADV_FREE page: its pte
+ * could have the dirty bit cleared while the page's SwapBacked
+ * bit is still set, because clearing the dirty bit and the
+ * SwapBacked bit is not done under a common lock. For such a page,
+ * unmap will not set the dirty bit, so page reclaim will not write
+ * the page out. This can cause data corruption when the page is
+ * swapped in later. Always setting the dirty bit for the page
+ * solves the problem.
+ */
+ set_page_dirty(page);
+
return 1;
} else { /* -ENOMEM radix-tree allocation failure */
/*
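The swap_state.c hunk closes the race the comment describes by unconditionally dirtying the page once it has a swap entry. A toy model of the reclaim decision, under the assumption stated in the comment that reclaim only writes out dirty pages; all names below are illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
    bool dirty;        /* PageDirty */
    bool swap_backed;  /* PageSwapBacked */
};

/* Reclaim writes a swap-backed page to swap only if it is dirty;
 * a clean page is assumed to be safely discardable. */
static bool reclaim_writes_out(const struct toy_page *p)
{
    return p->swap_backed && p->dirty;
}

int main(void)
{
    /* MADV_FREE race: the pte dirty bit was cleared, but SwapBacked
     * is still set -- without the fix, the page is dropped unwritten
     * and a later swap-in reads stale data. */
    struct toy_page racy = { .dirty = false, .swap_backed = true };
    printf("written out: %d\n", reclaim_writes_out(&racy)); /* 0: data loss */

    /* The fix: add_to_swap() now calls set_page_dirty()
     * unconditionally, so reclaim always writes the page out. */
    racy.dirty = true;
    printf("written out: %d\n", reclaim_writes_out(&racy)); /* 1: safe */
    return 0;
}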