Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2017-10-05 15:53:32 +0200
committerVlastimil Babka <vbabka@suse.cz>2017-10-05 15:53:32 +0200
commitb0b53e213df10581a8a3867cdfa1dfeacfa64507 (patch)
treec51697224a0ab347f5b50ff42caae9ac9f4d8c63
parenta5a4b4797cca570556bab911927bf2c3b25733c9 (diff)
parent99cdf5468e4aec73fd56a2f0d38094ff0017f5f1 (diff)
Merge branch 'users/vbabka/SLE15/bsc1061775' into users/vbabka/SLE15/for-next
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_state.c13
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 98d08b4579fa..1efa5a132d5c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
del_page_from_lru_list(page, lruvec,
@@ -664,7 +664,7 @@ void deactivate_file_page(struct page *page)
void mark_page_lazyfree(struct page *page)
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageUnevictable(page)) {
+ !PageSwapCache(page) && !PageUnevictable(page)) {
struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
get_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 539b8885e3d1..4297afabb659 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -208,6 +208,19 @@ int add_to_swap(struct page *page, struct list_head *list)
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
if (!err) {
+ /*
+ * Normally the page will be dirtied in unmap because its pte
+ * should be dirty. A special case is a MADV_FREE page: its
+ * pte could have the dirty bit cleared while the page's SwapBacked
+ * bit is still set, because clearing the dirty bit and the
+ * SwapBacked bit is not protected by a lock. For such a page, unmap
+ * will not set the dirty bit for it, so page reclaim will not write
+ * the page out. This can cause data corruption when the page is
+ * swapped in later. Always setting the dirty bit for the page
+ * solves the problem.
+ */
+ set_page_dirty(page);
+
return 1;
} else { /* -ENOMEM radix-tree allocation failure */
/*