author     Michal Hocko <mhocko@suse.com>  2017-11-29 16:42:27 +0100
committer  Michal Hocko <mhocko@suse.com>  2017-11-29 16:42:27 +0100
commit     deef723eb671b12a5318ffa47f58c2029c3f0681 (patch)
tree       914ae8e7532f57ace906a40538113ea7348ecdf9
parent     613f0f05e7ef5549a89d36d3150fd17ef7ea6840 (diff)
Refresh patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch.

Freshly created CoWed pmds really have to be marked dirty because g-u-p
will not break out of the loop otherwise. Kudos to Nicolai Stange for
noticing this.
-rw-r--r--  patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch  28
1 file changed, 5 insertions(+), 23 deletions(-)
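Why a clean CoWed pmd makes g-u-p spin: a FOLL_FORCE|FOLL_COW follower of a
read-only CoWed mapping is only allowed to proceed once the kernel can prove
the CoW has already happened, and can_follow_write_pmd() infers that from
pmd_dirty(). If do_huge_pmd_wp_page() leaves the freshly CoWed pmd clean,
get_user_pages() faults, the wp handler CoWs again without setting the dirty
bit, and the fault-and-retry loop never terminates. The following is a
minimal userspace C sketch of that state machine, not kernel code; fake_pmd,
can_follow_write(), handle_wp_fault() and simulate_gup() are invented
stand-ins for the real mm/gup.c and mm/huge_memory.c logic.

/* build: cc -o gup-livelock gup-livelock.c && ./gup-livelock */
#include <stdbool.h>
#include <stdio.h>

struct fake_pmd {
	bool write;	/* mapping grants write access */
	bool dirty;	/* pmd_mkdirty() has been applied */
};

/* Simplified stand-in for can_follow_write_pmd(): accept a read-only
 * mapping only once the CoW is proven done, inferred from the dirty bit. */
static bool can_follow_write(const struct fake_pmd *pmd)
{
	return pmd->write || pmd->dirty;
}

/* Stand-in for the wp fault path: CoW the page.  The earlier version of
 * the backport also dropped pmd_mkdirty() on this path, so the follower
 * could never tell that the CoW had already happened. */
static void handle_wp_fault(struct fake_pmd *pmd, bool mark_dirty)
{
	pmd->write = false;		/* the CoWed mapping stays read-only */
	pmd->dirty = mark_dirty;	/* the fix: dirty freshly CoWed pmds */
}

/* Mimics the __get_user_pages() fault-and-retry loop, capped so that the
 * livelock case still terminates in this demo. */
static int simulate_gup(bool mark_dirty)
{
	struct fake_pmd pmd = { .write = false, .dirty = false };
	int retries;

	for (retries = 0; retries < 5; retries++) {
		if (can_follow_write(&pmd))
			return retries;	/* g-u-p breaks out of the loop */
		handle_wp_fault(&pmd, mark_dirty);
	}
	return -1;			/* never breaks out: the livelock */
}

int main(void)
{
	printf("without pmd_mkdirty(): %d (-1 = endless retry)\n",
	       simulate_gup(false));
	printf("with pmd_mkdirty():    %d retry before success\n",
	       simulate_gup(true));
	return 0;
}

Keeping the pmd_mkdirty() calls on the CoW path, which is what dropping the
two do_huge_pmd_wp_page hunks in the diff below does, corresponds to
mark_dirty = true in the sketch.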
diff --git a/patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch b/patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch
index c8245f1d31..d486482680 100644
--- a/patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch
+++ b/patches.fixes/mm-thp-do-not-dirty-huge-pte-on-read-fault.patch
@@ -16,9 +16,9 @@ Signed-off-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Michal Hocko <mhocko@suse.com>
---
- mm/huge_memory.c | 25 ++++++++++---------------
+ mm/huge_memory.c | 21 ++++++++-------------
mm/migrate.c | 2 +-
- 2 files changed, 11 insertions(+), 16 deletions(-)
+ 2 files changed, 9 insertions(+), 14 deletions(-)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -40,25 +40,7 @@ Signed-off-by: Michal Hocko <mhocko@suse.com>
page_add_new_anon_rmap(page, vma, haddr);
mem_cgroup_commit_charge(page, memcg, false);
lru_cache_add_active_or_unevictable(page, vma);
-@@ -1205,7 +1205,7 @@ int do_huge_pmd_wp_page(struct mm_struct
- if (page_mapcount(page) == 1) {
- pmd_t entry;
- entry = pmd_mkyoung(orig_pmd);
-- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-+ entry = maybe_pmd_mkwrite(entry, vma);
- if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
- update_mmu_cache_pmd(vma, address, pmd);
- ret |= VM_FAULT_WRITE;
-@@ -1273,7 +1273,7 @@ alloc:
- } else {
- pmd_t entry;
- entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-+ entry = maybe_pmd_mkwrite(entry, vma);
- pmdp_huge_clear_flush_notify(vma, haddr, pmd);
- page_add_new_anon_rmap(new_page, vma, haddr);
- mem_cgroup_commit_charge(new_page, memcg, false);
-@@ -1335,17 +1335,12 @@ struct page *follow_trans_huge_pmd(struc
+@@ -1334,17 +1334,12 @@ struct page *follow_trans_huge_pmd(struc
VM_BUG_ON_PAGE(!PageHead(page), page);
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
@@ -81,7 +63,7 @@ Signed-off-by: Michal Hocko <mhocko@suse.com>
update_mmu_cache_pmd(vma, addr, pmd);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
-@@ -2686,7 +2681,7 @@ static void collapse_huge_page(struct mm
+@@ -2682,7 +2677,7 @@ static void collapse_huge_page(struct mm
pgtable = pmd_pgtable(_pmd);
_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
@@ -92,7 +74,7 @@ Signed-off-by: Michal Hocko <mhocko@suse.com>
* spin_lock() below is not the equivalent of smp_wmb(), so
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -1814,7 +1814,7 @@ fail_putback:
+@@ -1815,7 +1815,7 @@ fail_putback:
orig_entry = *pmd;
entry = mk_pmd(new_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);