mm,ksm: swapoff might need to copy
Before establishing that KSM page migration was the cause of my WARN_ON_ONCE(page_mapped(page))s, I suspected that they came from the lack of a ksm_might_need_to_copy() in swapoff's unuse_pte() - which in many respects is equivalent to faulting in a page.

In fact I've never caught that as the cause: but in theory it does at least need the KSM_RUN_UNMERGE check in ksm_might_need_to_copy(), to avoid bringing a KSM page back in when it's not supposed to be.

I intended to copy how it's done in do_swap_page(), but have a strong aversion to how "swapcache" ends up being used there: rework it with "page != swapcache".

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5117b3b835
commit 9e16b7fb1d
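For context on the patch below: the commit message leans on ksm_might_need_to_copy(), which decides whether a swapped-in page can be mapped back as-is or must be duplicated first. The fragment that follows is only a rough sketch of that decision, not the kernel's actual implementation: the function name is invented for illustration, it collapses the helper's non-KSM cases, and it assumes access to the ksm_run / KSM_RUN_UNMERGE state kept privately in mm/ksm.c.

/*
 * Rough sketch only -- NOT the kernel's ksm_might_need_to_copy().
 * Idea: a KSM page may normally be reused directly, but while KSM is
 * unmerging (ksm_run & KSM_RUN_UNMERGE) it must not be brought back
 * in, so a private copy is allocated and handed back instead.  The
 * caller (unuse_pte() in this patch) detects the copy by comparing
 * the returned page with the original swapcache page, and on its way
 * out must unlock and release the extra copy itself.
 */
static struct page *sketch_might_need_to_copy(struct page *page,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *new_page;

	if (!PageKsm(page) || !(ksm_run & KSM_RUN_UNMERGE))
		return page;		/* safe to map this page again */

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;		/* caller turns this into -ENOMEM */

	copy_user_highpage(new_page, page, addr, vma);
	__SetPageUptodate(new_page);
	__set_page_locked(new_page);	/* caller unlocks when page != swapcache */
	return new_page;
}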
mm/swapfile.c
@@ -874,11 +874,17 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct page *swapcache;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
+	swapcache = page;
+	page = ksm_might_need_to_copy(page, vma, addr);
+	if (unlikely(!page))
+		return -ENOMEM;
+
 	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
 					GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
@@ -897,7 +903,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
-	page_add_anon_rmap(page, vma, addr);
+	if (page == swapcache)
+		page_add_anon_rmap(page, vma, addr);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, addr);
 	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
@@ -908,6 +917,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	pte_unmap_unlock(pte, ptl);
 out_nolock:
+	if (page != swapcache) {
+		unlock_page(page);
+		put_page(page);
+	}
 	return ret;
 }
 