mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git
synced 2024-11-23 11:59:58 +00:00
Revert "mm: Enhance per process reclaim to consider shared pages"
This reverts commit bc7c6b9da3
.
This commit is contained in:
parent 90f4514c86
commit 02f67278e4
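In short, the revert removes the target-VMA plumbing that per-process reclaim had threaded through the rmap code: try_to_unmap(), try_to_unmap_ksm()/_anon()/_file(), and reclaim_pages_from_list() all lose their struct vm_area_struct * argument, and struct scan_control loses its target_vma field. A sketch of the interface change, reconstructed from the hunks below rather than quoted from any single header:

/* Before the revert: per-process reclaim could restrict the unmap
 * to one target VMA (NULL meant "all mappings"). */
int try_to_unmap(struct page *page, enum ttu_flags flags,
		 struct vm_area_struct *vma);
unsigned long reclaim_pages_from_list(struct list_head *page_list,
				      struct vm_area_struct *vma);

/* After the revert: rmap always walks every mapping of the page. */
int try_to_unmap(struct page *page, enum ttu_flags flags);
unsigned long reclaim_pages_from_list(struct list_head *page_list);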
@@ -1243,7 +1243,7 @@ cont:
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
-	reclaim_pages_from_list(&page_list, vma);
+	reclaim_pages_from_list(&page_list);
 	if (addr != end)
 		goto cont;
@@ -75,8 +75,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 int page_referenced_ksm(struct page *page,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
-int try_to_unmap_ksm(struct page *page,
-			enum ttu_flags flags, struct vm_area_struct *vma);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
 int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
 		struct vm_area_struct *, unsigned long, void *), void *arg);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -116,8 +115,7 @@ static inline int page_referenced_ksm(struct page *page,
 	return 0;
 }
 
-static inline int try_to_unmap_ksm(struct page *page,
-		enum ttu_flags flags, struct vm_area_struct *target_vma)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
 	return 0;
 }
@@ -12,8 +12,7 @@
 
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
-						struct vm_area_struct *vma);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -193,8 +192,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
 
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
-int try_to_unmap(struct page *, enum ttu_flags flags,
-			struct vm_area_struct *vma);
+int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
 			unsigned long address, enum ttu_flags flags);
@@ -261,7 +259,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
 
-#define try_to_unmap(page, refs, vma) SWAP_FAIL
+#define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
 {
@@ -284,8 +284,10 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
+#endif
 #else /* !CONFIG_MMU */
 static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
mm/ksm.c (9 lines changed)
@@ -2004,8 +2004,7 @@ out:
 	return referenced;
 }
 
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags,
-				struct vm_area_struct *target_vma)
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
@@ -2018,12 +2017,6 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags,
 	stable_node = page_stable_node(page);
 	if (!stable_node)
 		return SWAP_FAIL;
-
-	if (target_vma) {
-		unsigned long address = vma_address(page, target_vma);
-		ret = try_to_unmap_one(page, target_vma, address, flags);
-		goto out;
-	}
 again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;
@@ -970,7 +970,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	ret = try_to_unmap(ppage, ttu, NULL);
+	ret = try_to_unmap(ppage, ttu);
 	if (ret != SWAP_SUCCESS)
 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(ppage));
@@ -848,8 +848,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	/* Establish migration ptes or remove ptes */
-	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS,
-			NULL);
+	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 skip_unmap:
 	if (!page_mapped(page))
@@ -985,8 +984,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
-	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS,
-			NULL);
+	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 	if (!page_mapped(hpage))
 		rc = move_to_new_page(new_hpage, hpage, 1, mode);
mm/rmap.c (57 lines changed)
@@ -1451,16 +1451,13 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
- * rmap method if @vma is NULL
+ * rmap method
  * @page: the page to unmap/unlock
  * @flags: action and flags
- * @target_vma: vma for unmapping a @page
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * If @target_vma isn't NULL, this function unmap a page from the vma
- *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
@@ -1468,19 +1465,12 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * 'LOCKED.
 */
-static int try_to_unmap_anon(struct page *page, enum ttu_flags flags,
-				struct vm_area_struct *target_vma)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
-	int ret = SWAP_AGAIN;
-	unsigned long address;
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff;
 	struct anon_vma_chain *avc;
-
-	if (target_vma) {
-		address = vma_address(page, target_vma);
-		return try_to_unmap_one(page, target_vma, address, flags);
-	}
+	int ret = SWAP_AGAIN;
 
 	anon_vma = page_lock_anon_vma_read(page);
 	if (!anon_vma)
@@ -1489,6 +1479,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags,
 	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
+		unsigned long address;
 
 		/*
 		 * During exec, a temporary VMA is setup and later moved.
@@ -1516,7 +1507,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags,
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
- * @target_vma: vma for unmapping @page
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
@@ -1528,8 +1518,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags,
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * 'LOCKED.
 */
-static int try_to_unmap_file(struct page *page, enum ttu_flags flags,
-				struct vm_area_struct *target_vma)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1539,26 +1528,16 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags,
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned long address;
 
 	if (PageHuge(page))
 		pgoff = page->index << compound_order(page);
 
 	mutex_lock(&mapping->i_mmap_mutex);
-	if (target_vma) {
-		/* We don't handle non-linear vma on ramfs */
-		if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-			goto out;
-		address = vma_address(page, target_vma);
-		ret = try_to_unmap_one(page, target_vma, address, flags);
-		goto out;
-	} else {
-		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-			address = vma_address(page, vma);
-			ret = try_to_unmap_one(page, vma, address, flags);
-			if (ret != SWAP_AGAIN || !page_mapped(page))
-				goto out;
-		}
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long address = vma_address(page, vma);
+		ret = try_to_unmap_one(page, vma, address, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
 	if (list_empty(&mapping->i_mmap_nonlinear))
@@ -1639,12 +1618,9 @@ out:
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
- * @vma : target vma for reclaim
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
- * If @vma is not NULL, this function try to remove @page from only @vma
- * without peeking all mapped vma for @page.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
@@ -1652,8 +1628,7 @@ out:
 * SWAP_FAIL - the page is unswappable
 * SWAP_MLOCK - page is mlocked.
 */
-int try_to_unmap(struct page *page, enum ttu_flags flags,
-			struct vm_area_struct *vma)
+int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
 
@@ -1661,11 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags,
 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
 	if (unlikely(PageKsm(page)))
-		ret = try_to_unmap_ksm(page, flags, vma);
+		ret = try_to_unmap_ksm(page, flags);
 	else if (PageAnon(page))
-		ret = try_to_unmap_anon(page, flags, vma);
+		ret = try_to_unmap_anon(page, flags);
 	else
-		ret = try_to_unmap_file(page, flags, vma);
+		ret = try_to_unmap_file(page, flags);
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
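For context, a minimal caller sketch (hypothetical helper, not part of this commit): with the vma argument gone, any caller of try_to_unmap() again unmaps the page from all of its VMAs. TTU_UNMAP and the SWAP_* return codes are the existing ones from include/linux/rmap.h.

static int unmap_page_everywhere(struct page *page)
{
	/* try_to_unmap() requires the caller to hold the page lock. */
	VM_BUG_ON(!PageLocked(page));

	/* SWAP_SUCCESS means every mapping was removed. */
	return try_to_unmap(page, TTU_UNMAP);
}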
@@ -1691,11 +1666,11 @@ int try_to_munlock(struct page *page)
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
 	if (unlikely(PageKsm(page)))
-		return try_to_unmap_ksm(page, TTU_MUNLOCK, NULL);
+		return try_to_unmap_ksm(page, TTU_MUNLOCK);
 	else if (PageAnon(page))
-		return try_to_unmap_anon(page, TTU_MUNLOCK, NULL);
+		return try_to_unmap_anon(page, TTU_MUNLOCK);
 	else
-		return try_to_unmap_file(page, TTU_MUNLOCK, NULL);
+		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
 void __put_anon_vma(struct anon_vma *anon_vma)
mm/vmscan.c (14 lines changed)
@@ -99,13 +99,6 @@ struct scan_control {
 	 * are scanned.
 	 */
 	nodemask_t *nodemask;
-
-	/*
-	 * Reclaim pages from a vma. If the page is shared by other tasks
-	 * it is zapped from a vma without reclaim so it ends up remaining
-	 * on memory until last task zap it.
-	 */
-	struct vm_area_struct *target_vma;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -1012,8 +1005,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page,
-					ttu_flags, sc->target_vma)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -1229,8 +1221,7 @@ static unsigned long shrink_page(struct page *page,
 	return reclaimed;
 }
 
-unsigned long reclaim_pages_from_list(struct list_head *page_list,
-					struct vm_area_struct *vma)
+unsigned long reclaim_pages_from_list(struct list_head *page_list)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1238,7 +1229,6 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list,
 		.may_writepage = 1,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.target_vma = vma,
 	};
 
 	LIST_HEAD(ret_pages);
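A hypothetical driver-side sketch (assumed caller, not from this commit): after the revert, a per-process reclaimer can only hand vmscan a list of isolated pages; it can no longer tell shrink_page_list() which VMA the pages were found in.

static unsigned long reclaim_isolated(struct list_head *page_list)
{
	/*
	 * Pages are expected to be isolated from the LRU already
	 * (e.g. with isolate_lru_page()); vmscan puts back whatever
	 * it cannot free.
	 */
	return reclaim_pages_from_list(page_list);
}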