Mirror of https://github.com/FEX-Emu/linux.git, synced 2024-12-29 21:05:13 +00:00
Commit 449dd6984d
Previously, page cache radix tree nodes were freed after reclaim emptied
out their page pointers.  But now reclaim stores shadow entries in their
place, which are only reclaimed when the inodes themselves are reclaimed.
This is problematic for bigger files that are still in use after they
have a significant amount of their cache reclaimed, without any of those
pages actually refaulting.  The shadow entries will just sit there and
waste memory.  In the worst case, the shadow entries will accumulate
until the machine runs out of memory.

To get this under control, the VM will track radix tree nodes
exclusively containing shadow entries on a per-NUMA node list.  Per-NUMA
rather than global because we expect the radix tree nodes themselves to
be allocated node-locally and we want to reduce cross-node references of
otherwise independent cache workloads.  A simple shrinker will then
reclaim these nodes on memory pressure.

A few things need to be stored in the radix tree node to implement the
shadow node LRU and allow tree deletions coming from the list:

1. There is no index available that would describe the reverse path
   from the node up to the tree root, which is needed to perform a
   deletion.  To solve this, encode in each node its offset inside the
   parent.  This can be stored in the unused upper bits of the same
   member that stores the node's height at no extra space cost.

2. The number of shadow entries needs to be counted in addition to the
   regular entries, to quickly detect when the node is ready to go to
   the shadow node LRU list.  The current entry count is an unsigned
   int but the maximum number of entries is 64, so a shadow counter
   can easily be stored in the unused upper bits.

3. Tree modification needs the tree lock and the tree root, which are
   located in the address space, so store an address_space backpointer
   in the node.  The parent pointer of the node is in a union with the
   2-word rcu_head, so the backpointer comes at no extra cost as well.

4. The node needs to be linked to an LRU list, which requires a list
   head inside the node.  This does increase the size of the node, but
   it does not change the number of objects that fit into a slab page.

[akpm@linux-foundation.org: export the right function]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Metin Doslu <metin@citusdata.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ozgun Erdogan <ozgun@citusdata.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <klamm@yandex-team.ru>
Cc: Ryan Mallon <rmallon@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
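Points 1 and 2 above both rely on packing two small values into the
unused upper bits of one word.  A standalone sketch of that encoding
idea follows; the struct, field names, and bit widths are illustrative
only, not the kernel's actual radix_tree_node layout:

#include <assert.h>

/* Hypothetical layout: one small value in the low bits, another above. */
#define HEIGHT_BITS	6u			/* tree heights are tiny */
#define HEIGHT_MASK	((1u << HEIGHT_BITS) - 1)

struct demo_node {
	unsigned int path;	/* height in low bits, parent offset above */
	unsigned int count;	/* entries in low bits, shadow count above */
};

static void demo_set_path(struct demo_node *node,
			  unsigned int height, unsigned int offset)
{
	node->path = (offset << HEIGHT_BITS) | height;
}

int main(void)
{
	struct demo_node node;

	demo_set_path(&node, 3, 17);
	assert((node.path & HEIGHT_MASK) == 3);		/* height survives */
	assert((node.path >> HEIGHT_BITS) == 17);	/* offset survives */
	return 0;
}

The payoff is exactly what the commit message claims: the reverse path
and the shadow count are recorded without growing the node, because the
host members had spare bits anyway.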
153 lines · 3.4 KiB · C
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
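Taken together, list_lru_add() and list_lru_del() are idempotent: the
list_empty()/!list_empty() checks make a duplicate add, or a del of an
absent item, a harmless no-op that returns false.  A minimal usage
sketch with a hypothetical cached object follows; the demo_* names are
illustrative.  Note also that items must live in page-backed memory
such as slab allocations, since virt_to_page() on the item pointer is
what selects the per-NUMA list:

/* Hypothetical object type that lives on a list_lru. */
struct demo_object {
	struct list_head lru;	/* must be INIT_LIST_HEAD()ed at creation */
	/* ... payload ... */
};

static struct list_lru demo_lru;

/* Object became unused: make it visible to reclaim. */
static void demo_object_park(struct demo_object *obj)
{
	list_lru_add(&demo_lru, &obj->lru);	/* false if already queued */
}

/* Object is in use again: hide it from reclaim. */
static void demo_object_reuse(struct demo_object *obj)
{
	list_lru_del(&demo_lru, &obj->lru);	/* false if not queued */
}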
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
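This counts a single NUMA node.  A whole-LRU total can be layered on
top by walking the active_nodes mask, along the lines of the inline
helper one would expect in <linux/list_lru.h>; this is a sketch of that
shape, not a quote of the header:

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	/*
	 * Only nodes that currently hold items are set in active_nodes,
	 * so empty nodes cost nothing here.
	 */
	for_each_node_mask(nid, lru->active_nodes)
		count += list_lru_count_node(lru, nid);

	return count;
}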
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
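The isolate callback is what drives the switch above.  The sketch below
shows the two common contracts: return LRU_REMOVED after taking the
item off the list under the lock, or LRU_REMOVED_RETRY if the callback
had to drop and retake nlru->lock, which invalidates the walker's list
cursor.  All demo_* names are hypothetical; the real shadow-node
callback is more involved:

/* Hypothetical isolate callback; demo_* helpers are illustrative. */
static enum lru_status demo_isolate(struct list_head *item,
				    spinlock_t *lru_lock, void *cb_arg)
{
	struct demo_object *obj = container_of(item, struct demo_object, lru);

	if (!demo_object_trylock(obj))
		return LRU_SKIP;	/* contended; look again next pass */

	/* Take the item off the LRU while lru_lock is still held. */
	list_del_init(item);

	if (demo_object_teardown_may_sleep(obj)) {
		/*
		 * Dropping lru_lock invalidates the caller's traversal;
		 * retake it and report that with LRU_REMOVED_RETRY.
		 */
		spin_unlock(lru_lock);
		demo_object_teardown(obj);	/* may sleep; drops obj lock */
		spin_lock(lru_lock);
		return LRU_REMOVED_RETRY;
	}

	demo_object_free(obj);			/* drops obj lock and frees */
	return LRU_REMOVED;
}

Either way the caller, not the callback, adjusts nr_items and
active_nodes, which is why both LRU_REMOVED variants share that code in
the switch.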
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);
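The lock_class_key only feeds lockdep, letting a caller give its
per-node locks a distinct lock class.  Callers that don't care
presumably reach this through a trivial wrapper, sketched here (assumed
to live in the header, not quoted from it):

static inline int list_lru_init(struct list_lru *lru)
{
	return list_lru_init_key(lru, NULL);	/* default lock class */
}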
void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
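Tying back to the commit message: the "simple shrinker" that reclaims
shadow-only radix tree nodes sits on top of exactly this API, counting
via list_lru_count_node() and reclaiming via list_lru_walk_node().  A
hedged sketch of that shape follows; the demo_* names (including the
demo_shadow_isolate callback, assumed to delete the node's shadow
entries) are illustrative, and sc->nid is the NUMA node the shrinker
core passes to NUMA-aware shrinkers:

static struct list_lru demo_shadow_nodes;

static unsigned long demo_count_shadow_nodes(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	/* Cheap: just read the per-node item count. */
	return list_lru_count_node(&demo_shadow_nodes, sc->nid);
}

static unsigned long demo_scan_shadow_nodes(struct shrinker *shrinker,
					    struct shrink_control *sc)
{
	unsigned long nr_to_walk = sc->nr_to_scan;

	return list_lru_walk_node(&demo_shadow_nodes, sc->nid,
				  demo_shadow_isolate, NULL, &nr_to_walk);
}

static struct shrinker demo_shadow_shrinker = {
	.count_objects	= demo_count_shadow_nodes,
	.scan_objects	= demo_scan_shadow_nodes,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,
};

Registering it with register_shrinker(&demo_shadow_shrinker) would let
memory pressure drain the per-node shadow lists, which is how the
accumulation problem described at the top is kept in check.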