revert "memcg, vmscan: do not fall into reclaim-all pass too quickly"
Revert commit e975de998b96 ("memcg, vmscan: do not fall into reclaim-all
pass too quickly").

I merged this prematurely - Michal and Johannes still disagree about the
overall design direction and the future remains unclear.

Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 99d7a8824a
commit 20ba27f52e
mm/vmscan.c | 17
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2176,11 +2176,10 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static int
+static void
 __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 {
 	unsigned long nr_reclaimed, nr_scanned;
-	int groups_scanned = 0;
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2198,7 +2197,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 		while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
 			struct lruvec *lruvec;
 
-			groups_scanned++;
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 			shrink_lruvec(lruvec, sc);
@@ -2226,8 +2224,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
-
-	return groups_scanned;
 }
 
 
@@ -2235,18 +2231,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
 	unsigned long nr_scanned = sc->nr_scanned;
-	int scanned_groups;
 
-	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-	/*
-	 * memcg iterator might race with other reclaimer or start from
-	 * a incomplete tree walk so the tree walk in __shrink_zone
-	 * might have missed groups that are above the soft limit. Try
-	 * another loop to catch up with others. Do it just once to
-	 * prevent from reclaim latencies when other reclaimers always
-	 * preempt this one.
-	 */
-	if (do_soft_reclaim && !scanned_groups)
-		__shrink_zone(zone, sc, do_soft_reclaim);
+	__shrink_zone(zone, sc, do_soft_reclaim);
 
 	/*
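For context, the hunks above remove a retry-once pass from shrink_zone(): when soft-limit reclaim was requested but the memcg walk in __shrink_zone() scanned no groups (for example because it raced with another reclaimer or started from an incomplete tree walk), the reverted code looped one extra time before giving up. Below is a minimal standalone sketch of that removed logic; it is not the kernel source, and the types and helper bodies are simplified, hypothetical stand-ins for illustration only.

/* Minimal standalone sketch -- NOT the kernel source.  All types and
 * helpers below are simplified, hypothetical stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct scan_control {
	bool may_soft_reclaim;	/* stand-in for the real scan_control state */
};

/* Stand-in: did the caller ask for soft-limit-targeted reclaim? */
static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
{
	return sc->may_soft_reclaim;
}

/* Stand-in for __shrink_zone(): before the revert it returned how many
 * memcg groups were actually scanned; after the revert it is void again. */
static int __shrink_zone(struct scan_control *sc, bool soft_reclaim)
{
	int groups_scanned = 0;
	/* ... walk the memcg hierarchy, reclaim, bump groups_scanned ... */
	(void)sc;
	(void)soft_reclaim;
	return groups_scanned;
}

static void shrink_zone(struct scan_control *sc)
{
	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
	int scanned_groups;

	scanned_groups = __shrink_zone(sc, do_soft_reclaim);
	/*
	 * Reverted behaviour: if the soft-limit walk scanned no groups at
	 * all (it may have raced with another reclaimer or started from an
	 * incomplete tree walk), retry exactly once instead of falling
	 * straight into the reclaim-all pass.
	 */
	if (do_soft_reclaim && !scanned_groups)
		__shrink_zone(sc, do_soft_reclaim);
}

int main(void)
{
	struct scan_control sc = { .may_soft_reclaim = true };

	shrink_zone(&sc);
	printf("shrink_zone() sketch ran\n");
	return 0;
}

After the revert, shrink_zone() simply calls __shrink_zone() once, as the final hunk above shows.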