Mirror of https://github.com/FEX-Emu/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memory_hotplug.c: initialize per_cpu_nodestats for hotadded pgdats
  mm, oom: fix uninitialized ret in task_will_free_mem()
  kasan: remove the unnecessary WARN_ONCE from quarantine.c
  mm: memcontrol: fix memcg id ref counter on swap charge move
  mm: memcontrol: fix swap counter leak on swapout from offline cgroup
  proc, meminfo: use correct helpers for calculating LRU sizes in meminfo
  mm/hugetlb: fix incorrect hugepages count during mem hotplug
commit 4b9eaf33d8
fs/proc/meminfo.c
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	cached = 0;
 
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	available = si_mem_available();
 
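Context for this one-liner: in v4.8 the LRU counters moved from per-zone to per-node accounting, so the zone-level global_page_state() no longer indexes the array where NR_LRU_BASE + lru lives; global_node_page_state() is the node-level reader. The identical substitution appears again in si_mem_available() at the end of this diff. A minimal userspace sketch of the indexing hazard; every name here is invented (the _sketch suffix marks them):

/* Sketch only: models why indexing the zone-stat array with a
 * node-stat item yields garbage. All names are invented. */
#include <stdio.h>

enum zone_stat_item_sketch { NR_FREE_PAGES_S, NR_ZONE_STATS_S };
enum node_stat_item_sketch { NR_INACTIVE_ANON_S, NR_ACTIVE_ANON_S,
			     NR_INACTIVE_FILE_S, NR_ACTIVE_FILE_S,
			     NR_NODE_STATS_S };

static long vm_zone_stat_sketch[NR_ZONE_STATS_S] = { 1234 };
static long vm_node_stat_sketch[NR_NODE_STATS_S] = { 10, 20, 30, 40 };

/* zone-level reader: the wrong helper for LRU items */
static long global_page_state_sketch(int item)
{
	return item < NR_ZONE_STATS_S ? vm_zone_stat_sketch[item] : -1;
}

/* node-level reader: where the LRU counters actually live */
static long global_node_page_state_sketch(int item)
{
	return vm_node_stat_sketch[item];
}

int main(void)
{
	/* wrong helper: the node item is out of range, nonsense result */
	printf("zone helper for ACTIVE_FILE: %ld\n",
	       global_page_state_sketch(NR_ACTIVE_FILE_S));
	/* right helper: the real count */
	printf("node helper for ACTIVE_FILE: %ld\n",
	       global_node_page_state_sketch(NR_ACTIVE_FILE_S));
	return 0;
}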
mm/hugetlb.c
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
 		list_del(&page->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
+		h->max_huge_pages--;
 		update_and_free_page(h, page);
 	}
 	spin_unlock(&hugetlb_lock);
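Why the single added line fixes the count: dissolve_free_huge_page() hands a free huge page straight back to the buddy allocator during memory hot-remove, outside the normal pool-resize path, so the pool's configured size (max_huge_pages) must shrink along with the free counts or it keeps claiming a page that no longer exists. A toy model of that invariant, with invented names:

/* Toy model: the pool size must shrink together with the free
 * counts when a free huge page is dissolved outright. */
#include <assert.h>
#include <stdio.h>

struct hstate_sketch {
	long max_huge_pages;   /* pages the pool claims to own */
	long free_huge_pages;  /* of those, currently free */
};

static void dissolve_one(struct hstate_sketch *h, int fixed)
{
	h->free_huge_pages--;
	if (fixed)
		h->max_huge_pages--;  /* the previously missing decrement */
}

int main(void)
{
	struct hstate_sketch h = { .max_huge_pages = 4, .free_huge_pages = 4 };

	dissolve_one(&h, 1);
	/* invariant: in-use pages = max - free stays honest (here 0) */
	printf("in use: %ld\n", h.max_huge_pages - h.free_huge_pages);
	assert(h.max_huge_pages - h.free_huge_pages == 0);
	return 0;
}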
mm/kasan/quarantine.c
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
 	new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
 		QUARANTINE_FRACTION;
 	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
-	if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
-		"Too little memory, disabling global KASAN quarantine.\n"))
-		new_quarantine_size = 0;
-	else
-		new_quarantine_size -= percpu_quarantines;
+	new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+		0 : new_quarantine_size - percpu_quarantines;
 	WRITE_ONCE(quarantine_size, new_quarantine_size);
 
 	last = global_quarantine.head;
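The dropped WARN_ONCE fired on any machine small enough that the per-CPU quarantines already consume the whole budget, which is a legitimate configuration rather than a kernel bug, so the patch folds the clamp into a plain conditional expression. A compilable sketch of the clamp with stand-in sizes (64 MB of RAM, 4 MB of per-CPU quarantine; both values invented for illustration):

/* Sketch of the clamp the patch substitutes for WARN_ONCE:
 * if the global budget would go negative, it simply becomes 0. */
#include <stdio.h>

int main(void)
{
	unsigned long total = 64UL << 20;              /* stand-in totalram */
	unsigned long new_quarantine_size = total / 32;
	unsigned long percpu_quarantines = 4UL << 20;  /* stand-in */

	new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
		0 : new_quarantine_size - percpu_quarantines;

	printf("global quarantine budget: %lu bytes\n", new_quarantine_size);
	return 0;
}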
mm/memcontrol.c
@@ -4077,14 +4077,32 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-	atomic_inc(&memcg->id.ref);
+	atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 {
-	if (atomic_dec_and_test(&memcg->id.ref)) {
+	while (!atomic_inc_not_zero(&memcg->id.ref)) {
+		/*
+		 * The root cgroup cannot be destroyed, so it's refcount must
+		 * always be >= 1.
+		 */
+		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+			VM_BUG_ON(1);
+			break;
+		}
+		memcg = parent_mem_cgroup(memcg);
+		if (!memcg)
+			memcg = root_mem_cgroup;
+	}
+	return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+	if (atomic_sub_and_test(n, &memcg->id.ref)) {
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
 		memcg->id.id = 0;
 
@@ -4093,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
 	}
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
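The layering above lets the charge-moving code take or release a whole batch of ID references (one per page of moved swap) in a single atomic operation, with mem_cgroup_id_get()/mem_cgroup_id_put() reduced to the n == 1 case. A self-contained sketch of that batch-plus-wrapper pattern using C11 atomics; every name below is invented:

/* Sketch: batch refcounting with single-reference wrappers on top,
 * mirroring the mem_cgroup_id_get_many()/_put_many() layering. */
#include <stdatomic.h>
#include <stdio.h>

struct obj_sketch { atomic_uint ref; };

static void id_get_many(struct obj_sketch *o, unsigned int n)
{
	atomic_fetch_add(&o->ref, n);
}

static void id_put_many(struct obj_sketch *o, unsigned int n)
{
	/* fetch_sub returns the old value: old == n means we hit zero */
	if (atomic_fetch_sub(&o->ref, n) == n)
		printf("last reference dropped, id can be recycled\n");
}

static void id_get(struct obj_sketch *o) { id_get_many(o, 1); }
static void id_put(struct obj_sketch *o) { id_put_many(o, 1); }

int main(void)
{
	struct obj_sketch o = { .ref = 1 };

	id_get_many(&o, 3);  /* e.g. three pages' worth of swap refs */
	id_put_many(&o, 3);
	id_get(&o);
	id_put(&o);
	id_put(&o);          /* drops the initial reference */
	return 0;
}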
@@ -4727,6 +4755,8 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.from))
 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
 		/*
 		 * we charged both to->memory and to->memsw, so we
 		 * should uncharge to->memory.
@@ -4734,9 +4764,9 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		css_put_many(&mc.from->css, mc.moved_swap);
+		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+		css_put_many(&mc.to->css, mc.moved_swap);
 
-		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);
@@ -5800,7 +5830,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned short oldid;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5815,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return;
 
-	mem_cgroup_id_get(memcg);
-	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+	/*
+	 * In case the memcg owning these pages has been offlined and doesn't
+	 * have an ID allocated to it anymore, charge the closest online
+	 * ancestor for the swap instead and transfer the memory+swap charge.
+	 */
+	swap_memcg = mem_cgroup_id_get_online(memcg);
+	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
 	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(memcg, true);
+	mem_cgroup_swap_statistics(swap_memcg, true);
 
 	page->mem_cgroup = NULL;
 
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
+	if (memcg != swap_memcg) {
+		if (!mem_cgroup_is_root(swap_memcg))
+			page_counter_charge(&swap_memcg->memsw, 1);
+		page_counter_uncharge(&memcg->memsw, 1);
+	}
+
 	/*
 	 * Interrupts should be disabled here because the caller holds the
 	 * mapping->tree_lock lock which is taken with interrupts-off. It is
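The swapout change matters because an offlined memcg has already released its ID, which the IDR may recycle; recording that ID against a swap entry could later credit a completely unrelated group. Walking up to the nearest ancestor whose refcount can still be raised guarantees a live ID, and the memsw transfer keeps the counters balanced when the charge lands on the ancestor. A small sketch of the nearest-live-ancestor walk; the types and names are invented:

/* Sketch: climb to the closest ancestor that is still alive,
 * falling back to the root, which can never be destroyed. */
#include <stdbool.h>
#include <stdio.h>

struct memcg_sketch {
	const char *name;
	bool online;
	struct memcg_sketch *parent;	/* NULL above the root */
};

static struct memcg_sketch root_sketch = { "root", true, NULL };

/* analog of mem_cgroup_id_get_online() */
static struct memcg_sketch *get_online_sketch(struct memcg_sketch *m)
{
	while (!m->online) {	/* stands in for atomic_inc_not_zero() failing */
		m = m->parent;
		if (!m)
			m = &root_sketch;	/* the root never goes away */
	}
	return m;
}

int main(void)
{
	struct memcg_sketch a = { "A", true, &root_sketch };
	struct memcg_sketch b = { "B", false, &a };	/* offlined child */

	printf("swap charged to: %s\n", get_online_sketch(&b)->name);
	return 0;
}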
@@ -5863,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return 0;
 
-	if (!mem_cgroup_is_root(memcg) &&
-	    !page_counter_try_charge(&memcg->swap, 1, &counter))
-		return -ENOMEM;
+	memcg = mem_cgroup_id_get_online(memcg);
+
+	if (!mem_cgroup_is_root(memcg) &&
+	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+		mem_cgroup_id_put(memcg);
+		return -ENOMEM;
+	}
 
-	mem_cgroup_id_get(memcg);
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
 	VM_BUG_ON_PAGE(oldid, page);
 	mem_cgroup_swap_statistics(memcg, true);
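The reshuffled error path is the second half of the same fix: the ID reference is now taken before the charge attempt, so a failed page_counter_try_charge() must drop it again or every -ENOMEM leaks one reference. A distilled sketch of that acquire-then-unwind shape, with invented helpers:

/* Sketch: a reference taken up front must be dropped on every
 * error path, else each failed charge leaks one ID reference. */
#include <errno.h>
#include <stdio.h>

static int refs;

static void id_get(void) { refs++; }
static void id_put(void) { refs--; }

static int try_charge_swap_sketch(int would_fail)
{
	id_get();		/* taken before the charge attempt */
	if (would_fail) {
		id_put();	/* the unwinding the patch adds */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	try_charge_swap_sketch(1);
	printf("leaked refs after failed charge: %d\n", refs);	/* 0 */
	return 0;
}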
mm/memory_hotplug.c
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
 	/* init node's zones as empty zones, we don't have any present pages.*/
 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
 	/*
 	 * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 {
 	arch_refresh_nodedata(nid, NULL);
+	free_percpu(pgdat->per_cpu_nodestats);
 	arch_free_nodedata(pgdat);
 
 	return;
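These two hunks are halves of one rule: hotadd_new_pgdat() now allocates the per-CPU node statistics that boot-time pgdats receive elsewhere, so rollback_node_hotadd() must free them or every failed hot-add leaks the percpu area. A userspace analog of the paired init/rollback, all names invented:

/* Sketch: whatever the init path allocates, the rollback path frees. */
#include <stdio.h>
#include <stdlib.h>

struct pgdat_sketch {
	long *per_cpu_nodestats;	/* stand-in for the percpu area */
};

static struct pgdat_sketch *hotadd_sketch(void)
{
	struct pgdat_sketch *pgdat = calloc(1, sizeof(*pgdat));

	if (!pgdat)
		return NULL;
	/* the allocation the patch adds to hotadd_new_pgdat() */
	pgdat->per_cpu_nodestats = calloc(8, sizeof(long));
	return pgdat;
}

static void rollback_sketch(struct pgdat_sketch *pgdat)
{
	/* the matching free the patch adds to rollback_node_hotadd() */
	free(pgdat->per_cpu_nodestats);
	free(pgdat);
}

int main(void)
{
	struct pgdat_sketch *pgdat = hotadd_sketch();

	if (pgdat)
		rollback_sketch(pgdat);	/* pretend the hot-add failed */
	puts("no leak on rollback");
	return 0;
}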
mm/oom_kill.c
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
 {
 	struct mm_struct *mm = task->mm;
 	struct task_struct *p;
-	bool ret;
+	bool ret = true;
 
 	/*
 	 * Skip tasks without mm because it might have passed its exit_mm and
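The uninitialized ret is the classic loop-only-assignment bug: task_will_free_mem() sets ret inside its process-list scan, so when no other task shares the mm the function returned whatever happened to be on the stack. Initializing to true encodes the intended default of "nothing contradicts the exiting task freeing its memory". A distilled sketch of the bug shape, using an invented checker:

/* Sketch of the uninitialized-return bug: if the loop matches
 * nothing, `ret` is never written and the caller reads garbage. */
#include <stdbool.h>
#include <stdio.h>

static bool check_all_sketch(const int *vals, int n, int key)
{
	bool ret = true;	/* the fix: a defined answer for "no match" */
	int i;

	for (i = 0; i < n; i++) {
		if (vals[i] != key)
			continue;		/* not sharing: skip */
		ret = (vals[i] % 2 == 0);	/* some per-match verdict */
		if (!ret)
			break;
	}
	return ret;	/* without the initializer: undefined when no match */
}

int main(void)
{
	int vals[] = { 1, 3, 5 };

	printf("%d\n", check_all_sketch(vals, 3, 7));	/* no match: 1 */
	return 0;
}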
mm/page_alloc.c
@@ -4060,7 +4060,7 @@ long si_mem_available(void)
 	int lru;
 
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	for_each_zone(zone)
 		wmark_low += zone->watermark[WMARK_LOW];