hugetlb: rename max_hstate to hugetlb_max_hstate
This patchset implements a cgroup resource controller for HugeTLB pages. The controller allows limiting HugeTLB usage per control group and enforces the limit during page fault. Since HugeTLB does not support page reclaim, enforcing the limit at page-fault time means that an application receives a SIGBUS signal if it tries to access HugeTLB pages beyond its limit. This requires the application to know beforehand how many HugeTLB pages it will need.

The goal is to control how many HugeTLB pages a group of tasks can allocate. It can be seen as an extension of the existing quota interface, which limits the number of HugeTLB pages per hugetlbfs superblock. HPC job schedulers require jobs to specify their resource requirements in the job file. Once those requirements can be met, job schedulers such as SLURM schedule the job. We need to make sure that jobs do not consume more resources than requested; if they do, we should either error out or kill the application.

This patch: rename max_hstate to hugetlb_max_hstate. We will be using this from other subsystems, such as the hugetlb controller, in later patches.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
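As background (this example is not part of the patch), here is a minimal userspace sketch of why fault-time enforcement surfaces as SIGBUS: with MAP_NORESERVE the mapping itself can succeed, and a huge page that cannot be provided when first touched, because the pool is exhausted or, with the later cgroup controller, because the group's limit is reached, is reported to the task as SIGBUS. It assumes a 2 MB default huge page size.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Async-signal-safe handler: report the fault-time failure and exit. */
static void sigbus_handler(int sig)
{
	(void)sig;
	write(2, "SIGBUS: huge page could not be faulted in\n", 42);
	_exit(1);
}

int main(void)
{
	/* Assumes a 2 MB default huge page size; adjust for your system. */
	size_t len = 2UL * 1024 * 1024;
	char *p;

	signal(SIGBUS, sigbus_handler);

	/* The mapping may succeed even if no huge page is available yet. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_NORESERVE,
		 -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* The huge page is actually allocated here, at first touch. */
	memset(p, 0, len);
	printf("huge page faulted in\n");

	munmap(p, len);
	return 0;
}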
commit 47d38344ab
parent 3965c9ae47
 mm/hugetlb.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -34,7 +34,7 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
-static int max_hstate;
+static int hugetlb_max_hstate;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
@@ -46,7 +46,7 @@ static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
 
 #define for_each_hstate(h) \
-	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
+	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -1897,9 +1897,9 @@ void __init hugetlb_add_hstate(unsigned order)
 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
 		return;
 	}
-	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
+	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
 	BUG_ON(order == 0);
-	h = &hstates[max_hstate++];
+	h = &hstates[hugetlb_max_hstate++];
 	h->order = order;
 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
 	h->nr_huge_pages = 0;
@@ -1920,10 +1920,10 @@ static int __init hugetlb_nrpages_setup(char *s)
 	static unsigned long *last_mhp;
 
 	/*
-	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
+	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
 	 * so this hugepages= parameter goes to the "default hstate".
 	 */
-	if (!max_hstate)
+	if (!hugetlb_max_hstate)
 		mhp = &default_hstate_max_huge_pages;
 	else
 		mhp = &parsed_hstate->max_huge_pages;
@@ -1942,7 +1942,7 @@ static int __init hugetlb_nrpages_setup(char *s)
 	 * But we need to allocate >= MAX_ORDER hstates here early to still
 	 * use the bootmem allocator.
 	 */
-	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
+	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
 		hugetlb_hstate_alloc_pages(parsed_hstate);
 
 	last_mhp = mhp;
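For context, the following standalone sketch (not part of this commit; the simplified types and values are illustrative stand-ins, not the kernel's) shows the idiom the diff touches: a file-scope counter records how many entries of a fixed-size hstate array are registered, and the for_each_hstate() macro bounds the walk by that counter. Giving the counter the prefixed name hugetlb_max_hstate prepares it for use by other subsystems in later patches.

#include <stdio.h>

#define HUGE_MAX_HSTATE 3

struct hstate {
	unsigned int order;	/* huge page size == PAGE_SIZE << order */
};

static struct hstate hstates[HUGE_MAX_HSTATE];
static int hugetlb_max_hstate;	/* number of registered hstates */

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

/* Mirrors the shape of hugetlb_add_hstate(): register one huge page size. */
static void add_hstate(unsigned int order)
{
	if (hugetlb_max_hstate >= HUGE_MAX_HSTATE)
		return;
	hstates[hugetlb_max_hstate++].order = order;
}

int main(void)
{
	struct hstate *h;

	add_hstate(9);	/* e.g. 2 MB pages with a 4 KB PAGE_SIZE */
	add_hstate(18);	/* e.g. 1 GB pages */

	for_each_hstate(h)
		printf("hstate order %u\n", h->order);

	return 0;
}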