tlb: mmu_gather: Remove start/end arguments from tlb_gather_mmu()
The 'start' and 'end' arguments to tlb_gather_mmu() are no longer needed
now that there is a separate function for 'fullmm' flushing.

Remove the unused arguments and update all callers.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Yu Zhao <yuzhao@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/CAHk-=wjQWa14_4UpfDf=fiineNP+RH74kZeDMo_f1D35xNzq9w@mail.gmail.com
parent d8b450530b
commit a72afd8730
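
The change in one picture: gathering no longer carries a range, and full
address-space teardown gets its own entry point. Below is a minimal,
compilable userspace sketch of the new calling convention; the struct
layouts and function bodies are stand-ins invented for illustration, and
only the tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
signatures mirror the patch.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in types; the real ones live in the kernel. */
	struct mm_struct { int dummy; };
	struct mmu_gather { struct mm_struct *mm; bool fullmm; };

	/* New API: no range at gather time. */
	static void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
	{
		tlb->mm = mm;
		tlb->fullmm = false;
	}

	/* Full address-space teardown is now an explicit entry point. */
	static void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
	{
		tlb->mm = mm;
		tlb->fullmm = true;
	}

	static void tlb_finish_mmu(struct mmu_gather *tlb)
	{
		printf("flush mm=%p fullmm=%d\n", (void *)tlb->mm, tlb->fullmm);
	}

	int main(void)
	{
		struct mm_struct mm = { 0 };
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, &mm);	/* range teardown */
		tlb_finish_mmu(&tlb);

		tlb_gather_mmu_fullmm(&tlb, &mm);	/* exit/execve */
		tlb_finish_mmu(&tlb);
		return 0;
	}

The range itself still reaches the TLB code through the actual unmap
helpers (unmap_vmas(), free_pgd_range(), ...), as the hunks below show.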
@@ -23,7 +23,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm);			// start unmap for address space MM
  * {
  *	  for each vma that needs a shootdown do {
  *	    tlb_start_vma(tlb, vma);
@@ -398,7 +398,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 
-	tlb_gather_mmu(&tlb, mm, start, end);
+	tlb_gather_mmu(&tlb, mm);
 	free_pgd_range(&tlb, start, end, start, end);
 	tlb_finish_mmu(&tlb);
 #endif
@@ -708,7 +708,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, old_start, old_end);
+	tlb_gather_mmu(&tlb, mm);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
@@ -588,8 +588,7 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 }
 
 struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-				unsigned long start, unsigned long end);
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
 extern void tlb_finish_mmu(struct mmu_gather *tlb);
 
mm/hugetlb.c
@@ -3967,23 +3967,9 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
-
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages. However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
-
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb);
 }
@@ -506,7 +506,7 @@ static long madvise_cold(struct vm_area_struct *vma,
 		return -EINVAL;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
 	tlb_finish_mmu(&tlb);
 
@@ -558,7 +558,7 @@ static long madvise_pageout(struct vm_area_struct *vma,
 		return 0;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
 	tlb_finish_mmu(&tlb);
 
@@ -723,7 +723,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 				range.start, range.end);
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, range.start, range.end);
+	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
@@ -1534,7 +1534,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				start, start + size);
-	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
@@ -1561,7 +1561,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, address + size);
-	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
 	unmap_single_vma(&tlb, vma, address, range.end, details);
@@ -2671,7 +2671,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start, end);
+	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -253,21 +253,17 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  * @tlb: the mmu_gather structure to initialize
  * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
+ * @fullmm: @mm is without users and we're going to destroy the full address
+ *	    space (exit/execve)
  *
  * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
+ * tear-down from @mm.
  */
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			     unsigned long start, unsigned long end)
+			     bool fullmm)
 {
 	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm = !(start | (end+1));
+	tlb->fullmm = fullmm;
 
 #ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
@@ -287,16 +283,14 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	inc_tlb_flush_pending(tlb->mm);
 }
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		    unsigned long start, unsigned long end)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
-	WARN_ON(!(start | (end + 1))); /* Use _fullmm() instead */
-	__tlb_gather_mmu(tlb, mm, start, end);
+	__tlb_gather_mmu(tlb, mm, false);
 }
 
 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 {
-	__tlb_gather_mmu(tlb, mm, 0, -1);
+	__tlb_gather_mmu(tlb, mm, true);
 }
 
 /**
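
Worth spelling out why the explicit boolean beats the retired sentinel:
the old code inferred "full mm" from the magic range 0..-1, and the old
tlb_gather_mmu() even had to WARN when handed that range to steer callers
to _fullmm(). A standalone C sketch of the retired detection (illustrative
only, not kernel code):

	#include <stdio.h>

	/* Old detection: start == 0 and end == ~0UL both vanish under
	 * (start | (end + 1)), so the negation flags a full-mm gather. */
	static int old_is_fullmm(unsigned long start, unsigned long end)
	{
		return !(start | (end + 1));
	}

	int main(void)
	{
		printf("%d\n", old_is_fullmm(0, -1UL));        /* 1: full mm */
		printf("%d\n", old_is_fullmm(0x1000, 0x2000)); /* 0: plain range */
		return 0;
	}

With the boolean, the intent is stated at the call site and the sentinel
check (and its WARN_ON) disappears.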
@@ -546,7 +546,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
 						vma, mm, vma->vm_start,
 						vma->vm_end);
-			tlb_gather_mmu(&tlb, mm, range.start, range.end);
+			tlb_gather_mmu(&tlb, mm);
 			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
 				tlb_finish_mmu(&tlb);
 				ret = false;