mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git
mm: mempolicy: turn vma_set_policy() into vma_dup_policy()
Simple cleanup. Every user of vma_set_policy() does the same work, which looks a bit annoying, imho. Add a new trivial helper that does mpol_dup() + vma_set_policy() to simplify the callers.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Change-Id: Ie4e7939ebb449eee284a6aa74869dbc61513e188
This commit is contained in:
parent 2db8f5b5b5
commit bbf5ccd1e4
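For orientation, this is the caller-side change the message describes, condensed from the hunks below into one before/after sketch (a sketch only; the src/dst/err names are illustrative and not copied verbatim from any single call site):

/* Before: each caller open-coded duplicate + error check + assign. */
	pol = mpol_dup(vma_policy(src));	/* may return ERR_PTR() on failure */
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		goto out_free_vma;
	}
	vma_set_policy(dst, pol);		/* dst->vm_policy = pol */

/* After: one helper, vma_dup_policy(), hides the same sequence. */
	err = vma_dup_policy(src, dst);
	if (err)
		goto out_free_vma;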
include/linux/mempolicy.h
@@ -91,7 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -126,6 +125,7 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
@@ -240,7 +240,12 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 }
 
 #define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	return 0;
+}
 
 static inline void numa_policy_init(void)
 {
kernel/fork.c
@@ -378,7 +378,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
-	struct mempolicy *pol;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -427,11 +426,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem;
 		*tmp = *mpnt;
 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		pol = mpol_dup(vma_policy(mpnt));
-		retval = PTR_ERR(pol);
-		if (IS_ERR(pol))
+		retval = vma_dup_policy(mpnt, tmp);
+		if (retval)
 			goto fail_nomem_policy;
-		vma_set_policy(tmp, pol);
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
@@ -499,7 +496,7 @@ out:
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
-	mpol_put(pol);
+	mpol_put(vma_policy(tmp));
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
mm/mempolicy.c
@@ -2055,6 +2055,16 @@ retry_cpuset:
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	struct mempolicy *pol = mpol_dup(vma_policy(src));
+
+	if (IS_ERR(pol))
+		return PTR_ERR(pol);
+	dst->vm_policy = pol;
+	return 0;
+}
+
 /*
  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
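A side effect visible in the callers' error paths: once vma_dup_policy() has succeeded, the mempolicy reference is owned by the destination VMA, so later failure paths drop it with mpol_put(vma_policy(dst)) rather than through a local pol variable. A minimal sketch of that convention, assuming a hypothetical later step that can fail (the function name and prepare_something() are illustrative, not taken from the kernel):

/*
 * Sketch: clone src's policy into dst, then undo it if a later step fails.
 * prepare_something() is a hypothetical stand-in for e.g. anon_vma_clone().
 */
static int clone_policy_example(struct vm_area_struct *src,
				struct vm_area_struct *dst)
{
	int err;

	err = vma_dup_policy(src, dst);		/* dst->vm_policy now holds the ref */
	if (err)
		return err;			/* nothing to undo yet */

	if (prepare_something(dst)) {		/* hypothetical later step */
		mpol_put(vma_policy(dst));	/* drop the ref taken above */
		return -ENOMEM;
	}
	return 0;
}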
mm/mmap.c (17 changed lines)
@@ -2430,7 +2430,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      unsigned long addr, int new_below)
 {
-	struct mempolicy *pol;
 	struct vm_area_struct *new;
 	int err = -ENOMEM;
 
@@ -2454,12 +2453,9 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_dup(vma_policy(vma));
-	if (IS_ERR(pol)) {
-		err = PTR_ERR(pol);
+	err = vma_dup_policy(vma, new);
+	if (err)
 		goto out_free_vma;
-	}
-	vma_set_policy(new, pol);
 
 	if (anon_vma_clone(new, vma))
 		goto out_free_mpol;
@@ -2487,7 +2483,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		fput(new->vm_file);
 	unlink_anon_vmas(new);
 out_free_mpol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new));
 out_free_vma:
 	kmem_cache_free(vm_area_cachep, new);
 out_err:
@@ -2830,7 +2826,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma, *prev;
 	struct rb_node **rb_link, *rb_parent;
-	struct mempolicy *pol;
 	bool faulted_in_anon_vma = true;
 
 	/*
@@ -2876,10 +2871,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			new_vma->vm_start = addr;
 			new_vma->vm_end = addr + len;
 			new_vma->vm_pgoff = pgoff;
-			pol = mpol_dup(vma_policy(vma));
-			if (IS_ERR(pol))
+			if (vma_dup_policy(vma, new_vma))
 				goto out_free_vma;
-			vma_set_policy(new_vma, pol);
 			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
 			if (anon_vma_clone(new_vma, vma))
 				goto out_free_mempol;
@@ -2894,7 +2887,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_free_mempol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new_vma));
 out_free_vma:
 	kmem_cache_free(vm_area_cachep, new_vma);
 	return NULL;