From acce1041cd757a6c02987a29b2990932368af6bf Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Mon, 11 Nov 2013 17:25:33 -0800
Subject: [PATCH] mm: make is_vmalloc_addr work properly.

There was a typo in the config guard for CONFIG_ENABLE_VMALLOC_SAVING
which meant that the code was never actually being compiled. As a
result, it was never noticed that the code had major flaws. Fix the
code to actually work as intended.

Change-Id: Ief3c00d16cf54e3b945ffb1bfde6b1fea2fa142e
Signed-off-by: Laura Abbott
---
 mm/vmalloc.c | 73 ++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 42 insertions(+), 31 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eb5ed0e703b..4d95f4d93c5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -204,36 +204,6 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 	return ret;
 }
 
-#ifdef ENABLE_VMALLOC_SAVING
-int is_vmalloc_addr(const void *x)
-{
-	struct rb_node *n;
-	struct vmap_area *va;
-	int ret = 0;
-
-	spin_lock(&vmap_area_lock);
-
-	for (n = rb_first(vmap_area_root); n; rb_next(n)) {
-		va = rb_entry(n, struct vmap_area, rb_node);
-		if (x >= va->va_start && x < va->va_end) {
-			ret = 1;
-			break;
-		}
-	}
-
-	spin_unlock(&vmap_area_lock);
-	return ret;
-}
-#else
-int is_vmalloc_addr(const void *x)
-{
-	unsigned long addr = (unsigned long)x;
-
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
-}
-#endif
-EXPORT_SYMBOL(is_vmalloc_addr);
-
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -299,9 +269,9 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define VM_LAZY_FREEING	0x02
 #define VM_VM_AREA	0x04
 
-static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
 LIST_HEAD(vmap_area_list);
+static DEFINE_SPINLOCK(vmap_area_lock);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -312,6 +282,47 @@ static unsigned long cached_align;
 
 static unsigned long vmap_area_pcpu_hole;
 
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+int is_vmalloc_addr(const void *x)
+{
+	struct vmap_area *va;
+	int ret = 0;
+
+	spin_lock(&vmap_area_lock);
+	list_for_each_entry(va, &vmap_area_list, list) {
+		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+			continue;
+
+		if (!(va->flags & VM_VM_AREA))
+			continue;
+
+		if (va->vm == NULL)
+			continue;
+
+		if (va->vm->flags & VM_LOWMEM)
+			continue;
+
+		if ((unsigned long)x >= va->va_start &&
+		    (unsigned long)x < va->va_end) {
+			ret = 1;
+			break;
+		}
+	}
+	spin_unlock(&vmap_area_lock);
+	return ret;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
+
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
 	struct rb_node *n = vmap_area_root.rb_node;
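
Note (not part of the patch): the removed rb-tree walk had several flaws visible
above beyond the config-guard typo -- rb_first() takes a pointer to the root, so
the call needed to be rb_first(&vmap_area_root); the return value of rb_next(n)
was discarded, so the loop could never advance past the first node; and the
const void *x was compared against the unsigned long va_start/va_end fields
without a cast. For reference, a minimal sketch of what a corrected rb-tree walk
could have looked like (illustrative only -- the patch instead iterates
vmap_area_list, which also lets it skip lazily freed and VM_LOWMEM areas, and
places the function below the declarations of vmap_area_lock and the VM_* flags
it uses):

	struct rb_node *n;
	struct vmap_area *va;
	int ret = 0;

	spin_lock(&vmap_area_lock);
	/* rb_first() wants a pointer to the root; rb_next() returns the
	 * successor and must be assigned back, or the loop never advances. */
	for (n = rb_first(&vmap_area_root); n; n = rb_next(n)) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if ((unsigned long)x >= va->va_start &&
		    (unsigned long)x < va->va_end) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&vmap_area_lock);
	return ret;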