Mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git
[PATCH] uml: inline mk_pte and various friends
It turns out that, for UML, a *lot* of trivial VM-related functions are not inlined but built as normal out-of-line functions. Elsewhere in the UML code this is justified, because those files interact with the host and therefore cannot include kernel headers, but here there is no such justification. I've had to turn many of them into macros because of missing declarations. While doing this, I've reused some already existing macros.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent 4413a511f2
commit d99c4022f6
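To make the pattern concrete before the diff, here is a minimal, self-contained sketch (not from the patch; the demo_* names and the local "base" variable are stand-ins for uml_physmem and the real helpers): a trivial address-conversion helper that used to be an out-of-line function becomes a static inline in a header, so every caller compiles it down to a single addition or subtraction.

#include <stdio.h>

static unsigned long base;	/* stands in for uml_physmem */

/* header-style static inline, in the spirit of the new to_phys() */
static inline unsigned long demo_to_phys(void *virt)
{
	return ((unsigned long) virt) - base;
}

/* header-style static inline, in the spirit of the new to_virt() */
static inline void *demo_to_virt(unsigned long phys)
{
	return (void *) (base + phys);
}

int main(void)
{
	char pool[64];

	base = (unsigned long) pool;		/* pretend the pool is "physical" memory */
	void *v = demo_to_virt(16);		/* 16 bytes into the pool */
	printf("%lu\n", demo_to_phys(v));	/* round-trips back to 16 */
	return 0;
}

Because the body now lives in the header and is expanded at each call site, there is no longer a shared kernel symbol for modules to resolve, which is why the EXPORT_SYMBOL() lines for these helpers can also be dropped in the ksyms hunk below.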
@@ -13,7 +13,17 @@ extern int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w);
 extern int is_remapped(void *virt);
 extern int physmem_remove_mapping(void *virt);
 extern void physmem_forget_descriptor(int fd);
-extern unsigned long to_phys(void *virt);
-extern void *to_virt(unsigned long phys);
+
+extern unsigned long uml_physmem;
+
+static inline unsigned long to_phys(void *virt)
+{
+	return(((unsigned long) virt) - uml_physmem);
+}
+
+static inline void *to_virt(unsigned long phys)
+{
+	return((void *) uml_physmem + phys);
+}
 
 #endif
@@ -34,14 +34,9 @@ EXPORT_SYMBOL(host_task_size);
 EXPORT_SYMBOL(arch_validate);
 EXPORT_SYMBOL(get_kmem_end);
 
-EXPORT_SYMBOL(page_to_phys);
-EXPORT_SYMBOL(phys_to_page);
 EXPORT_SYMBOL(high_physmem);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(um_virt_to_phys);
-EXPORT_SYMBOL(__virt_to_page);
-EXPORT_SYMBOL(to_phys);
-EXPORT_SYMBOL(to_virt);
 EXPORT_SYMBOL(mode_tt);
 EXPORT_SYMBOL(handle_page_fault);
 EXPORT_SYMBOL(find_iomem);
@@ -248,16 +248,6 @@ unsigned long high_physmem;
 
 extern unsigned long physmem_size;
 
-void *to_virt(unsigned long phys)
-{
-	return((void *) uml_physmem + phys);
-}
-
-unsigned long to_phys(void *virt)
-{
-	return(((unsigned long) virt) - uml_physmem);
-}
-
 int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 {
 	struct page *p, *map;
@@ -298,31 +288,6 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 	return(0);
 }
 
-struct page *phys_to_page(const unsigned long phys)
-{
-	return(&mem_map[phys >> PAGE_SHIFT]);
-}
-
-struct page *__virt_to_page(const unsigned long virt)
-{
-	return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
-}
-
-phys_t page_to_phys(struct page *page)
-{
-	return((page - mem_map) << PAGE_SHIFT);
-}
-
-pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
-	pte_t pte;
-
-	pte_set_val(pte, page_to_phys(page), pgprot);
-	if(pte_present(pte))
-		pte_mknewprot(pte_mknewpage(pte));
-	return(pte);
-}
-
 /* Changed during early boot */
 static unsigned long kmem_top = 0;
@@ -96,8 +96,7 @@ extern unsigned long uml_physmem;
 
 #define __va_space (8*1024*1024)
 
-extern unsigned long to_phys(void *virt);
-extern void *to_virt(unsigned long phys);
+#include "mem.h"
 
 /* Cast to unsigned long before casting to void * to avoid a warning from
  * mmap_kmem about cutting a long long down to a void *.  Not sure that
@@ -326,14 +326,22 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-extern phys_t page_to_phys(struct page *page);
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
 */
 
-extern pte_t mk_pte(struct page *page, pgprot_t pgprot);
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define __virt_to_page(virt) phys_to_page(__pa(virt))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+
+#define mk_pte(page, pgprot) \
+	({ pte_t pte;					\
+							\
+	pte_set_val(pte, page_to_phys(page), (pgprot));	\
+	if (pte_present(pte))				\
+		pte_mknewprot(pte_mknewpage(pte));	\
+	pte;})
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
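The new mk_pte() uses a GNU C statement expression: the ({ ... }) block declares a local pte, fills it in, and the final `pte;` is the value the whole macro evaluates to, so callers keep using it exactly like the old function. A standalone sketch of the same construct (demo names only, not kernel code; build with gcc or clang, since statement expressions are a GNU extension):

#include <stdio.h>

struct demo_entry { unsigned long val; };

#define DEMO_PRESENT 0x1UL
#define DEMO_NEWPAGE 0x2UL

/* same shape as the new mk_pte(): build a value, tweak it, "return" it */
#define demo_mk_entry(pfn, flags)				\
	({ struct demo_entry __e;				\
	   __e.val = ((unsigned long) (pfn) << 12) | (flags);	\
	   if (__e.val & DEMO_PRESENT)				\
		__e.val |= DEMO_NEWPAGE;			\
	   __e; })

int main(void)
{
	struct demo_entry e = demo_mk_entry(5, DEMO_PRESENT);

	printf("%#lx\n", e.val);	/* 0x5003: pfn 5, present bit, plus the extra bit */
	return 0;
}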
@@ -410,8 +418,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #endif
 #endif
 
-extern struct page *phys_to_page(const unsigned long phys);
-extern struct page *__virt_to_page(const unsigned long virt);
 #define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
 
 /*
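For completeness, a sketch of how the macro chain kept by this hunk composes (demo_* macros with plain integers standing in for struct page; not kernel code): virt_to_page(addr) expands through __virt_to_page() to phys_to_page() to pfn_to_page(), so a single expression goes virtual address, to physical address, to page frame number, to page.

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PHYS_BASE	0x1000UL

/* fake __pa(): virtual to "physical" by subtracting a base */
#define demo_pa(virt)		((unsigned long) (virt) - DEMO_PHYS_BASE)
#define demo_phys_to_pfn(phys)	((phys) >> DEMO_PAGE_SHIFT)
#define demo_pfn_to_page(pfn)	(pfn)	/* the real one returns &mem_map[pfn] */
#define demo_phys_to_page(phys)	demo_pfn_to_page(demo_phys_to_pfn(phys))
#define demo_virt_to_page(virt)	demo_phys_to_page(demo_pa(virt))

int main(void)
{
	/* a "virtual address" one page above the base lands on page 1 */
	printf("%lu\n", demo_virt_to_page(0x2000UL));
	return 0;
}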