Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-18 06:50:08 +00:00)
powerpc: Replace mem_init_done with slab_is_available()
We have a powerpc specific global called mem_init_done which is "set on boot once kmalloc can be called". But that's not *quite* true. We set it at the bottom of mem_init(), and rely on the fact that mm_init() calls kmem_cache_init() immediately after that, and nothing is running in parallel.

So replace it with the generic and 100% correct slab_is_available().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit f691fa1080
parent 4f9c53c8cc
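Before the hunks themselves, a minimal sketch of the idiom the commit converges on may help: allocate from the slab once slab_is_available() reports the slab allocator is up, otherwise fall back to the memblock boot allocator. This is exactly what zalloc_maybe_bootmem() in the diff below already does; the helper name and include comments here are illustrative, not part of the patch.

/*
 * Illustrative sketch (not part of the patch) of the allocation idiom the
 * commit standardises on: slab_is_available() is the generic mm flag that
 * becomes true once kmem_cache_init() has run, so no arch-private
 * "mem_init_done" is needed to choose between slab and boot allocators.
 */
#include <linux/slab.h>		/* kzalloc(), slab_is_available() */
#include <linux/bootmem.h>	/* memblock_virt_alloc() */

static void *alloc_zeroed_maybe_boot(size_t size, gfp_t mask)
{
	void *p;

	if (slab_is_available())
		p = kzalloc(size, mask);		/* normal path, after mm_init() */
	else
		p = memblock_virt_alloc(size, 0);	/* early-boot fallback, pre-zeroed */

	return p;
}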
@@ -7,7 +7,6 @@
 extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
-extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long long memory_limit;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 	list_add_tail(&phb->list_node, &hose_list);
 	spin_unlock(&hose_spinlock);
 	phb->dn = dev;
-	phb->is_dynamic = mem_init_done;
+	phb->is_dynamic = slab_is_available();
 #ifdef CONFIG_PPC64
 	if (dev) {
 		int nid = of_node_to_nid(dev);
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
 		buf = altbuf;
 	} else {
 		buf = rtas_err_buf;
-		if (mem_init_done)
+		if (slab_is_available())
 			buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
 	}
 	if (buf)
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 
 	if (buff_copy) {
 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
-		if (mem_init_done)
+		if (slab_is_available())
 			kfree(buff_copy);
 	}
 	return ret;
@@ -10,7 +10,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 
-	if (mem_init_done)
+	if (slab_is_available())
 		p = kzalloc(size, mask);
 	else {
 		p = memblock_virt_alloc(size, 0);
@@ -61,7 +61,6 @@
 #define CPU_FTR_NOEXECUTE	0
 #endif
 
-int mem_init_done;
 unsigned long long memory_limit;
 
 #ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
 	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
 		VMALLOC_START, VMALLOC_END);
 #endif /* CONFIG_PPC32 */
-
-	mem_init_done = 1;
 }
 
 void free_initmem(void)
@@ -107,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
-	extern int mem_init_done;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	} else {
 		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -216,7 +215,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
 	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
 		       (unsigned long long)p, __builtin_return_address(0));
@@ -244,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	if ((v = p_mapped_by_tlbcam(p)))
 		goto out;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		struct vm_struct *area;
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
@@ -263,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
 		err = map_page(v+i, p+i, flags);
 	if (err) {
-		if (mem_init_done)
+		if (slab_is_available())
 			vunmap((void *)v);
 		return NULL;
 	}
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
 	if ((size == 0) || (paligned == 0))
 		return NULL;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		struct vm_struct *area;
 
 		area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
 {
 	void *addr;
 
-	if (!mem_init_done)
+	if (!slab_is_available())
 		return;
 
 	addr = (void *) ((unsigned long __force)