mirror of
https://github.com/FEX-Emu/linux.git
synced 2025-01-12 12:22:42 +00:00
47f2c3604f
This problem was introduced by changeset 14778d9072e53d2171f66ffd9657daff41acfaed.

Unlike the hugetlb code paths, the normal fault code is not set up to propagate PTE changes for large page sizes correctly, such as the ones we make for I/O mappings in io_remap_pfn_range(). It is absolutely necessary to update all sub-PTEs of a large-page mapping on a fault. Adding special handling for this would add considerable complexity to tlb_batch_add().

So let's just side-step the issue and forcefully dirty any writable PTEs created by io_remap_pfn_range(). The only other real option would be to disable the large-PTE code of io_remap_pfn_range(), and we really don't want to do that.

Much thanks to Mikael Pettersson for tracking down this problem and testing debug patches.

Signed-off-by: David S. Miller <davem@davemloft.net>
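The change itself is small: in io_remap_pte_range() below, every writable PTE is marked dirty before it is installed, so a later write fault never needs to dirty the sub-PTEs of a large mapping:

	if (pte_write(entry))
		entry = pte_mkdirty(entry);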
164 lines
4.3 KiB
C
/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
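/* A typical caller is a driver's ->mmap() handler.  A minimal sketch,
 * assuming a hypothetical mydev device whose mmap offset (vma->vm_pgoff)
 * already encodes the I/O space and page frame in GET_IOSPACE()/GET_PFN()
 * form:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  size, vma->vm_page_prot);
 *	}
 */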
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
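	/* Use the largest I/O page size the alignment allows: when the
	 * virtual address and the physical offset are suitably aligned
	 * and enough of the range remains, build a single 4MB, 512K or
	 * 64K PTE; otherwise fall back to a normal PAGE_SIZE PTE.
	 */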
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
		if (!(address & 0xffff)) {
			if (PAGE_SIZE < (4 * 1024 * 1024) &&
			    !(address & 0x3fffff) &&
			    !(offset & 0x3ffffe) &&
			    end >= address + 0x400000) {
				entry = mk_pte_io(offset, prot, space,
						  4 * 1024 * 1024);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (PAGE_SIZE < (512 * 1024) &&
				   !(address & 0x7ffff) &&
				   !(offset & 0x7fffe) &&
				   end >= address + 0x80000) {
				entry = mk_pte_io(offset, prot, space,
						  512 * 1024);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (PAGE_SIZE < (64 * 1024) &&
				   !(offset & 0xfffe) &&
				   end >= address + 0x10000) {
				entry = mk_pte_io(offset, prot, space,
						  64 * 1024);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

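		/* The generic fault path cannot propagate the dirty bit
		 * into every sub-PTE of a large mapping, so mark writable
		 * PTEs dirty up front (see the changelog above).
		 */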
		if (pte_write(entry))
			entry = pte_mkdirty(entry);
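		/* Fill every base-page PTE slot the entry covers,
		 * stepping the physical address by PAGE_SIZE; for a
		 * large mapping each sub-PTE keeps the large size bits.
		 */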
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
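	/* Bias the physical offset by the starting virtual address so
	 * that "address + offset" at each lower level yields the right
	 * physical offset for that address.
	 */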
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	offset -= address;
	do {
		pmd_t *pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	unsigned long phys_base;

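	/* Rebuild the full 64-bit physical address, with the I/O space
	 * number in the upper 32 bits, and expose the base pfn through
	 * vm_pgoff as expected for a VM_PFNMAP mapping.
	 */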
	phys_base = offset | (((unsigned long) space) << 32UL);

	/* See comment in mm/memory.c remap_pfn_range */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	vma->vm_pgoff = phys_base >> PAGE_SHIFT;

	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}

	flush_tlb_range(vma, beg, end);
	return error;
}