[PATCH] CRIS update: pci
Patches to make it possible to add PCI support.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4f18cfbf09
commit 59c61138a5
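Before the hunks, a minimal usage sketch (not part of the patch) of the streaming DMA API that the first hunk below, the CRIS dma-mapping.h header, now implements via virt_to_phys. The device pointer, buffer and length are hypothetical and error handling is omitted:

#include <linux/dma-mapping.h>

/* Sketch only: map a driver buffer for a device-to-memory transfer,
 * start the hardware, then unmap.  On CRIS the returned handle is just
 * virt_to_phys(buf), as dma_map_single() in the hunk below shows. */
static int example_start_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... point the device's DMA engine at "handle" and kick it off ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}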
@@ -1,125 +1,179 @@
 /* DMA mapping. Nothing tricky here, just virt_to_phys */
 
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include "scatterlist.h"
+#include <linux/mm.h>
+#include <linux/kernel.h>
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 1;
-}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+#ifdef CONFIG_PCI
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+#else
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   int flag)
 {
 	BUG();
 	return NULL;
 }
 
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		  dma_addr_t dma_handle)
 {
 	BUG();
 }
+#endif
 
 static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return virt_to_phys(ptr);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG();
-	return 0;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
 static inline int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	   enum dma_data_direction direction)
 {
-	BUG();
-	return 1;
+	printk("Map sg\n");
+	return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	     enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
 {
-	BUG();
 }
 
 static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
-	    enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
 {
-	BUG();
 }
 
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
 
 static inline int
 dma_get_cache_alignment(void)
 {
 	/* no easy way to get cache size on all processors, so return
 	 * the maximum possible, to be safe */
 	return (1 << L1_CACHE_SHIFT_MAX);
 }
 
-static inline void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
-{
-	BUG();
-}
+#define dma_is_consistent(d) (1)
 
 static inline void
 dma_cache_sync(void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG();
 }
 
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
 #endif
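A hedged sketch (not from the patch) of how a driver would exercise the dma_supported()/dma_set_mask() pair added above; the device pointer is hypothetical and the 32-bit mask is written as a literal:

#include <linux/dma-mapping.h>

/* Sketch only: with the CRIS dma_supported() above, any mask of at least
 * 0x00ffffff is accepted, so a full 32-bit mask succeeds and is stored in
 * dev->dma_mask; narrower masks make dma_set_mask() return -EIO. */
static int example_set_mask(struct device *dev)
{
	if (dma_set_mask(dev, 0xffffffffULL))
		return -EIO;
	return 0;
}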
@@ -10,4 +10,12 @@
 
 #define MAX_DMA_ADDRESS PAGE_OFFSET
 
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
 #endif /* _ASM_DMA_H */
@@ -3,6 +3,21 @@
 
 #include <asm/page.h>   /* for __va, __pa */
 #include <asm/arch/io.h>
+#include <linux/kernel.h>
+
+struct cris_io_operations
+{
+	u32 (*read_mem)(void *addr, int size);
+	void (*write_mem)(u32 val, int size, void *addr);
+	u32 (*read_io)(u32 port, void *addr, int size, int count);
+	void (*write_io)(u32 port, void *addr, int size, int count);
+};
+
+#ifdef CONFIG_PCI
+extern struct cris_io_operations *cris_iops;
+#else
+#define cris_iops ((struct cris_io_operations*)NULL)
+#endif
 
 /*
  * Change virtual addresses to physical addresses and vv.
@@ -18,14 +33,17 @@ extern inline void * phys_to_virt(unsigned long address)
 	return __va(address);
 }
 
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+extern void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot);
 
-extern inline void * ioremap (unsigned long offset, unsigned long size)
+extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 {
 	return __ioremap(offset, size, 0);
 }
 
-extern void iounmap(void *addr);
+extern void iounmap(volatile void * __iomem addr);
+
+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
 
 /*
  * IO bus memory addresses are also 1:1 with the physical address
@@ -39,9 +57,32 @@ extern void iounmap(void *addr);
  * differently. On the CRIS architecture, we just read/write the
  * memory location directly.
  */
-#define readb(addr) (*(volatile unsigned char *) (addr))
-#define readw(addr) (*(volatile unsigned short *) (addr))
-#define readl(addr) (*(volatile unsigned int *) (addr))
+#ifdef CONFIG_PCI
+#define PCI_SPACE(x) ((((unsigned)(x)) & 0x10000000) == 0x10000000)
+#else
+#define PCI_SPACE(x) 0
+#endif
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		return cris_iops->read_mem((void*)addr, 1);
+	else
+		return *(volatile unsigned char __force *) addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		return cris_iops->read_mem((void*)addr, 2);
+	else
+		return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		return cris_iops->read_mem((void*)addr, 4);
+	else
+		return *(volatile unsigned int __force *) addr;
+}
 #define readb_relaxed(addr) readb(addr)
 #define readw_relaxed(addr) readw(addr)
 #define readl_relaxed(addr) readl(addr)
@@ -49,9 +90,27 @@ extern void iounmap(void *addr);
 #define __raw_readw readw
 #define __raw_readl readl
 
-#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		cris_iops->write_mem(b, 1, (void*)addr);
+	else
+		*(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		cris_iops->write_mem(b, 2, (void*)addr);
+	else
+		*(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+	if (PCI_SPACE(addr) && cris_iops)
+		cris_iops->write_mem(b, 4, (void*)addr);
+	else
+		*(volatile unsigned int __force *) addr = b;
+}
 #define __raw_writeb writeb
 #define __raw_writew writew
 #define __raw_writel writel
@@ -66,25 +125,25 @@ extern void iounmap(void *addr);
  * Again, CRIS does not require mem IO specific function.
  */
 
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
 
 /* The following is junk needed for the arch-independent code but which
  * we never use in the CRIS port
  */
 
 #define IO_SPACE_LIMIT 0xffff
-#define inb(x) (0)
-#define inw(x) (0)
-#define inl(x) (0)
-#define outb(x,y)
-#define outw(x,y)
-#define outl(x,y)
-#define insb(x,y,z)
-#define insw(x,y,z)
-#define insl(x,y,z)
-#define outsb(x,y,z)
-#define outsw(x,y,z)
-#define outsl(x,y,z)
+#define inb(port) (cris_iops ? cris_iops->read_io(port,NULL,1,1) : 0)
+#define inw(port) (cris_iops ? cris_iops->read_io(port,NULL,2,1) : 0)
+#define inl(port) (cris_iops ? cris_iops->read_io(port,NULL,4,1) : 0)
+#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
+#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
+#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
+#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
+#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
+#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
+#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
+#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
+#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
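A short sketch (not part of the patch) of a driver going through the reworked readl()/writel() above: accesses whose __iomem cookie matches PCI_SPACE() are dispatched to the registered cris_iops hooks, everything else stays a plain dereference. The BAR address and register offsets are made up for illustration:

#include <asm/io.h>

/* Sketch only: ioremap() a hypothetical PCI BAR and touch two registers.
 * If cris_iops is registered and the cookie falls in the 0x10000000 window,
 * the accesses go through cris_iops->write_mem()/read_mem(). */
static void example_poke_bar(unsigned long bar_phys)
{
	void __iomem *regs = ioremap(bar_phys, 0x100);

	if (!regs)
		return;
	writel(0x1, regs + 0x40);	/* hypothetical control register */
	(void)readl(regs + 0x44);	/* hypothetical status register */
	iounmap(regs);
}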
@@ -1,13 +1,105 @@
 #ifndef __ASM_CRIS_PCI_H
 #define __ASM_CRIS_PCI_H
 
 #include <linux/config.h>
 
+#ifdef __KERNEL__
+#include <linux/mm.h>	/* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+   already-configured bus numbers - to be used for buggy BIOSes
+   or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses(void) 1
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		0x10000000
+
+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+int pcibios_assign_resources(void);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+/* Dynamic DMA mapping stuff.
+ * i386 has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS	(1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)		(0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+
+/* This is always fine. */
+#define pci_dac_dma_supported(pci_dev, mask)	(1)
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+	return ((dma64_addr_t) page_to_phys(page) +
+		(dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	return pfn_to_page(dma_addr >> PAGE_SHIFT);
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state, int write_combine);
+
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
-/* ETRAX chips don't have a PCI bus. This file is just here because some stupid .c code
- * includes it even if CONFIG_PCI is not set.
- */
-#define PCI_DMA_BUS_IS_PHYS	(1)
 /* generic pci stuff */
 #include <asm-generic/pci.h>
 
 #endif /* __ASM_CRIS_PCI_H */
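Finally, a hedged illustration (not from the patch) of what the asm-generic/pci-dma-compat.h include at the end of the new pci.h buys: the older pci_* DMA calls become thin wrappers around the dma_* functions from the first hunk. The pci_dev pointer, buffer and size are hypothetical:

#include <linux/pci.h>

/* Sketch only: via pci-dma-compat this becomes dma_map_single(&pdev->dev, ...),
 * which on CRIS reduces to virt_to_phys(buf). */
static dma_addr_t example_pci_map(struct pci_dev *pdev, void *buf, size_t size)
{
	return pci_map_single(pdev, buf, size, PCI_DMA_TODEVICE);
}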