ARM: dma-mapping: remove redundant code and do the cleanup
This patch performs a global cleanup of the DMA mapping implementation for the ARM architecture. Some of the tiny helper functions have been moved into their callers, and some have been merged together.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
parent 15237e1f50
commit 51fde3499b
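The cleanup below removes a layer of trivial static inline wrappers whose only job was to guard an out-of-line cache-maintenance helper behind an arch_is_coherent() check, and folds that check directly into the callers. A minimal standalone sketch of the before/after shape (plain C with stubbed helpers; these names only mirror the pattern and are not kernel code):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool arch_is_coherent(void) { return false; }  /* stub: non-coherent system */

    /* Stand-in for the out-of-line ___dma_* cache-maintenance helpers. */
    static void cache_clean(const void *buf, size_t size)
    {
            printf("cleaning %zu bytes at %p\n", size, buf);
    }

    /* Before: every caller went through a one-line wrapper like this. */
    static inline void wrapper_cpu_to_dev(const void *buf, size_t size)
    {
            if (!arch_is_coherent())
                    cache_clean(buf, size);
    }

    /* After: the wrapper is deleted and the caller inlines the guarded call. */
    static void map_buffer(const void *buf, size_t size)
    {
            if (!arch_is_coherent())
                    cache_clean(buf, size);
            /* ... compute and return the DMA address ... */
    }

    int main(void)
    {
            char buf[64];
            map_buffer(buf, sizeof(buf));
            return 0;
    }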
@@ -40,64 +40,12 @@
  * the CPU does do speculative prefetches, which means we clean caches
  * before transfers and delay cache invalidation until transfer completion.
  *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
  */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
 		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
 		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
 
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -112,11 +60,13 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
  * The device owns this memory once this call has completed.  The CPU
  * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	return __dma_map_page(dev, page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
 /**
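In the rewritten arm_dma_map_page() above, the DMA address is derived directly: the page's pfn is translated to a bus pfn and the in-page offset is added back. A toy standalone illustration of that arithmetic, assuming an identity pfn/dma mapping and 4 KiB pages (the real pfn_to_dma() may apply a platform-specific translation; all values here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Toy stand-ins for page_to_pfn()/pfn_to_dma() under an identity mapping. */
    static uint64_t page_to_pfn_toy(uint64_t phys) { return phys >> PAGE_SHIFT; }
    static uint64_t pfn_to_dma_toy(uint64_t pfn)   { return pfn << PAGE_SHIFT; }

    int main(void)
    {
            uint64_t page_phys = 0x10000000;  /* hypothetical page address */
            uint64_t offset    = 0x80;        /* byte offset within the page */
            uint64_t handle    = pfn_to_dma_toy(page_to_pfn_toy(page_phys)) + offset;
            printf("dma handle = 0x%llx\n", (unsigned long long)handle);  /* 0x10000080 */
            return 0;
    }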
@@ -133,27 +83,31 @@ static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__dma_unmap_page(dev, handle, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+static void arm_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_device(struct device *dev,
+static void arm_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
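The sync routines above invert that mapping: masking the handle with PAGE_SIZE - 1 recovers the in-page offset, and handle - offset yields the page-aligned address that dma_to_pfn()/pfn_to_page() turn back into a struct page. The same toy setup as before, run in reverse (again assuming identity translation and 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
            uint64_t handle = 0x10000080;                /* handle from the toy example above */
            uint64_t offset = handle & (PAGE_SIZE - 1);  /* 0x80: low bits = in-page offset  */
            uint64_t base   = handle - offset;           /* 0x10000000: page-aligned address */
            printf("page base 0x%llx, offset 0x%llx\n",
                   (unsigned long long)base, (unsigned long long)offset);
            return 0;
    }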
@@ -647,7 +601,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	} while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
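The comment introduced here points drivers at the generic dma_sync_* API instead of these now-static helpers. A sketch of the intended driver-side usage of the streaming API for a device writing into a buffer (hypothetical driver fragment, not part of this patch; dma_mapping_error() checking elided):

    /* Map one page for the device to write into. */
    dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

    /* ... start the transfer and wait for the device to finish ... */

    /* Give the buffer back to the CPU before reading what the device wrote. */
    dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
    /* ... CPU inspects the data ... */

    /* Hand ownership back to the device for the next transfer. */
    dma_sync_single_for_device(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);

    /* ... final transfer done ... */
    dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);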
@@ -663,7 +623,7 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr = page_to_phys(page) + off;