iommu sg: powerpc: convert iommu to use the IOMMU helper
This patch converts PPC's IOMMU to use the IOMMU helper functions. The IOMMU no longer allocates a memory area that spans an LLD's segment boundary.

iseries_hv_alloc and iseries_hv_map don't have a proper device struct, so a 4GB boundary is used for them.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0291df8cc9
commit fb3475e9b6
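Before the diff, a note on the constraint being enforced: a single IOMMU allocation must no longer cross a low-level driver's (LLD's) DMA segment boundary. The following is a rough, self-contained sketch of what such a boundary check means; the function name spans_boundary and the example numbers are illustrative assumptions, not the kernel's iommu-helper code.

/* Minimal sketch (illustrative, not the kernel's implementation) of a
 * segment-boundary check. All quantities are in IOMMU pages and
 * boundary_size is assumed to be a power of two. */
#include <stdio.h>

static int spans_boundary(unsigned long index, unsigned long nr,
                          unsigned long boundary_size)
{
        /* The range [index, index + nr) crosses a boundary if its first
         * and last pages fall into different boundary-sized segments. */
        unsigned long first_seg = index & ~(boundary_size - 1);
        unsigned long last_seg  = (index + nr - 1) & ~(boundary_size - 1);
        return first_seg != last_seg;
}

int main(void)
{
        /* Example: 4 KiB IOMMU pages, 64 KiB segment boundary,
         * i.e. 16 IOMMU pages per segment. */
        unsigned long boundary_pages = 16;

        printf("%d\n", spans_boundary(12, 8, boundary_pages)); /* 1: crosses page 16 */
        printf("%d\n", spans_boundary(16, 8, boundary_pages)); /* 0: fits in one segment */
        return 0;
}

In the patch itself this check is delegated to iommu_area_alloc() from the generic IOMMU helper, with the boundary expressed in IOMMU pages (boundary_size >> IOMMU_PAGE_SHIFT), as the hunks below show.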
@@ -256,6 +256,9 @@ config IOMMU_VMERGE
 
          Most drivers don't have this problem; it is safe to say Y here.
 
+config IOMMU_HELPER
+       def_bool PPC64
+
 config HOTPLUG_CPU
        bool "Support for enabling/disabling CPUs"
        depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
@@ -31,8 +31,8 @@ static inline unsigned long device_to_mask(struct device *dev)
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
 {
-       return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
-                                   device_to_mask(dev), flag,
+       return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+                                   dma_handle, device_to_mask(dev), flag,
                                    dev->archdata.numa_node);
 }
 
@@ -52,7 +52,7 @@ static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
                                        size_t size,
                                        enum dma_data_direction direction)
 {
-       return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+       return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
                                device_to_mask(dev), direction);
 }
 
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
+#include <linux/iommu-helper.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
 __setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+                                       struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
                                        unsigned long mask,
                                        unsigned int align_order)
 {
-       unsigned long n, end, i, start;
+       unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
+       unsigned long boundary_size;
 
        align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                start &= mask;
        }
 
-       n = find_next_zero_bit(tbl->it_map, limit, start);
-
-       /* Align allocation */
-       n = (n + align_mask) & ~align_mask;
-
-       end = n + npages;
-
-       if (unlikely(end >= limit)) {
+       if (dev)
+               boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                     1 << IOMMU_PAGE_SHIFT);
+       else
+               boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+       /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
+
+       n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+                            tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+                            align_mask);
+       if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                }
        }
 
-       for (i = n; i < end; i++)
-               if (test_bit(i, tbl->it_map)) {
-                       start = i+1;
-                       goto again;
-               }
-
-       for (i = n; i < end; i++)
-               __set_bit(i, tbl->it_map);
+       end = n + npages;
 
        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
        return n;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-               unsigned int npages, enum dma_data_direction direction,
-               unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+                             void *page, unsigned int npages,
+                             enum dma_data_direction direction,
+                             unsigned long mask, unsigned int align_order)
 {
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;
 
        spin_lock_irqsave(&(tbl->it_lock), flags);
 
-       entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+       entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
 {
        unsigned long entry, free_entry;
-       unsigned long i;
 
        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
        }
 
        ppc_md.tce_free(tbl, entry, npages);
-
-       for (i = 0; i < npages; i++)
-               __clear_bit(free_entry+i, tbl->it_map);
+       iommu_area_free(tbl->it_map, free_entry, npages);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -317,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
-               entry = iommu_range_alloc(tbl, npages, &handle,
+               entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);
 
                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -574,9 +571,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-               size_t size, unsigned long mask,
-               enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+                           void *vaddr, size_t size, unsigned long mask,
+                           enum dma_data_direction direction)
 {
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
@@ -593,7 +590,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
            ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
-       dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+       dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                 mask >> IOMMU_PAGE_SHIFT, align);
        if (dma_handle == DMA_ERROR_CODE) {
                if (printk_ratelimit()) {
@@ -625,8 +622,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-               dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+                          size_t size, dma_addr_t *dma_handle,
+                          unsigned long mask, gfp_t flag, int node)
 {
        void *ret = NULL;
        dma_addr_t mapping;
@@ -660,7 +658,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
-       mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+       mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
@@ -199,7 +199,7 @@ static struct iommu_table vio_iommu_table;
 
 void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
 {
-       return iommu_alloc_coherent(&vio_iommu_table, size, dma_handle,
+       return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
                                    DMA_32BIT_MASK, flag, -1);
 }
 EXPORT_SYMBOL_GPL(iseries_hv_alloc);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(iseries_hv_free);
 dma_addr_t iseries_hv_map(void *vaddr, size_t size,
                        enum dma_data_direction direction)
 {
-       return iommu_map_single(&vio_iommu_table, vaddr, size,
+       return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
                                DMA_32BIT_MASK, direction);
 }
 
@@ -85,13 +85,13 @@ extern int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                           int nelems, enum dma_data_direction direction);
 
-extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                                 dma_addr_t *dma_handle, unsigned long mask,
-                                 gfp_t flag, int node);
+extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+                                 size_t size, dma_addr_t *dma_handle,
+                                 unsigned long mask, gfp_t flag, int node);
 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-                                  size_t size, unsigned long mask,
-                                  enum dma_data_direction direction);
+extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+                                  void *vaddr, size_t size, unsigned long mask,
+                                  enum dma_data_direction direction);
 extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                               size_t size, enum dma_data_direction direction);
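A closing note on the NULL-device fallback seen above: iseries_hv_alloc() and iseries_hv_map() have no struct device to query, so iommu_range_alloc() assumes a 4GB segment boundary for them. The following is a rough standalone sketch of the boundary_size arithmetic the patch adds; the 64KB device boundary, the IOMMU_PAGE_SHIFT value of 12, and the stand-in ALIGN macro are assumptions for the demo, and a 64-bit build is assumed for the 1UL << 32 shift.

/* Rough sketch of the boundary_size computation, rewritten as standalone C.
 * Not kernel code; values are illustrative. */
#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12  /* assumed: 4 KiB IOMMU pages */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long dev_seg_boundary = 0xffffUL;  /* e.g. a 64 KiB boundary mask */
        unsigned long boundary_size;

        /* With a device: align (boundary mask + 1) up to a whole IOMMU page. */
        boundary_size = ALIGN(dev_seg_boundary + 1, 1UL << IOMMU_PAGE_SHIFT);
        printf("pages per segment (dev):  %lu\n", boundary_size >> IOMMU_PAGE_SHIFT);

        /* Without a device (NULL caller): fall back to a 4GB boundary. */
        boundary_size = ALIGN(1UL << 32, 1UL << IOMMU_PAGE_SHIFT);
        printf("pages per segment (NULL): %lu\n", boundary_size >> IOMMU_PAGE_SHIFT);
        return 0;
}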