iommu/amd: Optimize dma_ops_free_addresses

Don't flush the IOMMU TLB when we free something behind the
current next_bit pointer. Update the next_bit pointer
instead and let the flush happen on the next wraparound in
the allocation path.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 4eeca8c5e7
parent ab7032bb9c
Author: Joerg Roedel <jroedel@suse.de>
Date:   2015-12-22 12:15:35 +01:00

@@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 		return;
 #endif
 
-	if (amd_iommu_unmap_flush ||
-	    (address + pages > range->next_bit)) {
+	if (amd_iommu_unmap_flush) {
 		domain_flush_tlb(&dom->domain);
 		domain_flush_complete(&dom->domain);
 	}
@@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&range->bitmap_lock, flags);
+	if (address + pages > range->next_bit)
+		range->next_bit = address + pages;
 	bitmap_clear(range->bitmap, address, pages);
 	spin_unlock_irqrestore(&range->bitmap_lock, flags);
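
To make the scheme concrete, here is a minimal standalone sketch of the
deferred-flush idea, in the spirit of the patch but not kernel code: the
toy_range, toy_alloc, toy_free and toy_flush_tlb names are invented for
illustration, the bitmap is a plain bool array, and locking (the real code
holds range->bitmap_lock) is omitted. Freeing only records a high-water
mark in next_bit; the allocator searches upward from next_bit and flushes
the IOTLB once per wraparound, right before low addresses can be reused.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define RANGE_BITS 16

struct toy_range {
	bool bitmap[RANGE_BITS];	/* one entry per page of IOVA space */
	size_t next_bit;		/* search start / high-water mark */
};

static void toy_flush_tlb(void)
{
	/* stands in for domain_flush_tlb() + domain_flush_complete() */
	puts("IOTLB flush");
}

/* Allocate 'pages' contiguous slots; flush only when wrapping around. */
static long toy_alloc(struct toy_range *r, size_t pages)
{
	for (int pass = 0; pass < 2; pass++) {
		size_t start = pass ? 0 : r->next_bit;

		if (pass) {
			/*
			 * Wraparound: slots below next_bit may have been
			 * freed without a flush, so flush once before any
			 * of them can be reused.
			 */
			toy_flush_tlb();
		}

		for (size_t i = start; i + pages <= RANGE_BITS; i++) {
			size_t n;

			for (n = 0; n < pages && !r->bitmap[i + n]; n++)
				;
			if (n < pages)
				continue;
			for (n = 0; n < pages; n++)
				r->bitmap[i + n] = true;
			r->next_bit = i + pages;
			return (long)i;
		}
	}
	return -1;	/* range exhausted */
}

/* Free without flushing; mirrors the two lines added by this patch. */
static void toy_free(struct toy_range *r, size_t address, size_t pages)
{
	/*
	 * If the freed region ends above next_bit, pull next_bit up so
	 * the region lands behind it: the allocator then cannot hand
	 * these slots out again before the next wraparound flush.
	 */
	if (address + pages > r->next_bit)
		r->next_bit = address + pages;
	for (size_t n = 0; n < pages; n++)
		r->bitmap[address + n] = false;
}

int main(void)
{
	struct toy_range r = { 0 };

	long a = toy_alloc(&r, 4);	/* slots 0-3, no flush */
	toy_alloc(&r, 4);		/* slots 4-7, no flush */
	toy_free(&r, (size_t)a, 4);	/* no flush; 0-3 stay behind next_bit */
	toy_alloc(&r, 4);		/* slots 8-11, still no flush */
	toy_alloc(&r, 4);		/* slots 12-15, next_bit == 16 */
	toy_alloc(&r, 4);		/* wraps: one flush, then reuses 0-3 */
	return 0;
}

The payoff is that the hot free path does no flushing at all; correctness
is preserved because a stale IOTLB entry can only refer to an address
behind next_bit, and such addresses are never handed out again until a
wraparound, which flushes first.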