mirror of https://github.com/FEX-Emu/linux.git
iommu: use the __iommu_attach_device() directly for deferred attach
Currently, domain attach can be deferred from the iommu driver to the device driver, and when the iommu subsystem initializes, the devices on the bus are scanned and the default groups are allocated. As a result, several devices can be added to the same group, as below:

[ 3.859417] pci 0000:01:00.0: Adding to iommu group 16
[ 3.864572] pci 0000:01:00.1: Adding to iommu group 16
[ 3.869738] pci 0000:02:00.0: Adding to iommu group 17
[ 3.874892] pci 0000:02:00.1: Adding to iommu group 17

But when these devices are attached, iommu_attach_device() does not allow a group to contain more than one device and returns an error otherwise, which conflicts with deferred attaching. Unfortunately, on my system some groups do contain two devices, for example:

[ 9.627014] iommu_group_device_count(): device name[0]:0000:01:00.0
[ 9.633545] iommu_group_device_count(): device name[1]:0000:01:00.1
...
[ 10.255609] iommu_group_device_count(): device name[0]:0000:02:00.0
[ 10.262144] iommu_group_device_count(): device name[1]:0000:02:00.1

This ultimately makes the tg3 driver fail when it calls dma_alloc_coherent() to allocate coherent memory in tg3_test_dma():

[ 9.660310] tg3 0000:01:00.0: DMA engine test failed, aborting
[ 9.754085] tg3: probe of 0000:01:00.0 failed with error -12
[ 9.997512] tg3 0000:01:00.1: DMA engine test failed, aborting
[ 10.043053] tg3: probe of 0000:01:00.1 failed with error -12
[ 10.288905] tg3 0000:02:00.0: DMA engine test failed, aborting
[ 10.334070] tg3: probe of 0000:02:00.0 failed with error -12
[ 10.578303] tg3 0000:02:00.1: DMA engine test failed, aborting
[ 10.622629] tg3: probe of 0000:02:00.1 failed with error -12

Similar situations also occur with other drivers, such as bnxt_en. The problem can easily be reproduced in a kdump kernel when SME is active.

Move the handling currently done in iommu_dma_deferred_attach() into the iommu core code so that it can call __iommu_attach_device() directly instead of iommu_attach_device(); the external interface iommu_attach_device() is not suitable for handling this situation.

Signed-off-by: Lianbo Jiang <lijiang@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20210126115337.20068-3-lijiang@redhat.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
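For context, here is a simplified, paraphrased sketch (not part of this patch) of what the external interface does in the iommu core around this kernel version: it refuses to attach when the device's group holds more than one device, which is exactly the case for the multi-function NICs in the logs above. Details such as locking and error handling are abbreviated.

/*
 * Paraphrased from drivers/iommu/iommu.c of this era; simplified.
 * The -EINVAL for multi-device groups is what breaks deferred attach
 * when it goes through this external interface.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/* Hold the group lock so the device count cannot change under us. */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_device(domain, dev);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
	return ret;
}

__iommu_attach_device(), by contrast, attaches the single device without the group-size check, which is why the new core helper introduced below uses it.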
parent a8e8af35c9
commit 3ab6572916
drivers/iommu/dma-iommu.c

@@ -380,18 +380,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	return iova_reserve_iommu_regions(dev, domain);
 }
 
-static int iommu_dma_deferred_attach(struct device *dev,
-		struct iommu_domain *domain)
-{
-	const struct iommu_ops *ops = domain->ops;
-
-	if (unlikely(ops->is_attach_deferred &&
-			ops->is_attach_deferred(domain, dev)))
-		return iommu_attach_device(domain, dev);
-
-	return 0;
-}
-
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -535,7 +523,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	dma_addr_t iova;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_dma_deferred_attach(dev, domain))
+	    iommu_deferred_attach(dev, domain))
 		return DMA_MAPPING_ERROR;
 
 	size = iova_align(iovad, size + iova_off);
@@ -694,7 +682,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	*dma_handle = DMA_MAPPING_ERROR;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_dma_deferred_attach(dev, domain))
+	    iommu_deferred_attach(dev, domain))
 		return NULL;
 
 	min_size = alloc_sizes & -alloc_sizes;
@@ -978,7 +966,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_dma_deferred_attach(dev, domain))
+	    iommu_deferred_attach(dev, domain))
 		return 0;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
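Note that the iommu_deferred_attach_enabled static key appears only as pre-existing context in the hunks above; it is not defined by this patch. For readers unfamiliar with the mechanism these call sites rely on, here is a minimal, hypothetical sketch of the static-key pattern (names are illustrative, not taken from the kernel):

#include <linux/jump_label.h>

/* Off by default: callers see a patched-out branch until it is enabled. */
static DEFINE_STATIC_KEY_FALSE(example_deferred_attach_enabled);

/* Flipped once, early, if any IOMMU driver asks for deferred attach. */
static void example_enable_deferred_attach(void)
{
	static_branch_enable(&example_deferred_attach_enabled);
}

/*
 * Fast-path check: static_branch_unlikely() is patched at runtime, so the
 * common case (key disabled) pays essentially nothing for this test.
 */
static bool example_deferred_attach_needed(void)
{
	return static_branch_unlikely(&example_deferred_attach_enabled);
}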
drivers/iommu/iommu.c

@@ -1980,6 +1980,16 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
+{
+	const struct iommu_ops *ops = domain->ops;
+
+	if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+		return __iommu_attach_device(domain, dev);
+
+	return 0;
+}
+
 /*
  * Check flags and other user provided data for valid combinations. We also
  * make sure no reserved fields or unused flags are set. This is to ensure
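The new core helper keys off the driver's is_attach_deferred() callback. Purely as an illustrative sketch (not taken from this patch; the struct and names are hypothetical), such a callback typically just reports per-device state that the driver recorded at probe time, for example when booting a kdump kernel with SME active:

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical per-device state an IOMMU driver might keep. */
struct example_dev_data {
	bool defer_attach;
};

static bool example_iommu_is_attach_deferred(struct iommu_domain *domain,
					     struct device *dev)
{
	struct example_dev_data *dev_data = dev_iommu_priv_get(dev);

	/* Set by the driver at probe time; iommu_deferred_attach() checks it. */
	return dev_data && dev_data->defer_attach;
}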
include/linux/iommu.h

@@ -376,6 +376,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
 void iommu_device_sysfs_remove(struct iommu_device *iommu);
 int iommu_device_link(struct iommu_device *iommu, struct device *link);
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
 
 static inline void __iommu_device_set_ops(struct iommu_device *iommu,
 					  const struct iommu_ops *ops)