IOMMU Updates for Linux v4.16
Including:
 - 5-level page-table support for the Intel IOMMU
 - Error reporting improvements for the AMD IOMMU driver
 - Additional DT bindings for ipmmu-vmsa (Renesas)
 - Smaller fixes and cleanups

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABAgAGBQJafGLMAAoJECvwRC2XARrjPTUP/0g/n8H5j35DevM56G62MrNq
fNweMxPm7AqZQR/dnIkPnlH5NWfP1z5PZ47H/nAMAqd7cKHVOfUmzoufiUSGP92V
eweFF4ufjqA+V5fluGcnt0UNxgbEGs+cEgf9jbEkUlpmFisV7BwOCGIJbVdHMrxG
jkrr/L17iX82uqIru9JmfB2K0pEPBtBHQSZpooGHAyGsR4xU6nX1X64mV/a9Oh/2
qzfzRsAbF5ZtAszktVz9j2AMfp40BrrAcHzmvepjS5yTjlH9t5J8UdM48GHWU+Zp
ptmlJ3fJybe0yUI6GDfG9M6+/RX0T/xMvV1QcSJW6KP0q/i9p4hrIQufoOzstMYM
uCsFPlhMLFSDcQy6CZ3M6VEsU5mdJ0KMn0xAN8rBLAok1ScGKrlP5qWpXJLeUJRp
Ie7R4WVT+Ly/SLppoiLagiTW3ZD/gQh+YPNgYwXptMdDmiqSRdXm0nF6bzTiKk1Z
8h8oEj2ittwBTC+fXuP+1C/wOKYL6KJUGnykLcHBDO+/wkEWOP0KM6939+T7IjHt
zkiUapRegRvWyDOq1HFVl0tBCRLo1dqwG/3PFpqHUkj6Iyqyhd8y/V5IM3GTSI+d
1tHBz6dXin62N/xYu/ScpmPMerpjP/AtMqd3dvx7Q+9vgNIAVSPMKFqeXhQ3P2ph
+p1CdWvPYPb7wUhTvcja
=+LFh
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time there are not a lot of changes coming from the IOMMU side.
  That is partly because I returned from my parental leave late in the
  development process and probably partly because everyone was busy with
  Spectre and Meltdown mitigation work and didn't find the time for IOMMU
  work. So here are the few changes that queued up for this merge window:

   - 5-level page-table support for the Intel IOMMU

   - error reporting improvements for the AMD IOMMU driver

   - additional DT bindings for ipmmu-vmsa (Renesas)

   - small fixes and cleanups"

* tag 'iommu-updates-v4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu: Clean up of_iommu_init_fn
  iommu/ipmmu-vmsa: Remove redundant of_iommu_init_fn hook
  iommu/msm: Claim bus ops on probe
  iommu/vt-d: Enable 5-level paging mode in the PASID entry
  iommu/vt-d: Add a check for 5-level paging support
  iommu/vt-d: Add a check for 1GB page support
  iommu/vt-d: Enable upto 57 bits of domain address width
  iommu/vt-d: Use domain instead of cache fetching
  iommu/exynos: Don't unconditionally steal bus ops
  iommu/omap: Fix debugfs_create_*() usage
  iommu/vt-d: clean up pr_irq if request_threaded_irq fails
  iommu: Check the result of iommu_group_get() for NULL
  iommu/ipmmu-vmsa: Add r8a779(70|95) DT bindings
  iommu/ipmmu-vmsa: Add r8a7796 DT binding
  iommu/amd: Set the device table entry PPR bit for IOMMU V2 devices
  iommu/amd - Record more information about unknown events
commit ef9417e8a9
@@ -16,6 +16,9 @@ Required Properties:
     - "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
     - "renesas,ipmmu-r8a7794" for the R8A7794 (R-Car E2) IPMMU.
     - "renesas,ipmmu-r8a7795" for the R8A7795 (R-Car H3) IPMMU.
+    - "renesas,ipmmu-r8a7796" for the R8A7796 (R-Car M3-W) IPMMU.
+    - "renesas,ipmmu-r8a77970" for the R8A77970 (R-Car V3M) IPMMU.
+    - "renesas,ipmmu-r8a77995" for the R8A77995 (R-Car D3) IPMMU.
     - "renesas,ipmmu-vmsa" for generic R-Car Gen2 VMSA-compatible IPMMU.

 - reg: Base address and size of the IPMMU registers.

@@ -617,7 +617,9 @@ retry:
                         address, flags);
                 break;
         default:
-                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+                printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
+                       "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+                       type, event[0], event[1], event[2], event[3]);
         }

         memset(__evt, 0, 4 * sizeof(u32));

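The default case previously logged only the event type and dropped the payload; the change keeps all four raw event-log words so an unrecognised entry can still be decoded after the fact. A minimal user-space sketch of the same formatting, assuming the type field sits in the top nibble of event[1] (the sample values are made up):

#include <stdio.h>
#include <stdint.h>

/* Print one raw event-log entry (four 32-bit words), mirroring the
 * extended printk above.  The sample entry below is arbitrary. */
static void dump_unknown_event(const uint32_t event[4])
{
        uint32_t type = (event[1] >> 28) & 0xf;   /* assumed field position */

        printf("UNKNOWN type=0x%02x event[0]=0x%08x event[1]=0x%08x "
               "event[2]=0x%08x event[3]=0x%08x\n",
               type, event[0], event[1], event[2], event[3]);
}

int main(void)
{
        const uint32_t sample[4] = { 0x00000405, 0xf0001000, 0xdeadbeef, 0 };

        dump_unknown_event(sample);
        return 0;
}
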
@@ -1816,7 +1818,8 @@ static bool dma_ops_domain(struct protection_domain *domain)
         return domain->flags & PD_DMA_OPS_MASK;
 }

-static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+static void set_dte_entry(u16 devid, struct protection_domain *domain,
+                          bool ats, bool ppr)
 {
         u64 pte_root = 0;
         u64 flags = 0;

@@ -1833,6 +1836,13 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
         if (ats)
                 flags |= DTE_FLAG_IOTLB;

+        if (ppr) {
+                struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+                if (iommu_feature(iommu, FEATURE_EPHSUP))
+                        pte_root |= 1ULL << DEV_ENTRY_PPR;
+        }
+
         if (domain->flags & PD_IOMMUV2_MASK) {
                 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
                 u64 glx  = domain->glx;

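DEV_ENTRY_PPR is a bit position within the 256-bit device table entry; 0x34 is decimal 52, which falls inside the first 64-bit word, so the driver can OR it straight into pte_root when the device uses IOMMUv2 and the IOMMU advertises FEATURE_EPHSUP. A standalone sketch of that encoding (the constant comes from the header hunks further down; the feature flags here are stand-ins):

#include <stdio.h>
#include <stdint.h>

#define DEV_ENTRY_PPR 0x34      /* bit 52 of the DTE's first quadword */

int main(void)
{
        uint64_t pte_root = 0;
        int dev_is_iommu_v2 = 1;    /* stand-in for dev_data->iommu_v2 */
        int iommu_has_ephsup = 1;   /* stand-in for iommu_feature(iommu, FEATURE_EPHSUP) */

        if (dev_is_iommu_v2 && iommu_has_ephsup)
                pte_root |= 1ULL << DEV_ENTRY_PPR;

        printf("pte_root = 0x%016llx, PPR bit %s\n",
               (unsigned long long)pte_root,
               (pte_root >> DEV_ENTRY_PPR) & 1 ? "set" : "clear");
        return 0;
}
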
@@ -1895,9 +1905,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
         domain->dev_cnt                 += 1;

         /* Update device table */
-        set_dte_entry(dev_data->devid, domain, ats);
+        set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
         if (alias != dev_data->devid)
-                set_dte_entry(alias, domain, ats);
+                set_dte_entry(alias, domain, ats, dev_data->iommu_v2);

         device_flush_dte(dev_data);
 }

@@ -2276,13 +2286,15 @@ static void update_device_table(struct protection_domain *domain)
         struct iommu_dev_data *dev_data;

         list_for_each_entry(dev_data, &domain->dev_list, list) {
-                set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+                set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
+                              dev_data->iommu_v2);

                 if (dev_data->devid == dev_data->alias)
                         continue;

                 /* There is an alias, update device table entry for it */
-                set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+                set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
+                              dev_data->iommu_v2);
         }
 }

@@ -98,6 +98,7 @@
 #define FEATURE_HE              (1ULL<<8)
 #define FEATURE_PC              (1ULL<<9)
 #define FEATURE_GAM_VAPIC       (1ULL<<21)
+#define FEATURE_EPHSUP          (1ULL<<50)

 #define FEATURE_PASID_SHIFT     32
 #define FEATURE_PASID_MASK      (0x1fULL << FEATURE_PASID_SHIFT)

@@ -192,6 +193,7 @@
 /* macros and definitions for device table entries */
 #define DEV_ENTRY_VALID         0x00
 #define DEV_ENTRY_TRANSLATION   0x01
+#define DEV_ENTRY_PPR           0x34
 #define DEV_ENTRY_IR            0x3d
 #define DEV_ENTRY_IW            0x3e
 #define DEV_ENTRY_NO_PAGE_FAULT 0x62

@@ -2971,7 +2971,7 @@ static struct platform_driver arm_smmu_driver = {
 };
 module_platform_driver(arm_smmu_driver);

-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");

 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");

@@ -2211,12 +2211,12 @@ static struct platform_driver arm_smmu_driver = {
 };
 module_platform_driver(arm_smmu_driver);

-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");

 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");

@@ -1353,8 +1353,15 @@ static const struct iommu_ops exynos_iommu_ops = {

 static int __init exynos_iommu_init(void)
 {
+        struct device_node *np;
         int ret;

+        np = of_find_matching_node(NULL, sysmmu_of_match);
+        if (!np)
+                return 0;
+
+        of_node_put(np);
+
         lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
         if (!lv2table_kmem_cache) {

@@ -1394,4 +1401,4 @@ err_reg_driver:
 }
 core_initcall(exynos_iommu_init);

-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
+IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");

@@ -64,7 +64,7 @@
 #define IOAPIC_RANGE_END        (0xfeefffff)
 #define IOVA_START_ADDR         (0x1000)

-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

 #define MAX_AGAW_WIDTH 64
 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

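Raising DEFAULT_DOMAIN_ADDRESS_WIDTH from 48 to 57 matches the span of 5-level paging: each page-table level resolves 9 address bits on top of the 12-bit page offset, so four levels give 48-bit addresses and a fifth level gives 57. A quick sanity check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 9 address bits per page-table level plus a 12-bit page offset. */
        for (int levels = 4; levels <= 5; levels++) {
                int width = levels * 9 + 12;

                printf("%d-level paging: %2d-bit addresses (%llu TiB)\n",
                       levels, width,
                       (unsigned long long)((1ULL << width) >> 40));
        }
        return 0;
}
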
@@ -1601,8 +1601,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
          * flush. However, device IOTLB doesn't need to be flushed in this case.
          */
         if (!cap_caching_mode(iommu->cap) || !map)
-                iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
-                                      addr, mask);
+                iommu_flush_dev_iotlb(domain, addr, mask);
 }

 static void iommu_flush_iova(struct iova_domain *iovad)

@@ -26,6 +26,10 @@
 #include <linux/interrupt.h>
 #include <asm/page.h>

+#define PASID_ENTRY_P           BIT_ULL(0)
+#define PASID_ENTRY_FLPM_5LP    BIT_ULL(9)
+#define PASID_ENTRY_SRE         BIT_ULL(11)
+
 static irqreturn_t prq_event_thread(int irq, void *d);

 struct pasid_entry {

@@ -41,6 +45,14 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
         struct page *pages;
         int order;

+        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+            !cap_fl1gp_support(iommu->cap))
+                return -EINVAL;
+
+        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+            !cap_5lp_support(iommu->cap))
+                return -EINVAL;
+
         /* Start at 2 because it's defined as 2^(1+PSS) */
         iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

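These checks refuse to allocate PASID tables when the CPU uses a paging feature the IOMMU cannot mirror: 1GB first-level pages (GBPAGES) need cap_fl1gp_support(), and 57-bit addressing (LA57) needs cap_5lp_support(), because shared virtual memory means the IOMMU walks exactly the page tables the CPU builds. A compact, self-contained model of that compatibility test (capability values are synthetic; the cap_* macros mirror the header hunk near the end of this diff):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define cap_fl1gp_support(c)    (((c) >> 56) & 1)
#define cap_5lp_support(c)      (((c) >> 60) & 1)

/* Return 0 if the IOMMU can share the CPU's page tables, -EINVAL if not. */
static int check_svm_compat(int cpu_gbpages, int cpu_la57, uint64_t cap)
{
        if (cpu_gbpages && !cap_fl1gp_support(cap))
                return -EINVAL;
        if (cpu_la57 && !cap_5lp_support(cap))
                return -EINVAL;
        return 0;
}

int main(void)
{
        uint64_t old_cap = 0;                           /* neither bit set */
        uint64_t new_cap = (1ULL << 56) | (1ULL << 60);

        printf("LA57 CPU, old IOMMU: %d\n", check_svm_compat(1, 1, old_cap));
        printf("LA57 CPU, new IOMMU: %d\n", check_svm_compat(1, 1, new_cap));
        return 0;
}
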
@@ -129,6 +141,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
                 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
                        iommu->name);
                 dmar_free_hwirq(irq);
+                iommu->pr_irq = 0;
                 goto err;
         }
         dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);

@@ -144,9 +157,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
         dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
         dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

-        free_irq(iommu->pr_irq, iommu);
-        dmar_free_hwirq(iommu->pr_irq);
-        iommu->pr_irq = 0;
+        if (iommu->pr_irq) {
+                free_irq(iommu->pr_irq, iommu);
+                dmar_free_hwirq(iommu->pr_irq);
+                iommu->pr_irq = 0;
+        }

         free_pages((unsigned long)iommu->prq, PRQ_ORDER);
         iommu->prq = NULL;

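Together with the enable-path change just above (zeroing pr_irq when the IRQ request fails), guarding the teardown on a non-zero pr_irq makes intel_svm_finish_prq() safe to call whether or not the IRQ was ever set up, and safe to call twice. The same guard-and-reset pattern in a generic, self-contained form (the struct and function names are illustrative):

#include <stdio.h>

struct fake_iommu {
        int pr_irq;     /* 0 means "no IRQ was allocated" */
};

/* Tear down only what was actually set up, then reset the handle so a
 * repeated call is a harmless no-op. */
static void finish_prq(struct fake_iommu *iommu)
{
        if (iommu->pr_irq) {
                printf("free_irq(%d); dmar_free_hwirq(%d)\n",
                       iommu->pr_irq, iommu->pr_irq);
                iommu->pr_irq = 0;
        }
}

int main(void)
{
        struct fake_iommu ok = { .pr_irq = 42 }, failed = { .pr_irq = 0 };

        finish_prq(&ok);        /* frees IRQ 42 once */
        finish_prq(&ok);        /* second call does nothing */
        finish_prq(&failed);    /* setup failed earlier: nothing to free */
        return 0;
}
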
@@ -290,6 +305,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
         struct intel_svm_dev *sdev;
         struct intel_svm *svm = NULL;
         struct mm_struct *mm = NULL;
+        u64 pasid_entry_val;
         int pasid_max;
         int ret;

@@ -396,9 +412,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                 kfree(sdev);
                                 goto out;
                         }
-                        iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
+                        pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
                 } else
-                        iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+                        pasid_entry_val = (u64)__pa(init_mm.pgd) |
+                                          PASID_ENTRY_P | PASID_ENTRY_SRE;
+                if (cpu_feature_enabled(X86_FEATURE_LA57))
+                        pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
+
+                iommu->pasid_table[svm->pasid].val = pasid_entry_val;
+
                 wmb();
                 /* In caching mode, we still have to flush with PASID 0 when
                  * a PASID table entry becomes present. Not entirely clear

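The present bit, the optional supervisor-request bit, and now the 5-level paging bit are collected in pasid_entry_val before the single write to the PASID table; FLPM_5LP is set only when the CPU itself runs with LA57, since for SVM the IOMMU walks the same page tables (mm->pgd) as the processor and must use the same paging mode. A hedged user-space sketch of that assembly (the page-table address and the feature flag are made up):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)              (1ULL << (n))
#define PASID_ENTRY_P           BIT_ULL(0)   /* entry present */
#define PASID_ENTRY_FLPM_5LP    BIT_ULL(9)   /* first-level paging: 5 levels */
#define PASID_ENTRY_SRE         BIT_ULL(11)  /* supervisor requests enabled */

int main(void)
{
        uint64_t pgd_phys = 0x123456000ULL;  /* illustrative page-table root */
        int la57_enabled = 1;                /* stand-in for X86_FEATURE_LA57 */
        uint64_t val;

        val = pgd_phys | PASID_ENTRY_P;      /* user binding: no SRE bit */
        if (la57_enabled)
                val |= PASID_ENTRY_FLPM_5LP;

        printf("PASID entry = 0x%016llx\n", (unsigned long long)val);
        return 0;
}
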
@@ -1303,6 +1303,9 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
         int ret;

         group = iommu_group_get(dev);
+        if (!group)
+                return -ENODEV;
+
         /*
          * Lock the group to make sure the device-count doesn't
          * change while we are attaching

@@ -1341,6 +1344,8 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
         struct iommu_group *group;

         group = iommu_group_get(dev);
+        if (!group)
+                return;

         mutex_lock(&group->mutex);
         if (iommu_group_device_count(group) != 1) {

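iommu_group_get() takes a reference on the device's group and returns NULL for devices that were never put in one; both attach and detach now bail out early instead of dereferencing that NULL pointer a few lines later. A minimal illustration of the guard (all types and return codes here are placeholders, not the kernel API):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct group { int refcount; };

/* Stand-in for iommu_group_get(): may legitimately return NULL. */
static struct group *group_get(struct group *g)
{
        if (g)
                g->refcount++;
        return g;
}

static int attach(struct group *g)
{
        struct group *grp = group_get(g);

        if (!grp)
                return -ENODEV; /* device has no group: fail cleanly */

        printf("attached, refcount now %d\n", grp->refcount);
        return 0;
}

int main(void)
{
        struct group g = { .refcount = 1 };

        printf("attach(valid) -> %d\n", attach(&g));
        printf("attach(NULL)  -> %d\n", attach(NULL));
        return 0;
}
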
@@ -1108,18 +1108,8 @@ static void __exit ipmmu_exit(void)
 subsys_initcall(ipmmu_init);
 module_exit(ipmmu_exit);

-#ifdef CONFIG_IOMMU_DMA
-static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
-{
-        ipmmu_init();
-        return 0;
-}
-
-IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
-                 ipmmu_vmsa_iommu_of_setup);
-IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795",
-                 ipmmu_vmsa_iommu_of_setup);
-#endif
+IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
+IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");

 MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");

@@ -823,6 +823,8 @@ static int msm_iommu_probe(struct platform_device *pdev)
                 goto fail;
         }

+        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+
         pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                 iommu->base, iommu->irq, iommu->ncb);

@@ -875,19 +877,7 @@ static void __exit msm_iommu_driver_exit(void)
 subsys_initcall(msm_iommu_driver_init);
 module_exit(msm_iommu_driver_exit);

-static int __init msm_iommu_init(void)
-{
-        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
-        return 0;
-}
-
-static int __init msm_iommu_of_setup(struct device_node *np)
-{
-        msm_iommu_init();
-        return 0;
-}
-
-IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");

 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");

@@ -231,19 +231,3 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,

         return ops;
 }
-
-static int __init of_iommu_init(void)
-{
-        struct device_node *np;
-        const struct of_device_id *match, *matches = &__iommu_of_table;
-
-        for_each_matching_node_and_match(np, matches, &match) {
-                const of_iommu_init_fn init_fn = match->data;
-
-                if (init_fn && init_fn(np))
-                        pr_err("Failed to initialise IOMMU %pOF\n", np);
-        }
-
-        return 0;
-}
-postcore_initcall_sync(of_iommu_init);

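The removed of_iommu_init() ran at postcore time, walked the __iommu_of_table linker table, and invoked each entry's init_fn; with every driver converted to probe through the regular platform-bus path, no entry carries a callback any more and the walk became dead code. A small self-contained model of the loop that was dropped (the table layout and names are simplified stand-ins):

#include <stdio.h>
#include <stddef.h>

typedef int (*of_iommu_init_fn)(const char *compatible);

struct of_entry {
        const char *compatible;
        of_iommu_init_fn init_fn;       /* after the cleanup: always NULL */
};

/* Simplified stand-in for the __iommu_of_table linker section. */
static const struct of_entry iommu_of_table[] = {
        { "arm,smmu-v3",           NULL },
        { "samsung,exynos-sysmmu", NULL },
        { "renesas,ipmmu-vmsa",    NULL },
};

int main(void)
{
        /* This is the walk the patch removes: with every callback NULL it
         * no longer does anything. */
        for (size_t i = 0; i < sizeof(iommu_of_table) / sizeof(iommu_of_table[0]); i++) {
                const struct of_entry *e = &iommu_of_table[i];

                if (e->init_fn && e->init_fn(e->compatible))
                        fprintf(stderr, "Failed to initialise IOMMU %s\n",
                                e->compatible);
        }
        return 0;
}
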
@@ -274,8 +274,8 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
         if (!obj->debug_dir)
                 return;

-        d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
-                              (u8 *)&obj->nr_tlb_entries);
+        d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
+                               &obj->nr_tlb_entries);
         if (!d)
                 return;

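obj->nr_tlb_entries is a 32-bit field, so registering it with debugfs_create_u8() behind a (u8 *) cast exposed only one byte of the value, and which byte depends on endianness; debugfs_create_u32() reads the whole field. The truncation is easy to reproduce in plain C (the value 32 is just an example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t nr_tlb_entries = 32;            /* example value */
        const uint8_t *as_u8 = (const uint8_t *)&nr_tlb_entries;

        /* Reading through a u8 pointer sees one byte of the u32. */
        printf("full u32 value : %u\n", nr_tlb_entries);
        printf("first byte only: %u\n", *as_u8);

        nr_tlb_entries = 0x100;                  /* 256: low byte is zero */
        printf("u32 = 0x%x but first byte = %u (wrong on little-endian)\n",
               nr_tlb_entries, *as_u8);
        return 0;
}
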
@@ -947,7 +947,7 @@ static void __exit qcom_iommu_exit(void)
 module_init(qcom_iommu_init);
 module_exit(qcom_iommu_exit);

-IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1", NULL);
+IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1");

 MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
 MODULE_LICENSE("GPL v2");

@@ -83,7 +83,9 @@
 /*
  * Decoding Capability Register
  */
+#define cap_5lp_support(c)      (((c) >> 60) & 1)
 #define cap_pi_support(c)       (((c) >> 59) & 1)
+#define cap_fl1gp_support(c)    (((c) >> 56) & 1)
 #define cap_read_drain(c)       (((c) >> 55) & 1)
 #define cap_write_drain(c)      (((c) >> 54) & 1)
 #define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)

@@ -34,9 +34,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,

 extern struct of_device_id __iommu_of_table;

-typedef int (*of_iommu_init_fn)(struct device_node *);
-
-#define IOMMU_OF_DECLARE(name, compat, fn) \
-        _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)

 #endif /* __OF_IOMMU_H */