iommu/tegra-gart: Make use of domain_alloc and domain_free
Implement domain_alloc and domain_free iommu-ops as a replacement for
domain_init/domain_destroy.

Tested-by: Thierry Reding <treding@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent d5f1a81ccb
commit b5cbb386ae
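
The diff below hinges on one pattern: instead of stashing the gart pointer in domain->priv, the driver embeds the generic struct iommu_domain inside its own struct gart_domain and recovers the outer structure with container_of(). As a rough, standalone userspace sketch of that embedding trick only (the names my_domain, to_my_domain and private_state are made up for illustration and are not part of this commit):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-in for the generic handle used in the diff below. */
	struct iommu_domain { int type; };

	struct my_domain {
		struct iommu_domain domain;	/* generic handle, embedded */
		int private_state;		/* driver-specific data */
	};

	/* Same idea as the kernel's container_of(): subtract the member
	 * offset from the member pointer to get back the enclosing struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct my_domain *to_my_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct my_domain, domain);
	}

	int main(void)
	{
		struct my_domain *md = calloc(1, sizeof(*md));

		md->private_state = 42;

		/* Callers only ever see the embedded generic handle... */
		struct iommu_domain *dom = &md->domain;

		/* ...and the driver recovers its own structure from it. */
		printf("%d\n", to_my_domain(dom)->private_state);

		free(md);
		return 0;
	}
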
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@ struct gart_device {
 	struct device		*dev;
 };
 
+struct gart_domain {
+	struct iommu_domain domain;	/* generic domain handle */
+	struct gart_device *gart;	/* link to gart device */
+};
+
 static struct gart_device *gart_handle; /* unique for a system */
 
 #define GART_PTE(_pfn)						\
 	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct gart_domain, domain);
+}
+
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,6 +166,7 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
+	struct gart_domain *gart_domain = to_gart_domain(domain);
 	struct gart_device *gart;
 	struct gart_client *client, *c;
 	int err = 0;
@@ -163,7 +174,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
 	gart = gart_handle;
 	if (!gart)
 		return -EINVAL;
-	domain->priv = gart;
+	gart_domain->gart = gart;
 
 	domain->geometry.aperture_start = gart->iovmm_base;
 	domain->geometry.aperture_end   = gart->iovmm_base +
@@ -198,7 +209,8 @@ fail:
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct gart_device *gart = domain->priv;
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
 	struct gart_client *c;
 
 	spin_lock(&gart->client_lock);
@@ -216,33 +228,44 @@ out:
 	spin_unlock(&gart->client_lock);
 }
 
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-	return 0;
+	struct gart_domain *gart_domain;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+	if (!gart_domain)
+		return NULL;
+
+	return &gart_domain->domain;
 }
 
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct gart_device *gart = domain->priv;
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
 
-	if (!gart)
-		return;
-
-	spin_lock(&gart->client_lock);
-	if (!list_empty(&gart->client)) {
-		struct gart_client *c;
+	if (gart) {
+		spin_lock(&gart->client_lock);
+		if (!list_empty(&gart->client)) {
+			struct gart_client *c;
 
-		list_for_each_entry(c, &gart->client, list)
-			gart_iommu_detach_dev(domain, c->dev);
+			list_for_each_entry(c, &gart->client, list)
+				gart_iommu_detach_dev(domain, c->dev);
+		}
+		spin_unlock(&gart->client_lock);
 	}
-	spin_unlock(&gart->client_lock);
-	domain->priv = NULL;
+
+	kfree(gart_domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 			  phys_addr_t pa, size_t bytes, int prot)
 {
-	struct gart_device *gart = domain->priv;
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
 	unsigned long flags;
 	unsigned long pfn;
 
@@ -265,7 +288,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			       size_t bytes)
 {
-	struct gart_device *gart = domain->priv;
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
 	unsigned long flags;
 
 	if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +305,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
-	struct gart_device *gart = domain->priv;
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
 	unsigned long pte;
 	phys_addr_t pa;
 	unsigned long flags;
@@ -310,8 +335,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops gart_iommu_ops = {
 	.capable	= gart_iommu_capable,
-	.domain_init	= gart_iommu_domain_init,
-	.domain_destroy	= gart_iommu_domain_destroy,
+	.domain_alloc	= gart_iommu_domain_alloc,
+	.domain_free	= gart_iommu_domain_free,
 	.attach_dev	= gart_iommu_attach_dev,
 	.detach_dev	= gart_iommu_detach_dev,
 	.map		= gart_iommu_map,
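
For context, a rough sketch of how the generic IOMMU API ends up in the new callbacks, assuming the v4.x-era interface in which iommu_domain_alloc() takes a bus pointer; example_use_gart_domain() is a hypothetical caller, not code from this commit or the driver:

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	/* Hypothetical caller, for illustration only. */
	static int example_use_gart_domain(struct device *dev)
	{
		struct iommu_domain *dom;
		int err;

		/*
		 * The core asks the bus's iommu_ops for an UNMANAGED domain,
		 * which now lands in gart_iommu_domain_alloc() and returns the
		 * iommu_domain embedded in a freshly kzalloc'd gart_domain.
		 */
		dom = iommu_domain_alloc(&platform_bus_type);
		if (!dom)
			return -ENOMEM;

		/* gart_iommu_attach_dev() records the gart handle in the
		 * embedding gart_domain instead of domain->priv. */
		err = iommu_attach_device(dom, dev);
		if (err)
			goto free;

		/* ... iommu_map()/iommu_unmap() against dom as before ... */

		iommu_detach_device(dom, dev);
	free:
		/* Ends up in gart_iommu_domain_free(), which detaches any
		 * remaining clients and kfree()s the gart_domain. */
		iommu_domain_free(dom);
		return err;
	}

The net effect of the switch is that domain allocation moves into the driver, so driver-private state (here the gart pointer) lives in the same allocation as the generic handle and domain->priv is no longer needed.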