amd-iommu: introduce for_each_iommu* macros
This patch introduces the for_each_iommu and for_each_iommu_safe macros to simplify developers' lives when they have to iterate over all AMD IOMMUs in the system.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
commit 3bd221724a (parent 41fb454ebe)
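The point of the change is readability at the call sites: iteration over the global amd_iommu_list becomes a one-word loop. As a rough userspace sketch of the pattern (simplified singly-linked list and a stand-in struct, not the kernel's struct list_head or the real amd_iommu definition):

	#include <stdio.h>

	struct amd_iommu {                  /* stand-in for the kernel struct */
		int devid;
		struct amd_iommu *next;     /* the kernel uses struct list_head */
	};

	static struct amd_iommu *amd_iommu_list;  /* global list head */

	/* The wrapper macro: callers name only the cursor variable. */
	#define for_each_iommu(iommu) \
		for ((iommu) = amd_iommu_list; (iommu); (iommu) = (iommu)->next)

	int main(void)
	{
		struct amd_iommu a = { 1, NULL };
		struct amd_iommu b = { 2, &a };
		struct amd_iommu *iommu;

		amd_iommu_list = &b;
		for_each_iommu(iommu)       /* reads like the converted call sites */
			printf("iommu %d\n", iommu->devid);
		return 0;
	}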
@@ -195,6 +195,14 @@
 #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
 					      domain for an IOMMU */
 
+/*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+	list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
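The doubled parentheses around the macro parameters, (iommu) and (next), are ordinary macro hygiene rather than anything IOMMU-specific: they keep an argument that is itself an expression intact after expansion. A generic illustration of the hazard they guard against (hypothetical SQUARE macros, not from this patch):

	#include <stdio.h>

	#define SQUARE_BAD(x)  (x * x)      /* argument used unparenthesized */
	#define SQUARE_GOOD(x) ((x) * (x))  /* parenthesized, like (iommu) above */

	int main(void)
	{
		/* SQUARE_BAD(1 + 2) expands to (1 + 2 * 1 + 2) == 5, not 9 */
		printf("bad:  %d\n", SQUARE_BAD(1 + 2));
		/* SQUARE_GOOD(1 + 2) expands to ((1 + 2) * (1 + 2)) == 9 */
		printf("good: %d\n", SQUARE_GOOD(1 + 2));
		return 0;
	}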
@@ -213,7 +213,7 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list)
+	for_each_iommu(iommu)
 		iommu_poll_events(iommu);
 
 	return IRQ_HANDLED;
@@ -440,7 +440,7 @@ static void iommu_flush_domain(u16 domid)
 	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 				      domid, 1, 1);
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		spin_lock_irqsave(&iommu->lock, flags);
 		__iommu_queue_command(iommu, &cmd);
 		__iommu_completion_wait(iommu);
@@ -1672,7 +1672,7 @@ int __init amd_iommu_init_dma_ops(void)
 	 * found in the system. Devices not assigned to any other
 	 * protection domain will be assigned to the default one.
 	 */
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
 		if (iommu->default_dom == NULL)
 			return -ENOMEM;
@@ -1710,7 +1710,7 @@ int __init amd_iommu_init_dma_ops(void)
 
 free_domains:
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		if (iommu->default_dom)
 			dma_ops_domain_free(iommu->default_dom);
 	}
@@ -679,7 +679,7 @@ static void __init free_iommu_all(void)
 {
 	struct amd_iommu *iommu, *next;
 
-	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
+	for_each_iommu_safe(iommu, next) {
 		list_del(&iommu->list);
 		free_iommu_one(iommu);
 		kfree(iommu);
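free_iommu_all() is the one caller that needs the _safe variant: the loop body frees the node the cursor points at, so the iterator must cache the next node before the body runs, or its advance step would read freed memory. A userspace sketch of the same idiom (simplified list and hypothetical names, not the kernel's list_for_each_entry_safe):

	#include <stdlib.h>

	struct node {
		struct node *next;
	};

	static struct node *head;

	/* Cache 'tmp' before the body runs, so the body may free 'n'. */
	#define for_each_node_safe(n, tmp)                           \
		for ((n) = head, (tmp) = (n) ? (n)->next : NULL;     \
		     (n);                                            \
		     (n) = (tmp), (tmp) = (n) ? (n)->next : NULL)

	static void free_all(void)
	{
		struct node *n, *next;

		for_each_node_safe(n, next)   /* mirrors for_each_iommu_safe() */
			free(n);              /* safe: 'next' was saved already */
		head = NULL;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {         /* build a three-node list */
			struct node *n = malloc(sizeof(*n));
			if (!n)
				exit(1);
			n->next = head;
			head = n;
		}
		free_all();
		return 0;
	}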
@@ -779,7 +779,7 @@ static int __init iommu_setup_msix(struct amd_iommu *iommu)
 	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
 	int nvec = 0, i;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev) {
 			entries[nvec].entry = curr->evt_msi_num;
 			entries[nvec].vector = 0;
@@ -818,7 +818,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 	int r;
 	struct amd_iommu *curr;
 
-	list_for_each_entry(curr, &amd_iommu_list, list) {
+	for_each_iommu(curr) {
 		if (curr->dev == iommu->dev)
 			curr->int_enabled = true;
 	}
@@ -971,7 +971,7 @@ static void __init enable_iommus(void)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable_event_logging(iommu);