x86/amd-iommu: Reimplement flush_all_domains_on_iommu()
author     Joerg Roedel <joerg.roedel@amd.com>
Fri, 20 Nov 2009 16:02:44 +0000 (17:02 +0100)
committer  Joerg Roedel <joerg.roedel@amd.com>
Fri, 27 Nov 2009 13:16:28 +0000 (14:16 +0100)
This patch reimplements flush_all_domains_on_iommu() to
iterate over the global protection domain list
(amd_iommu_pd_list) instead of scanning the domain-id
allocation bitmap.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
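
For readability, the reworked function is reassembled below from the '+' lines
of the hunk that follows. This is only a sketch of the resulting code:
amd_iommu_pd_list, amd_iommu_pd_lock, struct protection_domain and the helpers
iommu_queue_inv_iommu_pages()/iommu_flush_complete() are defined elsewhere in
amd_iommu.c and are not shown here.

/*
 * New version of flush_all_domains_on_iommu(), pieced together from the
 * '+' lines of the hunk below. It walks the global protection domain
 * list instead of the domain-id allocation bitmap.
 */
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
        u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
        struct protection_domain *domain;
        unsigned long flags;

        spin_lock_irqsave(&amd_iommu_pd_lock, flags);

        list_for_each_entry(domain, &amd_iommu_pd_list, list) {
                /* Skip domains that have no devices behind this IOMMU */
                if (domain->dev_iommu[iommu->index] == 0)
                        continue;

                /* Flush all pages of this domain and wait for completion */
                spin_lock(&domain->lock);
                iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
                iommu_flush_complete(domain);
                spin_unlock(&domain->lock);
        }

        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

Note how amd_iommu_pd_lock is held across the walk of the global list, while
each domain is additionally locked for the duration of its own flush.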
arch/x86/kernel/amd_iommu.c

index 5141f5608c5c92b47d0ac19af809725455d3ad75..a1bd99d390ab12ba71e4e2e9e1c5088285f7c4ab 100644 (file)
@@ -499,43 +499,48 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 }
 
 /*
- * This function flushes one domain on one IOMMU
+ * This function flushes all domains that have devices on the given IOMMU
  */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 {
-       struct iommu_cmd cmd;
+       u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+       struct protection_domain *domain;
        unsigned long flags;
 
-       __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                                     domid, 1, 1);
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       __iommu_queue_command(iommu, &cmd);
-       __iommu_completion_wait(iommu);
-       __iommu_wait_for_completion(iommu);
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-       int i;
+       spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
-       for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-               if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+       list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+               if (domain->dev_iommu[iommu->index] == 0)
                        continue;
-               flush_domain_on_iommu(iommu, i);
+
+               spin_lock(&domain->lock);
+               iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+               iommu_flush_complete(domain);
+               spin_unlock(&domain->lock);
        }
 
+       spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
 void amd_iommu_flush_all_domains(void)
 {
        struct protection_domain *domain;
+       unsigned long flags;
+
+       spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
        list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+               spin_lock(&domain->lock);
                iommu_flush_tlb_pde(domain);
                iommu_flush_complete(domain);
+               spin_unlock(&domain->lock);
        }
+
+       spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 static void flush_all_devices_for_iommu(struct amd_iommu *iommu)