iommu/vt-d: Update IOMMU state when memory hotplug happens
author    Jiang Liu <jiang.liu@linux.intel.com>
          Wed, 19 Feb 2014 06:07:37 +0000 (14:07 +0800)
committer Joerg Roedel <joro@8bytes.org>
          Tue, 4 Mar 2014 16:51:06 +0000 (17:51 +0100)
If a static identity domain is created, the IOMMU driver needs to update
the si_domain page table when a memory hotplug event happens. Otherwise,
PCI device DMA operations can't access the hot-added memory regions.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
include/linux/iova.h

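For context before the diff, here is a minimal, hedged sketch (not part of the patch) of the memory hotplug notifier interface the change builds on: a driver fills in a struct notifier_block, registers it with register_memory_notifier(), and is called back with MEM_GOING_ONLINE / MEM_OFFLINE / MEM_CANCEL_ONLINE actions whose struct memory_notify payload carries the affected PFN range. The handler and variable names below are hypothetical; the patch adds the real VT-d handler.

    #include <linux/memory.h>
    #include <linux/notifier.h>

    /* Hypothetical handler; the patch below adds the real one for VT-d. */
    static int example_mem_notifier(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct memory_notify *mhp = data;   /* ->start_pfn, ->nr_pages */

            switch (action) {
            case MEM_GOING_ONLINE:
                    /* Set up mappings for the incoming range, or return
                     * NOTIFY_BAD to veto the hot-add. */
                    break;
            case MEM_OFFLINE:
            case MEM_CANCEL_ONLINE:
                    /* Tear the mappings down again. */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_mem_nb = {
            .notifier_call = example_mem_notifier,
    };

    /* During driver init: register_memory_notifier(&example_mem_nb); */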
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index dd576c067d0d4fea39aee4554313a47399837421..484d669d27207572943fa48c02fc3f42e26749fe 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -3683,6 +3684,73 @@ static struct notifier_block device_nb = {
        .notifier_call = device_notifier,
 };
 
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+                                      unsigned long val, void *v)
+{
+       struct memory_notify *mhp = v;
+       unsigned long long start, end;
+       unsigned long start_vpfn, last_vpfn;
+
+       switch (val) {
+       case MEM_GOING_ONLINE:
+               start = mhp->start_pfn << PAGE_SHIFT;
+               end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+               if (iommu_domain_identity_map(si_domain, start, end)) {
+                       pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+                               start, end);
+                       return NOTIFY_BAD;
+               }
+               break;
+
+       case MEM_OFFLINE:
+       case MEM_CANCEL_ONLINE:
+               start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+               last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+               while (start_vpfn <= last_vpfn) {
+                       struct iova *iova;
+                       struct dmar_drhd_unit *drhd;
+                       struct intel_iommu *iommu;
+
+                       iova = find_iova(&si_domain->iovad, start_vpfn);
+                       if (iova == NULL) {
+                               pr_debug("dmar: failed get IOVA for PFN %lx\n",
+                                        start_vpfn);
+                               break;
+                       }
+
+                       iova = split_and_remove_iova(&si_domain->iovad, iova,
+                                                    start_vpfn, last_vpfn);
+                       if (iova == NULL) {
+                               pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+                                       start_vpfn, last_vpfn);
+                               return NOTIFY_BAD;
+                       }
+
+                       rcu_read_lock();
+                       for_each_active_iommu(iommu, drhd)
+                               iommu_flush_iotlb_psi(iommu, si_domain->id,
+                                       iova->pfn_lo,
+                                       iova->pfn_hi - iova->pfn_lo + 1, 0);
+                       rcu_read_unlock();
+                       dma_pte_clear_range(si_domain, iova->pfn_lo,
+                                           iova->pfn_hi);
+                       dma_pte_free_pagetable(si_domain, iova->pfn_lo,
+                                              iova->pfn_hi);
+
+                       start_vpfn = iova->pfn_hi + 1;
+                       free_iova_mem(iova);
+               }
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+       .notifier_call = intel_iommu_memory_notifier,
+       .priority = 0
+};
+
 int __init intel_iommu_init(void)
 {
        int ret = -ENODEV;
@@ -3755,8 +3823,9 @@ int __init intel_iommu_init(void)
        init_iommu_pm_ops();
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
        bus_register_notifier(&pci_bus_type, &device_nb);
+       if (si_domain && !hw_pass_through)
+               register_memory_notifier(&intel_iommu_memory_nb);
 
        intel_iommu_enabled = 1;
 
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 67da6cff74e8610cc04f5fa03b84fa8f612e9406..f6b17e6af2fb261f84eafd974d652a76716dcb95 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
        return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+       struct iova *iova;
+
+       iova = alloc_iova_mem();
+       if (iova) {
+               iova->pfn_lo = pfn_lo;
+               iova->pfn_hi = pfn_hi;
+       }
+
+       return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
 {
        struct iova *iova;
 
-       iova = alloc_iova_mem();
-       if (!iova)
-               return iova;
+       iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+       if (iova)
+               iova_insert_rbtree(&iovad->rbroot, iova);
 
-       iova->pfn_hi = pfn_hi;
-       iova->pfn_lo = pfn_lo;
-       iova_insert_rbtree(&iovad->rbroot, iova);
        return iova;
 }
 
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+                     unsigned long pfn_lo, unsigned long pfn_hi)
+{
+       unsigned long flags;
+       struct iova *prev = NULL, *next = NULL;
+
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       if (iova->pfn_lo < pfn_lo) {
+               prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+               if (prev == NULL)
+                       goto error;
+       }
+       if (iova->pfn_hi > pfn_hi) {
+               next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+               if (next == NULL)
+                       goto error;
+       }
+
+       __cached_rbnode_delete_update(iovad, iova);
+       rb_erase(&iova->node, &iovad->rbroot);
+
+       if (prev) {
+               iova_insert_rbtree(&iovad->rbroot, prev);
+               iova->pfn_lo = pfn_lo;
+       }
+       if (next) {
+               iova_insert_rbtree(&iovad->rbroot, next);
+               iova->pfn_hi = pfn_hi;
+       }
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+       return iova;
+
+error:
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+       if (prev)
+               free_iova_mem(prev);
+       return NULL;
+}
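Before the iova.h hunk, a hedged usage sketch of the new helper (the wrapper function and its name are hypothetical; the call sequence mirrors the MEM_OFFLINE path in the intel-iommu.c hunk above): the caller looks up the node containing the first PFN, asks split_and_remove_iova() to detach only the part overlapping the requested range while any leftover head/tail is re-inserted into the rbtree, tears down the returned sub-range, and then frees the detached node.

    #include <linux/iova.h>

    /* Hypothetical caller, mirroring the MEM_OFFLINE loop above. */
    static void example_unmap_range(struct iova_domain *iovad,
                                    unsigned long first_pfn,
                                    unsigned long last_pfn)
    {
            while (first_pfn <= last_pfn) {
                    struct iova *iova = find_iova(iovad, first_pfn);

                    if (!iova)
                            break;          /* nothing reserved at this PFN */

                    /* Detach just the overlap with [first_pfn, last_pfn];
                     * leftover head/tail nodes stay in the tree. */
                    iova = split_and_remove_iova(iovad, iova,
                                                 first_pfn, last_pfn);
                    if (!iova)
                            break;          /* allocation failure */

                    /* ... flush IOTLBs and clear page tables for
                     * iova->pfn_lo .. iova->pfn_hi here ... */

                    first_pfn = iova->pfn_hi + 1;
                    free_iova_mem(iova);
            }
    }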
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 76a0759e88ec4650fe554ddb1988bbeff99dfc26..3277f4711349570745822d86152725c3eeaba24c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+       struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 
 #endif