| field | value | date |
|---|---|---|
| author | Jason Gunthorpe <jgg@nvidia.com> | 2025-07-14 12:50:24 +0800 |
| committer | Will Deacon <will@kernel.org> | 2025-07-14 11:18:03 +0100 |
| commit | b33125296b5047115469b8a3b74c0fdbf4976548 (patch) | |
| tree | 25ba4920498963e7b6e941998a4e4e2f4959cf6e /drivers/iommu/intel/iommu.c | |
| parent | b9434ba97c44f5744ea537adfd1f9f3fe102681c (diff) | |
iommu/vt-d: Create unique domain ops for each stage
Use the domain ops pointer to tell what kind of domain it is instead of
the internal use_first_level indication. This also protects against
wrongly using an SVA/nested/IDENTITY/BLOCKED domain type in places
where it should not be used.
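
The diff relies on intel_domain_is_fs_paging() and
intel_domain_is_ss_paging() helpers, which this series introduces
outside of iommu.c. A minimal sketch of the idea, assuming the helpers
simply compare against the per-stage ops tables this patch adds (the
exact bodies shown here are an assumption, not part of this hunk):

```c
/* Sketch only: the per-stage ops tables this patch defines in iommu.c. */
extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
extern const struct iommu_domain_ops intel_ss_paging_domain_ops;

/*
 * Once each stage has its own ops table, the ops pointer uniquely
 * identifies the domain kind. SVA, nested, identity and blocked
 * domains carry different ops, so they fail both checks.
 */
static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_fs_paging_domain_ops;
}

static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_ss_paging_domain_ops;
}
```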
The only remaining uses of use_first_level outside the paging domains
are in paging_domain_compatible() and
intel_iommu_enforce_cache_coherency(). Thus, remove the now-pointless
assignments of use_first_level in intel_svm_domain_alloc() and
intel_iommu_domain_alloc_nested(); none of the unique ops for these
domain types ever reference it on their call chains.
Add a WARN_ON() check to domain_context_mapping_one(), as it only works
with second-stage paging domains.
This is preparation for iommupt, which will have different ops for each
of the stages.
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/5-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-8-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
| -rw-r--r-- | drivers/iommu/intel/iommu.c | 60 |
1 file changed, 42 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 553f0e32be9a..940d1c6c2248 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1462,6 +1462,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	struct context_entry *context;
 	int ret;
 
+	if (WARN_ON(!intel_domain_is_ss_paging(domain)))
+		return -EINVAL;
+
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
 		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
@@ -1780,7 +1783,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
 static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
 				       struct intel_iommu *iommu)
 {
-	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
+	if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
 		return true;
 
 	if (rwbf_quirk || cap_rwbf(iommu->cap))
@@ -1812,12 +1815,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 
 	if (!sm_supported(iommu))
 		ret = domain_context_mapping(domain, dev);
-	else if (domain->use_first_level)
+	else if (intel_domain_is_fs_paging(domain))
 		ret = domain_setup_first_level(iommu, domain, dev,
 					       IOMMU_NO_PASID, NULL);
-	else
+	else if (intel_domain_is_ss_paging(domain))
 		ret = domain_setup_second_level(iommu, domain, dev,
 						IOMMU_NO_PASID, NULL);
+	else if (WARN_ON(true))
+		ret = -EINVAL;
 
 	if (ret)
 		goto out_block_translation;
@@ -3288,7 +3293,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
 
 	domain->use_first_level = first_stage;
 	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
-	domain->domain.ops = intel_iommu_ops.default_domain_ops;
 
 	/* calculate the address width */
 	addr_width = agaw_to_width(iommu->agaw);
@@ -3346,6 +3350,8 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
 	dmar_domain = paging_domain_alloc(dev, true);
 	if (IS_ERR(dmar_domain))
 		return ERR_CAST(dmar_domain);
+
+	dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
 	return &dmar_domain->domain;
 }
 
@@ -3374,6 +3380,7 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
 	if (IS_ERR(dmar_domain))
 		return ERR_CAST(dmar_domain);
 
+	dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
 	dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
 	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
@@ -4098,12 +4105,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
 	if (ret)
 		goto out_remove_dev_pasid;
 
-	if (dmar_domain->use_first_level)
+	if (intel_domain_is_fs_paging(dmar_domain))
 		ret = domain_setup_first_level(iommu, dmar_domain,
 					       dev, pasid, old);
-	else
+	else if (intel_domain_is_ss_paging(dmar_domain))
 		ret = domain_setup_second_level(iommu, dmar_domain,
 						dev, pasid, old);
+	else if (WARN_ON(true))
+		ret = -EINVAL;
+
 	if (ret)
 		goto out_unwind_iopf;
 
@@ -4378,6 +4388,32 @@ static struct iommu_domain identity_domain = {
 	},
 };
 
+const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+	.attach_dev = intel_iommu_attach_device,
+	.set_dev_pasid = intel_iommu_set_dev_pasid,
+	.map_pages = intel_iommu_map_pages,
+	.unmap_pages = intel_iommu_unmap_pages,
+	.iotlb_sync_map = intel_iommu_iotlb_sync_map,
+	.flush_iotlb_all = intel_flush_iotlb_all,
+	.iotlb_sync = intel_iommu_tlb_sync,
+	.iova_to_phys = intel_iommu_iova_to_phys,
+	.free = intel_iommu_domain_free,
+	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
+const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+	.attach_dev = intel_iommu_attach_device,
+	.set_dev_pasid = intel_iommu_set_dev_pasid,
+	.map_pages = intel_iommu_map_pages,
+	.unmap_pages = intel_iommu_unmap_pages,
+	.iotlb_sync_map = intel_iommu_iotlb_sync_map,
+	.flush_iotlb_all = intel_flush_iotlb_all,
+	.iotlb_sync = intel_iommu_tlb_sync,
+	.iova_to_phys = intel_iommu_iova_to_phys,
+	.free = intel_iommu_domain_free,
+	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
 const struct iommu_ops intel_iommu_ops = {
 	.blocked_domain = &blocking_domain,
 	.release_domain = &blocking_domain,
@@ -4396,18 +4432,6 @@ const struct iommu_ops intel_iommu_ops = {
 	.def_domain_type = device_def_domain_type,
 	.pgsize_bitmap = SZ_4K,
 	.page_response = intel_iommu_page_response,
-	.default_domain_ops = &(const struct iommu_domain_ops) {
-		.attach_dev = intel_iommu_attach_device,
-		.set_dev_pasid = intel_iommu_set_dev_pasid,
-		.map_pages = intel_iommu_map_pages,
-		.unmap_pages = intel_iommu_unmap_pages,
-		.iotlb_sync_map = intel_iommu_iotlb_sync_map,
-		.flush_iotlb_all = intel_flush_iotlb_all,
-		.iotlb_sync = intel_iommu_tlb_sync,
-		.iova_to_phys = intel_iommu_iova_to_phys,
-		.free = intel_iommu_domain_free,
-		.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
-	}
 };
 
 static void quirk_iommu_igfx(struct pci_dev *dev)