| author | Jason Gunthorpe <jgg@nvidia.com> | 2025-04-08 13:53:59 -0300 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2025-04-17 16:22:42 +0200 |
| commit | 868240c34eb113e4121d4ad8ad7458d8caad87d3 | |
| tree | a3de8e6744b3adcd9b6d26f38b0ada2b87f9f142 /drivers/iommu/intel/iommu.c | |
| parent | c70637cdd8cf6942ca20d92a27eddb23d78066ca | |
iommu: Change iommu_iotlb_gather to use iommu_pages_list
This converts the remaining places using a list of pages to the new
iommu_pages_list API. The Intel free path is shared with its gather
path, so it is converted at the same time.
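As a rough sketch of the conversion pattern (collect_pagetable_old() and
collect_pagetable_new() are hypothetical helpers made up for this example;
the iommu_pages_list helpers are the iommu-internal ones visible in the
diff below):

```c
#include <linux/list.h> /* list_head, list_add_tail() */
#include <linux/mm.h>   /* pfn_to_page(), PAGE_SHIFT */
#include <linux/io.h>   /* phys_to_virt() */

/* Before: freelists were list_heads threading struct page via page->lru. */
static void collect_pagetable_old(phys_addr_t table_phys,
				  struct list_head *freelist)
{
	struct page *pg = pfn_to_page(table_phys >> PAGE_SHIFT);

	list_add_tail(&pg->lru, freelist);
}

/*
 * After: freelists are iommu_pages_lists holding the table's virtual
 * address directly, with no detour through struct page.
 */
static void collect_pagetable_new(phys_addr_t table_phys,
				  struct iommu_pages_list *freelist)
{
	iommu_pages_list_add(freelist, phys_to_virt(table_phys));
}
```

The substitution is mechanical: each list_add_tail() on a page->lru becomes
an iommu_pages_list_add() of the corresponding virtual address, and on-stack
LIST_HEAD() declarations become IOMMU_PAGES_LIST_INIT().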
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/11-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
| -rw-r--r-- | drivers/iommu/intel/iommu.c | 24 |
1 file changed, 12 insertions, 12 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e66fd919501f..0de9580372a2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -894,18 +894,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
    The 'pte' argument is the *parent* PTE, pointing to the page that is to
    be freed. */
 static void dma_pte_list_pagetables(struct dmar_domain *domain,
-                                    int level, struct dma_pte *pte,
-                                    struct list_head *freelist)
+                                    int level, struct dma_pte *parent_pte,
+                                    struct iommu_pages_list *freelist)
 {
-        struct page *pg;
+        struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
 
-        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
-        list_add_tail(&pg->lru, freelist);
+        iommu_pages_list_add(freelist, pte);
 
         if (level == 1)
                 return;
 
-        pte = page_address(pg);
         do {
                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                         dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +914,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
 static void dma_pte_clear_level(struct dmar_domain *domain, int level,
                                 struct dma_pte *pte, unsigned long pfn,
                                 unsigned long start_pfn, unsigned long last_pfn,
-                                struct list_head *freelist)
+                                struct iommu_pages_list *freelist)
 {
         struct dma_pte *first_pte = NULL, *last_pte = NULL;
 
@@ -961,7 +959,8 @@ next:
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
 static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
-                         unsigned long last_pfn, struct list_head *freelist)
+                         unsigned long last_pfn,
+                         struct iommu_pages_list *freelist)
 {
         if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
             WARN_ON(start_pfn > last_pfn))
@@ -973,8 +972,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
 
         /* free pgd */
         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-                struct page *pgd_page = virt_to_page(domain->pgd);
-                list_add_tail(&pgd_page->lru, freelist);
+                iommu_pages_list_add(freelist, domain->pgd);
                 domain->pgd = NULL;
         }
 }
@@ -1449,7 +1447,8 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 static void domain_exit(struct dmar_domain *domain)
 {
         if (domain->pgd) {
-                LIST_HEAD(freelist);
+                struct iommu_pages_list freelist =
+                        IOMMU_PAGES_LIST_INIT(freelist);
 
                 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
                 iommu_put_pages_list(&freelist);
@@ -3603,7 +3602,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
 {
         cache_tag_flush_range(to_dmar_domain(domain), gather->start,
-                              gather->end, list_empty(&gather->freelist));
+                              gather->end,
+                              iommu_pages_list_empty(&gather->freelist));
         iommu_put_pages_list(&gather->freelist);
 }
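For context on how the converted gather path reads, here is the resulting
intel_iommu_tlb_sync() with explanatory comments added (a sketch assuming,
per this series, that struct iommu_iotlb_gather's freelist member is now an
iommu_pages_list):

```c
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	/*
	 * iommu_pages_list_empty() stands in for the old list_empty()
	 * check; the flush-hint semantics are unchanged.
	 */
	cache_tag_flush_range(to_dmar_domain(domain), gather->start,
			      gather->end,
			      iommu_pages_list_empty(&gather->freelist));

	/*
	 * Table pages queued during unmap are only returned to the
	 * allocator after the IOTLB flush, once the hardware page-walk
	 * can no longer touch them.
	 */
	iommu_put_pages_list(&gather->freelist);
}
```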