On 9/4/20 4:18 AM, Tom Murphy wrote:
+static int intel_iommu_needs_bounce_buffer(struct device *d)
+{
+       return !intel_no_bounce && dev_is_pci(d) && to_pci_dev(d)->untrusted;
+}
+
  static void intel_iommu_probe_finalize(struct device *dev)
  {
-       struct iommu_domain *domain;
+       dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);

-       domain = iommu_get_domain_for_dev(dev);
-       if (device_needs_bounce(dev))
-               set_dma_ops(dev, &bounce_dma_ops);
-       else if (domain && domain->type == IOMMU_DOMAIN_DMA)
-               set_dma_ops(dev, &intel_dma_ops);
+       if (intel_iommu_needs_bounce_buffer(dev) ||
+                       (domain && domain->type == IOMMU_DOMAIN_DMA))
+               iommu_setup_dma_ops(dev, base,
+                               __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
        else
                set_dma_ops(dev, NULL);
  }

For untrusted devices, a DMA-type domain is already enforced, so
there's no need to check for them again here.
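
The enforcement happens in device_def_domain_type() in the vt-d
driver (drivers/iommu/intel/iommu.c). Roughly (paraphrased sketch,
with the identity-map special cases elided):

static int device_def_domain_type(struct device *dev)
{
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
                 */
                if (pdev->untrusted)
                        return IOMMU_DOMAIN_DMA;
        }

        return 0;
}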
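
Concretely, that means the condition above could presumably be
reduced to the domain-type test alone, something like (untested,
just to illustrate):

        /* An untrusted device always gets a DMA domain, so the
         * domain-type check alone covers it. */
        if (domain && domain->type == IOMMU_DOMAIN_DMA)
                iommu_setup_dma_ops(dev, base,
                                __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
        else
                set_dma_ops(dev, NULL);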

Best regards,
baolu