Re: [PATCH RFC 09/19] iommu/amd: Access/Dirty bit support in IOPTEs

2022-05-31 Thread Joao Martins
On 5/31/22 12:34, Suravee Suthikulpanit wrote:
> Joao,
> 
> On 4/29/22 4:09 AM, Joao Martins wrote:
>> .
>> +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
>> +bool enable)
>> +{
>> +struct protection_domain *pdomain = to_pdomain(domain);
>> +struct iommu_dev_data *dev_data;
>> +bool dom_flush = false;
>> +
>> +if (!amd_iommu_had_support)
>> +return -EOPNOTSUPP;
>> +
>> +list_for_each_entry(dev_data, &pdomain->dev_list, list) {
> 
> Since we iterate through the device list for the domain, we would need to
> call spin_lock_irqsave(&pdomain->lock, flags) here.
> 
Ugh, yes. Will fix.

>> +struct amd_iommu *iommu;
>> +u64 pte_root;
>> +
>> +iommu = amd_iommu_rlookup_table[dev_data->devid];
>> +pte_root = amd_iommu_dev_table[dev_data->devid].data[0];
>> +
>> +/* No change? */
>> +if (!(enable ^ !!(pte_root & DTE_FLAG_HAD)))
>> +continue;
>> +
>> +pte_root = (enable ?
>> +pte_root | DTE_FLAG_HAD : pte_root & ~DTE_FLAG_HAD);
>> +
>> +/* Flush device DTE */
>> +amd_iommu_dev_table[dev_data->devid].data[0] = pte_root;
>> +device_flush_dte(dev_data);
>> +dom_flush = true;
>> +}
>> +
>> +/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
>> +if (dom_flush) {
>> +unsigned long flags;
>> +
>> +spin_lock_irqsave(&pdomain->lock, flags);
>> +amd_iommu_domain_flush_tlb_pde(pdomain);
>> +amd_iommu_domain_flush_complete(pdomain);
>> +spin_unlock_irqrestore(&pdomain->lock, flags);
>> +}
> 
> And call spin_unlock_irqrestore(&pdomain->lock, flags); here.

ack

Additionally, something I am thinking of doing for v2 is to have a
@had bool field in iommu_dev_data. That would align better with the
rest of the AMD IOMMU code, rather than me introducing this pattern of
reading the hardware location of the PTE roots. Let me know if you disagree.

>> +
>> +return 0;
>> +}
>> +
>> +static bool amd_iommu_get_dirty_tracking(struct iommu_domain *domain)
>> +{
>> +struct protection_domain *pdomain = to_pdomain(domain);
>> +struct iommu_dev_data *dev_data;
>> +u64 dte;
>> +
> 
> Also call spin_lock_irqsave(&pdomain->lock, flags) here
> 
ack
>> +list_for_each_entry(dev_data, &pdomain->dev_list, list) {
>> +dte = amd_iommu_dev_table[dev_data->devid].data[0];
>> +if (!(dte & DTE_FLAG_HAD))
>> +return false;
>> +}
>> +
> 
> And call spin_unlock_irqrestore(&pdomain->lock, flags) here
> 
ack

Same comment as above: I will replace the @dte check with a check of
this new variable instead.

>> +return true;
>> +}
>> +
>> +static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
>> +  unsigned long iova, size_t size,
>> +  struct iommu_dirty_bitmap *dirty)
>> +{
>> +struct protection_domain *pdomain = to_pdomain(domain);
>> +struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
>> +
>> +if (!amd_iommu_get_dirty_tracking(domain))
>> +return -EOPNOTSUPP;
>> +
>> +if (!ops || !ops->read_and_clear_dirty)
>> +return -ENODEV;
> 
> We should move this check before the amd_iommu_get_dirty_tracking() call.
> 

Yeap, better fail earlier.

> Best Regards,
> Suravee
> 
>> +
>> +return ops->read_and_clear_dirty(ops, iova, size, dirty);
>> +}
>> +
>> +
>>   static void amd_iommu_get_resv_regions(struct device *dev,
>> struct list_head *head)
>>   {
>> @@ -2293,6 +2368,8 @@ const struct iommu_ops amd_iommu_ops = {
>>  .flush_iotlb_all = amd_iommu_flush_iotlb_all,
>>  .iotlb_sync = amd_iommu_iotlb_sync,
>>  .free   = amd_iommu_domain_free,
>> +.set_dirty_tracking = amd_iommu_set_dirty_tracking,
>> +.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
>>  }
>>   };
>>   
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH RFC 09/19] iommu/amd: Access/Dirty bit support in IOPTEs

2022-05-31 Thread Baolu Lu

Hi Suravee ,

On 2022/5/31 19:34, Suravee Suthikulpanit wrote:

On 4/29/22 4:09 AM, Joao Martins wrote:

.
+static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
+    bool enable)
+{
+    struct protection_domain *pdomain = to_pdomain(domain);
+    struct iommu_dev_data *dev_data;
+    bool dom_flush = false;
+
+    if (!amd_iommu_had_support)
+    return -EOPNOTSUPP;
+
+    list_for_each_entry(dev_data, &pdomain->dev_list, list) {


Since we iterate through the device list for the domain, we would need to
call spin_lock_irqsave(&pdomain->lock, flags) here.


Not related, just out of curiosity. Does it really need to disable
interrupts while holding this lock? Is there any case where this list
would be traversed in interrupt context? Perhaps I missed something?

Best regards,
baolu
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Re: [PATCH RFC 09/19] iommu/amd: Access/Dirty bit support in IOPTEs

2022-05-31 Thread Suravee Suthikulpanit via iommu

Joao,

On 4/29/22 4:09 AM, Joao Martins wrote:

.
+static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
+   bool enable)
+{
+   struct protection_domain *pdomain = to_pdomain(domain);
+   struct iommu_dev_data *dev_data;
+   bool dom_flush = false;
+
+   if (!amd_iommu_had_support)
+   return -EOPNOTSUPP;
+
+   list_for_each_entry(dev_data, &pdomain->dev_list, list) {


Since we iterate through the device list for the domain, we would need to
call spin_lock_irqsave(&pdomain->lock, flags) here.


+   struct amd_iommu *iommu;
+   u64 pte_root;
+
+   iommu = amd_iommu_rlookup_table[dev_data->devid];
+   pte_root = amd_iommu_dev_table[dev_data->devid].data[0];
+
+   /* No change? */
+   if (!(enable ^ !!(pte_root & DTE_FLAG_HAD)))
+   continue;
+
+   pte_root = (enable ?
+   pte_root | DTE_FLAG_HAD : pte_root & ~DTE_FLAG_HAD);
+
+   /* Flush device DTE */
+   amd_iommu_dev_table[dev_data->devid].data[0] = pte_root;
+   device_flush_dte(dev_data);
+   dom_flush = true;
+   }
+
+   /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
+   if (dom_flush) {
+   unsigned long flags;
+
+   spin_lock_irqsave(&pdomain->lock, flags);
+   amd_iommu_domain_flush_tlb_pde(pdomain);
+   amd_iommu_domain_flush_complete(pdomain);
+   spin_unlock_irqrestore(&pdomain->lock, flags);
+   }


And call spin_unlock_irqrestore(&pdomain->lock, flags); here.

+
+   return 0;
+}
+
+static bool amd_iommu_get_dirty_tracking(struct iommu_domain *domain)
+{
+   struct protection_domain *pdomain = to_pdomain(domain);
+   struct iommu_dev_data *dev_data;
+   u64 dte;
+


Also call spin_lock_irqsave(&pdomain->lock, flags) here


+   list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+   dte = amd_iommu_dev_table[dev_data->devid].data[0];
+   if (!(dte & DTE_FLAG_HAD))
+   return false;
+   }
+


And call spin_unlock_irqrestore(&pdomain->lock, flags) here


+   return true;
+}
+
+static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ struct iommu_dirty_bitmap *dirty)
+{
+   struct protection_domain *pdomain = to_pdomain(domain);
+   struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+
+   if (!amd_iommu_get_dirty_tracking(domain))
+   return -EOPNOTSUPP;
+
+   if (!ops || !ops->read_and_clear_dirty)
+   return -ENODEV;


We should move this check before the amd_iommu_get_dirty_tracking() call.

Best Regards,
Suravee


+
+   return ops->read_and_clear_dirty(ops, iova, size, dirty);
+}
+
+
  static void amd_iommu_get_resv_regions(struct device *dev,
   struct list_head *head)
  {
@@ -2293,6 +2368,8 @@ const struct iommu_ops amd_iommu_ops = {
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.free   = amd_iommu_domain_free,
+   .set_dirty_tracking = amd_iommu_set_dirty_tracking,
+   .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
}
  };
  

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu