Hi Eric,

>-----Original Message-----
>From: Eric Auger <[email protected]>
>Subject: Re: [PATCH v7 02/23] intel_iommu: Delete RPS capability related
>supporting code
>
>Hi Zhenzhong,
>
>On 10/24/25 10:43 AM, Zhenzhong Duan wrote:
>> RID-PASID Support(RPS) is not set in vIOMMU ECAP register, the supporting
>> code is there but never takes effect.
>>
>> Meanwhile, according to VTD spec section 3.4.3:
>> "Implementations not supporting RID_PASID capability (ECAP_REG.RPS is
>0b),
>> use a PASID value of 0 to perform address translation for requests without
>> PASID."
>>
>> We should delete the supporting code which fetches RID_PASID field from
>> scalable context entry and use 0 as RID_PASID directly, because RID_PASID
>> field is ignored if no RPS support according to spec.
>>
>> This simplifies the code and doesn't bring any penalty.
>>
>> Suggested-by: Yi Liu <[email protected]>
>> Signed-off-by: Zhenzhong Duan <[email protected]>
>> ---
>>  hw/i386/intel_iommu_internal.h |  1 -
>>  hw/i386/intel_iommu.c          | 82 +++++++++++-----------------------
>>  2 files changed, 27 insertions(+), 56 deletions(-)
>>
>> diff --git a/hw/i386/intel_iommu_internal.h
>b/hw/i386/intel_iommu_internal.h
>> index 75bafdf0cd..bf8fb2aa80 100644
>> --- a/hw/i386/intel_iommu_internal.h
>> +++ b/hw/i386/intel_iommu_internal.h
>> @@ -609,7 +609,6 @@ typedef struct VTDRootEntry VTDRootEntry;
>>  #define VTD_CTX_ENTRY_LEGACY_SIZE     16
>>  #define VTD_CTX_ENTRY_SCALABLE_SIZE   32
>>
>> -#define VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK 0xfffff
>>  #define VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(aw)  (0x1e0ULL |
>~VTD_HAW_MASK(aw))
>>  #define VTD_SM_CONTEXT_ENTRY_RSVD_VAL1
>0xffffffffffe00000ULL
>>  #define VTD_SM_CONTEXT_ENTRY_PRE            0x10ULL
>> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
>> index 70746e3080..06065d16b6 100644
>> --- a/hw/i386/intel_iommu.c
>> +++ b/hw/i386/intel_iommu.c
>> @@ -42,8 +42,7 @@
>>  #include "trace.h"
>>
>>  /* context entry operations */
>> -#define VTD_CE_GET_RID2PASID(ce) \
>> -    ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
>> +#define PASID_0    0
>>  #define VTD_CE_GET_PASID_DIR_TABLE(ce) \
>>      ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)
>>  #define VTD_CE_GET_PRE(ce) \
>> @@ -963,7 +962,7 @@ static int vtd_ce_get_pasid_entry(IntelIOMMUState
>*s, VTDContextEntry *ce,
>>      int ret = 0;
>while you are at it, get rid of ret and simply return
>vtd_get_pe_from_pasid_table()?

Sure

>>
>>      if (pasid == PCI_NO_PASID) {
>> -        pasid = VTD_CE_GET_RID2PASID(ce);
>> +        pasid = PASID_0;
>>      }
>>      pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
>>      ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe);
>> @@ -982,7 +981,7 @@ static int vtd_ce_get_pasid_fpd(IntelIOMMUState
>*s,
>>      VTDPASIDEntry pe;
>>
>>      if (pasid == PCI_NO_PASID) {
>> -        pasid = VTD_CE_GET_RID2PASID(ce);
>> +        pasid = PASID_0;
>>      }
>>      pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
>>
>> @@ -1522,17 +1521,15 @@ static inline int
>vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s,
>>      return 0;
>>  }
>>
>> -static int vtd_ce_rid2pasid_check(IntelIOMMUState *s,
>> -                                  VTDContextEntry *ce)
>> +static int vtd_ce_pasid_0_check(IntelIOMMUState *s, VTDContextEntry
>*ce)
>>  {
>>      VTDPASIDEntry pe;
>>
>>      /*
>>       * Make sure in Scalable Mode, a present context entry
>> -     * has valid rid2pasid setting, which includes valid
>> -     * rid2pasid field and corresponding pasid entry setting
>> +     * has valid pasid entry setting at PASID_0.
>>       */
>> -    return vtd_ce_get_pasid_entry(s, ce, &pe, PCI_NO_PASID);
>> +    return vtd_ce_get_pasid_entry(s, ce, &pe, PASID_0);
>>  }
>>
>>  /* Map a device to its corresponding domain (context-entry) */
>> @@ -1593,12 +1590,11 @@ static int
>vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
>>          }
>>      } else {
>>          /*
>> -         * Check if the programming of context-entry.rid2pasid
>> -         * and corresponding pasid setting is valid, and thus
>> -         * avoids to check pasid entry fetching result in future
>> -         * helper function calling.
>> +         * Check if the programming of pasid setting of PASID_0
>> +         * is valid, and thus avoids to check pasid entry fetching
>> +         * result in future helper function calling.
>>           */
>> -        ret_fr = vtd_ce_rid2pasid_check(s, ce);
>> +        ret_fr = vtd_ce_pasid_0_check(s, ce);
>I guess you should be able to return vtd_ce_pasid_0_check(s, ce)
>directly too.

Yes.

>>          if (ret_fr) {
>>              return ret_fr;
>>          }
>> @@ -2110,7 +2106,6 @@ static bool
>vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
>>      bool reads = true;
>>      bool writes = true;
>>      uint8_t access_flags, pgtt;
>> -    bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable;
>>      VTDIOTLBEntry *iotlb_entry;
>>      uint64_t xlat, size;
>>
>> @@ -2122,21 +2117,23 @@ static bool
>vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
>>
>>      vtd_iommu_lock(s);
>>
>> -    cc_entry = &vtd_as->context_cache_entry;
>any reason why cc_entry setting was moved? Seems a spurious change.

I'd like to initialize cc_entry right before it is dereferenced; there is no need 
to initialize it early because we can 'goto out' early.

>> +    if (pasid == PCI_NO_PASID && s->root_scalable) {
>> +        pasid = PASID_0;
>> +    }
>>
>> -    /* Try to fetch pte from IOTLB, we don't need RID2PASID logic */
>> -    if (!rid2pasid) {
>> -        iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
>> -        if (iotlb_entry) {
>> -            trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte,
>> -                                     iotlb_entry->domain_id);
>> -            pte = iotlb_entry->pte;
>> -            access_flags = iotlb_entry->access_flags;
>> -            page_mask = iotlb_entry->mask;
>> -            goto out;
>> -        }
>> +    /* Try to fetch pte from IOTLB */
>> +    iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
>> +    if (iotlb_entry) {
>> +        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte,
>> +                                 iotlb_entry->domain_id);
>> +        pte = iotlb_entry->pte;
>> +        access_flags = iotlb_entry->access_flags;
>> +        page_mask = iotlb_entry->mask;
>> +        goto out;
>>      }
>>
>> +    cc_entry = &vtd_as->context_cache_entry;
>> +
>>      /* Try to fetch context-entry from cache first */
>>      if (cc_entry->context_cache_gen == s->context_cache_gen) {
>>          trace_vtd_iotlb_cc_hit(bus_num, devfn,
>cc_entry->context_entry.hi,
>> @@ -2173,10 +2170,6 @@ static bool
>vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
>>          cc_entry->context_cache_gen = s->context_cache_gen;
>>      }
>>
>> -    if (rid2pasid) {
>> -        pasid = VTD_CE_GET_RID2PASID(&ce);
>> -    }
>> -
>>      /*
>>       * We don't need to translate for pass-through context entries.
>>       * Also, let's ignore IOTLB caching as well for PT devices.
>> @@ -2202,19 +2195,6 @@ static bool
>vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
>>          return true;
>>      }
>>
>> -    /* Try to fetch pte from IOTLB for RID2PASID slow path */
>> -    if (rid2pasid) {
>> -        iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr);
>> -        if (iotlb_entry) {
>> -            trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte,
>> -                                     iotlb_entry->domain_id);
>> -            pte = iotlb_entry->pte;
>> -            access_flags = iotlb_entry->access_flags;
>> -            page_mask = iotlb_entry->mask;
>> -            goto out;
>> -        }
>> -    }
>> -
>>      if (s->flts && s->root_scalable) {
>>          ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level,
>>                                     &reads, &writes, s->aw_bits,
>pasid);
>> @@ -2477,20 +2457,14 @@ static void
>vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
>>          ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
>>                                         vtd_as->devfn, &ce);
>>          if (!ret && domain_id == vtd_get_domain_id(s, &ce,
>vtd_as->pasid)) {
>> -            uint32_t rid2pasid = PCI_NO_PASID;
>> -
>> -            if (s->root_scalable) {
>> -                rid2pasid = VTD_CE_GET_RID2PASID(&ce);
>> -            }
>> -
>>              /*
>>               * In legacy mode, vtd_as->pasid == pasid is always true.
>>               * In scalable mode, for vtd address space backing a PCI
>>               * device without pasid, needs to compare pasid with
>> -             * rid2pasid of this device.
>> +             * PASID_0 of this device.
>>               */
>>              if (!(vtd_as->pasid == pasid ||
>> -                  (vtd_as->pasid == PCI_NO_PASID && pasid ==
>rid2pasid))) {
>> +                  (vtd_as->pasid == PCI_NO_PASID && pasid ==
>PASID_0))) {
>don't you need to check you are in s->root_scalable mode too?

I think there is no need; this combination check can handle both scalable and 
legacy modes, because if s->root_scalable=false, pasid is always PCI_NO_PASID, 
so 'vtd_as->pasid == pasid' becomes 'vtd_as->pasid == PCI_NO_PASID', which is 
a superset of the remaining check.

So the remaining check is already for the s->root_scalable=true case.

Thanks
Zhenzhong

Reply via email to