On Mon,  4 Dec 2023 18:33:00 +0100
Boris Brezillon <boris.brezil...@collabora.com> wrote:

> +/**
> + * panthor_vm_active() - Flag a VM as active
> + * @vm: VM to flag as active.
> + *
> + * Assigns an address space to a VM so it can be used by the GPU/MCU.
> + *
> + * Return: 0 on success, a negative error code otherwise.
> + */
> +int panthor_vm_active(struct panthor_vm *vm)
> +{
> +     struct panthor_device *ptdev = vm->ptdev;
> +     struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
> +     int ret = 0, as, cookie;
> +     u64 transtab, transcfg;
> +
> +     if (!drm_dev_enter(&ptdev->base, &cookie))
> +             return -ENODEV;
> +
> +     mutex_lock(&ptdev->mmu->as.slots_lock);
> +
> +     as = vm->as.id;
> +     if (as >= 0) {
> +             /* Unhandled pagefault on this AS, the MMU was disabled. We need to
> +              * re-enable the MMU after clearing+unmasking the AS interrupts.
> +              */
> +             if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
> +                     goto out_enable_as;
> +

When the context was idle and we make it active, we never remove the VM
from the LRU list, which might cause this AS to be re-assigned to
someone else while the GPU is still using the AS. I'll fix that along
the active_cnt issue I mentioned in my previous reply.

> +             goto out_unlock;
> +     }
> +
> +     /* Check for a free AS */
> +     if (vm->for_mcu) {
> +             drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
> +             as = 0;
> +     } else {
> +             as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
> +     }
> +
> +     if (!(BIT(as) & ptdev->gpu_info.as_present)) {
> +             struct panthor_vm *lru_vm;
> +
> +             lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
> +                                               struct panthor_vm,
> +                                               as.lru_node);
> +             if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
> +                     ret = -EBUSY;
> +                     goto out_unlock;
> +             }
> +
> +             list_del_init(&lru_vm->as.lru_node);
> +             as = lru_vm->as.id;
> +
> +             lru_vm->as.id = -1;
> +     } else {
> +             set_bit(as, &ptdev->mmu->as.alloc_mask);
> +     }
> +
> +     /* Assign the free or reclaimed AS to the FD */
> +     vm->as.id = as;
> +     ptdev->mmu->as.slots[as].vm = vm;
> +
> +out_enable_as:
> +     transtab = cfg->arm_lpae_s1_cfg.ttbr;
> +     transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
> +                AS_TRANSCFG_PTW_RA |
> +                AS_TRANSCFG_ADRMODE_AARCH64_4K;
> +     if (ptdev->coherent)
> +             transcfg |= AS_TRANSCFG_PTW_SH_OS;
> +
> +     /* If the VM is re-activated, we clear the fault. */
> +     vm->unhandled_fault = false;
> +
> +     /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
> +      * before enabling the AS.
> +      */
> +     if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
> +             gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
> +             ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
> +             gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
> +     }
> +
> +     ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
> +
> +out_unlock:
> +     mutex_unlock(&ptdev->mmu->as.slots_lock);
> +     drm_dev_exit(cookie);
> +     return ret;
> +}

Reply via email to