Hi Mike,

On Mon, Jul 27, 2020 at 07:29:31PM +0300, Mike Rapoport wrote:
> For instance, the following example will create an uncached mapping (error
> handling is omitted):
> 
>       fd = memfd_secret(SECRETMEM_UNCACHED);
>       ftruncate(fd, MAP_SIZE);
>       ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
[...]
> +static struct page *secretmem_alloc_page(gfp_t gfp)
> +{
> +     /*
> +      * FIXME: use a cache of large pages to reduce the direct map
> +      * fragmentation
> +      */
> +     return alloc_page(gfp);
> +}
> +
> +static vm_fault_t secretmem_fault(struct vm_fault *vmf)
> +{
> +     struct address_space *mapping = vmf->vma->vm_file->f_mapping;
> +     struct inode *inode = file_inode(vmf->vma->vm_file);
> +     pgoff_t offset = vmf->pgoff;
> +     unsigned long addr;
> +     struct page *page;
> +     int ret = 0;
> +
> +     if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
> +             return vmf_error(-EINVAL);
> +
> +     page = find_get_entry(mapping, offset);
> +     if (!page) {
> +             page = secretmem_alloc_page(vmf->gfp_mask);
> +             if (!page)
> +                     return vmf_error(-ENOMEM);
> +
> +             ret = add_to_page_cache(page, mapping, offset, vmf->gfp_mask);
> +             if (unlikely(ret))
> +                     goto err_put_page;
> +
> +             ret = set_direct_map_invalid_noflush(page);
> +             if (ret)
> +                     goto err_del_page_cache;
> +
> +             addr = (unsigned long)page_address(page);
> +             flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> +             __SetPageUptodate(page);
> +
> +             ret = VM_FAULT_LOCKED;
> +     }
> +
> +     vmf->page = page;
> +     return ret;
> +
> +err_del_page_cache:
> +     delete_from_page_cache(page);
> +err_put_page:
> +     put_page(page);
> +     return vmf_error(ret);
> +}
> +
> +static const struct vm_operations_struct secretmem_vm_ops = {
> +     .fault = secretmem_fault,
> +};
> +
> +static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +     struct secretmem_ctx *ctx = file->private_data;
> +     unsigned long mode = ctx->mode;
> +     unsigned long len = vma->vm_end - vma->vm_start;
> +
> +     if (!mode)
> +             return -EINVAL;
> +
> +     if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
> +             return -EINVAL;
> +
> +     if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
> +             return -EAGAIN;
> +
> +     switch (mode) {
> +     case SECRETMEM_UNCACHED:
> +             vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> +             fallthrough;
> +     case SECRETMEM_EXCLUSIVE:
> +             vma->vm_ops = &secretmem_vm_ops;
> +             break;
> +     default:
> +             return -EINVAL;
> +     }
> +
> +     vma->vm_flags |= VM_LOCKED;
> +
> +     return 0;
> +}

I think the uncached mapping is not the right thing for arm/arm64. First
of all, pgprot_noncached() gives us Strongly Ordered (Device memory)
semantics and also disallows unaligned accesses. I suspect the
semantics are different on x86.

The second, and more serious, problem is that I can't find any place where
the caches are flushed for the page mapped on fault. When a page is
allocated, assuming GFP_ZERO, only the caches are guaranteed to be
zeroed. Exposing this subsequently to user space as uncached would allow
the user to read stale data prior to zeroing. The arm64
set_direct_map_default_noflush() doesn't do any cache maintenance.

-- 
Catalin

Reply via email to