On Sat, Oct 03, 2020 at 07:50:46AM +0300, Jarkko Sakkinen wrote:
> +     XA_STATE(xas, &encl->page_array, idx_start);
> +
> +     /*
> +      * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
> +      * conflict with the enclave page permissions.
> +      */
> +     if (current->personality & READ_IMPLIES_EXEC)
> +             return -EACCES;
> +
> +     xas_for_each(&xas, page, idx_end)
> +             if (!page || (~page->vm_max_prot_bits & vm_prot_bits))
> +                     return -EACCES;

You're iterating the array without holding any lock that the XArray knows
about.  If you're OK with another thread adding/removing pages behind your
back, or there's a higher level lock (the mmap_sem?) protecting the XArray
from being modified while you walk it, then hold the rcu_read_lock()
while walking the array.  Otherwise you can prevent modification by
calling xas_lock(&xas) and xas_unlock()..

> +     return 0;
> +}
> +
> +static int sgx_vma_mprotect(struct vm_area_struct *vma,
> +                         struct vm_area_struct **pprev, unsigned long start,
> +                         unsigned long end, unsigned long newflags)
> +{
> +     int ret;
> +
> +     ret = sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
> +     if (ret)
> +             return ret;
> +
> +     return mprotect_fixup(vma, pprev, start, end, newflags);
> +}
> +
> +const struct vm_operations_struct sgx_vm_ops = {
> +     .open = sgx_vma_open,
> +     .fault = sgx_vma_fault,
> +     .mprotect = sgx_vma_mprotect,
> +};
> +
> +/**
> + * sgx_encl_find - find an enclave
> + * @mm:              mm struct of the current process
> + * @addr:    address in the ELRANGE
> + * @vma:     the resulting VMA
> + *
> + * Find an enclave identified by the given address. Give back a VMA that is
> + * part of the enclave and located in that address. The VMA is given back if 
> it
> + * is a proper enclave VMA even if an &sgx_encl instance does not exist yet
> + * (enclave creation has not been performed).
> + *
> + * Return:
> + *   0 on success,
> + *   -EINVAL if an enclave was not found,
> + *   -ENOENT if the enclave has not been created yet
> + */
> +int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
> +               struct vm_area_struct **vma)
> +{
> +     struct vm_area_struct *result;
> +     struct sgx_encl *encl;
> +
> +     result = find_vma(mm, addr);
> +     if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
> +             return -EINVAL;
> +
> +     encl = result->vm_private_data;
> +     *vma = result;
> +
> +     return encl ? 0 : -ENOENT;
> +}
> +
> +/**
> + * sgx_encl_destroy() - destroy enclave resources
> + * @encl:    an enclave pointer
> + */
> +void sgx_encl_destroy(struct sgx_encl *encl)
> +{
> +     struct sgx_encl_page *entry;
> +     unsigned long index;
> +
> +     atomic_or(SGX_ENCL_DEAD, &encl->flags);
> +
> +     xa_for_each(&encl->page_array, index, entry) {
> +             if (entry->epc_page) {
> +                     sgx_free_epc_page(entry->epc_page);
> +                     encl->secs_child_cnt--;
> +                     entry->epc_page = NULL;
> +             }
> +
> +             kfree(entry);
> +     }
> +
> +     xa_destroy(&encl->page_array);
> +
> +     if (!encl->secs_child_cnt && encl->secs.epc_page) {
> +             sgx_free_epc_page(encl->secs.epc_page);
> +             encl->secs.epc_page = NULL;
> +     }
> +}
> +
> +/**
> + * sgx_encl_release - Destroy an enclave instance
> + * @kref:    address of a kref inside &sgx_encl
> + *
> + * Used together with kref_put(). Frees all the resources associated with the
> + * enclave and the instance itself.
> + */
> +void sgx_encl_release(struct kref *ref)
> +{
> +     struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
> +
> +     sgx_encl_destroy(encl);
> +
> +     if (encl->backing)
> +             fput(encl->backing);
> +
> +     cleanup_srcu_struct(&encl->srcu);
> +
> +     WARN_ON_ONCE(!list_empty(&encl->mm_list));
> +
> +     /* Detect EPC page leaks. */
> +     WARN_ON_ONCE(encl->secs_child_cnt);
> +     WARN_ON_ONCE(encl->secs.epc_page);
> +
> +     kfree(encl);
> +}
> diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
> new file mode 100644
> index 000000000000..8ff445476657
> --- /dev/null
> +++ b/arch/x86/kernel/cpu/sgx/encl.h
> @@ -0,0 +1,85 @@
> +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
> +/**
> + * Copyright(c) 2016-19 Intel Corporation.
> + */
> +#ifndef _X86_ENCL_H
> +#define _X86_ENCL_H
> +
> +#include <linux/cpumask.h>
> +#include <linux/kref.h>
> +#include <linux/list.h>
> +#include <linux/mm_types.h>
> +#include <linux/mmu_notifier.h>
> +#include <linux/mutex.h>
> +#include <linux/notifier.h>
> +#include <linux/srcu.h>
> +#include <linux/workqueue.h>
> +#include <linux/xarray.h>
> +#include "sgx.h"
> +
> +/**
> + * enum sgx_encl_page_desc - defines bits for an enclave page's descriptor
> + * %SGX_ENCL_PAGE_ADDR_MASK:         Holds the virtual address of the page.
> + *
> + * The page address for SECS is zero and is used by the subsystem to 
> recognize
> + * the SECS page.
> + */
> +enum sgx_encl_page_desc {
> +     /* Bits 11:3 are available when the page is not swapped. */
> +     SGX_ENCL_PAGE_ADDR_MASK         = PAGE_MASK,
> +};
> +
> +#define SGX_ENCL_PAGE_ADDR(page) \
> +     ((page)->desc & SGX_ENCL_PAGE_ADDR_MASK)
> +
> +struct sgx_encl_page {
> +     unsigned long desc;
> +     unsigned long vm_max_prot_bits;
> +     struct sgx_epc_page *epc_page;
> +     struct sgx_encl *encl;
> +};
> +
> +enum sgx_encl_flags {
> +     SGX_ENCL_CREATED        = BIT(0),
> +     SGX_ENCL_INITIALIZED    = BIT(1),
> +     SGX_ENCL_DEBUG          = BIT(2),
> +     SGX_ENCL_DEAD           = BIT(3),
> +     SGX_ENCL_IOCTL          = BIT(4),
> +};
> +
> +struct sgx_encl_mm {
> +     struct sgx_encl *encl;
> +     struct mm_struct *mm;
> +     struct list_head list;
> +     struct mmu_notifier mmu_notifier;
> +};
> +
> +struct sgx_encl {
> +     atomic_t flags;
> +     unsigned int page_cnt;
> +     unsigned int secs_child_cnt;
> +     struct mutex lock;
> +     struct list_head mm_list;
> +     spinlock_t mm_lock;
> +     struct file *backing;
> +     struct kref refcount;
> +     struct srcu_struct srcu;
> +     unsigned long base;
> +     unsigned long size;
> +     unsigned long ssaframesize;
> +     struct xarray page_array;
> +     struct sgx_encl_page secs;
> +     cpumask_t cpumask;
> +};
> +
> +extern const struct vm_operations_struct sgx_vm_ops;
> +
> +int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
> +               struct vm_area_struct **vma);
> +void sgx_encl_destroy(struct sgx_encl *encl);
> +void sgx_encl_release(struct kref *ref);
> +int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
> +int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
> +                  unsigned long end, unsigned long vm_flags);
> +
> +#endif /* _X86_ENCL_H */
> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
> index 97c6895fb6c9..4137254fb29e 100644
> --- a/arch/x86/kernel/cpu/sgx/main.c
> +++ b/arch/x86/kernel/cpu/sgx/main.c
> @@ -9,6 +9,8 @@
>  #include <linux/sched/mm.h>
>  #include <linux/sched/signal.h>
>  #include <linux/slab.h>
> +#include "driver.h"
> +#include "encl.h"
>  #include "encls.h"
>  
>  struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
> @@ -260,6 +262,8 @@ static bool __init sgx_page_cache_init(void)
>  
>  static void __init sgx_init(void)
>  {
> +     int ret;
> +
>       if (!boot_cpu_has(X86_FEATURE_SGX))
>               return;
>  
> @@ -269,8 +273,15 @@ static void __init sgx_init(void)
>       if (!sgx_page_reclaimer_init())
>               goto err_page_cache;
>  
> +     ret = sgx_drv_init();
> +     if (ret)
> +             goto err_kthread;
> +
>       return;
>  
> +err_kthread:
> +     kthread_stop(ksgxswapd_tsk);
> +
>  err_page_cache:
>       sgx_page_cache_teardown();
>  }
> -- 
> 2.25.1
> 

Reply via email to