On Thu, Sep 17, 2020 at 12:34:18AM -0500, Haitao Huang wrote:
> On Tue, 15 Sep 2020 06:05:11 -0500, Jarkko Sakkinen
> <jarkko.sakki...@linux.intel.com> wrote:
> ...
> 
> > +static int __sgx_encl_add_page(struct sgx_encl *encl,
> > +                          struct sgx_encl_page *encl_page,
> > +                          struct sgx_epc_page *epc_page,
> > +                          struct sgx_secinfo *secinfo, unsigned long src)
> > +{
> > +   struct sgx_pageinfo pginfo;
> > +   struct vm_area_struct *vma;
> > +   struct page *src_page;
> > +   int ret;
> > +
> > +   /* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
> > +   if (encl_page->vm_max_prot_bits & VM_EXEC) {
> > +           vma = find_vma(current->mm, src);
> > +           if (!vma)
> > +                   return -EFAULT;
> > +
> > +           if (!(vma->vm_flags & VM_MAYEXEC))
> > +                   return -EACCES;
> > +   }
> > +
> > +   ret = get_user_pages(src, 1, 0, &src_page, NULL);
> > +   if (ret < 1)
> > +           return ret;
> > +
> > +   pginfo.secs = (unsigned long)sgx_get_epc_addr(encl->secs.epc_page);
> > +   pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
> > +   pginfo.metadata = (unsigned long)secinfo;
> > +   pginfo.contents = (unsigned long)kmap_atomic(src_page);
> > +
> > +   ret = __eadd(&pginfo, sgx_get_epc_addr(epc_page));
> > +
> > +   kunmap_atomic((void *)pginfo.contents);
> > +   put_page(src_page);
> > +
> > +   return ret ? -EIO : 0;
> > +}
> > +
> > +/*
> > + * If the caller requires measurement of the page as a proof for the content,
> > + * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
> > + * operation until the entire page is measured.
> > + */
> > +static int __sgx_encl_extend(struct sgx_encl *encl,
> > +                        struct sgx_epc_page *epc_page)
> > +{
> > +   int ret;
> > +   int i;
> > +
> > +   for (i = 0; i < 16; i++) {
> > +           ret = __eextend(sgx_get_epc_addr(encl->secs.epc_page),
> > +                           sgx_get_epc_addr(epc_page) + (i * 0x100));
> > +           if (ret) {
> > +                   if (encls_failed(ret))
> > +                           ENCLS_WARN(ret, "EEXTEND");
> > +                   return -EIO;
> > +           }
> > +   }
> > +
> > +   return 0;
> > +}
> > +
> > +static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
> > +                        unsigned long offset, struct sgx_secinfo *secinfo,
> > +                        unsigned long flags)
> > +{
> > +   struct sgx_encl_page *encl_page;
> > +   struct sgx_epc_page *epc_page;
> > +   int ret;
> > +
> > +   encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
> > +   if (IS_ERR(encl_page))
> > +           return PTR_ERR(encl_page);
> > +
> > +   epc_page = __sgx_alloc_epc_page();
> > +   if (IS_ERR(epc_page)) {
> > +           kfree(encl_page);
> > +           return PTR_ERR(epc_page);
> > +   }
> > +
> > +   mmap_read_lock(current->mm);
> > +   mutex_lock(&encl->lock);
> > +
> > +   /*
> > +    * Insert prior to EADD in case of OOM.  EADD modifies MRENCLAVE, i.e.
> > +    * can't be gracefully unwound, while failure on EADD/EXTEND is limited
> > +    * to userspace errors (or kernel/hardware bugs).
> > +    */
> > +   ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
> > +                   encl_page, GFP_KERNEL);
> > +   if (ret)
> > +           goto err_out_unlock;
> > +
> > +   ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
> > +                             src);
> > +   if (ret)
> > +           goto err_out;
> > +
> > +   /*
> > +    * Complete the "add" before doing the "extend" so that the "add"
> > +    * isn't in a half-baked state in the extremely unlikely scenario
> > +    * the enclave will be destroyed in response to EEXTEND failure.
> > +    */
> > +   encl_page->encl = encl;
> > +   encl_page->epc_page = epc_page;
> > +   encl->secs_child_cnt++;
> > +
> > +   if (flags & SGX_PAGE_MEASURE) {
> > +           ret = __sgx_encl_extend(encl, epc_page);
> > +           if (ret)
> > +                   goto err_out;
> > +   }
> > +
> > +   mutex_unlock(&encl->lock);
> > +   mmap_read_unlock(current->mm);
> > +   return ret;
> > +
> > +err_out:
> > +   xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
> > +
> > +err_out_unlock:
> > +   mutex_unlock(&encl->lock);
> > +   mmap_read_unlock(current->mm);
> > +
> > +   sgx_free_epc_page(epc_page);
> > +   kfree(encl_page);
> > +
> > +   /*
> > +    * Destroy enclave on ENCLS failure as this means that EPC has been
> > +    * invalidated.
> > +    */
> > +   if (ret == -EIO) {
> > +           mutex_lock(&encl->lock);
> > +           sgx_encl_destroy(encl);
> > +           mutex_unlock(&encl->lock);
> > +   }
> > +
> 
> I think originally we returned both the count of succeeded EADDs and
> the error, so we only destroyed enclaves in cases of fatal ENCLS
> failures.
> 
> Now we only return an error for all failures, except when the
> operation is interrupted or SGX_MAX_ADD_PAGES_LENGTH is reached.
> 
> So, for the new design we should destroy enclaves in all cases here, not
> just for -EIO.
> 
> On the other hand, I do like the old way of returning both the count
> and the error better. It helps greatly when debugging issues in the
> enclave image or user space code, and it also keeps the flexibility
> for user space to recover from certain errors, such as running out of
> EPC.

Right, I do get the OOM case, but wouldn't the reasonable thing to do
in that case be to destroy the enclave, given that it is not even
running? I mean, hitting that condition means we are globally out of
EPC.
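
To make the trade-off above concrete, here is a rough sketch (not the
patch under review) of an ioctl loop that reports both the number of
bytes successfully EADDed and the error, so user space can tell partial
progress from total failure and restart after a signal. The struct
layout and the "count" field are assumptions for illustration:

	static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl,
					      void __user *arg)
	{
		struct sgx_enclave_add_pages add_arg;
		struct sgx_secinfo secinfo;
		unsigned long c;
		int ret = 0;

		if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
			return -EFAULT;

		if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
				   sizeof(secinfo)))
			return -EFAULT;

		for (c = 0; c < add_arg.length; c += PAGE_SIZE) {
			/* Let user space restart after a signal. */
			if (signal_pending(current)) {
				ret = c ? -EINTR : -ERESTARTSYS;
				break;
			}

			ret = sgx_encl_add_page(encl, add_arg.src + c,
						add_arg.offset + c, &secinfo,
						add_arg.flags);
			if (ret)
				break;
		}

		/* Report how far we got, even on failure. */
		add_arg.count = c;
		if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
			return -EFAULT;

		return ret;
	}

With something like this, user space could re-issue the ioctl with
src/offset/length advanced by "count" on -EINTR, and decide on its own
whether e.g. an out-of-EPC error is worth retrying.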

/Jarkko
