I noticed a couple of minor glitches.

On Wed, Nov 04, 2020 at 04:54:17PM +0200, Jarkko Sakkinen wrote:
> +int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
> +                  unsigned long end, unsigned long vm_flags)
> +{
> +     unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> +     struct sgx_encl_page *page;
> +     unsigned long count = 0;
> +     int ret = 0;
> +
> +     XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
> +
> +     /*
> +      * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
> +      * conflict with the enclave page permissions.
> +      */
> +     if (current->personality & READ_IMPLIES_EXEC)
> +             return -EACCES;
> +
> +     mutex_lock(&encl->lock);
> +     xas_lock(&xas);
> +     xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
> +             if (!page)
> +                     break;

This is a redundant check; it can be removed.

> +
> +             if (~page->vm_max_prot_bits & vm_prot_bits) {
> +                     ret = -EACCES;
> +                     break;
> +             }
> +
> +             /* Reschedule on every XA_CHECK_SCHED iteration. */
> +             if (!(++count % XA_CHECK_SCHED)) {
> +                     xas_pause(&xas);
> +                     xas_unlock(&xas);
> +                     mutex_unlock(&encl->lock);
> +
> +                     cond_resched();
> +
> +                     mutex_lock(&encl->lock);
> +                     xas_lock(&xas);
> +             }
> +     }
> +     xas_unlock(&xas);
> +     mutex_unlock(&encl->lock);
> +
> +     return ret;
> +}
> +
> +static int sgx_vma_mprotect(struct vm_area_struct *vma,
> +                         struct vm_area_struct **pprev, unsigned long start,
> +                         unsigned long end, unsigned long newflags)
> +{
> +     int ret;
> +
> +     ret = sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
> +     if (ret)
> +             return ret;
> +
> +     return mprotect_fixup(vma, pprev, start, end, newflags);
> +}
> +
> +const struct vm_operations_struct sgx_vm_ops = {
> +     .fault = sgx_vma_fault,
> +     .mprotect = sgx_vma_mprotect,
> +};
> +
> +/**
> + * sgx_encl_find - find an enclave
> + * @mm:              mm struct of the current process
> + * @addr:    address in the ELRANGE
> + * @vma:     the resulting VMA
> + *
> + * Find an enclave identified by the given address. Give back a VMA that is
> + * part of the enclave and located in that address. The VMA is given back if 
> it
> + * is a proper enclave VMA even if an &sgx_encl instance does not exist yet
> + * (enclave creation has not been performed).
> + *
> + * Return:
> + *   0 on success,
> + *   -EINVAL if an enclave was not found,
> + *   -ENOENT if the enclave has not been created yet
> + */
> +int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
> +               struct vm_area_struct **vma)
> +{
> +     struct vm_area_struct *result;
> +     struct sgx_encl *encl;
> +
> +     result = find_vma(mm, addr);
> +     if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
> +             return -EINVAL;
> +
> +     encl = result->vm_private_data;
> +     *vma = result;
> +
> +     return encl ? 0 : -ENOENT;
> +}

Since v20 there has been a 1:1 association between enclaves and files.
In other words, this can never return -ENOENT.

With this reduction the function turns into:

int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
                  struct vm_area_struct **vma)
{
        struct vm_area_struct *result;

        result = find_vma(mm, addr);
        if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
                return -EINVAL;

        *vma = result;

        return 0;
}

There are only two call sites:

1. sgx_encl_test_and_clear_young()
2. sgx_reclaimer_block()

I.e. it would not be much trouble to tune the signature a bit:

struct vm_area_struct *sgx_encl_find_vma(struct mm_struct *mm, unsigned long 
addr)
{
        struct vm_area_struct *result;

        result = find_vma(mm, addr);
        if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
                return NULL;

        return result;
}

There is a function called sgx_encl_find_mm(), which is *unrelated* to
this function and has only one call site. Its flow is very linear. In
order to avoid confusion, I'd open code it into sgx_encl_mm_add().

/Jarkko

Reply via email to