On Tue, Sep 10, 2024, Nikolas Wipper wrote:
> +int kvm_arch_vcpu_ioctl_translate2(struct kvm_vcpu *vcpu,
> +                                 struct kvm_translation2 *tr)
> +{
> +     int idx, set_bit_mode = 0, access = 0;
> +     struct x86_exception exception = { };
> +     gva_t vaddr = tr->linear_address;
> +     u16 status = 0;
> +     gpa_t gpa;
> +
> +     if (tr->flags & KVM_TRANSLATE_FLAGS_SET_ACCESSED)
> +             set_bit_mode |= PWALK_SET_ACCESSED;
> +     if (tr->flags & KVM_TRANSLATE_FLAGS_SET_DIRTY)
> +             set_bit_mode |= PWALK_SET_DIRTY;
> +     if (tr->flags & KVM_TRANSLATE_FLAGS_FORCE_SET_ACCESSED)
> +             set_bit_mode |= PWALK_FORCE_SET_ACCESSED;
> +
> +     if (tr->access & KVM_TRANSLATE_ACCESS_WRITE)
> +             access |= PFERR_WRITE_MASK;
> +     if (tr->access & KVM_TRANSLATE_ACCESS_USER)
> +             access |= PFERR_USER_MASK;
> +     if (tr->access & KVM_TRANSLATE_ACCESS_EXEC)
> +             access |= PFERR_FETCH_MASK;

WRITE and FETCH accesses need to be mutually exclusive: a single x86 access cannot be both a data write and an instruction fetch, so PFERR_WRITE_MASK and PFERR_FETCH_MASK must never be combined in one walk. The ioctl should reject (e.g. with -EINVAL) any request that sets both KVM_TRANSLATE_ACCESS_WRITE and KVM_TRANSLATE_ACCESS_EXEC, rather than silently ORing both bits into 'access'.

Reply via email to