On Mon, Feb 23, 2026, Jim Mattson wrote:
> +static void svm_set_pat(struct kvm_vcpu *vcpu, bool from_host, u64 data)
> +{
> + struct vcpu_svm *svm = to_svm(vcpu);
> +
> + if (svm_pat_accesses_gpat(vcpu, from_host)) {
> + vmcb_set_gpat(svm->vmcb, data);
> + } else {
> + svm->vcpu.arch.pat = data;
> + if (npt_enabled) {
> + vmcb_set_gpat(svm->vmcb01.ptr, data);
> + if (is_guest_mode(&svm->vcpu) &&
> + !nested_npt_enabled(svm))
> + vmcb_set_gpat(svm->vmcb, data);
> + }
> + }
Overall, this LGTM. For this particular code, any objection to using early
returns to reduce indentation? The else branch above is a bit gnarly,
especially when legacy_gpat_semantics comes along.
I.e., we'd end up with this:
/*
 * Propagate a PAT write to the relevant VMCB G_PAT field(s) and/or the
 * vCPU's shadow copy of the guest PAT MSR.
 *
 * @vcpu:      target vCPU
 * @from_host: true if the write originates from the host (userspace),
 *             false for a guest WRMSR (semantics per svm_pat_accesses_gpat)
 * @data:      new PAT value
 */
static void svm_set_pat(struct kvm_vcpu *vcpu, bool from_host, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
/* Write targets the G_PAT of the current VMCB directly; nothing else to do. */
if (svm_pat_accesses_gpat(vcpu, from_host)) {
vmcb_set_gpat(svm->vmcb, data);
return;
}
/* Otherwise the write updates the architectural PAT MSR shadow. */
svm->vcpu.arch.pat = data;
/* Without NPT the hardware G_PAT is not used for the guest; shadow copy suffices. */
if (!npt_enabled)
return;
/*
 * NOTE(review): vmcb01 is L1's VMCB; while in guest mode svm->vmcb
 * points at vmcb02 — confirm against surrounding KVM/SVM code.
 */
vmcb_set_gpat(svm->vmcb01.ptr, data);
/*
 * Also update the active (L2) VMCB when L2 runs with L1's PAT: either
 * legacy G_PAT semantics are in effect, or L1 is not using nested NPT.
 */
if (is_guest_mode(&svm->vcpu) &&
(svm->nested.legacy_gpat_semantics || !nested_npt_enabled(svm)))
vmcb_set_gpat(svm->vmcb, data);
}
I can fixup when applying (unless you and/or Yosry object).