Skip to content

Commit d2dcf25

Browse files
yang-weijiang authored and sean-jc committed
KVM: x86: Rename kvm_{g,s}et_msr()* to show that they emulate guest accesses
Rename kvm_{g,s}et_msr_with_filter() and kvm_{g,s}et_msr() to kvm_emulate_msr_{read,write} and __kvm_emulate_msr_{read,write} respectively, to make it more obvious that KVM uses these helpers to emulate guest behaviors, i.e., host_initiated == false in these helpers. Suggested-by: Sean Christopherson <seanjc@google.com> Suggested-by: Chao Gao <chao.gao@intel.com> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> Reviewed-by: Chao Gao <chao.gao@intel.com> Tested-by: Mathias Krause <minipli@grsecurity.net> Tested-by: John Allen <john.allen@amd.com> Signed-off-by: Chao Gao <chao.gao@intel.com> Tested-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Link: https://lore.kernel.org/r/20250812025606.74625-2-chao.gao@intel.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent d90ebf5 commit d2dcf25

4 files changed

Lines changed: 27 additions & 26 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2154,11 +2154,11 @@ void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
21542154

21552155
void kvm_enable_efer_bits(u64);
21562156
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
2157-
int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2158-
int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
2157+
int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2158+
int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
21592159
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
2160-
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2161-
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
2160+
int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2161+
int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
21622162
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
21632163
int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
21642164
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

arch/x86/kvm/smm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -529,7 +529,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
529529

530530
vcpu->arch.smbase = smstate->smbase;
531531

532-
if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
532+
if (__kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
533533
return X86EMUL_UNHANDLEABLE;
534534

535535
rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
@@ -620,7 +620,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
620620

621621
/* And finally go back to 32-bit mode. */
622622
efer = 0;
623-
kvm_set_msr(vcpu, MSR_EFER, efer);
623+
__kvm_emulate_msr_write(vcpu, MSR_EFER, efer);
624624
}
625625
#endif
626626

arch/x86/kvm/vmx/nested.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -997,7 +997,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
997997
__func__, i, e.index, e.reserved);
998998
goto fail;
999999
}
1000-
if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
1000+
if (kvm_emulate_msr_write(vcpu, e.index, e.value)) {
10011001
pr_debug_ratelimited(
10021002
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
10031003
__func__, i, e.index, e.value);
@@ -1033,7 +1033,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
10331033
}
10341034
}
10351035

1036-
if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
1036+
if (kvm_emulate_msr_read(vcpu, msr_index, data)) {
10371037
pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
10381038
msr_index);
10391039
return false;
@@ -2770,8 +2770,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
27702770

27712771
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
27722772
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
2773-
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2774-
vmcs12->guest_ia32_perf_global_ctrl))) {
2773+
WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2774+
vmcs12->guest_ia32_perf_global_ctrl))) {
27752775
*entry_failure_code = ENTRY_FAIL_DEFAULT;
27762776
return -EINVAL;
27772777
}
@@ -4758,8 +4758,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
47584758
}
47594759
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
47604760
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4761-
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4762-
vmcs12->host_ia32_perf_global_ctrl));
4761+
WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4762+
vmcs12->host_ia32_perf_global_ctrl));
47634763

47644764
/* Set L1 segment info according to Intel SDM
47654765
27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -4937,7 +4937,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
49374937
goto vmabort;
49384938
}
49394939

4940-
if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
4940+
if (kvm_emulate_msr_write(vcpu, h.index, h.value)) {
49414941
pr_debug_ratelimited(
49424942
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
49434943
__func__, j, h.index, h.value);

arch/x86/kvm/x86.c

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1933,33 +1933,33 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
19331933
__kvm_get_msr);
19341934
}
19351935

1936-
int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1936+
int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
19371937
{
19381938
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
19391939
return KVM_MSR_RET_FILTERED;
19401940
return kvm_get_msr_ignored_check(vcpu, index, data, false);
19411941
}
1942-
EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
1942+
EXPORT_SYMBOL_GPL(kvm_emulate_msr_read);
19431943

1944-
int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1944+
int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
19451945
{
19461946
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
19471947
return KVM_MSR_RET_FILTERED;
19481948
return kvm_set_msr_ignored_check(vcpu, index, data, false);
19491949
}
1950-
EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
1950+
EXPORT_SYMBOL_GPL(kvm_emulate_msr_write);
19511951

1952-
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1952+
int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
19531953
{
19541954
return kvm_get_msr_ignored_check(vcpu, index, data, false);
19551955
}
1956-
EXPORT_SYMBOL_GPL(kvm_get_msr);
1956+
EXPORT_SYMBOL_GPL(__kvm_emulate_msr_read);
19571957

1958-
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1958+
int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
19591959
{
19601960
return kvm_set_msr_ignored_check(vcpu, index, data, false);
19611961
}
1962-
EXPORT_SYMBOL_GPL(kvm_set_msr);
1962+
EXPORT_SYMBOL_GPL(__kvm_emulate_msr_write);
19631963

19641964
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
19651965
{
@@ -2040,7 +2040,8 @@ static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg,
20402040
u64 data;
20412041
int r;
20422042

2043-
r = kvm_get_msr_with_filter(vcpu, msr, &data);
2043+
r = kvm_emulate_msr_read(vcpu, msr, &data);
2044+
20442045
if (!r) {
20452046
trace_kvm_msr_read(msr, data);
20462047

@@ -2080,7 +2081,7 @@ static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20802081
{
20812082
int r;
20822083

2083-
r = kvm_set_msr_with_filter(vcpu, msr, data);
2084+
r = kvm_emulate_msr_write(vcpu, msr, data);
20842085
if (!r) {
20852086
trace_kvm_msr_write(msr, data);
20862087
} else {
@@ -8366,7 +8367,7 @@ static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
83668367
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
83678368
int r;
83688369

8369-
r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
8370+
r = kvm_emulate_msr_read(vcpu, msr_index, pdata);
83708371
if (r < 0)
83718372
return X86EMUL_UNHANDLEABLE;
83728373

@@ -8389,7 +8390,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
83898390
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
83908391
int r;
83918392

8392-
r = kvm_set_msr_with_filter(vcpu, msr_index, data);
8393+
r = kvm_emulate_msr_write(vcpu, msr_index, data);
83938394
if (r < 0)
83948395
return X86EMUL_UNHANDLEABLE;
83958396

@@ -8409,7 +8410,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
84098410
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
84108411
u32 msr_index, u64 *pdata)
84118412
{
8412-
return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
8413+
return __kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
84138414
}
84148415

84158416
static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)

0 commit comments

Comments (0)