|
24 | 24 | #include <asm/realmode.h> |
25 | 25 | #include <asm/e820/api.h> |
26 | 26 | #include <asm/desc.h> |
| 27 | +#include <uapi/asm/vmx.h> |
27 | 28 |
|
28 | 29 | #ifdef CONFIG_AMD_MEM_ENCRYPT |
29 | 30 |
|
@@ -186,7 +187,7 @@ bool hv_ghcb_negotiate_protocol(void) |
186 | 187 | return true; |
187 | 188 | } |
188 | 189 |
|
189 | | -void hv_ghcb_msr_write(u64 msr, u64 value) |
| 190 | +static void hv_ghcb_msr_write(u64 msr, u64 value) |
190 | 191 | { |
191 | 192 | union hv_ghcb *hv_ghcb; |
192 | 193 | void **ghcb_base; |
@@ -214,9 +215,8 @@ void hv_ghcb_msr_write(u64 msr, u64 value) |
214 | 215 |
|
215 | 216 | local_irq_restore(flags); |
216 | 217 | } |
217 | | -EXPORT_SYMBOL_GPL(hv_ghcb_msr_write); |
218 | 218 |
|
219 | | -void hv_ghcb_msr_read(u64 msr, u64 *value) |
| 219 | +static void hv_ghcb_msr_read(u64 msr, u64 *value) |
220 | 220 | { |
221 | 221 | union hv_ghcb *hv_ghcb; |
222 | 222 | void **ghcb_base; |
@@ -246,10 +246,71 @@ void hv_ghcb_msr_read(u64 msr, u64 *value) |
246 | 246 | | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32); |
247 | 247 | local_irq_restore(flags); |
248 | 248 | } |
249 | | -EXPORT_SYMBOL_GPL(hv_ghcb_msr_read); |
250 | 249 |
|
| 250 | +#else |
/*
 * No-op stubs used when CONFIG_AMD_MEM_ENCRYPT is disabled: a read through
 * the stub leaves *value untouched, so callers see whatever it already held.
 */
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
251 | 253 | #endif /* CONFIG_AMD_MEM_ENCRYPT */ |
252 | 254 |
|
| 255 | +#ifdef CONFIG_INTEL_TDX_GUEST |
| 256 | +static void hv_tdx_msr_write(u64 msr, u64 val) |
| 257 | +{ |
| 258 | + struct tdx_hypercall_args args = { |
| 259 | + .r10 = TDX_HYPERCALL_STANDARD, |
| 260 | + .r11 = EXIT_REASON_MSR_WRITE, |
| 261 | + .r12 = msr, |
| 262 | + .r13 = val, |
| 263 | + }; |
| 264 | + |
| 265 | + u64 ret = __tdx_hypercall(&args); |
| 266 | + |
| 267 | + WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret); |
| 268 | +} |
| 269 | + |
| 270 | +static void hv_tdx_msr_read(u64 msr, u64 *val) |
| 271 | +{ |
| 272 | + struct tdx_hypercall_args args = { |
| 273 | + .r10 = TDX_HYPERCALL_STANDARD, |
| 274 | + .r11 = EXIT_REASON_MSR_READ, |
| 275 | + .r12 = msr, |
| 276 | + }; |
| 277 | + |
| 278 | + u64 ret = __tdx_hypercall_ret(&args); |
| 279 | + |
| 280 | + if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret)) |
| 281 | + *val = 0; |
| 282 | + else |
| 283 | + *val = args.r11; |
| 284 | +} |
| 285 | +#else |
/*
 * No-op stubs used when CONFIG_INTEL_TDX_GUEST is disabled: the read stub
 * leaves *value untouched rather than zeroing it.
 */
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
| 288 | +#endif /* CONFIG_INTEL_TDX_GUEST */ |
| 289 | + |
| 290 | +#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) |
| 291 | +void hv_ivm_msr_write(u64 msr, u64 value) |
| 292 | +{ |
| 293 | + if (!ms_hyperv.paravisor_present) |
| 294 | + return; |
| 295 | + |
| 296 | + if (hv_isolation_type_tdx()) |
| 297 | + hv_tdx_msr_write(msr, value); |
| 298 | + else if (hv_isolation_type_snp()) |
| 299 | + hv_ghcb_msr_write(msr, value); |
| 300 | +} |
| 301 | + |
| 302 | +void hv_ivm_msr_read(u64 msr, u64 *value) |
| 303 | +{ |
| 304 | + if (!ms_hyperv.paravisor_present) |
| 305 | + return; |
| 306 | + |
| 307 | + if (hv_isolation_type_tdx()) |
| 308 | + hv_tdx_msr_read(msr, value); |
| 309 | + else if (hv_isolation_type_snp()) |
| 310 | + hv_ghcb_msr_read(msr, value); |
| 311 | +} |
| 312 | +#endif |
| 313 | + |
253 | 314 | #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) |
254 | 315 | /* |
255 | 316 | * hv_mark_gpa_visibility - Set pages visible to host via hvcall. |
|
0 commit comments