source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@73389

Last change on this file since 73389 was 73389, checked in by vboxsync, 6 years ago

VMM, SUPDrv: Nested VMX: bugref:9180 Implement some of the VMX MSRs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.5 KB
/* $Id: HMAll.cpp 73389 2018-07-28 07:03:03Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
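    /* A single-page invalidation is not performed here; the page address is
       ignored and a full TLB flush is requested instead via the force-action
       flag, which the HM code services before the next VM-entry. */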
    /* Nothing to do if a TLB flush is already pending */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards.  Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack...  So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
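        /* The spin ends either when the target VCPU has left HM guest execution
           (fCheckedTLBFlush is cleared) or when it has done at least one world
           switch since cWorldSwitchExits was sampled above, in which case it will
           pick up the pending flush request on its next VM-entry. */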
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     * The INVVPID instruction takes only a linear address, while INVEPT only
     * flushes by EPT context, not by individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}

/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unrestricted guest execution are enabled.
 *
 * Unrestricted (almost complete) guest execution in hardware is a VT-x
 * feature; on AMD-V plain AMD-V support is sufficient, as the check below shows.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}

/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are active.  It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

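        /* On VT-x, MSR bitmaps are only usable when the "use MSR bitmaps"
           processor-based execution control is allowed to be set to 1, which
           is what the allowed1 check below verifies. */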
        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS))
            return true;
    }
    return false;
}


/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return HMIsVmxSupported(pVM) && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is supported by the host CPU.
 *
 * @returns true if VT-x is supported, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported;
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}

/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix (model / stepping):
     *   Athlon X2:   0x6b  1/2
     *                0x68  1/2
     *   Athlon 64:   0x7f  1
     *                0x6f  2
     *   Sempron:     0x7f  1/2
     *                0x6f  2
     *                0x6c  2
     *                0x7c  2
     *   Turion 64:   0x68  2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
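    /* CPUID leaf 1, EAX layout: stepping in bits 3:0, base model in bits 7:4,
       base family in bits 11:8, extended model in bits 19:16 and extended
       family in bits 27:20.  The extended fields only count when the base
       family is 0xf, which is what the decoding below implements. */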
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

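    /* Family 0Fh parts are affected unless they are one of the fixed
       model/stepping combinations listed in the table above. */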
    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}

/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction.  This can be used for debugging.  See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
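    /* Force the debug loop while single-instruction mode is enabled, while
       still honouring any VM-wide debug-loop setting already in effect. */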
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
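    /* Mark the exception intercepts as changed so they get re-exported to the
       VMCS/VMCB before the next VM-entry. */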
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
    RT_NOREF2(pVCpu, uBasicExitReason);
}


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVmxMsrs    Where to store the VMXMSRS struct (only valid when
 *                      VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early.  Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}

#if 0 /** @todo Update the comment on the VMXMSRS struct in hm_vmx.h if this is removed. */
/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   idMsr       The MSR.
 * @param   puValue     Where to store the MSR value (only updated when VINF_SUCCESS
 *                      is returned).
 *
 * @remarks Caller needs to take care not to call this function too early.  Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:          *puValue = pVmxMsrs->u64FeatCtrl;     break;
        case MSR_IA32_VMX_BASIC:                *puValue = pVmxMsrs->u64Basic;        break;
        case MSR_IA32_VMX_PINBASED_CTLS:        *puValue = pVmxMsrs->PinCtls.u;       break;
        case MSR_IA32_VMX_PROCBASED_CTLS:       *puValue = pVmxMsrs->ProcCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:      *puValue = pVmxMsrs->ProcCtls2.u;     break;
        case MSR_IA32_VMX_EXIT_CTLS:            *puValue = pVmxMsrs->ExitCtls.u;      break;
        case MSR_IA32_VMX_ENTRY_CTLS:           *puValue = pVmxMsrs->EntryCtls.u;     break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:   *puValue = pVmxMsrs->TruePinCtls.u;   break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:  *puValue = pVmxMsrs->TrueProcCtls.u;  break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:      *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:       *puValue = pVmxMsrs->TrueExitCtls.u;  break;
        case MSR_IA32_VMX_MISC:                 *puValue = pVmxMsrs->u64Misc;         break;
        case MSR_IA32_VMX_CR0_FIXED0:           *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
        case MSR_IA32_VMX_CR0_FIXED1:           *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
        case MSR_IA32_VMX_CR4_FIXED0:           *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
        case MSR_IA32_VMX_CR4_FIXED1:           *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
        case MSR_IA32_VMX_VMCS_ENUM:            *puValue = pVmxMsrs->u64VmcsEnum;     break;
        case MSR_IA32_VMX_VMFUNC:               *puValue = pVmxMsrs->u64VmFunc;       break;
        case MSR_IA32_VMX_EPT_VPID_CAP:         *puValue = pVmxMsrs->u64EptVpidCaps;  break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
#endif


#ifndef IN_RC
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmShadowMode   New shadow paging mode.
 * @param   enmGuestMode    New guest paging mode.
 */
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
# ifdef IN_RING3
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;
# endif

    pVCpu->hm.s.enmShadowMode = enmShadowMode;

    /*
     * If the guest left protected mode VMX execution, we'll have to be
     * extra careful if/when the guest switches back to protected mode.
     */
    if (enmGuestMode == PGMMODE_REAL)
        pVCpu->hm.s.vmx.fWasInRealMode = true;

# ifdef IN_RING0
    /*
     * We need to tickle SVM and VT-x state updates.
     *
     * Note! We could probably reduce this depending on what exactly changed.
     */
    if (VM_IS_HM_ENABLED(pVM))
    {
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); /* No recursion! */
        uint64_t fChanged = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 | HM_CHANGED_GUEST_EFER_MSR;
        if (pVM->hm.s.svm.fSupported)
            fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
        else
            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    }
# endif

    Log4(("HMHCPagingModeChanged: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
          PGMGetModeName(enmShadowMode)));
}
#endif /* !IN_RC */