VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@65904

Last change on this file since 65904 was 65904, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: Started with tweaking the AMD bits and laying the groundwork.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.4 KB
/* $Id: HMAll.cpp 65904 2017-03-01 10:21:38Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->fHMEnabledFixed);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.vmx.fSupported)
        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);

    Assert(pVM->hm.s.svm.fSupported);
    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);

#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}
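
/* Illustrative sketch (not part of the upstream file): outside ring-0 the call above does
   not touch the hardware at all, it only queues a full TLB flush via the force-action flag,
   which the next world switch then services.  The helper name is made up. */
#if 0
static void hmExampleInvalidateFromRing3(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    int rc = HMInvalidatePage(pVCpu, GCVirt);           /* In ring-3 this only queues a flush. */
    AssertRC(rc);
    Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)); /* The flush is now pending. */
}
#endif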


/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards.  Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack...  So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}
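
/* Illustrative sketch (not part of the upstream file): a hypothetical caller that has just
   remapped a guest page and wants every VCPU to drop stale translations.  Active VCPUs are
   poked so they flush before resuming guest code; the others simply pick up the pending
   VMCPU_FF_TLB_FLUSH on their next world switch.  Function and parameter names are made up. */
#if 0
static int hmExampleOnGuestPageRemapped(PVM pVM, RTGCPTR GCVirtRemapped)
{
    return HMInvalidatePageOnAllVCpus(pVM, GCVirtRemapped);
}
#endif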


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

#endif /* !IN_RC */

/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unhampered guest execution are enabled.
 *
 * The almost complete guest execution in hardware is only applicable to VT-x;
 * AMD-V provides it whenever it is supported.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is long-mode capable.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are available. It is assumed that, when available,
 * they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
        {
            return true;
        }
    }
    return false;
}


/**
 * Returns the shadow paging mode for nested paging/EPT.
 *
 * @returns The shadow paging mode (PGMMODE_NESTED for AMD-V, PGMMODE_EPT for VT-x).
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
{
    Assert(HMIsNestedPagingActive(pVM));
    if (pVM->hm.s.svm.fSupported)
        return PGMMODE_NESTED;

    Assert(pVM->hm.s.vmx.fSupported);
    return PGMMODE_EPT;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
            {
                /** @todo r=ramshankar: Intel does not support flushing by guest physical
                 *        address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            }
            else
            {
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage. */
    Assert(pVM->hm.s.svm.fSupported);
#else
    NOREF(GCPhys);
#endif

    HMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}


/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @returns true if the erratum applies, false otherwise.
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
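
/* Illustrative worked example (not from the upstream file): decoding a hypothetical
   CPUID.01h EAX value of 0x00060fb1 with the same bit arithmetic as above, showing how
   extended family/model fields combine for family 0Fh parts. */
#if 0
static void hmExampleDecodeCpuidVersion(void)
{
    uint32_t const u32Version    = UINT32_C(0x00060fb1);                /* made-up sample value */
    uint32_t const u32BaseFamily = (u32Version >> 8) & 0xf;             /* 0xf  */
    uint32_t const u32Family     = u32BaseFamily
                                 + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0); /* 0xf  */
    uint32_t const u32Model      = ((u32Version >> 4) & 0xf)
                                 | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4); /* 0x6b */
    uint32_t const u32Stepping   = u32Version & 0xf;                    /* 0x1  */

    /* Model 0x6b stepping 1 is on the fixed list above, so the erratum does not apply
       and no forced TLB flush per world switch would be needed. */
    Assert(u32Family == 0xf && u32Model == 0x6b && u32Stepping == 1);
}
#endif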


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction.  This can be used for debugging.  See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}
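
/* Illustrative sketch (not part of the upstream file): how a debugger-style caller on the
   EMT might wrap a single-stepped execution.  hmExampleRunOneInstruction() is a made-up
   placeholder; restoring the previous flag state afterwards is the point of the example. */
#if 0
static void hmExampleSingleStep(PVM pVM, PVMCPU pVCpu)
{
    bool const fOld = HMSetSingleInstruction(pVM, pVCpu, true /* fEnable */);
    hmExampleRunOneInstruction(pVM, pVCpu);   /* hypothetical: run the HM loop for one instruction */
    HMSetSingleInstruction(pVM, pVCpu, fOld); /* restore whatever the previous state was */
}
#endif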


/**
 * Notifies HM that paravirtualized hypercalls are now enabled.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fHypercallsEnabled = true;
}


/**
 * Notifies HM that paravirtualized hypercalls are now disabled.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fHypercallsEnabled = false;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
}


/**
 * SVM nested-guest \#VMEXIT handler.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uExitCode   The exit reason.
 */
VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
{
    RT_NOREF2(pVCpu, uExitCode);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
    RT_NOREF2(pVCpu, uBasicExitReason);
}
