VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp @ 77807

Last change on this file since 77807 was 77714, checked in by vboxsync, 6 years ago:

VMM/HMSVM: VMM_INT_DECL for hmEmulateSvmMovTpr.

/* $Id: HMSVMAll.cpp 77714 2019-03-15 07:58:02Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>

#include <VBox/err.h>


#ifndef IN_RC

/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required emulation.
 *
 * This VMMCALL is used as a fallback mechanism when a mov to/from cr8 isn't
 * exactly like how we want it to be (e.g. not followed by a shr 4 as is
 * usually done for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if the access was handled successfully, RIP + RFLAGS updated.
 * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fPatchFound = false;
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;
        fPatchFound = true;

        uint8_t u8Tpr;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                bool fPending;
                int  rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_APIC_TPR
                                                           | HM_CHANGED_GUEST_RIP
                                                           | HM_CHANGED_GUEST_RFLAGS);
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    return fPatchFound ? VINF_SUCCESS : VERR_NOT_FOUND;
}
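
/*
 * Illustration (hypothetical guest code, not from this file): a 32-bit guest
 * typically touches the TPR through the memory-mapped APIC, for instance:
 *
 *     mov     eax, [0FEE00080h]    ; read the APIC TPR
 *     shr     eax, 4               ; the usual follow-up
 *
 * When the access doesn't match a pattern hmR3ReplaceTprInstr() can rewrite
 * directly (e.g. the shr 4 is missing), the instruction is assumed to be
 * replaced by a VMMCALL instead, so the hypercall lands in hmEmulateSvmMovTpr()
 * above with the patch record supplying the original operands
 * (pPatch->uDstOperand / uSrcOperand) and instruction length (cbOp).
 */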

# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa      hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pVmcbNstGstCache->fCacheValid = false;
    }

    /*
     * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
     * in response to a physical CPU interrupt as no changes to the guest-CPU state are
     * expected (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * However, with nested-guests, the state -can- change on trips to ring-3 because we
     * might try to inject a nested-guest physical interrupt and cause an SVM_EXIT_INTR
     * #VMEXIT for the nested-guest from ring-3. Import the complete state here as we will
     * be swapping to the guest VMCB after the #VMEXIT.
     */
    CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
    AssertMsg(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL),
              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", pVCpu->cpum.GstCtx.fExtrn, CPUMCTX_EXTRN_ALL));
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
}
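
/*
 * Hypothetical usage (sketch, not from the original file): a software #VMEXIT
 * path, e.g. in the instruction emulator, might end by letting HM drop its
 * VMCB cache and import the full guest state:
 */
#  if 0 /* illustrative sketch only */
    HMNotifySvmNstGstVmexit(pVCpu, &pVCpu->cpum.GstCtx);
#  endif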
# endif

/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMIsSvmVGifActive(PVM pVM)
{
    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
    return fVGif && fUseVGif;
}


/**
 * Applies the TSC offset of an SVM nested-guest if any and returns the new TSC
 * value for the nested-guest.
 *
 * @returns The TSC value after applying any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for executing
 *          using hardware-assisted SVM.
 *
 * @note    If you make any changes to this function, please check if
 *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
 *
 * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
 */
VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    Assert(pVmcbNstGstCache->fCacheValid);
    return uTicks + pVmcbNstGstCache->u64TSCOffset;
}
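
/*
 * Worked example (hypothetical numbers): if the cached nested-guest VMCB holds
 * u64TSCOffset = UINT64_C(0xffffffffffff0000), i.e. -0x10000, a raw TSC value
 * of 0x48000 yields 0x38000 for the nested-guest. hmR0SvmNstGstUndoTscOffset()
 * is expected to perform the inverse, subtracting the cached offset again.
 */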


/**
 * Interface used by IEM to handle patched TPR accesses.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if the hypercall was handled, RIP + RFLAGS all dealt with.
 * @retval VERR_NOT_FOUND if the hypercall was _not_ handled.
 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE on IPE.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmEmulateSvmMovTpr(pVCpu);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        return rc;
    }
    return VERR_NOT_FOUND;
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170, which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
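
/*
 * Worked example (hypothetical CPUID.01h:EAX value): u32Version = 0x00060fb2
 * decodes as base family 0xf (bits 11:8), extended family 0 (bits 27:20),
 * extended model 0x6 (bits 19:16), base model 0xb (bits 7:4) and stepping 2
 * (bits 3:0). Hence:
 *      u32Family   = 0xf + 0        = 0xf
 *      u32Model    = 0xb | (6 << 4) = 0x6b
 *      u32Stepping = 2
 * Model 0x6b stepping 2 is a fixed part per the table above, so the erratum
 * does not apply and the function returns false.
 */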

#endif /* !IN_RC */

/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr       The MSR being requested.
 * @param   pbOffMsrpm  Where to store the byte offset in the MSR permission
 *                      bitmap for @a idMsr.
 * @param   puMsrpmBit  Where to store the bit offset starting at the byte
 *                      returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) HMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     * Byte offset          MSR range
     * 0x000  - 0x7ff       0x00000000 - 0x00001fff
     * 0x800  - 0xfff       0xc0000000 - 0xc0001fff
     * 0x1000 - 0x17ff      0xc0010000 - 0xc0011fff
     * 0x1800 - 0x1fff      Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        uint32_t const bitoffMsr = idMsr << 1;
        *pbOffMsrpm = bitoffMsr >> 3;
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
        *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
        *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}
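
/*
 * Worked example: for MSR_K6_EFER (0xc0000080) the bit offset is
 * (0xc0000080 - 0xc0000000) << 1 = 0x100, so *pbOffMsrpm = 0x800 + (0x100 >> 3)
 * = 0x820 and *puMsrpmBit = 0x100 & 7 = 0. Bit 0 of that byte is the
 * read-intercept bit; bit 1 is the write-intercept bit. A hypothetical caller
 * could set the read intercept as follows (sketch only; pbMsrBitmap is an
 * assumed uint8_t pointer to the 8 KB MSRPM):
 */
#if 0 /* illustrative sketch, not part of the original file */
    uint16_t offMsrpm;
    uint8_t  uMsrpmBit;
    int rc = HMGetSvmMsrpmOffsetAndBit(MSR_K6_EFER, &offMsrpm, &uMsrpmBit);
    if (RT_SUCCESS(rc))
        pbMsrBitmap[offMsrpm] |= RT_BIT(uMsrpmBit); /* read intercept; uMsrpmBit + 1 is the write intercept */
#endif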


/**
 * Determines whether an IOIO intercept is active for the nested-guest or not.
 *
 * @returns @c true if the access is intercepted, @c false otherwise.
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) HMIsSvmIoInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
     * two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[cbReg & 7]; /* select the mask by the operand size in bytes */
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u         = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1Str   = fStrIo;
            pIoExitInfo->n.u1Rep   = fRep;
            pIoExitInfo->n.u3Seg   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("HMIsSvmIoInterceptActive: We expect an IO intercept here!\n"));
    return false;
}
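
/*
 * Worked example (hypothetical access): a 2-byte IN from port 0x64 gives
 * offIopm = 0x64 >> 3 = 0xc and cShift = 0x64 - (0xc << 3) = 4; with cbReg = 2,
 * fSizeMask = 3, so fIopmMask = (1 << 4) | (3 << 4) = 0x30, i.e. the permission
 * bits for ports 0x64 and 0x65. The access is intercepted if either bit is set
 * in the 16-bit word read at byte offset 0xc of the IO bitmap.
 */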


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent      Pointer to the SVM event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEventType: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}
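
/*
 * Hypothetical usage sketch (not part of the original file): forwarding a
 * pending event from a VMCB's exit-interrupt-info field to TRPM. Assumes a
 * valid pVmcb pointer and ignores error-code injection details.
 */
#if 0 /* illustrative sketch only */
    SVMEVENT Event;
    Event.u = pVmcb->ctrl.ExitIntInfo.u;
    if (Event.n.u1Valid)
    {
        TRPMEVENT const enmType = HMSvmEventToTrpmEventType(&Event);
        if (enmType != TRPM_32BIT_HACK)
            TRPMAssertTrap(pVCpu, Event.n.u8Vector, enmType);
    }
#endif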


/**
 * Returns whether HM has cached the nested-guest VMCB.
 *
 * If the VMCB is cached by HM, it means HM may have potentially modified the
 * VMCB for execution using hardware-assisted SVM.
 *
 * @returns true if HM has cached the nested-guest VMCB, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu)
{
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fCacheValid;
}


/**
 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
 * active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fIntercept  The SVM control/instruction intercept, see
 *                      SVM_CTRL_INTERCEPT_*.
 */
VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
}
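
/*
 * Hypothetical usage sketch: an instruction emulator deciding whether RDTSC
 * executed by the nested-guest must cause a #VMEXIT. SVM_CTRL_INTERCEPT_RDTSC
 * is a real intercept bit; the surrounding logic is assumed.
 */
#if 0 /* illustrative sketch only */
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
    { /* ... signal an SVM_EXIT_RDTSC #VMEXIT instead of executing RDTSC ... */ }
#endif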


/**
 * Checks if the nested-guest VMCB has the specified CR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified CR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified exception intercept active.
 *
 * @returns @c true if the intercept is active, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uVector The exception / interrupt vector.
 */
VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector)
{
    Assert(uVector < 32);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
}


/**
 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
 *
 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fVIntrMasking;
}


/**
 * Checks if the nested-guest VMCB has nested-paging enabled.
 *
 * @returns @c true if nested-paging is enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fNestedPaging;
}


/**
 * Returns the nested-guest VMCB pause-filter count.
 *
 * @returns The pause-filter count.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->u16PauseFilterCount;
}
