VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMTests.cpp@ 62869

Last change on this file since 62869 was 62647, checked in by vboxsync, 8 years ago

VMMR3: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 36.9 KB
Line 
1/* $Id: VMMTests.cpp 62647 2016-07-28 22:02:27Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core, Tests.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
24#define LOG_GROUP LOG_GROUP_VMM
25#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/dbg.h>
30#include <VBox/vmm/hm.h>
31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/selm.h>
34#include "VMMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/err.h>
37#include <VBox/param.h>
38
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/time.h>
42#include <iprt/stream.h>
43#include <iprt/string.h>
44#include <iprt/x86.h>
45
46static void vmmR3TestClearStack(PVMCPU pVCpu)
47{
48 /* We leave the first 64 bytes of the stack alone because of strict
49 ring-0 long jump code uses it. */
50 memset(pVCpu->vmm.s.pbEMTStackR3 + 64, 0xaa, VMM_STACK_SIZE - 64);
51}
52
53
54#ifdef VBOX_WITH_RAW_MODE
55
/**
 * Probes a range of MSRs via the raw-mode context helper VMMRCTestReadMsrs.
 *
 * Readable MSRs are printed to stdout and, when @a pReportStrm is given,
 * also appended there as MVO(...) initializer lines.
 *
 * @returns VBox status code (first VMMR3CallRC failure, else last status).
 * @param   pVM             The cross context VM structure.
 * @param   uMsr            The first MSR index to probe.
 * @param   cMsrs           The number of MSRs to probe.
 * @param   pReportStrm     Optional report output stream.
 * @param   pcMsrsFound     In/out: incremented by the number of readable MSRs.
 */
static int vmmR3ReportMsrRange(PVM pVM, uint32_t uMsr, uint64_t cMsrs, PRTSTREAM pReportStrm, uint32_t *pcMsrsFound)
{
    /*
     * Preps.
     */
    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCTestReadMsrs", &RCPtrEP);
    /* NOTE(review): the assertion text says VMMRCEntry but the symbol resolved
       here is VMMRCTestReadMsrs. */
    AssertMsgRCReturn(rc, ("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc), rc);

    /* One result buffer on the hyper heap, shared with the RC side via RCPtrResults. */
    uint32_t const cMsrsPerCall = 16384;
    uint32_t cbResults = cMsrsPerCall * sizeof(VMMTESTMSRENTRY);
    PVMMTESTMSRENTRY paResults;
    rc = MMHyperAlloc(pVM, cbResults, 0, MM_TAG_VMM, (void **)&paResults);
    AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", cbResults, rc), rc);
    /*
     * The loop.
     */
    RTRCPTR  RCPtrResults = MMHyperR3ToRC(pVM, paResults);
    uint32_t cMsrsFound   = 0;
    uint32_t uLastMsr     = uMsr;   /* index of the last readable MSR; throttles progress output */
    uint64_t uNsTsStart   = RTTimeNanoTS();

    for (;;)
    {
        /* Progress line: only on 4MB index boundaries and only when we are
           more than 64K indexes past the last hit. */
        if (   pReportStrm
            && uMsr - uLastMsr > _64K
            && (uMsr & (_4M - 1)) == 0)
        {
            if (uMsr - uLastMsr < 16U*_1M)
                RTStrmFlush(pReportStrm);
            RTPrintf("... %#010x [%u ns/msr] ...\n", uMsr, (RTTimeNanoTS() - uNsTsStart) / uMsr);
        }

        /*RT_BZERO(paResults, cbResults);*/
        uint32_t const cBatch = RT_MIN(cMsrsPerCall, cMsrs);
        rc = VMMR3CallRC(pVM, RCPtrEP, 4, pVM->pVMRC, uMsr, cBatch, RCPtrResults);
        if (RT_FAILURE(rc))
        {
            RTPrintf("VMM: VMMR3CallRC failed rc=%Rrc, uMsr=%#x\n", rc, uMsr);
            break;
        }

        /* Entries with uMsr == UINT64_MAX mark MSRs that could not be read. */
        for (uint32_t i = 0; i < cBatch; i++)
            if (paResults[i].uMsr != UINT64_MAX)
            {
                if (paResults[i].uValue == 0)
                {
                    if (pReportStrm)
                        RTStrmPrintf(pReportStrm,
                                     " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
                    RTPrintf("%#010llx = 0\n", paResults[i].uMsr);
                }
                else
                {
                    if (pReportStrm)
                        RTStrmPrintf(pReportStrm,
                                     " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
                    RTPrintf("%#010llx = %#010x`%08x\n", paResults[i].uMsr,
                             (uint32_t)(paResults[i].uValue >> 32), (uint32_t)paResults[i].uValue);
                }
                cMsrsFound++;
                uLastMsr = paResults[i].uMsr;
            }

        /* Advance. */
        if (cMsrs <= cMsrsPerCall)
            break;
        cMsrs -= cMsrsPerCall;
        uMsr  += cMsrsPerCall;
    }

    *pcMsrsFound += cMsrsFound;
    MMHyperFree(pVM, paResults);
    return rc;
}
131
132
133/**
134 * Produces a quick report of MSRs.
135 *
136 * @returns VBox status code.
137 * @param pVM The cross context VM structure.
138 * @param pReportStrm Pointer to the report output stream. Optional.
139 * @param fWithCpuId Whether CPUID should be included.
140 */
141static int vmmR3DoMsrQuickReport(PVM pVM, PRTSTREAM pReportStrm, bool fWithCpuId)
142{
143 uint64_t uTsStart = RTTimeNanoTS();
144 RTPrintf("=== MSR Quick Report Start ===\n");
145 RTStrmFlush(g_pStdOut);
146 if (fWithCpuId)
147 {
148 DBGFR3InfoStdErr(pVM->pUVM, "cpuid", "verbose");
149 RTPrintf("\n");
150 }
151 if (pReportStrm)
152 RTStrmPrintf(pReportStrm, "\n\n{\n");
153
154 static struct { uint32_t uFirst, cMsrs; } const s_aRanges[] =
155 {
156 { 0x00000000, 0x00042000 },
157 { 0x10000000, 0x00001000 },
158 { 0x20000000, 0x00001000 },
159 { 0x40000000, 0x00012000 },
160 { 0x80000000, 0x00012000 },
161// Need 0xc0000000..0xc001106f (at least), but trouble on solaris w/ 10h and 0fh family cpus:
162// { 0xc0000000, 0x00022000 },
163 { 0xc0000000, 0x00010000 },
164 { 0xc0010000, 0x00001040 },
165 { 0xc0011040, 0x00004040 }, /* should cause trouble... */
166 };
167 uint32_t cMsrsFound = 0;
168 int rc = VINF_SUCCESS;
169 for (unsigned i = 0; i < RT_ELEMENTS(s_aRanges) && RT_SUCCESS(rc); i++)
170 {
171//if (i >= 3)
172//{
173//RTStrmFlush(g_pStdOut);
174//RTThreadSleep(40);
175//}
176 rc = vmmR3ReportMsrRange(pVM, s_aRanges[i].uFirst, s_aRanges[i].cMsrs, pReportStrm, &cMsrsFound);
177 }
178
179 if (pReportStrm)
180 RTStrmPrintf(pReportStrm, "}; /* %u (%#x) MSRs; rc=%Rrc */\n", cMsrsFound, cMsrsFound, rc);
181 RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
182 RTPrintf("=== MSR Quick Report End (rc=%Rrc, %'llu ns) ===\n", rc, RTTimeNanoTS() - uTsStart);
183 return rc;
184}
185
186
/**
 * Performs a testcase by calling VMMRCEntry() in raw-mode context.
 *
 * @returns Return value from the test; on plain world-switch success this is
 *          the raw-mode status in iLastGZRc.
 * @param   pVM         The cross context VM structure.
 * @param   enmTestcase The testcase operation to perform.
 * @param   uVariation  The testcase variation id.
 */
static int vmmR3DoGCTest(PVM pVM, VMMRCOPERATION enmTestcase, unsigned uVariation)
{
    PVMCPU pVCpu = &pVM->aCpus[0];

    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
    if (RT_FAILURE(rc))
        return rc;

    Log(("vmmR3DoGCTest: %d %#x\n", enmTestcase, uVariation));
    /* Prepare the hypervisor state and build the call frame for the RC call
       trampoline.  The push order below is part of the trampoline contract. */
    CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
    vmmR3TestClearStack(pVCpu);
    CPUMPushHyper(pVCpu, uVariation);
    CPUMPushHyper(pVCpu, enmTestcase);
    CPUMPushHyper(pVCpu, pVM->pVMRC);
    CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
    CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
    Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
    rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);

#if 1
    /* flush the raw-mode logs. */
# ifdef LOG_ENABLED
    PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
    if (   pLogger
        && pLogger->offScratch > 0)
        RTLogFlushRC(NULL, pLogger);
# endif
# ifdef VBOX_WITH_RC_RELEASE_LOGGING
    PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
    if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
        RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
# endif
#endif

    Log(("vmmR3DoGCTest: rc=%Rrc iLastGZRc=%Rrc\n", rc, pVCpu->vmm.s.iLastGZRc));
    /* VINF_SUCCESS from the fast call only means the switch worked; the
       actual testcase status was stored in iLastGZRc. */
    if (RT_LIKELY(rc == VINF_SUCCESS))
        rc = pVCpu->vmm.s.iLastGZRc;
    return rc;
}
235
236
237/**
238 * Performs a trap test.
239 *
240 * @returns Return value from the trap test.
241 * @param pVM The cross context VM structure.
242 * @param u8Trap The trap number to test.
243 * @param uVariation The testcase variation.
244 * @param rcExpect The expected result.
245 * @param u32Eax The expected eax value.
246 * @param pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
247 * @param pszDesc The test description.
248 */
249static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
250{
251 PVMCPU pVCpu = &pVM->aCpus[0];
252
253 RTPrintf("VMM: testing 0%x / %d - %s\n", u8Trap, uVariation, pszDesc);
254
255 RTRCPTR RCPtrEP;
256 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
257 if (RT_FAILURE(rc))
258 return rc;
259
260 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
261 vmmR3TestClearStack(pVCpu);
262 CPUMPushHyper(pVCpu, uVariation);
263 CPUMPushHyper(pVCpu, u8Trap + VMMRC_DO_TESTCASE_TRAP_FIRST);
264 CPUMPushHyper(pVCpu, pVM->pVMRC);
265 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
266 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
267 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
268 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
269 if (RT_LIKELY(rc == VINF_SUCCESS))
270 rc = pVCpu->vmm.s.iLastGZRc;
271 bool fDump = false;
272 if (rc != rcExpect)
273 {
274 RTPrintf("VMM: FAILURE - rc=%Rrc expected %Rrc\n", rc, rcExpect);
275 if (rc != VERR_NOT_IMPLEMENTED)
276 fDump = true;
277 }
278 else if ( rcExpect != VINF_SUCCESS
279 && u8Trap != 8 /* double fault doesn't dare set TrapNo. */
280 && u8Trap != 3 /* guest only, we're not in guest. */
281 && u8Trap != 1 /* guest only, we're not in guest. */
282 && u8Trap != TRPMGetTrapNo(pVCpu))
283 {
284 RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVCpu), u8Trap);
285 fDump = true;
286 }
287 else if (pszFaultEIP)
288 {
289 RTRCPTR RCPtrFault;
290 int rc2 = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, pszFaultEIP, &RCPtrFault);
291 if (RT_FAILURE(rc2))
292 RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Rrc!\n", pszFaultEIP, rc);
293 else if (RCPtrFault != CPUMGetHyperEIP(pVCpu))
294 {
295 RTPrintf("VMM: FAILURE - EIP=%08RX32 expected %RRv (%s)\n", CPUMGetHyperEIP(pVCpu), RCPtrFault, pszFaultEIP);
296 fDump = true;
297 }
298 }
299 else if (rcExpect != VINF_SUCCESS)
300 {
301 if (CPUMGetHyperSS(pVCpu) == SELMGetHyperDS(pVM))
302 RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVCpu), SELMGetHyperDS(pVM));
303 if (CPUMGetHyperES(pVCpu) == SELMGetHyperDS(pVM))
304 RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVCpu), SELMGetHyperDS(pVM));
305 if (CPUMGetHyperDS(pVCpu) == SELMGetHyperDS(pVM))
306 RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVCpu), SELMGetHyperDS(pVM));
307 if (CPUMGetHyperFS(pVCpu) == SELMGetHyperDS(pVM))
308 RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVCpu), SELMGetHyperDS(pVM));
309 if (CPUMGetHyperGS(pVCpu) == SELMGetHyperDS(pVM))
310 RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVCpu), SELMGetHyperDS(pVM));
311 if (CPUMGetHyperEDI(pVCpu) == 0x01234567)
312 RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVCpu), 0x01234567);
313 if (CPUMGetHyperESI(pVCpu) == 0x42000042)
314 RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVCpu), 0x42000042);
315 if (CPUMGetHyperEBP(pVCpu) == 0xffeeddcc)
316 RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVCpu), 0xffeeddcc);
317 if (CPUMGetHyperEBX(pVCpu) == 0x89abcdef)
318 RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVCpu), 0x89abcdef);
319 if (CPUMGetHyperECX(pVCpu) == 0xffffaaaa)
320 RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVCpu), 0xffffaaaa);
321 if (CPUMGetHyperEDX(pVCpu) == 0x77778888)
322 RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVCpu), 0x77778888);
323 if (CPUMGetHyperEAX(pVCpu) == u32Eax)
324 RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVCpu), u32Eax);
325 }
326 if (fDump)
327 VMMR3FatalDump(pVM, pVCpu, rc);
328 return rc;
329}
330
331#endif /* VBOX_WITH_RAW_MODE */
332
333
/**
 * Executes the raw-mode switcher testcases: recoverable traps, hardware
 * breakpoints, single stepping in the hypervisor, interrupt masking and
 * forwarding, and finally profiles the world switcher.
 *
 * Does nothing (returns VINF_SUCCESS) unless VBOX_WITH_RAW_MODE is defined.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) VMMDoTest(PVM pVM)
{
    int rc = VINF_SUCCESS;

#ifdef VBOX_WITH_RAW_MODE
    PVMCPU pVCpu = &pVM->aCpus[0];
    PUVM pUVM = pVM->pUVM;

# ifdef NO_SUPCALLR0VMM
    RTPrintf("NO_SUPCALLR0VMM\n");
    return rc;
# endif

    /*
     * Setup stack for calling VMMRCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMRCEntry=%RRv\n", RCPtrEP);

        /*
         * Test various crashes which we must be able to recover from.
         */
        vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
        vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");

# if 0//defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
        vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
        SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        bool f;
        rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
# if !defined(DEBUG_bird)
        if (RT_SUCCESS(rc) && f)
# endif
        {
            /* see triple fault warnings in SELM and VMMRC.cpp. */
            vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
            SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        }
# endif

        vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
        ///@todo find a better \#GP case, on intel ltr will \#PF (busy update?) and not \#GP.
        //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");

        vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
        vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
        vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler");
        /* This test is no longer relevant as fs and gs are loaded with NULL
           selectors and we will always return to HC if a #GP occurs while
           returning to guest code.
        vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler and bad fs");
        */

        /*
         * Set a debug register and perform a context switch.
         */
        rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /* a harmless breakpoint */
        RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
        DBGFADDRESS Addr;
        DBGFR3AddrFromFlat(pUVM, &Addr, 0x10000);
        RTUINT iBp0;
        rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /* a bad one at VMMRCEntry */
        RTPrintf("VMM: testing hardware bp at VMMRCEntry (hit)\n");
        DBGFR3AddrFromFlat(pUVM, &Addr, RCPtrEP);
        RTUINT iBp1;
        rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMRCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /* resume the breakpoint */
        RTPrintf("VMM: resuming hyper after breakpoint\n");
        /* Set RF so the pending breakpoint doesn't refire immediately. */
        CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
        rc = VMMR3ResumeHyper(pVM, pVCpu);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /* engage the breakpoint again and try single stepping. */
        RTPrintf("VMM: testing hardware bp at VMMRCEntry + stepping\n");
        rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMRCEntry test failed with rc=%Rrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc);
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /* Single step 8 instructions, checking that EIP advances each time. */
        RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
        RTPrintf("%RGr=>", OldPc);
        unsigned i;
        for (i = 0; i < 8; i++)
        {
            /* TF requests the single step; RF skips the still-armed breakpoint. */
            CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
            rc = VMMR3ResumeHyper(pVM, pVCpu);
            if (rc != VINF_EM_DBG_HYPER_STEPPED)
            {
                RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
                return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
            }
            RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
            RTPrintf("%RGr=>", Pc);
            if (Pc == OldPc)
            {
                RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
                return VERR_GENERAL_FAILURE;
            }
            OldPc = Pc;
        }
        RTPrintf("ok\n");

        /* done, clear it */
        if (   RT_FAILURE(DBGFR3BpClear(pUVM, iBp0))
            || RT_FAILURE(DBGFR3BpClear(pUVM, iBp1)))
        {
            RTPrintf("VMM: Failed to clear breakpoints!\n");
            return VERR_GENERAL_FAILURE;
        }
        rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
            return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
        }

        /*
         * Interrupt masking.  Failure may indicate NMI watchdog activity.
         */
        RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        for (i = 0; i < 10000; i++)
        {
            uint64_t StartTick = ASMReadTSC();
            rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_INTERRUPT_MASKING, 0);
            if (rc != VINF_SUCCESS)
            {
                RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
                return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
            }
            uint64_t Ticks = ASMReadTSC() - StartTick;
            /* Warns when the iteration took fewer TSC ticks than CpuHz/10000. */
            if (Ticks < (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000))
                RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000);
        }

        /*
         * Interrupt forwarding.
         */
        CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
        CPUMPushHyper(pVCpu, 0);
        CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_HYPER_INTERRUPT);
        CPUMPushHyper(pVCpu, pVM->pVMRC);
        CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
        CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
        Log(("trampoline=%x\n", pVM->vmm.s.pfnCallTrampolineRC));

        /*
         * Switch and do da thing.  Loop for as long as the RC side keeps
         * returning VINF_EM_RAW_INTERRUPT_HYPER.
         */
        RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        i = 0;
        uint64_t tsBegin = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        do
        {
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            i++;
            if (!(i % 32))
                Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
                     i, CPUMGetHyperESI(pVCpu), CPUMGetHyperEDI(pVCpu), CPUMGetHyperEBX(pVCpu)));
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd = RTTimeNanoTS();

        uint64_t Elapsed = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
        Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));

        /*
         * These forced actions are not necessary for the test and trigger breakpoints too.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

        /*
         * Profile switching: run the NOP testcase a million times, tracking
         * the minimum per-iteration TSC tick count.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = UINT64_MAX;
        tsBegin = RTTimeNanoTS();
        TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        TickEnd = ASMReadTSC();
        tsEnd = RTTimeNanoTS();

        Elapsed = tsEnd - tsBegin;
        PerIteration = Elapsed / (uint64_t)i;
        cTicksElapsed = TickEnd - TickStart;
        cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;

#if 0 /* drop this for now as it causes trouble on AMDs (Opteron 2384 and possibly others). */
        /*
         * A quick MSR report.
         */
        vmmR3DoMsrQuickReport(pVM, NULL, true);
#endif
    }
    else
        AssertMsgFailed(("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc));
#endif
    return rc;
}
615
/**
 * Copies the shadow selector info for @a reg into the hidden selector
 * register fields of the hypervisor CPU context @a pHyperCtx, but only when
 * the selector is non-null.  Expects @c pVM in the enclosing scope; declares
 * its own @c rc2.
 */
#define SYNC_SEL(pHyperCtx, reg) \
    if (pHyperCtx->reg.Sel) \
    { \
        DBGFSELINFO selInfo; \
        int rc2 = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->reg.Sel, &selInfo); \
        AssertRC(rc2); \
        \
        pHyperCtx->reg.u64Base              = selInfo.GCPtrBase; \
        pHyperCtx->reg.u32Limit             = selInfo.cbLimit; \
        pHyperCtx->reg.Attr.n.u1Present     = selInfo.u.Raw.Gen.u1Present; \
        pHyperCtx->reg.Attr.n.u1DefBig      = selInfo.u.Raw.Gen.u1DefBig; \
        pHyperCtx->reg.Attr.n.u1Granularity = selInfo.u.Raw.Gen.u1Granularity; \
        pHyperCtx->reg.Attr.n.u4Type        = selInfo.u.Raw.Gen.u4Type; \
        pHyperCtx->reg.Attr.n.u2Dpl         = selInfo.u.Raw.Gen.u2Dpl; \
        pHyperCtx->reg.Attr.n.u1DescType    = selInfo.u.Raw.Gen.u1DescType; \
        pHyperCtx->reg.Attr.n.u1Long        = selInfo.u.Raw.Gen.u1Long; \
    }
633
/**
 * Profiles the HM (hardware accelerated) world switcher by running the
 * HM no-op testcase a million times.
 *
 * @returns VBox status code; VERR_ACCESS_DENIED when HM is not enabled.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) VMMDoHmTest(PVM pVM)
{
    uint32_t i;
    int rc;
    PCPUMCTX pHyperCtx, pGuestCtx;
    RTGCPHYS CR3Phys = 0x0; /* fake address */
    PVMCPU pVCpu = &pVM->aCpus[0];

    if (!HMIsEnabled(pVM))
    {
        RTPrintf("VMM: Hardware accelerated test not available!\n");
        return VERR_ACCESS_DENIED;
    }

#ifdef VBOX_WITH_RAW_MODE
    /*
     * These forced actions are not necessary for the test and trigger breakpoints too.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
#endif

    /* Enable mapping of the hypervisor into the shadow page table. */
    uint32_t cb;
    rc = PGMR3MappingsSize(pVM, &cb);
    AssertRCReturn(rc, rc);

    /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
    rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
    AssertRCReturn(rc, rc);

    pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

    /* Minimal protected-mode, paging-enabled control register state. */
    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT;
    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);

    /* Clear pending actions that would disturb the timing loop. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    VM_FF_CLEAR(pVM, VM_FF_REQUEST);

    /*
     * Setup stack for calling VMMRCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMRCEntry=%RRv\n", RCPtrEP);

        pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

        /* Fill in hidden selector registers for the hypervisor state. */
        SYNC_SEL(pHyperCtx, cs);
        SYNC_SEL(pHyperCtx, ds);
        SYNC_SEL(pHyperCtx, es);
        SYNC_SEL(pHyperCtx, fs);
        SYNC_SEL(pHyperCtx, gs);
        SYNC_SEL(pHyperCtx, ss);
        SYNC_SEL(pHyperCtx, tr);

        /*
         * Profile switching: track the minimum per-iteration TSC tick count.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = UINT64_MAX;
        uint64_t tsBegin = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_HM_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */

            pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
            pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* Copy the hypervisor context to make sure we have a valid guest context. */
            *pGuestCtx = *pHyperCtx;
            pGuestCtx->cr3 = CR3Phys;

            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd = RTTimeNanoTS();

        uint64_t Elapsed = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc));

    return rc;
}
758
759
760#ifdef VBOX_WITH_RAW_MODE
761
/**
 * DBGFINFOHLP pfnPrintfV worker used by VMMDoBruteForceMsrs to dump the
 * CPUID info of the host CPU as a prefix to the MSR report.
 *
 * Writes the formatted text to the PRTSTREAM stored immediately in front of
 * the DBGFINFOHLP (see the MyHlp struct in VMMDoBruteForceMsrs).
 */
static DECLCALLBACK(void) vmmDoPrintfVToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list va)
{
    /* The caller placed the stream pointer directly before the DBGFINFOHLP
       member, hence the [-1] indexing trick. */
    PRTSTREAM pOutStrm = ((PRTSTREAM *)pHlp)[-1];
    RTStrmPrintfV(pOutStrm, pszFormat, va);
}
771
/**
 * DBGFINFOHLP pfnPrintf worker used by VMMDoBruteForceMsrs to dump the
 * CPUID info of the host CPU as a prefix to the MSR report.
 *
 * Simply forwards the variadic arguments to vmmDoPrintfVToStream.
 */
static DECLCALLBACK(void) vmmDoPrintfToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmmDoPrintfVToStream(pHlp, pszFormat, va);
    va_end(va);
}
783
784#endif
785
786
/**
 * Uses raw-mode to query all possible MSRs on the real hardware.
 *
 * This generates a msr-report.txt file (appending, no overwriting) as well as
 * writing the values and process to stdout.
 *
 * @returns VBox status code.  VERR_NOT_SUPPORTED without VBOX_WITH_RAW_MODE.
 * @param   pVM The cross context VM structure.
 */
VMMR3DECL(int) VMMDoBruteForceMsrs(PVM pVM)
{
#ifdef VBOX_WITH_RAW_MODE
    PRTSTREAM pOutStrm;
    int rc = RTStrmOpen("msr-report.txt", "a", &pOutStrm);
    if (RT_SUCCESS(rc))
    {
        /* Header: dump the host CPUID info into the report.  The stream
           pointer must sit directly in front of the DBGFINFOHLP so that
           vmmDoPrintfVToStream can recover it via ((PRTSTREAM *)pHlp)[-1]. */
        struct
        {
            PRTSTREAM pOutStrm; /* must directly precede Hlp */
            DBGFINFOHLP Hlp;
        } MyHlp = { pOutStrm, { vmmDoPrintfToStream, vmmDoPrintfVToStream } };
        DBGFR3Info(pVM->pUVM, "cpuid", "verbose", &MyHlp.Hlp);
        RTStrmPrintf(pOutStrm, "\n");

        /* Probe the entire 32-bit MSR index space. */
        uint32_t cMsrsFound = 0;
        vmmR3ReportMsrRange(pVM, 0, _4G, pOutStrm, &cMsrsFound);

        RTStrmPrintf(pOutStrm, "Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
        RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);

        RTStrmClose(pOutStrm);
    }
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
825
826
827/**
828 * Uses raw-mode to query all known MSRS on the real hardware.
829 *
830 * This generates a known-msr-report.txt file (appending, no overwriting) as
831 * well as writing the values and process to stdout.
832 *
833 * @returns VBox status code.
834 * @param pVM The cross context VM structure.
835 */
836VMMR3DECL(int) VMMDoKnownMsrs(PVM pVM)
837{
838#ifdef VBOX_WITH_RAW_MODE
839 PRTSTREAM pOutStrm;
840 int rc = RTStrmOpen("known-msr-report.txt", "a", &pOutStrm);
841 if (RT_SUCCESS(rc))
842 {
843 vmmR3DoMsrQuickReport(pVM, pOutStrm, false);
844 RTStrmClose(pOutStrm);
845 }
846 return rc;
847#else
848 return VERR_NOT_SUPPORTED;
849#endif
850}
851
852
/**
 * MSR experimentation.
 *
 * Writes variations of MSR 0x00000277 via the raw-mode helper
 * VMMRCTestTestWriteMsr and reports which bits stick.  The experiment
 * performed is selected with the compile-time #if chain below.
 *
 * @returns VBox status code.  VERR_NOT_SUPPORTED without VBOX_WITH_RAW_MODE.
 * @param   pVM The cross context VM structure.
 */
VMMR3DECL(int) VMMDoMsrExperiments(PVM pVM)
{
#ifdef VBOX_WITH_RAW_MODE
    /*
     * Preps.
     */
    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCTestTestWriteMsr", &RCPtrEP);
    /* NOTE(review): the assertion text says VMMRCEntry but the symbol resolved
       here is VMMRCTestTestWriteMsr. */
    AssertMsgRCReturn(rc, ("Failed to resolved VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc), rc);

    /* Two slots on the hyper heap: [0] = value read before the write,
       [1] = value read after it. */
    uint64_t *pauValues;
    rc = MMHyperAlloc(pVM, 2 * sizeof(uint64_t), 0, MM_TAG_VMM, (void **)&pauValues);
    AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", 2 * sizeof(uint64_t), rc), rc);
    RTRCPTR RCPtrValues = MMHyperR3ToRC(pVM, pauValues);

    /*
     * Do the experiments.
     */
    uint32_t uMsr = 0x00000277;
    uint64_t uValue = UINT64_C(0x0007010600070106);
#if 0
    /* Experiment A: clear bits 17..13, set bit 13, write once. */
    uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
    uValue |= RT_BIT_64(13);
    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
             uMsr, pauValues[0], uValue, pauValues[1], rc);
#elif 1
    /* Experiment B (active): toggle bits 63..58 one at a time, both clearing
       and setting each, counting how many writes changed the readback. */
    const uint64_t uOrgValue = uValue;
    uint32_t cChanges = 0;
    for (int iBit = 63; iBit >= 58; iBit--)
    {
        uValue = uOrgValue & ~RT_BIT_64(iBit);
        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
                 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);

        uValue = uOrgValue | RT_BIT_64(iBit);
        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset bit=%u -> %s\n",
                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
                 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
    }
    RTPrintf("%u change(s)\n", cChanges);
#else
    /* Experiment C: try each single bit 0..63, collect the writable mask,
       then probe 0, all-ones and the collected mask. */
    uint64_t fWriteable = 0;
    for (uint32_t i = 0; i <= 63; i++)
    {
        uValue = RT_BIT_64(i);
# if 0
        if (uValue & (0x7))
            continue;
# endif
        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
                 uMsr, pauValues[0], uValue, pauValues[1], rc);
        if (RT_SUCCESS(rc))
            fWriteable |= RT_BIT_64(i);
    }

    uValue = 0;
    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
             uMsr, pauValues[0], uValue, pauValues[1], rc);

    uValue = UINT64_MAX;
    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
             uMsr, pauValues[0], uValue, pauValues[1], rc);

    uValue = fWriteable;
    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
             uMsr, pauValues[0], uValue, pauValues[1], rc);

#endif

    /*
     * Cleanups.
     */
    MMHyperFree(pVM, pauValues);
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
954
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette