VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp@ 77807

Last change on this file since 77807 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.4 KB
Line 
1/* $Id: DBGFMem.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Memory Methods.
4 */
5
6/*
7 * Copyright (C) 2007-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/hm.h>
27#include "DBGFInternal.h"
28#include <VBox/vmm/vm.h>
29#include <VBox/vmm/uvm.h>
30#include <VBox/err.h>
31#include <VBox/log.h>
32#include <VBox/vmm/mm.h>
33
34
35
/**
 * EMT worker that scans guest memory for an exact byte string.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to search in.
 * @param   pAddress    Where to start searching (mixed physical/virtual address).
 * @param   pcbRange    The number of bytes to scan.  Passed as a pointer because
 *                      it may be 64-bit (wider than a 32-bit host's variadic
 *                      request-call argument slot).
 * @param   puAlign     The alignment restriction imposed on the search result.
 *                      Passed as a pointer for the same reason as pcbRange.
 * @param   pabNeedle   What to search for - exact search.
 * @param   cbNeedle    Size of the search byte string.
 * @param   pHitAddress Where to put the address of the first hit.
 */
static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
                                       RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    Assert(idCpu == VMMGetCpuId(pVM)); /* must be running on the target EMT */

    /*
     * Validate the input we use, PGM does the rest.
     */
    RTGCUINTPTR cbRange = *pcbRange;
    if (!DBGFR3AddrIsValid(pUVM, pAddress))
        return VERR_INVALID_POINTER;
    if (!VALID_PTR(pHitAddress))
        return VERR_INVALID_POINTER;
    if (DBGFADDRESS_IS_HMA(pAddress))
        return VERR_INVALID_POINTER;    /* scanning the hypervisor memory area is not supported */

    /*
     * Select DBGF worker by addressing mode.
     */
    int rc;
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    PGMMODE enmMode = PGMGetGuestMode(pVCpu);
    if (    enmMode == PGMMODE_REAL
        ||  enmMode == PGMMODE_PROTECTED
        ||  DBGFADDRESS_IS_PHYS(pAddress)
        )
    {
        /* Round-trip the alignment thru RTGCPHYS to detect truncation when
           RTGCPHYS is narrower than RTGCUINTPTR. */
        RTGCPHYS GCPhysAlign = *puAlign;
        if (GCPhysAlign != *puAlign)
            return VERR_OUT_OF_RANGE;
        RTGCPHYS PhysHit;
        rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
    }
    else
    {
#if GC_ARCH_BITS > 32
        /* Only long mode guests can have virtual addresses at or above 4 GiB. */
        if (    (   pAddress->FlatPtr >= _4G
                 || pAddress->FlatPtr + cbRange > _4G)
            &&  enmMode != PGMMODE_AMD64
            &&  enmMode != PGMMODE_AMD64_NX)
            return VERR_DBGF_MEM_NOT_FOUND;
#endif
        RTGCUINTPTR GCPtrHit;
        rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
    }

    return rc;
}
104
105
106/**
107 * Scan guest memory for an exact byte string.
108 *
109 * @returns VBox status codes:
110 * @retval VINF_SUCCESS and *pGCPtrHit on success.
111 * @retval VERR_DBGF_MEM_NOT_FOUND if not found.
112 * @retval VERR_INVALID_POINTER if any of the pointer arguments are invalid.
113 * @retval VERR_INVALID_ARGUMENT if any other arguments are invalid.
114 *
115 * @param pUVM The user mode VM handle.
116 * @param idCpu The ID of the CPU context to search in.
117 * @param pAddress Where to store the mixed address.
118 * @param cbRange The number of bytes to scan.
119 * @param uAlign The alignment restriction imposed on the result.
120 * Usually set to 1.
121 * @param pvNeedle What to search for - exact search.
122 * @param cbNeedle Size of the search byte string.
123 * @param pHitAddress Where to put the address of the first hit.
124 *
125 * @thread Any thread.
126 */
127VMMR3DECL(int) DBGFR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
128 const void *pvNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
129{
130 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
131 AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
132 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
133 pUVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);
134
135}
136
137
138/**
139 * Read guest memory.
140 *
141 * @returns VBox status code.
142 * @param pUVM The user mode VM handle.
143 * @param idCpu The ID of the CPU context to read memory from.
144 * @param pAddress Where to start reading.
145 * @param pvBuf Where to store the data we've read.
146 * @param cbRead The number of bytes to read.
147 */
148static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
149{
150 PVM pVM = pUVM->pVM;
151 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
152 Assert(idCpu == VMMGetCpuId(pVM));
153
154 /*
155 * Validate the input we use, PGM does the rest.
156 */
157 if (!DBGFR3AddrIsValid(pUVM, pAddress))
158 return VERR_INVALID_POINTER;
159 if (!VALID_PTR(pvBuf))
160 return VERR_INVALID_POINTER;
161
162 /*
163 * HMA is special.
164 */
165 int rc;
166 if (DBGFADDRESS_IS_HMA(pAddress))
167 {
168 if (DBGFADDRESS_IS_PHYS(pAddress))
169 rc = VERR_INVALID_POINTER;
170 else
171 rc = MMR3HyperReadGCVirt(pVM, pvBuf, pAddress->FlatPtr, cbRead);
172 }
173 else
174 {
175 /*
176 * Select PGM worker by addressing mode.
177 */
178 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
179 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
180 if ( enmMode == PGMMODE_REAL
181 || enmMode == PGMMODE_PROTECTED
182 || DBGFADDRESS_IS_PHYS(pAddress) )
183 rc = PGMPhysSimpleReadGCPhys(pVM, pvBuf, pAddress->FlatPtr, cbRead);
184 else
185 {
186#if GC_ARCH_BITS > 32
187 if ( ( pAddress->FlatPtr >= _4G
188 || pAddress->FlatPtr + cbRead > _4G)
189 && enmMode != PGMMODE_AMD64
190 && enmMode != PGMMODE_AMD64_NX)
191 return VERR_PAGE_TABLE_NOT_PRESENT;
192#endif
193 rc = PGMPhysSimpleReadGCPtr(pVCpu, pvBuf, pAddress->FlatPtr, cbRead);
194 }
195 }
196 return rc;
197}
198
199
/**
 * Read guest memory.
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the source CPU context (for the address).
 * @param   pAddress    Where to start reading.
 * @param   pvBuf       Where to store the data we've read.
 * @param   cbRead      The number of bytes to read.
 */
VMMR3DECL(int) DBGFR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);

    /* Ring-0 stack addresses are serviced directly by VMM on this thread,
       no EMT round trip needed. */
    if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
    {
        AssertCompile(sizeof(RTHCUINTPTR) <= sizeof(pAddress->FlatPtr));
        VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
        return VMMR3ReadR0Stack(pUVM->pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
    }
    /* Everything else goes thru the dbgfR3MemRead worker on the target EMT. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pUVM, idCpu, pAddress, pvBuf, cbRead);
}
224
225
226/**
227 * Read a zero terminated string from guest memory.
228 *
229 * @returns VBox status code.
230 *
231 * @param pUVM The user mode VM handle.
232 * @param idCpu The ID of the source CPU context (for the address).
233 * @param pAddress Where to start reading.
234 * @param pszBuf Where to store the string.
235 * @param cchBuf The size of the buffer.
236 */
237static DECLCALLBACK(int) dbgfR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
238{
239 /*
240 * Validate the input we use, PGM does the rest.
241 */
242 if (!DBGFR3AddrIsValid(pUVM, pAddress))
243 return VERR_INVALID_POINTER;
244 if (!VALID_PTR(pszBuf))
245 return VERR_INVALID_POINTER;
246
247 /*
248 * Let dbgfR3MemRead do the job.
249 */
250 int rc = dbgfR3MemRead(pUVM, idCpu, pAddress, pszBuf, cchBuf);
251
252 /*
253 * Make sure the result is terminated and that overflow is signaled.
254 * This may look a bit reckless with the rc but, it should be fine.
255 */
256 if (!RTStrEnd(pszBuf, cchBuf))
257 {
258 pszBuf[cchBuf - 1] = '\0';
259 rc = VINF_BUFFER_OVERFLOW;
260 }
261 /*
262 * Handle partial reads (not perfect).
263 */
264 else if (RT_FAILURE(rc))
265 {
266 if (pszBuf[0])
267 rc = VINF_SUCCESS;
268 }
269
270 return rc;
271}
272
273
274/**
275 * Read a zero terminated string from guest memory.
276 *
277 * @returns VBox status code.
278 *
279 * @param pUVM The user mode VM handle.
280 * @param idCpu The ID of the source CPU context (for the address).
281 * @param pAddress Where to start reading.
282 * @param pszBuf Where to store the string.
283 * @param cchBuf The size of the buffer.
284 */
285VMMR3DECL(int) DBGFR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
286{
287 /*
288 * Validate and zero output.
289 */
290 if (!VALID_PTR(pszBuf))
291 return VERR_INVALID_POINTER;
292 if (cchBuf <= 0)
293 return VERR_INVALID_PARAMETER;
294 memset(pszBuf, 0, cchBuf);
295 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
296 AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
297
298 /*
299 * Pass it on to the EMT.
300 */
301 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pUVM, idCpu, pAddress, pszBuf, cchBuf);
302}
303
304
305/**
306 * Writes guest memory.
307 *
308 * @returns VBox status code.
309 *
310 * @param pUVM The user mode VM handle.
311 * @param idCpu The ID of the target CPU context (for the address).
312 * @param pAddress Where to start writing.
313 * @param pvBuf The data to write.
314 * @param cbWrite The number of bytes to write.
315 */
316static DECLCALLBACK(int) dbgfR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
317{
318 /*
319 * Validate the input we use, PGM does the rest.
320 */
321 if (!DBGFR3AddrIsValid(pUVM, pAddress))
322 return VERR_INVALID_POINTER;
323 if (!VALID_PTR(pvBuf))
324 return VERR_INVALID_POINTER;
325 PVM pVM = pUVM->pVM;
326 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
327
328 /*
329 * HMA is always special.
330 */
331 int rc;
332 if (DBGFADDRESS_IS_HMA(pAddress))
333 {
334 /** @todo write to HMA. */
335 rc = VERR_ACCESS_DENIED;
336 }
337 else
338 {
339 /*
340 * Select PGM function by addressing mode.
341 */
342 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
343 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
344 if ( enmMode == PGMMODE_REAL
345 || enmMode == PGMMODE_PROTECTED
346 || DBGFADDRESS_IS_PHYS(pAddress) )
347 rc = PGMPhysSimpleWriteGCPhys(pVM, pAddress->FlatPtr, pvBuf, cbWrite);
348 else
349 {
350#if GC_ARCH_BITS > 32
351 if ( ( pAddress->FlatPtr >= _4G
352 || pAddress->FlatPtr + cbWrite > _4G)
353 && enmMode != PGMMODE_AMD64
354 && enmMode != PGMMODE_AMD64_NX)
355 return VERR_PAGE_TABLE_NOT_PRESENT;
356#endif
357 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pAddress->FlatPtr, pvBuf, cbWrite);
358 }
359 }
360 return rc;
361}
362
363
/**
 * Write guest memory.  (The old header wrongly said "Read".)
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the target CPU context (for the address).
 * @param   pAddress    Where to start writing.
 * @param   pvBuf       The data to write.
 * @param   cbWrite     The number of bytes to write.
 */
VMMR3DECL(int) DBGFR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    /* Dispatch to the dbgfR3MemWrite worker on the target EMT. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pUVM, idCpu, pAddress, pvBuf, cbWrite);
}
381
382
/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU whose context to query.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      DBGFSELQI_FLAGS_XXX, pre-validated by the caller.
 * @param   pSelInfo    Where to store the information (pre-zeroed by caller).
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Make the query.
     */
    int rc;
    if (!(fFlags & DBGFSELQI_FLAGS_DT_SHADOW))
    {
        PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
        VMCPU_ASSERT_EMT(pVCpu);
        rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, pSelInfo);

        /*
         * 64-bit mode HACKS for making data and stack selectors wide open when
         * queried. This is voodoo magic.
         */
        if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
        {
            /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
            if (   RT_SUCCESS(rc)
                && (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                        | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
                                        | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
                    == DBGFSELINFO_FLAGS_LONG_MODE
                && pSelInfo->cbLimit != ~(RTGCPTR)0
                && CPUMIsGuestIn64BitCode(pVCpu) )
            {
                /* Present 64-bit segments as flat (base 0, maximum limit). */
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit = ~(RTGCPTR)0;
            }
            else if (   Sel == 0
                     && CPUMIsGuestIn64BitCode(pVCpu))
            {
                /* Synthesize a flat, present descriptor for the null selector
                   in 64-bit mode, overriding whatever SELM returned. */
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit = ~(RTGCPTR)0;
                pSelInfo->Sel = 0;
                pSelInfo->SelGate = 0;
                pSelInfo->fFlags = DBGFSELINFO_FLAGS_LONG_MODE;
                pSelInfo->u.Raw64.Gen.u1Present = 1;
                pSelInfo->u.Raw64.Gen.u1Long = 1;
                pSelInfo->u.Raw64.Gen.u1DescType = 1;
                rc = VINF_SUCCESS;
            }
        }
    }
    else
    {
        /* Shadow descriptor tables only exist in raw-mode. */
        if (!VM_IS_RAW_MODE_ENABLED(pVM))
            rc = VERR_INVALID_STATE;
        else
            rc = SELMR3GetShadowSelectorInfo(pVM, Sel, pSelInfo);
    }
    return rc;
}
443
444
/**
 * Gets information about a selector.
 *
 * Intended for the debugger mostly and will prefer the guest
 * descriptor tables over the shadow ones.
 *
 * @returns VBox status code, the following are the common ones.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INVALID_SELECTOR if the selector isn't fully inside the
 *          descriptor table.
 * @retval  VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
 *          is not returned if the selector itself isn't present, you have to
 *          check that for yourself (see DBGFSELINFO::fFlags).
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
 *          pagetable or page backing the selector table wasn't present.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU context.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      Flags, see DBGFSELQI_FLAGS_XXX.  (The old doc said
 *                      DBGFQSEL_FLAGS_*, which doesn't match the code.)
 * @param   pSelInfo    Where to store the information. This will always be
 *                      updated.
 *
 * @remarks This is a wrapper around SELMR3GetSelectorInfo and
 *          SELMR3GetShadowSelectorInfo.
 */
VMMR3DECL(int) DBGFR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);
    /* The 64-bit adjustment hack only applies to guest descriptor tables. */
    AssertReturn(   (fFlags & (DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE))
                 != (DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE), VERR_INVALID_PARAMETER);

    /* Clear the return data here on this thread. */
    memset(pSelInfo, 0, sizeof(*pSelInfo));

    /*
     * Dispatch the request to a worker running on the target CPU.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
}
487
488
489/**
490 * Validates a CS selector.
491 *
492 * @returns VBox status code.
493 * @param pSelInfo Pointer to the selector information for the CS selector.
494 * @param SelCPL The selector defining the CPL (SS).
495 */
496VMMDECL(int) DBGFR3SelInfoValidateCS(PCDBGFSELINFO pSelInfo, RTSEL SelCPL)
497{
498 /*
499 * Check if present.
500 */
501 if (pSelInfo->u.Raw.Gen.u1Present)
502 {
503 /*
504 * Type check.
505 */
506 if ( pSelInfo->u.Raw.Gen.u1DescType == 1
507 && (pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CODE))
508 {
509 /*
510 * Check level.
511 */
512 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, pSelInfo->Sel & X86_SEL_RPL);
513 if ( !(pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CONF)
514 ? uLevel <= pSelInfo->u.Raw.Gen.u2Dpl
515 : uLevel >= pSelInfo->u.Raw.Gen.u2Dpl /* hope I got this right now... */
516 )
517 return VINF_SUCCESS;
518 return VERR_INVALID_RPL;
519 }
520 return VERR_NOT_CODE_SELECTOR;
521 }
522 return VERR_SELECTOR_NOT_PRESENT;
523}
524
525
526/**
527 * Converts a PGM paging mode to a set of DBGFPGDMP_XXX flags.
528 *
529 * @returns Flags. UINT32_MAX if the mode is invalid (asserted).
530 * @param enmMode The mode.
531 */
532static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
533{
534 switch (enmMode)
535 {
536 case PGMMODE_32_BIT:
537 return DBGFPGDMP_FLAGS_PSE;
538 case PGMMODE_PAE:
539 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE;
540 case PGMMODE_PAE_NX:
541 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
542 case PGMMODE_AMD64:
543 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME;
544 case PGMMODE_AMD64_NX:
545 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
546 case PGMMODE_NESTED_32BIT:
547 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE;
548 case PGMMODE_NESTED_PAE:
549 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
550 case PGMMODE_NESTED_AMD64:
551 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
552 case PGMMODE_EPT:
553 return DBGFPGDMP_FLAGS_EPT;
554 case PGMMODE_NONE:
555 return 0;
556 default:
557 AssertFailedReturn(UINT32_MAX);
558 }
559}
560
561
/**
 * EMT worker for DBGFR3PagingDumpEx.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Valid.
 * @param   pcr3            The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).  Passed by address because
 *                          it is 64-bit.
 * @param   pu64FirstAddr   The first address.  Passed by address (64-bit).
 * @param   pu64LastAddr    The last address.  Passed by address (64-bit).
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.
 */
static DECLCALLBACK(int) dbgfR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
                                            uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
                                            uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Implement dumping both contexts by means of recursion.
     */
    if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
    {
        /* Dump shadow first, then guest; report the first failure. */
        int rc1 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        int rc2 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        return RT_FAILURE(rc1) ? rc1 : rc2;
    }

    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Get the current CR3/mode if required.
     */
    uint64_t cr3 = *pcr3;
    if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        {
            if (PGMGetShadowMode(pVCpu) == PGMMODE_NONE)
            {
                /* NEM leaves shadow paging to the host hypervisor; nothing to dump. */
                pHlp->pfnPrintf(pHlp, "Shadow paging mode is 'none' (NEM)\n");
                return VINF_SUCCESS;
            }

            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = PGMGetHyperCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
                fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
        }
        else
        {
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = CPUMGetGuestCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                /* The DBGFPGDMP mode flags are defined to equal the hardware
                   CR4/EFER bits, so we can OR the registers straight in. */
                AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE); AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
                fFlags |= CPUMGetGuestCR4(pVCpu) & (X86_CR4_PSE | X86_CR4_PAE);
                AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME); AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
                fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
            }
        }
    }
    fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);

    /*
     * Call PGM to do the real work.
     */
    int rc;
    if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        rc = PGMR3DumpHierarchyShw(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    else
        rc = PGMR3DumpHierarchyGst(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    return rc;
}
640
641
/**
 * Dump paging structures.
 *
 * This API can be used to dump both guest and shadow structures.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.
 * @param   cr3             The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   u64FirstAddr    The address to start dumping at.
 * @param   u64LastAddr     The address to end dumping after.
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.  Must not be NULL.  (The old
 *                          doc claimed NULL defaulted to the debug log, but
 *                          the code rejects NULL with VERR_INVALID_POINTER.)
 */
VMMDECL(int) DBGFR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
                                uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Input validation.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
    /* At least one of guest/shadow must be selected. */
    AssertReturn(fFlags & (DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_GUEST), VERR_INVALID_PARAMETER);
    /* Explicit mode flags are only valid together with CURRENT_MODE. */
    AssertReturn((fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE) || !(fFlags & DBGFPGDMP_FLAGS_MODE_MASK), VERR_INVALID_PARAMETER);
    /* EPT is mutually exclusive with the x86 legacy/long mode flags. */
    AssertReturn(   !(fFlags & DBGFPGDMP_FLAGS_EPT)
                 || !(fFlags & (DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_NXE))
                 , VERR_INVALID_PARAMETER);
    AssertPtrReturn(pHlp, VERR_INVALID_POINTER);
    AssertReturn(cMaxDepth, VERR_INVALID_PARAMETER);

    /*
     * Forward the request to the target CPU.  The 64-bit arguments are passed
     * by address so they survive the variadic request call on 32-bit hosts.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
                                    pUVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp);
}
682
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette