VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp@ 73490

Last change on this file since 73490 was 73490, checked in by vboxsync, 7 years ago

DBGFStack.cpp: register collection fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 61.3 KB
Line 
1/* $Id: DBGFStack.cpp 73490 2018-08-03 14:50:42Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Call Stack Analyser.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/mm.h>
26#include "DBGFInternal.h"
27#include <VBox/vmm/vm.h>
28#include <VBox/vmm/uvm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/alloca.h>
34#include <iprt/mem.h>
35#include <iprt/string.h>
36#include <iprt/formats/pecoff.h>
37
38
39/*********************************************************************************************************************************
40* Structures and Typedefs *
41*********************************************************************************************************************************/
42static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
43
44/**
45 * Unwind context.
46 *
47 * @note Using a constructor and destructor here for simple+safe cleanup.
48 *
49 * @todo Generalize and move to IPRT or some such place.
50 */
/**
 * Unwind context.
 *
 * Bundles the unwinder register state with the VM handles and a one-module
 * cache of PE/AMD64 unwind data.
 *
 * @note Using a constructor and destructor here for simple+safe cleanup.
 *
 * @todo Generalize and move to IPRT or some such place.
 */
typedef struct DBGFUNWINDCTX
{
    /** The user mode VM handle. */
    PUVM        m_pUVM;
    /** The ID of the virtual CPU whose stack is being walked. */
    VMCPUID     m_idCpu;
    /** Resolved address space handle (retained here, released by the destructor). */
    RTDBGAS     m_hAs;
    /** The initial guest register context; NULL when unwinding host ring-0. */
    PCCPUMCTX   m_pInitialCtx;
    /** Set when unwinding the host ring-0 stack (hAs == DBGF_AS_R0). */
    bool        m_fIsHostRing0;
    uint64_t    m_uOsScratch; /**< For passing to DBGFOSREG::pfnStackUnwindAssist. */

    /** The unwinder register state, seeded from m_pInitialCtx or ring-0 below. */
    RTDBGUNWINDSTATE m_State;

    /** Cached debug module for the current PC mapping (retained), NIL if none. */
    RTDBGMOD    m_hCached;
    /** Address the cached module/segment is mapped at. */
    RTUINTPTR   m_uCachedMapping;
    /** Size of the cached mapping (image or segment size). */
    RTUINTPTR   m_cbCachedMapping;
    /** Heap copy of the unwind table (RTMemFree'd on flush), NULL if none. */
    uint8_t    *m_pbCachedInfo;
    /** Size of m_pbCachedInfo in bytes. */
    size_t      m_cbCachedInfo;

    /** Function table for PE/AMD64 (entire m_pbCachedInfo). */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY m_paFunctions;
    /** Number of functions in m_paFunctions. */
    size_t      m_cFunctions;

    /**
     * Initializes the unwind state from a guest context or, for DBGF_AS_R0,
     * from the host ring-0 stack of the given CPU.
     */
    DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
    {
        m_State.u32Magic     = RTDBGUNWINDSTATE_MAGIC;
        m_State.enmArch      = RTLDRARCH_AMD64;
        m_State.pfnReadStack = dbgfR3StackReadCallback;
        m_State.pvUser       = this; /* lets the read callback find us again */
        RT_ZERO(m_State.u);
        if (pInitialCtx)
        {
            /* Capture the full general purpose, segment and flags register set. */
            m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
            m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
            m_State.u.x86.auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
            m_State.u.x86.auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
            m_State.u.x86.auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
            m_State.u.x86.auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
            m_State.u.x86.auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
            m_State.u.x86.auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
            m_State.u.x86.auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
            m_State.u.x86.auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
            m_State.u.x86.auRegs[X86_GREG_x10] = pInitialCtx->r10;
            m_State.u.x86.auRegs[X86_GREG_x11] = pInitialCtx->r11;
            m_State.u.x86.auRegs[X86_GREG_x12] = pInitialCtx->r12;
            m_State.u.x86.auRegs[X86_GREG_x13] = pInitialCtx->r13;
            m_State.u.x86.auRegs[X86_GREG_x14] = pInitialCtx->r14;
            m_State.u.x86.auRegs[X86_GREG_x15] = pInitialCtx->r15;
            m_State.uPc                        = pInitialCtx->rip;
            m_State.u.x86.uRFlags              = pInitialCtx->rflags.u;
            m_State.u.x86.auSegs[X86_SREG_ES]  = pInitialCtx->es.Sel;
            m_State.u.x86.auSegs[X86_SREG_CS]  = pInitialCtx->cs.Sel;
            m_State.u.x86.auSegs[X86_SREG_SS]  = pInitialCtx->ss.Sel;
            m_State.u.x86.auSegs[X86_SREG_DS]  = pInitialCtx->ds.Sel;
            m_State.u.x86.auSegs[X86_SREG_GS]  = pInitialCtx->gs.Sel;
            m_State.u.x86.auSegs[X86_SREG_FS]  = pInitialCtx->fs.Sel;
            m_State.u.x86.fRealOrV86           = CPUMIsGuestInRealOrV86ModeEx(pInitialCtx);
        }
        else if (hAs == DBGF_AS_R0)
            VMMR3InitR0StackUnwindState(pUVM, idCpu, &m_State);

        m_pUVM          = pUVM;
        m_idCpu         = idCpu;
        m_hAs           = DBGFR3AsResolveAndRetain(pUVM, hAs);
        m_pInitialCtx   = pInitialCtx;
        m_fIsHostRing0  = hAs == DBGF_AS_R0;
        m_uOsScratch    = 0;

        /* Empty unwind-info cache to begin with. */
        m_hCached         = NIL_RTDBGMOD;
        m_uCachedMapping  = 0;
        m_cbCachedMapping = 0;
        m_pbCachedInfo    = NULL;
        m_cbCachedInfo    = 0;
        m_paFunctions     = NULL;
        m_cFunctions      = 0;
    }

    ~DBGFUNWINDCTX();

} DBGFUNWINDCTX;
/** Pointer to unwind context. */
typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
132
133
134static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
135{
136 if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
137 {
138 RTDbgModRelease(pUnwindCtx->m_hCached);
139 pUnwindCtx->m_hCached = NIL_RTDBGMOD;
140 }
141 if (pUnwindCtx->m_pbCachedInfo)
142 {
143 RTMemFree(pUnwindCtx->m_pbCachedInfo);
144 pUnwindCtx->m_pbCachedInfo = NULL;
145 }
146 pUnwindCtx->m_cbCachedInfo = 0;
147 pUnwindCtx->m_paFunctions = NULL;
148 pUnwindCtx->m_cFunctions = 0;
149}
150
151
152DBGFUNWINDCTX::~DBGFUNWINDCTX()
153{
154 dbgfR3UnwindCtxFlushCache(this);
155 if (m_hAs != NIL_RTDBGAS)
156 {
157 RTDbgAsRelease(m_hAs);
158 m_hAs = NIL_RTDBGAS;
159 }
160}
161
162
163/**
164 * @interface_method_impl{RTDBGUNWINDSTATE,pfnReadStack}
165 */
166static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
167{
168 Assert( pThis->enmArch == RTLDRARCH_AMD64
169 || pThis->enmArch == RTLDRARCH_X86_32);
170
171 PDBGFUNWINDCTX pUnwindCtx = (PDBGFUNWINDCTX)pThis->pvUser;
172 DBGFADDRESS SrcAddr;
173 int rc = VINF_SUCCESS;
174 if (pUnwindCtx->m_fIsHostRing0)
175 DBGFR3AddrFromHostR0(&SrcAddr, uSp);
176 else
177 {
178 if ( pThis->enmArch == RTLDRARCH_X86_32
179 || pThis->enmArch == RTLDRARCH_X86_16)
180 {
181 if (!pThis->u.x86.fRealOrV86)
182 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pThis->u.x86.auSegs[X86_SREG_SS], uSp);
183 else
184 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp + ((uint32_t)pThis->u.x86.auSegs[X86_SREG_SS] << 4));
185 }
186 else
187 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
188 }
189 if (RT_SUCCESS(rc))
190 rc = DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pvDst, cbToRead);
191 return rc;
192}
193
194
195/**
196 * Sets PC and SP.
197 *
198 * @returns true.
199 * @param pUnwindCtx The unwind context.
200 * @param pAddrPC The program counter (PC) value to set.
201 * @param pAddrStack The stack pointer (SP) value to set.
202 */
203static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
204{
205 Assert( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
206 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);
207
208 if (!DBGFADDRESS_IS_FAR(pAddrPC))
209 pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
210 else
211 {
212 pUnwindCtx->m_State.uPc = pAddrPC->off;
213 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS] = pAddrPC->Sel;
214 }
215 if (!DBGFADDRESS_IS_FAR(pAddrStack))
216 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
217 else
218 {
219 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->off;
220 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] = pAddrStack->Sel;
221 }
222 return true;
223}
224
225
226/**
227 * Try read a 16-bit value off the stack.
228 *
229 * @returns pfnReadStack result.
230 * @param pThis The unwind state.
231 * @param uSrcAddr The stack address.
232 * @param puDst The read destination.
233 */
234DECLINLINE(int) dbgUnwindLoadStackU16(PRTDBGUNWINDSTATE pThis, uint64_t uSrcAddr, uint16_t *puDst)
235{
236 return pThis->pfnReadStack(pThis, uSrcAddr, sizeof(*puDst), puDst);
237}
238
239
240/**
241 * Try read a 64-bit value off the stack.
242 *
243 * @returns pfnReadStack result.
244 * @param pThis The unwind state.
245 * @param uSrcAddr The stack address.
246 * @param puDst The read destination.
247 */
248DECLINLINE(int) dbgUnwindLoadStackU64(PRTDBGUNWINDSTATE pThis, uint64_t uSrcAddr, uint64_t *puDst)
249{
250 return pThis->pfnReadStack(pThis, uSrcAddr, sizeof(*puDst), puDst);
251}
252
253
254/**
255 * Binary searches the lookup table.
256 *
257 * @returns RVA of unwind info on success, UINT32_MAX on failure.
258 * @param paFunctions The table to lookup @a uRva in.
259 * @param iEnd Size of the table.
260 * @param uRva The RVA of the function we want.
261 */
262DECLINLINE(PCIMAGE_RUNTIME_FUNCTION_ENTRY)
263dbgfR3UnwindCtxLookupUnwindInfoRva(PCIMAGE_RUNTIME_FUNCTION_ENTRY paFunctions, size_t iEnd, uint32_t uRva)
264{
265 size_t iBegin = 0;
266 while (iBegin < iEnd)
267 {
268 size_t const i = iBegin + (iEnd - iBegin) / 2;
269 PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = &paFunctions[i];
270 if (uRva < pEntry->BeginAddress)
271 iEnd = i;
272 else if (uRva > pEntry->EndAddress)
273 iBegin = i + 1;
274 else
275 return pEntry;
276 }
277 return NULL;
278}
279
280
281/**
282 * Processes an IRET frame.
283 *
284 * @returns true.
285 * @param pThis The unwind state being worked.
286 * @param fErrCd Non-zero if there is an error code on the stack.
287 */
/**
 * Processes an IRET frame.
 *
 * Pops a 64-bit interrupt/exception return frame off the stack — optional
 * error code, then RIP, CS, RFLAGS, RSP and SS at 8 bytes a piece — and
 * updates the unwind state accordingly.
 *
 * @returns true.
 * @param   pThis           The unwind state being worked.
 * @param   fErrCd          Non-zero if there is an error code on the stack.
 */
static bool dbgUnwindPeAmd64DoOneIRet(PRTDBGUNWINDSTATE pThis, uint8_t fErrCd)
{
    Assert(fErrCd <= 1);
    if (!fErrCd)
        pThis->u.x86.Loaded.s.fErrCd = 0;
    else
    {
        /* Pop the error code first; mark it loaded even if the read fails
           (uErrCd is zeroed beforehand so a failed read yields 0). */
        pThis->u.x86.uErrCd = 0;
        pThis->u.x86.Loaded.s.fErrCd = 1;
        dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.uErrCd);
        pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
    }

    pThis->enmRetType = RTDBGRETURNTYPE_IRET64;
    pThis->u.x86.FrameAddr.off = pThis->u.x86.auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8;
    pThis->u.x86.FrameAddr.sel = pThis->u.x86.auSegs[X86_SREG_SS];

    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->uPc);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* RIP */

    dbgUnwindLoadStackU16(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auSegs[X86_SREG_CS]);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* CS */

    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.uRFlags);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* EFLAGS */

    /* Pre-seed the new RSP with a 16-byte aligned guess in case the read fails. */
    uint64_t uNewRsp = (pThis->u.x86.auRegs[X86_GREG_xSP] - 8) & ~(uint64_t)15;
    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &uNewRsp);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* RSP */

    dbgUnwindLoadStackU16(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auSegs[X86_SREG_SS]);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* SS */

    /* Switch to the popped stack pointer only after all frame items are read. */
    pThis->u.x86.auRegs[X86_GREG_xSP] = uNewRsp;

    /* Advertise everything this frame supplied. */
    pThis->u.x86.Loaded.s.fRegs      |= RT_BIT(X86_GREG_xSP);
    pThis->u.x86.Loaded.s.fSegs      |= RT_BIT(X86_SREG_CS) | RT_BIT(X86_SREG_SS);
    pThis->u.x86.Loaded.s.fPc         = 1;
    pThis->u.x86.Loaded.s.fFrameAddr  = 1;
    pThis->u.x86.Loaded.s.fRFlags     = 1;
    return true;
}
330
331
332/**
333 * Unwinds one frame using cached module info.
334 *
335 * @returns true on success, false on failure.
336 * @param hMod The debug module to retrieve unwind info from.
337 * @param paFunctions The table to lookup @a uRvaRip in.
338 * @param cFunctions Size of the lookup table.
339 * @param pThis The unwind state.
340 * @param uRvaRip The RVA of the RIP.
341 *
342 * @todo Move this down to IPRT in the ldrPE.cpp / dbgmodcodeview.cpp area.
343 */
/**
 * Unwinds one frame using cached module info.
 *
 * Looks up the PE/AMD64 unwind info for @a uRvaRip, interprets the unwind
 * opcodes (prolog replay or epilog mirroring), follows chained entries, and
 * finally pops the return address off the stack.
 *
 * @returns true on success, false on failure.
 * @param   hMod            The debug module to retrieve unwind info from.
 * @param   paFunctions     The table to lookup @a uRvaRip in.
 * @param   cFunctions      Size of the lookup table.
 * @param   pThis           The unwind state.
 * @param   uRvaRip         The RVA of the RIP.
 *
 * @todo Move this down to IPRT in the ldrPE.cpp / dbgmodcodeview.cpp area.
 */
static bool dbgUnwindPeAmd64DoOne(RTDBGMOD hMod, PCIMAGE_RUNTIME_FUNCTION_ENTRY paFunctions, size_t cFunctions,
                                  PRTDBGUNWINDSTATE pThis, uint32_t uRvaRip)
{
    /*
     * Lookup the unwind info RVA and try read it.
     */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = dbgfR3UnwindCtxLookupUnwindInfoRva(paFunctions, cFunctions, uRvaRip);
    if (pEntry)
    {
        IMAGE_RUNTIME_FUNCTION_ENTRY ChainedEntry;
        unsigned iFrameReg   = ~0U;  /* frame register number, ~0U = none established */
        unsigned offFrameReg = 0;    /* offset subtracted from the frame register */

        int      fInEpilog = -1;     /* -1: not-determined-assume-false; 0: false; 1: true. */
        uint8_t  cbEpilog  = 0;      /* size of the epilog code sequence (v2 info only) */
        uint8_t  offEpilog = UINT8_MAX; /* how far into the epilog RIP is, if fInEpilog==1 */
        for (unsigned cChainLoops = 0; ; cChainLoops++)
        {
            /*
             * Get the info.
             */
            union
            {
                uint32_t uRva;
                /* Sized for a max-size unwind info: header + 256 codes + a chained entry. */
                uint8_t  ab[  RT_OFFSETOF(IMAGE_UNWIND_INFO, aOpcodes)
                            + sizeof(IMAGE_UNWIND_CODE) * 256
                            + sizeof(IMAGE_RUNTIME_FUNCTION_ENTRY)];
            } uBuf;

            /* The query takes the unwind info RVA as input in the buffer. */
            uBuf.uRva = pEntry->UnwindInfoAddress;
            size_t cbBuf = sizeof(uBuf);
            int rc = RTDbgModImageQueryProp(hMod, RTLDRPROP_UNWIND_INFO, &uBuf, cbBuf, &cbBuf);
            if (RT_FAILURE(rc))
                return false;

            /*
             * Check the info.
             */
            ASMCompilerBarrier(); /* we're aliasing */
            PCIMAGE_UNWIND_INFO pInfo = (PCIMAGE_UNWIND_INFO)&uBuf;

            /* Only versions 1 and 2 of the unwind info format are understood. */
            if (pInfo->Version != 1 && pInfo->Version != 2)
                return false;

            /*
             * Execute the opcodes.
             */
            unsigned const cOpcodes = pInfo->CountOfCodes;
            unsigned       iOpcode  = 0;

            /*
             * Check for epilog opcodes at the start and see if we're in an epilog.
             */
            if (   pInfo->Version >= 2
                && iOpcode < cOpcodes
                && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
            {
                if (fInEpilog == -1)
                {
                    /* First EPILOG opcode: CodeOffset holds the epilog length. */
                    cbEpilog = pInfo->aOpcodes[iOpcode].u.CodeOffset;
                    Assert(cbEpilog > 0);

                    uint32_t uRvaEpilog = pEntry->EndAddress - cbEpilog;
                    iOpcode++;
                    /* OpInfo bit 0 set appears to mark an epilog ending at the
                       function end — NOTE(review): confirm against MS docs. */
                    if (   (pInfo->aOpcodes[iOpcode - 1].u.OpInfo & 1)
                        && uRvaRip >= uRvaEpilog)
                    {
                        offEpilog = uRvaRip - uRvaEpilog;
                        fInEpilog = 1;
                    }
                    else
                    {
                        /* Scan the remaining EPILOG opcodes; each encodes an epilog
                           start offset from EndAddress in CodeOffset + OpInfo<<8. */
                        fInEpilog = 0;
                        while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                        {
                            uRvaEpilog = pEntry->EndAddress
                                       - (pInfo->aOpcodes[iOpcode].u.CodeOffset + (pInfo->aOpcodes[iOpcode].u.OpInfo << 8));
                            iOpcode++;
                            if (uRvaRip - uRvaEpilog < cbEpilog)
                            {
                                offEpilog = uRvaRip - uRvaEpilog;
                                fInEpilog = 1;
                                break;
                            }
                        }
                    }
                }
                /* Skip any remaining EPILOG opcodes before the regular ones. */
                while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                    iOpcode++;
            }
            if (fInEpilog != 1)
            {
                /*
                 * Skip opcodes that doesn't apply to us if we're in the prolog.
                 */
                uint32_t offPc = uRvaRip - pEntry->BeginAddress;
                if (offPc < pInfo->SizeOfProlog)
                    while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.CodeOffset > offPc)
                        iOpcode++;

                /*
                 * Execute the opcodes.
                 */
                if (pInfo->FrameRegister != 0)
                {
                    iFrameReg   = pInfo->FrameRegister;
                    offFrameReg = pInfo->FrameOffset * 16;
                }
                while (iOpcode < cOpcodes)
                {
                    Assert(pInfo->aOpcodes[iOpcode].u.CodeOffset <= offPc);
                    uint8_t const uOpInfo   = pInfo->aOpcodes[iOpcode].u.OpInfo;
                    uint8_t const uUnwindOp = pInfo->aOpcodes[iOpcode].u.UnwindOp;
                    switch (uUnwindOp)
                    {
                        /* Undo "push reg": pop the register off the stack. */
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auRegs[uOpInfo]);
                            pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
                            iOpcode++;
                            break;

                        /* Undo a large stack allocation; uOpInfo selects the
                           2-slot (size/8 in one slot) or 3-slot (full 32-bit) form. */
                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                            if (uOpInfo == 0)
                            {
                                iOpcode += 2;
                                AssertBreak(iOpcode <= cOpcodes);
                                pThis->u.x86.auRegs[X86_GREG_xSP] += pInfo->aOpcodes[iOpcode - 1].FrameOffset * 8;
                            }
                            else
                            {
                                iOpcode += 3;
                                AssertBreak(iOpcode <= cOpcodes);
                                pThis->u.x86.auRegs[X86_GREG_xSP] += RT_MAKE_U32(pInfo->aOpcodes[iOpcode - 2].FrameOffset,
                                                                                 pInfo->aOpcodes[iOpcode - 1].FrameOffset);
                            }
                            break;

                        /* Undo a small allocation: size is OpInfo*8 + 8. */
                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                            AssertBreak(iOpcode <= cOpcodes);
                            pThis->u.x86.auRegs[X86_GREG_xSP] += uOpInfo * 8 + 8;
                            iOpcode++;
                            break;

                        /* Undo "lea fp, [rsp+off]": recover RSP from the frame register. */
                        case IMAGE_AMD64_UWOP_SET_FPREG:
                            iFrameReg = uOpInfo;
                            offFrameReg = pInfo->FrameOffset * 16;
                            pThis->u.x86.auRegs[X86_GREG_xSP] = pThis->u.x86.auRegs[iFrameReg] - offFrameReg;
                            iOpcode++;
                            break;

                        /* Reload a register saved with "mov [rsp+off], reg";
                           FAR form carries a 32-bit offset in two extra slots. */
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        {
                            uint32_t off = 0;
                            iOpcode++;
                            if (iOpcode < cOpcodes)
                            {
                                off = pInfo->aOpcodes[iOpcode].FrameOffset;
                                iOpcode++;
                                if (uUnwindOp == IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR && iOpcode < cOpcodes)
                                {
                                    off |= (uint32_t)pInfo->aOpcodes[iOpcode].FrameOffset << 16;
                                    iOpcode++;
                                }
                            }
                            off *= 8;
                            dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP] + off, &pThis->u.x86.auRegs[uOpInfo]);
                            pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            break;
                        }

                        /* XMM saves don't affect the integer unwind state; skip. */
                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            iOpcode += 2;
                            break;

                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3;
                            break;

                        /* Hardware interrupt/exception frame: hand over to the IRET unwinder. */
                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME:
                            return dbgUnwindPeAmd64DoOneIRet(pThis, uOpInfo);

                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode += 1;
                            break;

                        case IMAGE_AMD64_UWOP_RESERVED_7:
                            AssertFailedReturn(false);

                        default:
                            AssertMsgFailedReturn(("%u\n", uUnwindOp), false);
                    }
                }
            }
            else
            {
                /*
                 * We're in the POP sequence of an epilog. The POP sequence should
                 * mirror the PUSH sequence exactly.
                 *
                 * Note! We should only end up here for the initial frame (just consider
                 * RSP, stack allocations, non-volatile register restores, ++).
                 */
                while (iOpcode < cOpcodes)
                {
                    uint8_t const uOpInfo   = pInfo->aOpcodes[iOpcode].u.OpInfo;
                    uint8_t const uUnwindOp = pInfo->aOpcodes[iOpcode].u.UnwindOp;
                    switch (uUnwindOp)
                    {
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            /* The stack slot is always reclaimed... */
                            pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
                            if (offEpilog == 0)
                            {
                                /* ...but the register value is only loaded for POPs
                                   RIP hasn't executed yet. */
                                dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auRegs[uOpInfo]);
                                pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            }
                            else
                            {
                                /* Decrement offEpilog by estimated POP instruction length. */
                                offEpilog -= 1;
                                if (offEpilog > 0 && uOpInfo >= 8)
                                    offEpilog -= 1; /* r8-r15 POPs need a REX prefix byte */
                            }
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME: /* Must terminate an epilog, so always execute this. */
                            return dbgUnwindPeAmd64DoOneIRet(pThis, uOpInfo);

                        /* Non-push opcodes are skipped in the epilog; only their
                           slot counts matter for stepping through the array. */
                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                        case IMAGE_AMD64_UWOP_SET_FPREG:
                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode++;
                            break;
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            iOpcode += 2;
                            break;
                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3;
                            break;

                        default:
                            AssertMsgFailedReturn(("%u\n", uUnwindOp), false);
                    }
                }
            }

            /*
             * Chained stuff?
             */
            if (!(pInfo->Flags & IMAGE_UNW_FLAGS_CHAININFO))
                break;
            /* The chained RUNTIME_FUNCTION entry follows the (even-padded) opcode array. */
            ChainedEntry = *(PCIMAGE_RUNTIME_FUNCTION_ENTRY)&pInfo->aOpcodes[(cOpcodes + 1) & ~1];
            pEntry = &ChainedEntry;
            AssertReturn(cChainLoops < 32, false); /* paranoia: don't loop forever on bad data */
        }

        /*
         * RSP should now give us the return address, so perform a RET.
         */
        pThis->enmRetType = RTDBGRETURNTYPE_NEAR64;

        pThis->u.x86.FrameAddr.off = pThis->u.x86.auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8;
        pThis->u.x86.FrameAddr.sel = pThis->u.x86.auSegs[X86_SREG_SS];
        pThis->u.x86.Loaded.s.fFrameAddr = 1;

        dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->uPc);
        pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
        pThis->u.x86.Loaded.s.fPc = 1;
        return true;
    }

    return false;
}
622
623
624/**
625 * Tries to unwind one frame using unwind info.
626 *
627 * @returns true on success, false on failure.
628 * @param pUnwindCtx The unwind context.
629 */
/**
 * Tries to unwind one frame using unwind info.
 *
 * Fast path: reuse the cached module if the PC still falls inside its
 * mapping.  Otherwise resolve the module for the current PC, cache it, and —
 * for flat PE/AMD64 images — fetch and cache its unwind function table
 * before unwinding.
 *
 * @returns true on success, false on failure.
 * @param   pUnwindCtx      The unwind context.
 */
static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx)
{
    /*
     * Hope for the same module as last time around.
     */
    RTUINTPTR offCache = pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping;
    if (offCache < pUnwindCtx->m_cbCachedMapping)
        return dbgUnwindPeAmd64DoOne(pUnwindCtx->m_hCached, pUnwindCtx->m_paFunctions, pUnwindCtx->m_cFunctions,
                                     &pUnwindCtx->m_State, offCache);

    /*
     * Try locate the module.
     */
    RTDBGMOD    hDbgMod = NIL_RTDBGMOD;
    RTUINTPTR   uBase   = 0;
    RTDBGSEGIDX idxSeg  = NIL_RTDBGSEGIDX;
    int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_State.uPc, &hDbgMod, &uBase, &idxSeg);
    if (RT_SUCCESS(rc))
    {
        /* We cache the module regardless of unwind info.  The reference
           returned by RTDbgAsModuleByAddr is kept by the cache and released
           by dbgfR3UnwindCtxFlushCache. */
        dbgfR3UnwindCtxFlushCache(pUnwindCtx);
        pUnwindCtx->m_hCached         = hDbgMod;
        pUnwindCtx->m_uCachedMapping  = uBase;
        pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
                                      : RTDbgModSegmentSize(hDbgMod, idxSeg);

        /* Play simple for now. */
        if (   idxSeg == NIL_RTDBGSEGIDX
            && RTDbgModImageGetFormat(hDbgMod) == RTLDRFMT_PE
            && RTDbgModImageGetArch(hDbgMod)   == RTLDRARCH_AMD64)
        {
            /*
             * Try query the unwind data.
             */
            /* First query with a zero-sized buffer to learn the table size. */
            uint32_t uDummy;
            size_t cbNeeded = 0;
            rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, &uDummy, 0, &cbNeeded);
            if (   rc == VERR_BUFFER_OVERFLOW
                && cbNeeded >= sizeof(*pUnwindCtx->m_paFunctions)
                && cbNeeded < _64M) /* sanity cap on the table size */
            {
                /* +32 bytes of slack — presumably to tolerate the table growing
                   between the two queries; TODO confirm. */
                void *pvBuf = RTMemAllocZ(cbNeeded + 32);
                if (pvBuf)
                {
                    rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, pvBuf, cbNeeded + 32, &cbNeeded);
                    if (RT_SUCCESS(rc))
                    {
                        /* Buffer ownership moves to the cache (freed on flush). */
                        pUnwindCtx->m_pbCachedInfo = (uint8_t *)pvBuf;
                        pUnwindCtx->m_cbCachedInfo = cbNeeded;
                        pUnwindCtx->m_paFunctions  = (PCIMAGE_RUNTIME_FUNCTION_ENTRY)pvBuf;
                        pUnwindCtx->m_cFunctions   = cbNeeded / sizeof(*pUnwindCtx->m_paFunctions);

                        return dbgUnwindPeAmd64DoOne(pUnwindCtx->m_hCached, pUnwindCtx->m_paFunctions, pUnwindCtx->m_cFunctions,
                                                     &pUnwindCtx->m_State, pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping);
                    }
                    RTMemFree(pvBuf);
                }
            }
        }
    }
    return false;
}
692
693
694/**
695 * Read stack memory, will init entire buffer.
696 */
697DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
698{
699 int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
700 if (RT_FAILURE(rc))
701 {
702 /* fallback: byte by byte and zero the ones we fail to read. */
703 size_t cbRead;
704 for (cbRead = 0; cbRead < cb; cbRead++)
705 {
706 DBGFADDRESS Addr = *pSrcAddr;
707 rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
708 if (RT_FAILURE(rc))
709 break;
710 }
711 if (cbRead)
712 rc = VINF_SUCCESS;
713 memset((char *)pvBuf + cbRead, 0, cb - cbRead);
714 *pcbRead = cbRead;
715 }
716 else
717 *pcbRead = cb;
718 return rc;
719}
720
721/**
722 * Collects sure registers on frame exit.
723 *
724 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
725 * @param pUVM The user mode VM handle for the allocation.
726 * @param pFrame The frame in question.
727 * @param pState The unwind state.
728 */
/**
 * Collects sure registers on frame exit.
 *
 * Builds pFrame->paSureRegs from the register/segment/flags/error-code
 * values the unwinder flagged as loaded (pState->u.x86.Loaded).  Two passes:
 * count the set bits first, then allocate and populate the array.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 * @param   pUVM            The user mode VM handle for the allocation.
 * @param   pFrame          The frame in question.
 * @param   pState          The unwind state.
 */
static int dbgfR3StackWalkCollectRegisterChanges(PUVM pUVM, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState)
{
    pFrame->cSureRegs  = 0;
    pFrame->paSureRegs = NULL;

    if (   pState->enmArch == RTLDRARCH_AMD64
        || pState->enmArch == RTLDRARCH_X86_32
        || pState->enmArch == RTLDRARCH_X86_16)
    {
        if (pState->u.x86.Loaded.fAll) /* anything loaded at all? */
        {
            /*
             * Count relevant registers.
             */
            uint32_t cRegs = 0;
            if (pState->u.x86.Loaded.s.fRegs)
                for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auRegs)); f <<= 1)
                    if (pState->u.x86.Loaded.s.fRegs & f)
                        cRegs++;
            if (pState->u.x86.Loaded.s.fSegs)
                for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auSegs)); f <<= 1)
                    if (pState->u.x86.Loaded.s.fSegs & f)
                        cRegs++;
            if (pState->u.x86.Loaded.s.fRFlags)
                cRegs++;
            if (pState->u.x86.Loaded.s.fErrCd)
                cRegs++;
            if (cRegs > 0)
            {
                /*
                 * Allocate the arrays.
                 */
                PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
                AssertReturn(paSureRegs, VERR_NO_MEMORY);
                pFrame->paSureRegs = paSureRegs;
                pFrame->cSureRegs  = cRegs;

                /*
                 * Populate the arrays.
                 */
                uint32_t iReg = 0;
                /* GPRs map directly: index i -> DBGFREG_RAX + i. */
                if (pState->u.x86.Loaded.s.fRegs)
                    for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auRegs); i++)
                        if (pState->u.x86.Loaded.s.fRegs & RT_BIT(i))
                        {
                            paSureRegs[iReg].Value.u64 = pState->u.x86.auRegs[i];
                            paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                            paSureRegs[iReg].enmReg    = (DBGFREG)(DBGFREG_RAX + i);
                            iReg++;
                        }

                /* Segment registers need an explicit index -> DBGFREG mapping. */
                if (pState->u.x86.Loaded.s.fSegs)
                    for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auSegs); i++)
                        if (pState->u.x86.Loaded.s.fSegs & RT_BIT(i))
                        {
                            paSureRegs[iReg].Value.u16 = pState->u.x86.auSegs[i];
                            paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U16;
                            switch (i)
                            {
                                case X86_SREG_ES: paSureRegs[iReg].enmReg = DBGFREG_ES; break;
                                case X86_SREG_CS: paSureRegs[iReg].enmReg = DBGFREG_CS; break;
                                case X86_SREG_SS: paSureRegs[iReg].enmReg = DBGFREG_SS; break;
                                case X86_SREG_DS: paSureRegs[iReg].enmReg = DBGFREG_DS; break;
                                case X86_SREG_FS: paSureRegs[iReg].enmReg = DBGFREG_FS; break;
                                case X86_SREG_GS: paSureRegs[iReg].enmReg = DBGFREG_GS; break;
                                default:          AssertFailedBreak();
                            }
                            iReg++;
                        }

                if (iReg < cRegs)
                {
                    if (pState->u.x86.Loaded.s.fRFlags)
                    {
                        paSureRegs[iReg].Value.u64 = pState->u.x86.uRFlags;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_RFLAGS;
                        iReg++;
                    }
                    if (pState->u.x86.Loaded.s.fErrCd)
                    {
                        /* No DBGFREG enum value exists for the error code, so it
                           goes in as a named pseudo-register. */
                        paSureRegs[iReg].Value.u64 = pState->u.x86.uErrCd;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_END;
                        paSureRegs[iReg].pszName   = "trap-errcd";
                        iReg++;
                    }
                }
                Assert(iReg == cRegs);
            }
        }
    }

    return VINF_SUCCESS;
}
824
825
826/**
827 * Internal worker routine.
828 *
829 * On x86 the typical stack frame layout is like this:
830 * .. ..
831 * 16 parameter 2
832 * 12 parameter 1
833 * 8 parameter 0
834 * 4 return address
835 * 0 old ebp; current ebp points here
836 */
837DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
838{
839 /*
840 * Stop if we got a read error in the previous run.
841 */
842 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
843 return VERR_NO_MORE_FILES;
844
845 /*
846 * Advance the frame (except for the first).
847 */
848 if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
849 {
850 /* frame, pc and stack is taken from the existing frames return members. */
851 pFrame->AddrFrame = pFrame->AddrReturnFrame;
852 pFrame->AddrPC = pFrame->AddrReturnPC;
853 pFrame->pSymPC = pFrame->pSymReturnPC;
854 pFrame->pLinePC = pFrame->pLineReturnPC;
855
856 /* increment the frame number. */
857 pFrame->iFrame++;
858
859 /* UNWIND_INFO_RET -> USED_UNWIND; return type */
860 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
861 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
862 else
863 {
864 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
865 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
866 if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
867 {
868 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
869 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
870 }
871 }
872 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
873 }
874
875 /*
876 * Figure the return address size and use the old PC to guess stack item size.
877 */
878 /** @todo this is bogus... */
879 unsigned cbRetAddr = RTDbgReturnTypeSize(pFrame->enmReturnType);
880 unsigned cbStackItem;
881 switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
882 {
883 case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
884 case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
885 case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
886 case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
887 default:
888 switch (pFrame->enmReturnType)
889 {
890 case RTDBGRETURNTYPE_FAR16:
891 case RTDBGRETURNTYPE_IRET16:
892 case RTDBGRETURNTYPE_IRET32_V86:
893 case RTDBGRETURNTYPE_NEAR16: cbStackItem = 2; break;
894
895 case RTDBGRETURNTYPE_FAR32:
896 case RTDBGRETURNTYPE_IRET32:
897 case RTDBGRETURNTYPE_IRET32_PRIV:
898 case RTDBGRETURNTYPE_NEAR32: cbStackItem = 4; break;
899
900 case RTDBGRETURNTYPE_FAR64:
901 case RTDBGRETURNTYPE_IRET64:
902 case RTDBGRETURNTYPE_NEAR64: cbStackItem = 8; break;
903
904 default:
905 AssertMsgFailed(("%d\n", pFrame->enmReturnType));
906 cbStackItem = 4;
907 break;
908 }
909 }
910
911 /*
912 * Read the raw frame data.
913 * We double cbRetAddr in case we have a far return.
914 */
915 union
916 {
917 uint64_t *pu64;
918 uint32_t *pu32;
919 uint16_t *pu16;
920 uint8_t *pb;
921 void *pv;
922 } u, uRet, uArgs, uBp;
923 size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
924 u.pv = alloca(cbRead);
925 uBp = u;
926 uRet.pb = u.pb + cbStackItem;
927 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
928
929 Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
930 int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
931 if ( RT_FAILURE(rc)
932 || cbRead < cbRetAddr + cbStackItem)
933 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
934
935 /*
936 * Return Frame address.
937 *
938 * If we used unwind info to get here, the unwind register context will be
939 * positioned after the return instruction has been executed. We start by
940 * picking up the rBP register here for return frame and will try improve
941 * on it further down by using unwind info.
942 */
943 pFrame->AddrReturnFrame = pFrame->AddrFrame;
944 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
945 {
946 if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
947 || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
948 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
949 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
950 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
951 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
952 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
953 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
954 else
955 {
956 pFrame->AddrReturnFrame.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP];
957 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
958 }
959 }
960 else
961 {
962 switch (cbStackItem)
963 {
964 case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
965 case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
966 case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
967 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
968 }
969
970 /* Watcom tries to keep the frame pointer odd for far returns. */
971 if ( cbStackItem <= 4
972 && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
973 {
974 if (pFrame->AddrReturnFrame.off & 1)
975 {
976 pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
977 if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR16)
978 {
979 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
980 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
981 cbRetAddr = 4;
982 }
983 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
984 {
985 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
986 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
987 cbRetAddr = 8;
988 }
989 }
990 else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
991 {
992 if (pFrame->enmReturnType == RTDBGRETURNTYPE_FAR16)
993 {
994 pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
995 cbRetAddr = 2;
996 }
997 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
998 {
999 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
1000 cbRetAddr = 4;
1001 }
1002 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
1003 }
1004 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
1005 }
1006
1007 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
1008 }
1009
1010 /*
1011 * Return Stack Address.
1012 */
1013 pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
1014 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1015 {
1016 if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
1017 || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
1018 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
1019 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
1020 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
1021 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
1022 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
1023 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
1024 else
1025 {
1026 pFrame->AddrReturnStack.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP];
1027 pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
1028 }
1029 }
1030 else
1031 {
1032 pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
1033 pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
1034 }
1035
1036 /*
1037 * Return PC.
1038 */
1039 pFrame->AddrReturnPC = pFrame->AddrPC;
1040 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1041 {
1042 if (RTDbgReturnTypeIsNear(pFrame->enmReturnType))
1043 {
1044 pFrame->AddrReturnPC.off = pUnwindCtx->m_State.uPc;
1045 pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
1046 }
1047 else
1048 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
1049 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
1050 }
1051 else
1052 switch (pFrame->enmReturnType)
1053 {
1054 case RTDBGRETURNTYPE_NEAR16:
1055 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1056 {
1057 pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
1058 pFrame->AddrReturnPC.off = *uRet.pu16;
1059 }
1060 else
1061 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
1062 break;
1063 case RTDBGRETURNTYPE_NEAR32:
1064 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1065 {
1066 pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
1067 pFrame->AddrReturnPC.off = *uRet.pu32;
1068 }
1069 else
1070 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
1071 break;
1072 case RTDBGRETURNTYPE_NEAR64:
1073 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1074 {
1075 pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
1076 pFrame->AddrReturnPC.off = *uRet.pu64;
1077 }
1078 else
1079 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
1080 break;
1081 case RTDBGRETURNTYPE_FAR16:
1082 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
1083 break;
1084 case RTDBGRETURNTYPE_FAR32:
1085 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1086 break;
1087 case RTDBGRETURNTYPE_FAR64:
1088 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1089 break;
1090 case RTDBGRETURNTYPE_IRET16:
1091 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
1092 break;
1093 case RTDBGRETURNTYPE_IRET32:
1094 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1095 break;
1096 case RTDBGRETURNTYPE_IRET32_PRIV:
1097 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1098 break;
1099 case RTDBGRETURNTYPE_IRET32_V86:
1100 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1101 break;
1102 case RTDBGRETURNTYPE_IRET64:
1103 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1104 break;
1105 default:
1106 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
1107 return VERR_INVALID_PARAMETER;
1108 }
1109
1110
1111 pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1112 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1113 NULL /*poffDisp*/, NULL /*phMod*/);
1114 pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1115 NULL /*poffDisp*/, NULL /*phMod*/);
1116
1117 /*
1118 * Frame bitness flag.
1119 */
1120 /** @todo use previous return type for this? */
1121 pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
1122 switch (cbStackItem)
1123 {
1124 case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
1125 case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
1126 case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
1127 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
1128 }
1129
1130 /*
1131 * The arguments.
1132 */
1133 memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
1134
1135 /*
1136 * Collect register changes.
1137 * Then call the OS layer to assist us (e.g. NT trap frames).
1138 */
1139 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1140 {
1141 rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
1142 if (RT_FAILURE(rc))
1143 return rc;
1144
1145 if ( pUnwindCtx->m_pInitialCtx
1146 && pUnwindCtx->m_hAs != NIL_RTDBGAS)
1147 {
1148 rc = dbgfR3OSStackUnwindAssist(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, pFrame, &pUnwindCtx->m_State,
1149 pUnwindCtx->m_pInitialCtx, pUnwindCtx->m_hAs, &pUnwindCtx->m_uOsScratch);
1150 if (RT_FAILURE(rc))
1151 return rc;
1152 }
1153 }
1154
1155 /*
1156 * Try use unwind information to locate the return frame pointer (for the
1157 * next loop iteration).
1158 */
1159 Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
1160 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
1161 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
1162 {
1163 /* Set PC and SP if we didn't unwind our way here (context will then point
1164 and the return PC and SP already). */
1165 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
1166 {
1167 dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
1168 }
1169 /** @todo Reevaluate CS if the previous frame return type isn't near. */
1170 if ( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
1171 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32
1172 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_16)
1173 pUnwindCtx->m_State.u.x86.Loaded.fAll = 0;
1174 else
1175 AssertFailed();
1176 if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
1177 {
1178 if (pUnwindCtx->m_fIsHostRing0)
1179 DBGFR3AddrFromHostR0(&pFrame->AddrReturnFrame, pUnwindCtx->m_State.u.x86.FrameAddr.off);
1180 else
1181 {
1182 DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
1183 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
1184 pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
1185 if (RT_SUCCESS(rc))
1186 pFrame->AddrReturnFrame = AddrReturnFrame;
1187 }
1188 pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
1189 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
1190 }
1191 }
1192
1193 return VINF_SUCCESS;
1194}
1195
1196
/**
 * Walks the entire stack allocating memory as we walk.
 *
 * Executed on the EMT of @a idCpu (dispatched by dbgfR3StackWalkBeginCommon
 * via VMR3ReqPriorityCallWaitU) so the CPU context stays stable while we
 * walk.  Builds a singly linked list of DBGFSTACKFRAME structures on the MM
 * heap and returns the head via @a ppFirstFrame; the caller releases the
 * whole list with DBGFR3StackWalkEnd.
 *
 * @returns VBox status code.  VERR_NO_MEMORY on allocation failure, address
 *          conversion status codes, or whatever dbgfR3StackWalk fails with.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the virtual CPU whose stack to walk.
 * @param   pCtx            The CPU context to start from; NULL for ring-0
 *                          walks (see dbgfR3StackWalkBeginCommon).
 * @param   hAs             The address space for symbol/line resolution.
 * @param   enmCodeType     Guest, hyper or ring-0 code.
 * @param   pAddrFrame      Optional frame pointer (BP) override.
 * @param   pAddrStack      Optional stack pointer (SP) override.
 * @param   pAddrPC         Optional program counter override.
 * @param   enmReturnType   The return address type; RTDBGRETURNTYPE_INVALID
 *                          to deduce it from the PC address type / CPU mode.
 * @param   ppFirstFrame    Where to return the pointer to the first frame.
 */
static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
                                                DBGFCODETYPE enmCodeType,
                                                PCDBGFADDRESS pAddrFrame,
                                                PCDBGFADDRESS pAddrStack,
                                                PCDBGFADDRESS pAddrPC,
                                                RTDBGRETURNTYPE enmReturnType,
                                                PCDBGFSTACKFRAME *ppFirstFrame)
{
    DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);

    /* alloc first frame. */
    PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
    if (!pCur)
        return VERR_NO_MEMORY;

    /*
     * Initialize the frame.
     */
    pCur->pNextInternal = NULL;
    pCur->pFirstInternal = pCur;

    /* PC: explicit override, flat RIP for non-guest code, else CS:RIP. */
    int rc = VINF_SUCCESS;
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
    else if (enmCodeType != DBGFCODETYPE_GUEST)
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
    else
        rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
    if (RT_SUCCESS(rc))
    {
        /*
         * Determine the effective address width (mask applied to RSP/RBP
         * below) and, when RTDBGRETURNTYPE_INVALID was passed in, a default
         * return type matching the CPU mode.
         */
        uint64_t fAddrMask;
        if (enmCodeType == DBGFCODETYPE_RING0)
            fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
        else if (enmCodeType == DBGFCODETYPE_HYPER)
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
            fAddrMask = UINT16_MAX;
        else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
            fAddrMask = UINT64_MAX;
        else
        {
            /* Flat guest address: fall back on the vCPU execution mode. */
            PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
            CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
            if (enmCpuMode == CPUMMODE_REAL)
            {
                fAddrMask = UINT16_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16;
            }
            else if (   enmCpuMode == CPUMMODE_PROTECTED
                     || !CPUMIsGuestIn64BitCode(pVCpu))
            {
                fAddrMask = UINT32_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
            }
            else
            {
                fAddrMask = UINT64_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
            }
        }

        /* No caller-specified return type: derive it from the PC address type. */
        if (enmReturnType == RTDBGRETURNTYPE_INVALID)
            switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
            {
                case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16; break;
                case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32; break;
                case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64; break;
                case DBGFADDRESS_FLAGS_RING0:
                    pCur->enmReturnType = HC_ARCH_BITS == 64 ? RTDBGRETURNTYPE_NEAR64 : RTDBGRETURNTYPE_NEAR32;
                    break;
                default:
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
                    break;
            }


        /* SP: explicit override, flat RSP for non-guest code, else SS:RSP. */
        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
        else
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);

        /* BP: explicit override, flat RBP for non-guest code, else SS:RBP. */
        Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
        else if (RT_SUCCESS(rc))
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);

        /*
         * Try unwind and get a better frame pointer and state.
         */
        if (   RT_SUCCESS(rc)
            && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
            && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx))
        {
            pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
            if (!UnwindCtx.m_fIsHostRing0)
                rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
                                          UnwindCtx.m_State.u.x86.FrameAddr.sel, UnwindCtx.m_State.u.x86.FrameAddr.off);
            else
                DBGFR3AddrFromHostR0(&pCur->AddrFrame, UnwindCtx.m_State.u.x86.FrameAddr.off);
        }
        /*
         * The first frame.
         */
        if (RT_SUCCESS(rc))
        {
            if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
            {
                pCur->pSymPC  = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
                                                      RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                                      NULL /*poffDisp*/, NULL /*phMod*/);
                pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
            }

            rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
        }
    }
    else
        pCur->enmReturnType = enmReturnType;
    if (RT_FAILURE(rc))
    {
        /* Frees pCur and anything hanging off it (symbols, lines, ...). */
        DBGFR3StackWalkEnd(pCur);
        return rc;
    }

    /*
     * The other frames.
     */
    DBGFSTACKFRAME Next = *pCur;
    while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
    {
        /* Don't inherit the previous frame's register array. */
        Next.cSureRegs  = 0;
        Next.paSureRegs = NULL;

        /* try walk. */
        rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
        if (RT_FAILURE(rc))
            break;

        /* add the next frame to the chain. */
        PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
        if (!pNext)
        {
            DBGFR3StackWalkEnd(pCur);
            return VERR_NO_MEMORY;
        }
        *pNext = Next;
        pCur->pNextInternal = pNext;
        pCur = pNext;
        Assert(pCur->pNextInternal == NULL);

        /* check for loop */
        for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
             pLoop && pLoop != pCur;
             pLoop = pLoop->pNextInternal)
            if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
            {
                pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
                break;
            }

        /* check for insane recursion */
        if (pCur->iFrame >= 2048)
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
    }

    /* Note: rc may be a failure status here; the partial chain is still
       returned so the caller sees what was walked before the error. */
    *ppFirstFrame = pCur->pFirstInternal;
    return rc;
}
1379
1380
1381/**
1382 * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
1383 * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
1384 */
1385static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
1386 VMCPUID idCpu,
1387 DBGFCODETYPE enmCodeType,
1388 PCDBGFADDRESS pAddrFrame,
1389 PCDBGFADDRESS pAddrStack,
1390 PCDBGFADDRESS pAddrPC,
1391 RTDBGRETURNTYPE enmReturnType,
1392 PCDBGFSTACKFRAME *ppFirstFrame)
1393{
1394 /*
1395 * Validate parameters.
1396 */
1397 *ppFirstFrame = NULL;
1398 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1399 PVM pVM = pUVM->pVM;
1400 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1401 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1402 if (pAddrFrame)
1403 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
1404 if (pAddrStack)
1405 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
1406 if (pAddrPC)
1407 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
1408 AssertReturn(enmReturnType >= RTDBGRETURNTYPE_INVALID && enmReturnType < RTDBGRETURNTYPE_END, VERR_INVALID_PARAMETER);
1409
1410 /*
1411 * Get the CPUM context pointer and pass it on the specified EMT.
1412 */
1413 RTDBGAS hAs;
1414 PCCPUMCTX pCtx;
1415 switch (enmCodeType)
1416 {
1417 case DBGFCODETYPE_GUEST:
1418 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1419 hAs = DBGF_AS_GLOBAL;
1420 break;
1421 case DBGFCODETYPE_HYPER:
1422 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1423 hAs = DBGF_AS_RC_AND_GC_GLOBAL;
1424 break;
1425 case DBGFCODETYPE_RING0:
1426 pCtx = NULL; /* No valid context present. */
1427 hAs = DBGF_AS_R0;
1428 break;
1429 default:
1430 AssertFailedReturn(VERR_INVALID_PARAMETER);
1431 }
1432 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
1433 pUVM, idCpu, pCtx, hAs, enmCodeType,
1434 pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1435}
1436
1437
1438/**
1439 * Begins a guest stack walk, extended version.
1440 *
1441 * This will walk the current stack, constructing a list of info frames which is
1442 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1443 * list and DBGFR3StackWalkEnd to release it.
1444 *
1445 * @returns VINF_SUCCESS on success.
1446 * @returns VERR_NO_MEMORY if we're out of memory.
1447 *
1448 * @param pUVM The user mode VM handle.
1449 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1450 * @param enmCodeType Code type
1451 * @param pAddrFrame Frame address to start at. (Optional)
1452 * @param pAddrStack Stack address to start at. (Optional)
1453 * @param pAddrPC Program counter to start at. (Optional)
1454 * @param enmReturnType The return address type. (Optional)
1455 * @param ppFirstFrame Where to return the pointer to the first info frame.
1456 */
1457VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
1458 VMCPUID idCpu,
1459 DBGFCODETYPE enmCodeType,
1460 PCDBGFADDRESS pAddrFrame,
1461 PCDBGFADDRESS pAddrStack,
1462 PCDBGFADDRESS pAddrPC,
1463 RTDBGRETURNTYPE enmReturnType,
1464 PCDBGFSTACKFRAME *ppFirstFrame)
1465{
1466 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1467}
1468
1469
1470/**
1471 * Begins a guest stack walk.
1472 *
1473 * This will walk the current stack, constructing a list of info frames which is
1474 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1475 * list and DBGFR3StackWalkEnd to release it.
1476 *
1477 * @returns VINF_SUCCESS on success.
1478 * @returns VERR_NO_MEMORY if we're out of memory.
1479 *
1480 * @param pUVM The user mode VM handle.
1481 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1482 * @param enmCodeType Code type
1483 * @param ppFirstFrame Where to return the pointer to the first info frame.
1484 */
1485VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
1486{
1487 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, RTDBGRETURNTYPE_INVALID, ppFirstFrame);
1488}
1489
1490/**
1491 * Gets the next stack frame.
1492 *
1493 * @returns Pointer to the info for the next stack frame.
1494 * NULL if no more frames.
1495 *
1496 * @param pCurrent Pointer to the current stack frame.
1497 *
1498 */
1499VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
1500{
1501 return pCurrent
1502 ? pCurrent->pNextInternal
1503 : NULL;
1504}
1505
1506
1507/**
1508 * Ends a stack walk process.
1509 *
1510 * This *must* be called after a successful first call to any of the stack
1511 * walker functions. If not called we will leak memory or other resources.
1512 *
1513 * @param pFirstFrame The frame returned by one of the begin functions.
1514 */
1515VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
1516{
1517 if ( !pFirstFrame
1518 || !pFirstFrame->pFirstInternal)
1519 return;
1520
1521 PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
1522 while (pFrame)
1523 {
1524 PDBGFSTACKFRAME pCur = pFrame;
1525 pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
1526 if (pFrame)
1527 {
1528 if (pCur->pSymReturnPC == pFrame->pSymPC)
1529 pFrame->pSymPC = NULL;
1530 if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
1531 pFrame->pSymReturnPC = NULL;
1532
1533 if (pCur->pSymPC == pFrame->pSymPC)
1534 pFrame->pSymPC = NULL;
1535 if (pCur->pSymPC == pFrame->pSymReturnPC)
1536 pFrame->pSymReturnPC = NULL;
1537
1538 if (pCur->pLineReturnPC == pFrame->pLinePC)
1539 pFrame->pLinePC = NULL;
1540 if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
1541 pFrame->pLineReturnPC = NULL;
1542
1543 if (pCur->pLinePC == pFrame->pLinePC)
1544 pFrame->pLinePC = NULL;
1545 if (pCur->pLinePC == pFrame->pLineReturnPC)
1546 pFrame->pLineReturnPC = NULL;
1547 }
1548
1549 RTDbgSymbolFree(pCur->pSymPC);
1550 RTDbgSymbolFree(pCur->pSymReturnPC);
1551 RTDbgLineFree(pCur->pLinePC);
1552 RTDbgLineFree(pCur->pLineReturnPC);
1553
1554 if (pCur->paSureRegs)
1555 {
1556 MMR3HeapFree(pCur->paSureRegs);
1557 pCur->paSureRegs = NULL;
1558 pCur->cSureRegs = 0;
1559 }
1560
1561 pCur->pNextInternal = NULL;
1562 pCur->pFirstInternal = NULL;
1563 pCur->fFlags = 0;
1564 MMR3HeapFree(pCur);
1565 }
1566}
1567
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette