VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 93901

Last change on this file since 93901 was 93901, checked in by vboxsync, 3 years ago

VMM,Main,++: Removed VM_IS_RAW_MODE_ENABLED/VM_EXEC_ENGINE_RAW_MODE and added VM_IS_EXEC_ENGINE_IEM/VM_EXEC_ENGINE_IEM instead. In IMachineDebugger::getExecutionEngine VMExecutionEngine_RawMode was removed and VMExecutionEngine_Emulated added. Removed dead code and updated frontends accordingly. On darwin.arm64 HM now falls back on IEM execution since neither HM nor NEM is available there. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 77.1 KB
1/* $Id: DBGF.cpp 93901 2022-02-23 15:35:26Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface is working in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface has an asynchronous nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
33 * as the "ping thread" and the "pong thread, respectivly. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger will then wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling are done by the DBGFR3EventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFR3Resume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
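 * As a rough sketch only (not lifted from any real frontend; error handling
 * and the exact set of events are simplified), a debugger-side loop built on
 * the APIs in this file could look like:
 * @code
 *      int rc = DBGFR3Attach(pUVM);
 *      while (RT_SUCCESS(rc))
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          if (RT_FAILURE(rc))
 *              break;
 *          // ...inspect guest state, set breakpoints, and so on...
 *          if (Event.enmType == DBGFEVENT_POWERING_OFF)
 *              break;
 *          rc = DBGFR3Resume(pUVM, VMCPUID_ALL);
 *      }
 *      DBGFR3Detach(pUVM);
 * @endcode
 *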
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include <VBox/vmm/nem.h>
79#include "DBGFInternal.h"
80#include <VBox/vmm/vm.h>
81#include <VBox/vmm/uvm.h>
82#include <VBox/err.h>
83
84#include <VBox/log.h>
85#include <iprt/semaphore.h>
86#include <iprt/thread.h>
87#include <iprt/asm.h>
88#include <iprt/time.h>
89#include <iprt/assert.h>
90#include <iprt/stream.h>
91#include <iprt/env.h>
92
93
94/*********************************************************************************************************************************
95* Structures and Typedefs *
96*********************************************************************************************************************************/
97/**
98 * Instruction type returned by dbgfStepGetCurInstrType.
99 */
100typedef enum DBGFSTEPINSTRTYPE
101{
102 DBGFSTEPINSTRTYPE_INVALID = 0,
103 DBGFSTEPINSTRTYPE_OTHER,
104 DBGFSTEPINSTRTYPE_RET,
105 DBGFSTEPINSTRTYPE_CALL,
106 DBGFSTEPINSTRTYPE_END,
107 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
108} DBGFSTEPINSTRTYPE;
109
110
111/*********************************************************************************************************************************
112* Internal Functions *
113*********************************************************************************************************************************/
114DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
115DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
116static int dbgfR3CpuWait(PVMCPU pVCpu);
117static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
118static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
119static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
120static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
121
122
123
124/**
125 * Initializes the DBGF.
126 *
127 * @returns VBox status code.
128 * @param pVM The cross context VM structure.
129 */
130VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
131{
132 PUVM pUVM = pVM->pUVM;
133 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
134 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
135
136 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
137
138 /*
139 * The usual sideways mountain climbing style of init:
140 */
141 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
142 if (RT_SUCCESS(rc))
143 {
144 rc = dbgfR3TraceInit(pVM);
145 if (RT_SUCCESS(rc))
146 {
147 rc = dbgfR3RegInit(pUVM);
148 if (RT_SUCCESS(rc))
149 {
150 rc = dbgfR3AsInit(pUVM);
151 if (RT_SUCCESS(rc))
152 {
153 rc = dbgfR3BpInit(pUVM);
154 if (RT_SUCCESS(rc))
155 {
156 rc = dbgfR3OSInit(pUVM);
157 if (RT_SUCCESS(rc))
158 {
159 rc = dbgfR3PlugInInit(pUVM);
160 if (RT_SUCCESS(rc))
161 {
162 rc = dbgfR3BugCheckInit(pVM);
163 if (RT_SUCCESS(rc))
164 {
165#ifdef VBOX_WITH_DBGF_TRACING
166 rc = dbgfR3TracerInit(pVM);
167#endif
168 if (RT_SUCCESS(rc))
169 {
170 return VINF_SUCCESS;
171 }
172 }
173 dbgfR3PlugInTerm(pUVM);
174 }
175 dbgfR3OSTermPart1(pUVM);
176 dbgfR3OSTermPart2(pUVM);
177 }
178 dbgfR3BpTerm(pUVM);
179 }
180 dbgfR3AsTerm(pUVM);
181 }
182 dbgfR3RegTerm(pUVM);
183 }
184 dbgfR3TraceTerm(pVM);
185 }
186 dbgfR3InfoTerm(pUVM);
187 }
188 return rc;
189}
190
191
192/**
193 * Terminates and cleans up resources allocated by the DBGF.
194 *
195 * @returns VBox status code.
196 * @param pVM The cross context VM structure.
197 */
198VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
199{
200 PUVM pUVM = pVM->pUVM;
201
202#ifdef VBOX_WITH_DBGF_TRACING
203 dbgfR3TracerTerm(pVM);
204#endif
205 dbgfR3OSTermPart1(pUVM);
206 dbgfR3PlugInTerm(pUVM);
207 dbgfR3OSTermPart2(pUVM);
208 dbgfR3BpTerm(pUVM);
209 dbgfR3AsTerm(pUVM);
210 dbgfR3RegTerm(pUVM);
211 dbgfR3TraceTerm(pVM);
212 dbgfR3InfoTerm(pUVM);
213
214 return VINF_SUCCESS;
215}
216
217
218/**
219 * This is for tstCFGM and others to avoid triggering leak detection.
220 *
222 * @param pUVM The user mode VM structure.
223 */
224VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
225{
226 dbgfR3InfoTerm(pUVM);
227}
228
229
230/**
231 * Called when the VM is powered off to detach debuggers.
232 *
233 * @param pVM The cross context VM structure.
234 */
235VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
236{
237 /*
238 * Send a termination event to any attached debugger.
239 */
240 if (pVM->dbgf.s.fAttached)
241 {
242 PVMCPU pVCpu = VMMGetCpu(pVM);
243 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
244 AssertLogRelRC(rc);
245
246 /*
247 * Clear the FF so we won't get confused later on.
248 */
249 VM_FF_CLEAR(pVM, VM_FF_DBGF);
250 }
251}
252
253
254/**
255 * Applies relocations to data and code managed by this
256 * component. This function will be called at init and
257 * whenever the VMM needs to relocate itself inside the GC.
258 *
259 * @param pVM The cross context VM structure.
260 * @param offDelta Relocation delta relative to old location.
261 */
262VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
263{
264 dbgfR3TraceRelocate(pVM);
265 dbgfR3AsRelocate(pVM->pUVM, offDelta);
266}
267
268
269/**
270 * Waits a little while for a debugger to attach.
271 *
272 * @returns True if a debugger has attached.
273 * @param pVM The cross context VM structure.
274 * @param pVCpu The cross context per CPU structure.
275 * @param enmEvent Event.
276 *
277 * @thread EMT(pVCpu)
278 */
279bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
280{
281 /*
282 * First a message.
283 */
284#if !defined(DEBUG)
285 int cWait = 10;
286#else
287 int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
288 || ( ( enmEvent == DBGFEVENT_ASSERTION_HYPER
289 || enmEvent == DBGFEVENT_FATAL_ERROR)
290 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
291 ? 10
292 : 150;
293#endif
294 RTStrmPrintf(g_pStdErr,
295 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
296#ifdef DEBUG
297 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
298#endif
299 ,
300 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
301 RTStrmFlush(g_pStdErr);
302 while (cWait > 0)
303 {
304 RTThreadSleep(100);
305 if (pVM->dbgf.s.fAttached)
306 {
307 RTStrmPrintf(g_pStdErr, "Attached!\n");
308 RTStrmFlush(g_pStdErr);
309 return true;
310 }
311
312 /* Process rendezvous (debugger attaching involves such). */
313 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
314 {
315 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
316 if (rc != VINF_SUCCESS)
317 {
318 /** @todo Ignoring these could be bad. */
319 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
320 RTStrmFlush(g_pStdErr);
321 }
322 }
323
324 /* Process priority stuff. */
325 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
326 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
327 {
328 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
329 if (rc == VINF_SUCCESS)
330 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
331 if (rc != VINF_SUCCESS)
332 {
333 /** @todo Ignoring these could be bad. */
334 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
335 RTStrmFlush(g_pStdErr);
336 }
337 }
338
339 /* next */
340 if (!(cWait % 10))
341 {
342 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
343 RTStrmFlush(g_pStdErr);
344 }
345 cWait--;
346 }
347
348 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
349 RTStrmFlush(g_pStdErr);
350 return false;
351}
352
353
354/**
355 * Forced action callback.
356 *
357 * The VMM will call this from its main loop when either VM_FF_DBGF or
358 * VMCPU_FF_DBGF is set.
359 *
360 * The function checks for and executes pending commands from the debugger.
361 * Then it checks for pending debug events and serves these.
362 *
363 * @returns VINF_SUCCESS normally.
364 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
365 * @param pVM The cross context VM structure.
366 * @param pVCpu The cross context per CPU structure.
367 */
368VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
369{
370 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
371
372 /*
373 * Dispatch pending events.
374 */
375 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
376 {
377 if ( pVCpu->dbgf.s.cEvents > 0
378 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
379 {
380 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
381 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
382 }
383
384 /*
385 * Command pending? Process it.
386 */
387 PUVMCPU pUVCpu = pVCpu->pUVCpu;
388 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
389 {
390 bool fResumeExecution;
391 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
392 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
393 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
394 if (!fResumeExecution)
395 rcStrict2 = dbgfR3CpuWait(pVCpu);
396 if ( rcStrict2 != VINF_SUCCESS
397 && ( rcStrict == VINF_SUCCESS
398 || RT_FAILURE(rcStrict2)
399 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
400 rcStrict = rcStrict2;
401 }
402 }
403
404 return VBOXSTRICTRC_TODO(rcStrict);
405}
406
407
408/**
409 * Try to determine the event context.
410 *
411 * @returns debug event context.
412 * @param pVCpu The cross context vCPU structure.
413 */
414static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
415{
416 switch (EMGetState(pVCpu))
417 {
418 case EMSTATE_HM:
419 case EMSTATE_NEM:
420 case EMSTATE_DEBUG_GUEST_HM:
421 case EMSTATE_DEBUG_GUEST_NEM:
422 return DBGFEVENTCTX_HM;
423
424 case EMSTATE_IEM:
425 case EMSTATE_RAW:
426 case EMSTATE_IEM_THEN_REM:
427 case EMSTATE_DEBUG_GUEST_IEM:
428 case EMSTATE_DEBUG_GUEST_RAW:
429 return DBGFEVENTCTX_RAW;
430
431
432 case EMSTATE_REM:
433 case EMSTATE_DEBUG_GUEST_REM:
434 return DBGFEVENTCTX_REM;
435
436 case EMSTATE_DEBUG_HYPER:
437 case EMSTATE_GURU_MEDITATION:
438 return DBGFEVENTCTX_HYPER;
439
440 default:
441 return DBGFEVENTCTX_OTHER;
442 }
443}
444
445
446/**
447 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
448 *
449 * @returns VBox status code.
450 * @param pVM The cross context VM structure.
451 * @param pVCpu The CPU sending the event.
452 * @param enmType The event type to send.
453 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
454 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
455 * @param cbPayload The size of the event payload, optional.
456 */
457static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
458 void const *pvPayload, size_t cbPayload)
459{
460 PUVM pUVM = pVM->pUVM;
461 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
462
463 /*
464 * Massage the input a little.
465 */
466 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
467 if (enmCtx == DBGFEVENTCTX_INVALID)
468 enmCtx = dbgfR3FigureEventCtx(pVCpu);
469
470 /*
471 * Put the event into the ring buffer.
472 */
473 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
474
475 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
476 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
477 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
478 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
479
480 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
481
482#ifdef DEBUG
483 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
484#endif
485 pEvent->enmType = enmType;
486 pEvent->enmCtx = enmCtx;
487 pEvent->idCpu = pVCpu->idCpu;
488 pEvent->uReserved = 0;
489 if (cbPayload)
490 memcpy(&pEvent->u, pvPayload, cbPayload);
491
492 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
493
494 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
495
496 /*
497 * Signal the debugger.
498 */
499 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
500}
501
502
503/**
504 * Send event and wait for the debugger to respond.
505 *
506 * @returns Strict VBox status code.
507 * @param pVM The cross context VM structure.
508 * @param pVCpu The CPU sending the event.
509 * @param enmType The event type to send.
510 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
511 */
512DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
513{
514 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
515 if (RT_SUCCESS(rc))
516 rc = dbgfR3CpuWait(pVCpu);
517 return rc;
518}
519
520
521/**
522 * Send event and wait for the debugger to respond, extended version.
523 *
524 * @returns Strict VBox status code.
525 * @param pVM The cross context VM structure.
526 * @param pVCpu The CPU sending the event.
527 * @param enmType The event type to send.
528 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
529 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
530 * @param cbPayload The size of the event payload, optional.
531 */
532DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
533 void const *pvPayload, size_t cbPayload)
534{
535 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
536 if (RT_SUCCESS(rc))
537 rc = dbgfR3CpuWait(pVCpu);
538 return rc;
539}
540
541
542/**
543 * Send event but do NOT wait for the debugger.
544 *
545 * Currently only used by dbgfR3CpuCmd().
546 *
547 * @param pVM The cross context VM structure.
548 * @param pVCpu The CPU sending the event.
549 * @param enmType The event type to send.
550 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
551 */
552DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
553{
554 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
555}
556
557
558/**
559 * The common event prologue code.
560 *
561 * It will make sure someone is attached, and perhaps process any high priority
562 * pending actions (none yet).
563 *
564 * @returns VBox status code.
565 * @param pVM The cross context VM structure.
566 * @param pVCpu The vCPU cross context structure.
567 * @param enmEvent The event to be sent.
568 */
569static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
570{
571 /*
572 * Check if a debugger is attached.
573 */
574 if ( !pVM->dbgf.s.fAttached
575 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
576 {
577 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
578 return VERR_DBGF_NOT_ATTACHED;
579 }
580
581 /*
582 * Look through pending commands and finish those which make sense now.
583 */
584 /** @todo Process/purge pending commands. */
585 //int rc = DBGFR3VMMForcedAction(pVM);
586 return VINF_SUCCESS;
587}
588
589
590/**
591 * Processes a pending event on the current CPU.
592 *
593 * This is called by EM in response to VINF_EM_DBG_EVENT.
594 *
595 * @returns Strict VBox status code.
596 * @param pVM The cross context VM structure.
597 * @param pVCpu The cross context per CPU structure.
598 *
599 * @thread EMT(pVCpu)
600 */
601VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
602{
603 VMCPU_ASSERT_EMT(pVCpu);
604 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
605
606 /*
607 * Check that we've got an event first.
608 */
609 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
610 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
611 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
612
613 /*
614 * Make sure we've got a debugger and are allowed to speak to it.
615 */
616 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
617 if (RT_FAILURE(rc))
618 {
619 /** @todo drop them events? */
620 return rc; /** @todo this will cause trouble if we're here via an FF! */
621 }
622
623 /*
624 * Send the event and mark it as ignored.
625 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
626 */
627 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
628 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
629 return rcStrict;
630}
631
632
633/**
634 * Send a generic debugger event which takes no data.
635 *
636 * @returns VBox status code.
637 * @param pVM The cross context VM structure.
638 * @param enmEvent The event to send.
639 * @internal
640 */
641VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
642{
643 PVMCPU pVCpu = VMMGetCpu(pVM);
644 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
645
646 /*
647 * Do stepping filtering.
648 */
649 /** @todo Would be better if we did some of this inside the execution
650 * engines. */
651 if ( enmEvent == DBGFEVENT_STEPPED
652 || enmEvent == DBGFEVENT_STEPPED_HYPER)
653 {
654 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
655 return VINF_EM_DBG_STEP;
656 }
657
658 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
659 if (RT_FAILURE(rc))
660 return rc;
661
662 /*
663 * Send the event and process the reply communication.
664 */
665 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
666}
667
668
669/**
670 * Send a debugger event which takes the full source file location.
671 *
672 * @returns VBox status code.
673 * @param pVM The cross context VM structure.
674 * @param enmEvent The event to send.
675 * @param pszFile Source file.
676 * @param uLine Line number in source file.
677 * @param pszFunction Function name.
678 * @param pszFormat Message which accompanies the event.
679 * @param ... Message arguments.
680 * @internal
681 */
682VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
683{
684 va_list args;
685 va_start(args, pszFormat);
686 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
687 va_end(args);
688 return rc;
689}
690
691
692/**
693 * Send a debugger event which takes the full source file location.
694 *
695 * @returns VBox status code.
696 * @param pVM The cross context VM structure.
697 * @param enmEvent The event to send.
698 * @param pszFile Source file.
699 * @param uLine Line number in source file.
700 * @param pszFunction Function name.
701 * @param pszFormat Message which accompanies the event.
702 * @param args Message arguments.
703 * @internal
704 */
705VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
706{
707 PVMCPU pVCpu = VMMGetCpu(pVM);
708 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
709
710 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
711 if (RT_FAILURE(rc))
712 return rc;
713
714 /*
715 * Format the message.
716 */
717 char *pszMessage = NULL;
718 char szMessage[8192];
719 if (pszFormat && *pszFormat)
720 {
721 pszMessage = &szMessage[0];
722 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
723 }
724
725 /*
726 * Send the event and process the reply communication.
727 */
728 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
729 DbgEvent.u.Src.pszFile = pszFile;
730 DbgEvent.u.Src.uLine = uLine;
731 DbgEvent.u.Src.pszFunction = pszFunction;
732 DbgEvent.u.Src.pszMessage = pszMessage;
733 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
734}
735
736
737/**
738 * Send a debugger event which takes the two assertion messages.
739 *
740 * @returns VBox status code.
741 * @param pVM The cross context VM structure.
742 * @param enmEvent The event to send.
743 * @param pszMsg1 First assertion message.
744 * @param pszMsg2 Second assertion message.
745 */
746VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
747{
748 PVMCPU pVCpu = VMMGetCpu(pVM);
749 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
750
751 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
752 if (RT_FAILURE(rc))
753 return rc;
754
755 /*
756 * Send the event and process the reply communication.
757 */
758 DBGFEVENT DbgEvent;
759 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
760 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
761 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
762}
763
764
765/**
766 * Breakpoint was hit somewhere.
767 * Figure out which breakpoint it is and notify the debugger.
768 *
769 * @returns VBox status code.
770 * @param pVM The cross context VM structure.
771 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
772 */
773VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
774{
775 PVMCPU pVCpu = VMMGetCpu(pVM);
776 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
777
778 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
779 if (RT_FAILURE(rc))
780 return rc;
781
782 /*
783 * Halt all other vCPUs to give the user the ability to inspect other
784 * vCPU states as well.
785 */
786 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
787 if (RT_FAILURE(rc))
788 return rc;
789
790 /*
791 * Send the event and process the reply communication.
792 */
793 DBGFEVENT DbgEvent;
794 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
795 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
796 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
797 {
798 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
799 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
800 }
801
802 return VERR_DBGF_IPE_1;
803}
804
805
806/**
807 * Returns whether the given vCPU is waiting for the debugger.
808 *
809 * @returns Flag whether the vCPU is currently waiting for the debugger.
810 * @param pUVCpu The user mode vCPU structure.
811 */
812DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
813{
814 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
815}
816
817
818/**
819 * Checks whether the given vCPU is waiting in the debugger.
820 *
821 * @returns Flag whether the indicated vCPU is halted. When VMCPUID_ALL
822 * is given, true is returned when at least one vCPU is halted.
823 * @param pUVM The user mode VM structure.
824 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
825 */
826DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
827{
828 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
829
830 /* Check that either the given vCPU or all are actually halted. */
831 if (idCpu != VMCPUID_ALL)
832 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
833
834 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
835 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
836 return true;
837 return false;
838}
839
840
841/**
842 * Gets the pending debug command for this EMT/CPU, replacing it with
843 * DBGFCMD_NO_COMMAND.
844 *
845 * @returns Pending command.
846 * @param pUVCpu The user mode virtual CPU structure.
847 * @thread EMT(pUVCpu)
848 */
849DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
850{
851 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
852 Log2(("DBGF: Getting command: %d\n", enmCmd));
853 return enmCmd;
854}
855
856
857/**
858 * Send a debug command to a CPU, making sure to notify it.
859 *
860 * @returns VBox status code.
861 * @param pUVCpu The user mode virtual CPU structure.
862 * @param enmCmd The command to submit to the CPU.
863 */
864DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
865{
866 Log2(("DBGF: Setting command to %d\n", enmCmd));
867 Assert(enmCmd != DBGFCMD_NO_COMMAND);
868 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
869
870 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
871 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
872
873 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
874 return VINF_SUCCESS;
875}
876
877
878/**
879 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
880 */
881static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
882{
883 RT_NOREF(pvUser);
884
885 VMCPU_ASSERT_EMT(pVCpu);
886 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
887
888 PUVMCPU pUVCpu = pVCpu->pUVCpu;
889 if ( pVCpu != (PVMCPU)pvUser
890 && !dbgfR3CpuIsHalted(pUVCpu))
891 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
892
893 return VINF_SUCCESS;
894}
895
896
897/**
898 * Halts all vCPUs of the given VM except for the given one.
899 *
900 * @returns VBox status code.
901 * @param pVM The cross context VM structure.
902 * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
903 */
904static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
905{
906 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
907}
908
909
910/**
911 * Waits for the debugger to respond.
912 *
913 * @returns VBox status code. (clarify)
914 * @param pVCpu The cross context vCPU structure.
915 */
916static int dbgfR3CpuWait(PVMCPU pVCpu)
917{
918 PVM pVM = pVCpu->CTX_SUFF(pVM);
919 PUVMCPU pUVCpu = pVCpu->pUVCpu;
920
921 LogFlow(("dbgfR3CpuWait:\n"));
922 int rcRet = VINF_SUCCESS;
923
924 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
925
926 /*
927 * Waits for the debugger to reply (i.e. issue a command).
928 */
929 for (;;)
930 {
931 /*
932 * Wait.
933 */
934 for (;;)
935 {
936 /*
937 * Process forced flags before we go sleep.
938 */
939 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
940 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
941 {
942 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
943 break;
944
945 int rc;
946 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
947 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
948 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
949 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
950 {
951 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
952 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
953 if (rc == VINF_SUCCESS)
954 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
955 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
956 }
957 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
958 {
959 VMSTATE enmState = VMR3GetState(pVM);
960 switch (enmState)
961 {
962 case VMSTATE_FATAL_ERROR:
963 case VMSTATE_FATAL_ERROR_LS:
964 case VMSTATE_GURU_MEDITATION:
965 case VMSTATE_GURU_MEDITATION_LS:
966 rc = VINF_EM_SUSPEND;
967 break;
968 case VMSTATE_DESTROYING:
969 rc = VINF_EM_TERMINATE;
970 break;
971 default:
972 rc = VERR_DBGF_IPE_1;
973 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
974 }
975 }
976 else
977 rc = VINF_SUCCESS;
978 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
979 {
980 switch (rc)
981 {
982 case VINF_EM_DBG_BREAKPOINT:
983 case VINF_EM_DBG_STEPPED:
984 case VINF_EM_DBG_STEP:
985 case VINF_EM_DBG_STOP:
986 case VINF_EM_DBG_EVENT:
987 AssertMsgFailed(("rc=%Rrc\n", rc));
988 break;
989
990 /* return straight away */
991 case VINF_EM_TERMINATE:
992 case VINF_EM_OFF:
993 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
994 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
995 return rc;
996
997 /* remember return code. */
998 default:
999 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
1000 RT_FALL_THRU();
1001 case VINF_EM_RESET:
1002 case VINF_EM_SUSPEND:
1003 case VINF_EM_HALT:
1004 case VINF_EM_RESUME:
1005 case VINF_EM_RESCHEDULE:
1006 case VINF_EM_RESCHEDULE_REM:
1007 case VINF_EM_RESCHEDULE_RAW:
1008 if (rc < rcRet || rcRet == VINF_SUCCESS)
1009 rcRet = rc;
1010 break;
1011 }
1012 }
1013 else if (RT_FAILURE(rc))
1014 {
1015 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1016 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1017 return rc;
1018 }
1019 }
1020 else if (pVM->dbgf.s.fAttached)
1021 {
1022 int rc = VMR3WaitU(pUVCpu);
1023 if (RT_FAILURE(rc))
1024 {
1025 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1026 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1027 return rc;
1028 }
1029 }
1030 else
1031 {
1032 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1033 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1034 return rcRet;
1035 }
1036 }
1037
1038 /*
1039 * Process the command.
1040 */
1041 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1042 bool fResumeExecution;
1043 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1044 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1045 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1046 if (fResumeExecution)
1047 {
1048 if (RT_FAILURE(rc))
1049 rcRet = rc;
1050 else if ( rc >= VINF_EM_FIRST
1051 && rc <= VINF_EM_LAST
1052 && (rc < rcRet || rcRet == VINF_SUCCESS))
1053 rcRet = rc;
1054 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1055 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1056 return rcRet;
1057 }
1058 }
1059}
1060
1061
1062/**
1063 * Executes command from debugger.
1064 *
1065 * The caller is responsible for waiting or resuming execution based on the
1066 * value returned in the *pfResumeExecution indicator.
1067 *
1068 * @returns VBox status code. (clarify!)
1069 * @param pVCpu The cross context vCPU structure.
1070 * @param enmCmd The command in question.
1071 * @param pCmdData Pointer to the command data.
1072 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1073 */
1074static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1075{
1076 RT_NOREF(pCmdData); /* for later */
1077
1078 /*
1079 * The cases in this switch return directly if there is no event to send.
1080 */
1081 DBGFEVENTTYPE enmEvent;
1082 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1083 switch (enmCmd)
1084 {
1085 /*
1086 * Halt is answered by an event saying that we've halted.
1087 */
1088 case DBGFCMD_HALT:
1089 {
1090 *pfResumeExecution = false;
1091 enmEvent = DBGFEVENT_HALT_DONE;
1092 break;
1093 }
1094
1095
1096 /*
1097 * Resume is not answered, we just resume execution.
1098 */
1099 case DBGFCMD_GO:
1100 {
1101 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1102 *pfResumeExecution = true;
1103 return VINF_SUCCESS;
1104 }
1105
1106 /** @todo implement (and define) the rest of the commands. */
1107
1108 /*
1109 * Single step, with trace into.
1110 */
1111 case DBGFCMD_SINGLE_STEP:
1112 {
1113 Log2(("Single step\n"));
1114 PVM pVM = pVCpu->CTX_SUFF(pVM);
1115 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1116 {
1117 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1118 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1119 }
1120 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1121 {
1122 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1123 *pfResumeExecution = true;
1124 return VINF_EM_DBG_STEP;
1125 }
1126 /* Stop after zero steps. Nonsense, but whatever. */
1127 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1128 *pfResumeExecution = false;
1129 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1130 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1131 break;
1132 }
1133
1134 /*
1135 * Default is to send an invalid command event.
1136 */
1137 default:
1138 {
1139 *pfResumeExecution = false;
1140 enmEvent = DBGFEVENT_INVALID_COMMAND;
1141 break;
1142 }
1143 }
1144
1145 /*
1146 * Send the pending event.
1147 */
1148 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1149 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1150 AssertRCStmt(rc, *pfResumeExecution = true);
1151 return rc;
1152}
1153
1154
1155/**
1156 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1157 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1158 */
1159static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1160{
1161 PUVM pUVM = pVM->pUVM;
1162 int *prcAttach = (int *)pvUser;
1163 RT_NOREF(pVCpu);
1164
1165 if (pVM->dbgf.s.fAttached)
1166 {
1167 Log(("dbgfR3Attach: Debugger already attached\n"));
1168 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1169 return VINF_SUCCESS;
1170 }
1171
1172 /*
1173 * The per-CPU bits.
1174 */
1175 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1176 {
1177 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1178
1179 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1180 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1181 }
1182
1183 /*
1184 * Init of the VM -> Debugger communication part living in the global VM structure.
1185 */
1186 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of the event ring; increased when it gets full. */
1187 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1188 pUVM->dbgf.s.idxDbgEvtRead = 0;
1189 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1190 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1191 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1192 int rc;
1193 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1194 if (pUVM->dbgf.s.paDbgEvts)
1195 {
1196 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1197 if (RT_SUCCESS(rc))
1198 {
1199 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1200 if (RT_SUCCESS(rc))
1201 {
1202 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1203 if (RT_SUCCESS(rc))
1204 {
1205 /*
1206 * At last, set the attached flag.
1207 */
1208 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1209 *prcAttach = VINF_SUCCESS;
1210 return VINF_SUCCESS;
1211 }
1212
1213 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1214 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1215 }
1216 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1217 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1218 }
1219 }
1220 else
1221 rc = VERR_NO_MEMORY;
1222
1223 *prcAttach = rc;
1224 return VINF_SUCCESS;
1225}
1226
1227
1228/**
1229 * Attaches a debugger to the specified VM.
1230 *
1231 * Only one debugger at a time.
1232 *
1233 * @returns VBox status code.
1234 * @param pUVM The user mode VM handle.
1235 */
1236VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1237{
1238 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1239 PVM pVM = pUVM->pVM;
1240 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1241
1242 /*
1243 * Call the VM, use EMT rendezvous for serialization.
1244 */
1245 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1246 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1247 if (RT_SUCCESS(rc))
1248 rc = rcAttach;
1249
1250 return rc;
1251}
1252
1253
1254/**
1255 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1256 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1257 */
1258static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1259{
1260 if (pVCpu->idCpu == 0)
1261 {
1262 PUVM pUVM = (PUVM)pvUser;
1263
1264 /*
1265 * Per-CPU cleanup.
1266 */
1267 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1268 {
1269 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1270
1271 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1272 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1273 }
1274
1275 /*
1276 * De-init of the VM -> Debugger communication part living in the global VM structure.
1277 */
1278 if (pUVM->dbgf.s.paDbgEvts)
1279 {
1280 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1281 pUVM->dbgf.s.paDbgEvts = NULL;
1282 }
1283
1284 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1285 {
1286 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1287 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1288 }
1289
1290 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1291 {
1292 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1293 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1294 }
1295
1296 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1297 {
1298 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1299 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1300 }
1301
1302 pUVM->dbgf.s.cDbgEvtMax = 0;
1303 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1304 pUVM->dbgf.s.idxDbgEvtRead = 0;
1305 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1306 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1307 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1308
1309 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1310 }
1311
1312 return VINF_SUCCESS;
1313}
1314
1315
1316/**
1317 * Detaches a debugger from the specified VM.
1318 *
1319 * Caller must be attached to the VM.
1320 *
1321 * @returns VBox status code.
1322 * @param pUVM The user mode VM handle.
1323 */
1324VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1325{
1326 LogFlow(("DBGFR3Detach:\n"));
1327
1328 /*
1329 * Validate input. The UVM handle shall be valid, the VM handle might be
1330 * in the process of being destroyed already, so deal quietly with that.
1331 */
1332 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1333 PVM pVM = pUVM->pVM;
1334 if (!VM_IS_VALID_EXT(pVM))
1335 return VERR_INVALID_VM_HANDLE;
1336
1337 /*
1338 * Check if attached.
1339 */
1340 if (!pVM->dbgf.s.fAttached)
1341 return VERR_DBGF_NOT_ATTACHED;
1342
1343 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1344}
1345
1346
1347/**
1348 * Wait for a debug event.
1349 *
1350 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1351 * @param pUVM The user mode VM handle.
1352 * @param cMillies Number of milliseconds to wait.
1353 * @param pEvent Where to store the event data.
1354 */
1355VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1356{
1357 /*
1358 * Check state.
1359 */
1360 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1361 PVM pVM = pUVM->pVM;
1362 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1363 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1364
1365 RT_BZERO(pEvent, sizeof(*pEvent));
1366
1367 /*
1368 * Wait for an event to arrive if there are none.
1369 */
1370 int rc = VINF_SUCCESS;
1371 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1372 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1373 {
1374 do
1375 {
1376 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1377 } while ( RT_SUCCESS(rc)
1378 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1379 }
1380
1381 if (RT_SUCCESS(rc))
1382 {
1383 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1384
1385 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1386 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1387 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1388 }
1389
1390 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1391 return rc;
1392}
1393
1394
1395/**
1396 * Halts VM execution.
1397 *
1398 * After calling this the VM isn't actually halted until a DBGFEVENT_HALT_DONE
1399 * event arrives. Until that time it's not possible to issue any new commands.
1400 *
1401 * @returns VBox status code.
1402 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1403 * are halted.
1404 * @param pUVM The user mode VM handle.
1405 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
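 *
 * A rough debugger-side sketch of the halt handshake (single vCPU assumed,
 * error handling omitted; not taken from an actual caller):
 * @code
 *      DBGFR3Halt(pUVM, 0);
 *      DBGFEVENT Event;
 *      int rc;
 *      do
 *          rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *      while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 * @endcode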
1406 */
1407VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1408{
1409 /*
1410 * Check state.
1411 */
1412 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1413 PVM pVM = pUVM->pVM;
1414 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1415 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1416 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1417
1418 /*
1419 * Halt the requested CPUs as needed.
1420 */
1421 int rc;
1422 if (idCpu != VMCPUID_ALL)
1423 {
1424 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1425 if (!dbgfR3CpuIsHalted(pUVCpu))
1426 {
1427 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1428 rc = VINF_SUCCESS;
1429 }
1430 else
1431 rc = VWRN_DBGF_ALREADY_HALTED;
1432 }
1433 else
1434 {
1435 rc = VWRN_DBGF_ALREADY_HALTED;
1436 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1437 {
1438 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1439 if (!dbgfR3CpuIsHalted(pUVCpu))
1440 {
1441 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1442 rc = VINF_SUCCESS;
1443 }
1444 }
1445 }
1446
1447 return rc;
1448}
1449
1450
1451/**
1452 * Checks if any of the specified vCPUs have been halted by the debugger.
1453 *
1454 * @returns True if at least one vCPU is halted.
1455 * @returns False if no vCPU is halted.
1456 * @param pUVM The user mode VM handle.
1457 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1458 * at least a single vCPU is halted in the debugger.
1459 */
1460VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1461{
1462 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1463 PVM pVM = pUVM->pVM;
1464 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1465 AssertReturn(pVM->dbgf.s.fAttached, false);
1466
1467 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1468}
1469
1470
1471/**
1472 * Checks if the debugger can wait for events or not.
1473 *
1474 * This function is only used by lazy, multiplexing debuggers. :-)
1475 *
1476 * @returns VBox status code.
1477 * @retval VINF_SUCCESS if waitable.
1478 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1479 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1480 * (not asserted) or if the handle is invalid (asserted).
1481 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1482 *
1483 * @param pUVM The user mode VM handle.
1484 */
1485VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1486{
1487 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1488
1489 /* Note! There is a slight race here, unfortunately. */
1490 PVM pVM = pUVM->pVM;
1491 if (!RT_VALID_PTR(pVM))
1492 return VERR_INVALID_VM_HANDLE;
1493 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1494 return VERR_INVALID_VM_HANDLE;
1495 if (!pVM->dbgf.s.fAttached)
1496 return VERR_DBGF_NOT_ATTACHED;
1497
1498 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1499 return VINF_SUCCESS;
1500}
1501
1502
1503/**
1504 * Resumes VM execution.
1505 *
1506 * There is no receipt event on this command.
1507 *
1508 * @returns VBox status code.
1509 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1510 * @param pUVM The user mode VM handle.
1511 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1512 */
1513VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1514{
1515 /*
1516 * Validate input and attachment state.
1517 */
1518 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1519 PVM pVM = pUVM->pVM;
1520 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1521 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1522
1523 /*
1524 * Ping the halted emulation threads, telling them to run.
1525 */
1526 int rc = VWRN_DBGF_ALREADY_RUNNING;
1527 if (idCpu != VMCPUID_ALL)
1528 {
1529 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1530 if (dbgfR3CpuIsHalted(pUVCpu))
1531 {
1532 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1533 AssertRC(rc);
1534 }
1535 }
1536 else
1537 {
1538 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1539 {
1540 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1541 if (dbgfR3CpuIsHalted(pUVCpu))
1542 {
1543 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1544 AssertRC(rc2);
1545 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1546 rc = rc2;
1547 }
1548 }
1549 }
1550
1551 return rc;
1552}
1553
1554
1555/**
1556 * Classifies the current instruction.
1557 *
1558 * @returns Type of instruction.
1559 * @param pVM The cross context VM structure.
1560 * @param pVCpu The current CPU.
1561 * @thread EMT(pVCpu)
1562 */
1563static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1564{
1565 /*
1566 * Read the instruction.
1567 */
1568 size_t cbRead = 0;
1569 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1570 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1571 if (RT_SUCCESS(rc))
1572 {
1573 /*
1574 * Do minimal parsing. No real need to involve the disassembler here.
1575 */
1576 uint8_t *pb = abOpcode;
1577 for (;;)
1578 {
1579 switch (*pb++)
1580 {
1581 default:
1582 return DBGFSTEPINSTRTYPE_OTHER;
1583
1584 case 0xe8: /* call rel16/32 */
1585 case 0x9a: /* call farptr */
1586 case 0xcc: /* int3 */
1587 case 0xcd: /* int xx */
1588 // case 0xce: /* into */
1589 return DBGFSTEPINSTRTYPE_CALL;
1590
1591 case 0xc2: /* ret xx */
1592 case 0xc3: /* ret */
1593 case 0xca: /* retf xx */
1594 case 0xcb: /* retf */
1595 case 0xcf: /* iret */
1596 return DBGFSTEPINSTRTYPE_RET;
1597
1598 case 0xff:
1599 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1600 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1601 return DBGFSTEPINSTRTYPE_CALL;
1602 return DBGFSTEPINSTRTYPE_OTHER;
1603
1604 case 0x0f:
1605 switch (*pb++)
1606 {
1607 case 0x05: /* syscall */
1608 case 0x34: /* sysenter */
1609 return DBGFSTEPINSTRTYPE_CALL;
1610 case 0x07: /* sysret */
1611 case 0x35: /* sysexit */
1612 return DBGFSTEPINSTRTYPE_RET;
1613 }
1614 break;
1615
1616 /* Must handle some REX prefixes. So we do all normal prefixes. */
1617 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1618 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1619 if (!CPUMIsGuestIn64BitCode(pVCpu))
1620 return DBGFSTEPINSTRTYPE_OTHER;
1621 break;
1622
1623 case 0x2e: /* CS */
1624 case 0x36: /* SS */
1625 case 0x3e: /* DS */
1626 case 0x26: /* ES */
1627 case 0x64: /* FS */
1628 case 0x65: /* GS */
1629 case 0x66: /* op size */
1630 case 0x67: /* addr size */
1631 case 0xf0: /* lock */
1632 case 0xf2: /* REPNZ */
1633 case 0xf3: /* REPZ */
1634 break;
1635 }
1636 }
1637 }
1638
1639 return DBGFSTEPINSTRTYPE_INVALID;
1640}
1641
1642
1643/**
1644 * Checks if the stepping has reached a stop point.
1645 *
1646 * Called when raising a stepped event.
1647 *
1648 * @returns true if the event should be raised, false if we should take one more
1649 * step first.
1650 * @param pVM The cross context VM structure.
1651 * @param pVCpu The cross context per CPU structure of the calling EMT.
1652 * @thread EMT(pVCpu)
1653 */
1654static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1655{
1656 /*
1657 * Check valid pVCpu and that it matches the CPU one stepping.
1658 */
1659 if (pVCpu)
1660 {
1661 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1662 {
1663 /*
1664 * Increase the number of steps and see if we've reached the max.
1665 */
1666 pVM->dbgf.s.SteppingFilter.cSteps++;
1667 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1668 {
1669 /*
1670 * Check PC and SP address filtering.
1671 */
1672 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1673 {
1674 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1675 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1676 return true;
1677 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1678 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1679 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1680 return true;
1681 }
1682
1683 /*
1684 * Do step-over filtering separate from the step-into one.
1685 */
1686 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1687 {
1688 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1689 switch (enmType)
1690 {
1691 default:
1692 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1693 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1694 break;
1695 return true;
1696 case DBGFSTEPINSTRTYPE_CALL:
1697 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1698 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1699 return true;
1700 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1701 break;
1702 case DBGFSTEPINSTRTYPE_RET:
1703 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1704 {
1705 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1706 return true;
1707 /* If stopping after the return was requested, use the cMaxSteps limit to stop on the next step. */
1708 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1709 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1710 }
1711 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1712 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1713 break;
1714 }
1715 return false;
1716 }
1717 /*
1718 * Filtered step-into.
1719 */
1720 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1721 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1722 {
1723 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1724 switch (enmType)
1725 {
1726 default:
1727 break;
1728 case DBGFSTEPINSTRTYPE_CALL:
1729 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1730 return true;
1731 break;
1732 case DBGFSTEPINSTRTYPE_RET:
1733 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1734 return true;
1735 /* If stopping after the return was requested, use the cMaxSteps limit to stop on the next step. */
1736 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1737 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1738 break;
1739 }
1740 return false;
1741 }
1742 }
1743 }
1744 }
1745
1746 return true;
1747}
1748
1749
1750/**
1751 * Step Into.
1752 *
1753 * A single step event is generated from this command.
1754 * The current implementation is not reliable, so don't rely on the event coming.
1755 *
1756 * @returns VBox status code.
1757 * @param pUVM The user mode VM handle.
1758 * @param idCpu The ID of the CPU to single step on.
1759 */
1760VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1761{
1762 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1763}
1764
1765
1766/**
1767 * Full-fledged step.
1768 *
1769 * This extended stepping API allows for doing multiple steps before raising an
1770 * event, helping implement step over, step out and other more advanced
1771 * features.
1772 *
1773 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1774 * DBGFEVENT_STEPPED_EVENT. However the stepping may be interrupted by other
1775 * events, which will abort the stepping.
1776 *
1777 * The stop on pop area feature is for safeguarding step out.
1778 *
1779 * Please note though, that it will always use stepping and never breakpoints.
1780 * While this allows for a much greater flexibility it can at times be rather
1781 * slow.
1782 *
1783 * @returns VBox status code.
1784 * @param pUVM The user mode VM handle.
1785 * @param idCpu The ID of the CPU to single step on.
1786 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1787 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1788 * always be specified.
1789 * @param pStopPcAddr Address to stop executing at. Completely ignored
1790 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1791 * @param pStopPopAddr Stack address that SP must be lower than when
1792 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1793 * @param cbStopPop The range starting at @a pStopPopAddr which is
1794 * considered to be within the same thread stack. Note
1795 * that the API allows @a pStopPopAddr and @a cbStopPop
1796 * to form an area that wraps around and it will
1797 * consider the part starting at 0 as included.
1798 * @param cMaxSteps The maximum number of steps to take. This is to
1799 * prevent stepping for ever, so passing UINT32_MAX is
1800 * not recommended.
1801 *
1802 * @remarks The two address arguments must be guest context virtual addresses,
1803 * or HMA. The code doesn't make much of a point out of HMA, though.
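 *
 * As an illustrative sketch (not taken from an actual caller), a plain
 * "step over" of the current instruction could be requested with:
 * @code
 *      rc = DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_OVER, NULL, NULL, 0, _64K);
 * @endcode
 * where _64K merely serves as the safety limit on the number of steps. A
 * "step out" would additionally pass DBGF_STEP_F_STOP_AFTER_RET, typically
 * together with a DBGF_STEP_F_STOP_ON_STACK_POP area as a safeguard.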
1804 */
1805VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1806 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1807{
1808 /*
1809 * Check state.
1810 */
1811 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1812 PVM pVM = pUVM->pVM;
1813 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1814 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1815 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1816 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1817 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1818 {
1819 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1820 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1821 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1822 }
1823 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1824 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1825 {
1826 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1827 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1828 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1829 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1830 }
1831
1832 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1833 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1834 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1835 { /* likely */ }
1836 else
1837 return VERR_SEM_OUT_OF_TURN;
1838 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1839
1840 /*
1841 * Send the emulation thread a single-step command.
1842 */
1843 if (fFlags == DBGF_STEP_F_INTO)
1844 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1845 else
1846 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1847 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1848 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1849 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1850 else
1851 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1852 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1853 {
1854 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1855 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1856 }
1857 else
1858 {
1859 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1860 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1861 }
1862
1863 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1864 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1865 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1866
1867 Assert(dbgfR3CpuIsHalted(pUVCpu));
1868 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1869}
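
/*
 * Usage sketch (illustrative only): a "step out"-style request built on the
 * extended API above.  It assumes the debugger thread already owns the halted
 * VCPU, that uGuestSp is a hypothetical variable holding the current guest
 * stack pointer, and that DBGFR3AddrFromFlat is the usual DBGF flat-address
 * helper.
 *
 * @code
 *      DBGFADDRESS StackPop;
 *      DBGFR3AddrFromFlat(pUVM, &StackPop, uGuestSp);
 *
 *      // Step over calls, stop once SP pops above the recorded stack address,
 *      // and give up after at most 10000 steps.
 *      int rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP,
 *                            NULL, &StackPop, 0x10000, 10000);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // Wait on the debugger thread for the DBGFEVENT_STEPPED event
 *          // (or whichever event aborted the stepping).
 *      }
 * @endcode
 */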
1870
1871
1872
1873/**
1874 * dbgfR3EventConfigEx argument packet.
1875 */
1876typedef struct DBGFR3EVENTCONFIGEXARGS
1877{
1878 PCDBGFEVENTCONFIG paConfigs;
1879 size_t cConfigs;
1880 int rc;
1881} DBGFR3EVENTCONFIGEXARGS;
1882/** Pointer to a dbgfR3EventConfigEx argument packet. */
1883typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1884
1885
1886/**
1887 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1888 */
1889static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1890{
1891 if (pVCpu->idCpu == 0)
1892 {
1893 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1894 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1895 size_t cConfigs = pArgs->cConfigs;
1896
1897 /*
1898 * Apply the changes.
1899 */
1900 unsigned cChanges = 0;
1901 for (uint32_t i = 0; i < cConfigs; i++)
1902 {
1903 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1904 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1905 if (paConfigs[i].fEnabled)
1906 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1907 else
1908 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1909 }
1910
1911 /*
1912 * Inform HM about changes.
1913 */
1914 if (cChanges > 0)
1915 {
1916 if (HMIsEnabled(pVM))
1917 {
1918 HMR3NotifyDebugEventChanged(pVM);
1919 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1920 }
1921 else if (VM_IS_NEM_ENABLED(pVM))
1922 {
1923 NEMR3NotifyDebugEventChanged(pVM);
1924 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1925 }
1926 }
1927 }
1928 else if (HMIsEnabled(pVM))
1929 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1930 else if (VM_IS_NEM_ENABLED(pVM))
1931 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1932
1933 return VINF_SUCCESS;
1934}
1935
1936
1937/**
1938 * Configures (enables/disables) multiple selectable debug events.
1939 *
1940 * @returns VBox status code.
1941 * @param pUVM The user mode VM handle.
1942 * @param paConfigs The events to configure and their new state.
1943 * @param cConfigs Number of entries in @a paConfigs.
1944 */
1945VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1946{
1947 /*
1948 * Validate input.
1949 */
1950 size_t i = cConfigs;
1951 while (i-- > 0)
1952 {
1953 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1954 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1955 }
1956 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1957 PVM pVM = pUVM->pVM;
1958 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1959
1960 /*
1961 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1962 * can sync their data and execution with new debug state.
1963 */
1964 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1965 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1966 dbgfR3EventConfigEx, &Args);
1967 if (RT_SUCCESS(rc))
1968 rc = Args.rc;
1969 return rc;
1970}
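
/*
 * Usage sketch (illustrative only): a batched configuration call.  Only the
 * DBGFEVENTCONFIG fields used above (enmType, fEnabled) are assumed; the
 * event values are placeholders - anything in the
 * [DBGFEVENT_FIRST_SELECTABLE, DBGFEVENT_END) range passes the validation.
 *
 * @code
 *      DBGFEVENTCONFIG aCfgs[2];
 *      aCfgs[0].enmType  = DBGFEVENT_FIRST_SELECTABLE;                       // placeholder event
 *      aCfgs[0].fEnabled = true;
 *      aCfgs[1].enmType  = (DBGFEVENTTYPE)(DBGFEVENT_FIRST_SELECTABLE + 1);  // another placeholder
 *      aCfgs[1].fEnabled = false;
 *
 *      int rc = DBGFR3EventConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 * @endcode
 */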
1971
1972
1973/**
1974 * Enables or disables a selectable debug event.
1975 *
1976 * @returns VBox status code.
1977 * @param pUVM The user mode VM handle.
1978 * @param enmEvent The selectable debug event.
1979 * @param fEnabled The new state.
1980 */
1981VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1982{
1983 /*
1984 * Convert to an array call.
1985 */
1986 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1987 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1988}
1989
1990
1991/**
1992 * Checks if the given selectable event is enabled.
1993 *
1994 * @returns true if enabled, false if not or invalid input.
1995 * @param pUVM The user mode VM handle.
1996 * @param enmEvent The selectable debug event.
1997 * @sa DBGFR3EventQuery
1998 */
1999VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2000{
2001 /*
2002 * Validate input.
2003 */
2004 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2005 && enmEvent < DBGFEVENT_END, false);
2006 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2007 || enmEvent == DBGFEVENT_BREAKPOINT
2008 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2009 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2010
2011 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2012 PVM pVM = pUVM->pVM;
2013 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2014
2015 /*
2016 * Check the event status.
2017 */
2018 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2019}
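
/*
 * Usage sketch (illustrative only): the single-event configure/query pair,
 * using DBGFEVENT_FIRST_SELECTABLE purely as a placeholder for a valid
 * selectable event type.
 *
 * @code
 *      int rc = DBGFR3EventConfig(pUVM, DBGFEVENT_FIRST_SELECTABLE, true);
 *      if (RT_SUCCESS(rc))
 *          Assert(DBGFR3EventIsEnabled(pUVM, DBGFEVENT_FIRST_SELECTABLE));
 * @endcode
 */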
2020
2021
2022/**
2023 * Queries the status of a set of events.
2024 *
2025 * @returns VBox status code.
2026 * @param pUVM The user mode VM handle.
2027 * @param paConfigs The events to query and where to return the state.
2028 * @param cConfigs The number of elements in @a paConfigs.
2029 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2030 */
2031VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2032{
2033 /*
2034 * Validate input.
2035 */
2036 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2037 PVM pVM = pUVM->pVM;
2038 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2039
2040 for (size_t i = 0; i < cConfigs; i++)
2041 {
2042 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2043 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2044 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2045 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2046 || enmType == DBGFEVENT_BREAKPOINT
2047 || enmType == DBGFEVENT_BREAKPOINT_IO
2048 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2049 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2050 }
2051
2052 return VINF_SUCCESS;
2053}
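
/*
 * Usage sketch (illustrative only): querying the state of a couple of events.
 * Here fEnabled is an output, unlike in DBGFR3EventConfigEx; the event types
 * are placeholders.
 *
 * @code
 *      DBGFEVENTCONFIG aQuery[2];
 *      aQuery[0].enmType = DBGFEVENT_BREAKPOINT;
 *      aQuery[1].enmType = DBGFEVENT_FIRST_SELECTABLE;
 *
 *      int rc = DBGFR3EventQuery(pUVM, aQuery, RT_ELEMENTS(aQuery));
 *      if (RT_SUCCESS(rc))
 *          LogRel(("breakpoint events are %s\n", aQuery[0].fEnabled ? "enabled" : "disabled"));
 * @endcode
 */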
2054
2055
2056/**
2057 * dbgfR3InterruptConfigEx argument packet.
2058 */
2059typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2060{
2061 PCDBGFINTERRUPTCONFIG paConfigs;
2062 size_t cConfigs;
2063 int rc;
2064} DBGFR3INTERRUPTCONFIGEXARGS;
2065/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2066typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2067
2068/**
2069 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2070 * Worker for DBGFR3InterruptConfigEx.}
2071 */
2072static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2073{
2074 if (pVCpu->idCpu == 0)
2075 {
2076 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2077 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2078 size_t cConfigs = pArgs->cConfigs;
2079
2080 /*
2081 * Apply the changes.
2082 */
2083 bool fChanged = false;
2084 bool fThis;
2085 for (uint32_t i = 0; i < cConfigs; i++)
2086 {
2087 /*
2088 * Hardware interrupts.
2089 */
2090 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2091 {
2092 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2093 if (fThis)
2094 {
2095 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2096 pVM->dbgf.s.cHardIntBreakpoints++;
2097 }
2098 }
2099 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2100 {
2101 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2102 if (fThis)
2103 {
2104 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2105 pVM->dbgf.s.cHardIntBreakpoints--;
2106 }
2107 }
2108
2109 /*
2110 * Software interrupts.
2111 */
2112            if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2113 {
2114 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2115 if (fThis)
2116 {
2117 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2118 pVM->dbgf.s.cSoftIntBreakpoints++;
2119 }
2120 }
2121 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2122 {
2123 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2124 if (fThis)
2125 {
2126 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2127 pVM->dbgf.s.cSoftIntBreakpoints--;
2128 }
2129 }
2130 }
2131
2132 /*
2133 * Update the event bitmap entries.
2134 */
2135 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2136 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2137 else
2138 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2139
2140 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2141 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2142 else
2143 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2144
2145 /*
2146 * Inform HM about changes.
2147 */
2148 if (fChanged)
2149 {
2150 if (HMIsEnabled(pVM))
2151 {
2152 HMR3NotifyDebugEventChanged(pVM);
2153 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2154 }
2155 else if (VM_IS_NEM_ENABLED(pVM))
2156 {
2157 NEMR3NotifyDebugEventChanged(pVM);
2158 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2159 }
2160 }
2161 }
2162 else if (HMIsEnabled(pVM))
2163 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2164 else if (VM_IS_NEM_ENABLED(pVM))
2165 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2166
2167 return VINF_SUCCESS;
2168}
2169
2170
2171/**
2172 * Changes the interception configuration of multiple hardware and software interrupts.
2173 *
2174 * @returns VBox status code.
2175 * @param pUVM The user mode VM handle.
2176 * @param paConfigs The interrupts to configure and their new interception state.
2177 * @param cConfigs The number of elements in @a paConfigs.
2178 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
2179 */
2180VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2181{
2182 /*
2183 * Validate input.
2184 */
2185 size_t i = cConfigs;
2186 while (i-- > 0)
2187 {
2188 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2189 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2190 }
2191
2192 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2193 PVM pVM = pUVM->pVM;
2194 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2195
2196 /*
2197 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2198 * can sync their data and execution with new debug state.
2199 */
2200 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2201 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2202 dbgfR3InterruptConfigEx, &Args);
2203 if (RT_SUCCESS(rc))
2204 rc = Args.rc;
2205 return rc;
2206}
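
/*
 * Usage sketch (illustrative only): a batched interception change using the
 * DBGFINTERRUPTCONFIG fields referenced above (iInterrupt, enmHardState,
 * enmSoftState).  The interrupt numbers are arbitrary examples.
 *
 * @code
 *      DBGFINTERRUPTCONFIG aCfgs[2];
 *      aCfgs[0].iInterrupt   = 0x0e;                           // example: page fault vector
 *      aCfgs[0].enmHardState = DBGFINTERRUPTSTATE_ENABLED;     // intercept the hardware interrupt
 *      aCfgs[0].enmSoftState = DBGFINTERRUPTSTATE_DONT_TOUCH;  // leave the software side alone
 *      aCfgs[1].iInterrupt   = 0x80;                           // example: legacy syscall vector
 *      aCfgs[1].enmHardState = DBGFINTERRUPTSTATE_DONT_TOUCH;
 *      aCfgs[1].enmSoftState = DBGFINTERRUPTSTATE_ENABLED;     // intercept INT 80h
 *
 *      int rc = DBGFR3InterruptConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 * @endcode
 */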
2207
2208
2209/**
2210 * Configures interception of a hardware interrupt.
2211 *
2212 * @returns VBox status code.
2213 * @param pUVM The user mode VM handle.
2214 * @param iInterrupt The interrupt number.
2215 * @param fEnabled Whether interception is enabled or not.
2216 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2217 */
2218VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2219{
2220 /*
2221 * Convert to DBGFR3InterruptConfigEx call.
2222 */
2223 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2224 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2225}
2226
2227
2228/**
2229 * Configures interception of a software interrupt.
2230 *
2231 * @returns VBox status code.
2232 * @param pUVM The user mode VM handle.
2233 * @param iInterrupt The interrupt number.
2234 * @param fEnabled Whether interception is enabled or not.
2235 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2236 */
2237VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2238{
2239 /*
2240 * Convert to DBGFR3InterruptConfigEx call.
2241 */
2242 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2243 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2244}
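
/*
 * Usage sketch (illustrative only): the two wrappers above each reduce to a
 * single-entry DBGFR3InterruptConfigEx call, so enabling interception is a
 * one-liner per interrupt.  Interrupt numbers are arbitrary examples.
 *
 * @code
 *      int rc = DBGFR3InterruptHardwareConfig(pUVM, 0x0e, true);   // intercept hardware INT 0Eh
 *      if (RT_SUCCESS(rc))
 *          rc = DBGFR3InterruptSoftwareConfig(pUVM, 0x80, true);   // intercept software INT 80h
 * @endcode
 */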
2245
2246
2247/**
2248 * Checks whether interception is enabled for a hardware interrupt.
2249 *
2250 * @returns true if enabled, false if not or invalid input.
2251 * @param pUVM The user mode VM handle.
2252 * @param iInterrupt The interrupt number.
2253 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2254 * DBGF_IS_SOFTWARE_INT_ENABLED
2255 */
2256VMMR3DECL(bool) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2257{
2258 /*
2259 * Validate input.
2260 */
2261 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2262 PVM pVM = pUVM->pVM;
2263 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2264
2265 /*
2266 * Check it.
2267 */
2268 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2269}
2270
2271
2272/**
2273 * Checks whether interception is enabled for a software interrupt.
2274 *
2275 * @returns true if enabled, false if not or invalid input.
2276 * @param pUVM The user mode VM handle.
2277 * @param iInterrupt The interrupt number.
2278 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2279 * DBGF_IS_HARDWARE_INT_ENABLED,
2280 */
2281VMMR3DECL(bool) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2282{
2283 /*
2284 * Validate input.
2285 */
2286 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2287 PVM pVM = pUVM->pVM;
2288 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2289
2290 /*
2291 * Check it.
2292 */
2293 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2294}
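
/*
 * Usage sketch (illustrative only): pairing the IsEnabled queries with the
 * config calls above as a quick sanity check.  Interrupt numbers are
 * arbitrary examples.
 *
 * @code
 *      if (!DBGFR3InterruptHardwareIsEnabled(pUVM, 0x0e))
 *          LogRel(("hardware INT 0Eh interception is off\n"));
 *      if (DBGFR3InterruptSoftwareIsEnabled(pUVM, 0x80))
 *          LogRel(("software INT 80h interception is on\n"));
 * @endcode
 */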
2295
2296
2297
2298/**
2299 * Call this to single step programmatically.
2300 *
2301 * You must pass down the return code to the EM loop! That's
2302 * where the actual single stepping takes place (at least in the
2303 * current implementation).
2304 *
2305 * @returns VINF_EM_DBG_STEP
2306 *
2307 * @param pVCpu The cross context virtual CPU structure.
2308 *
2309 * @thread VCpu EMT
2310 * @internal
2311 */
2312VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2313{
2314 VMCPU_ASSERT_EMT(pVCpu);
2315
2316 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2317 return VINF_EM_DBG_STEP;
2318}
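
/*
 * Usage sketch (illustrative only): the status code must travel back to the
 * EM loop unchanged, so a hypothetical EMT-side debug handler would simply
 * forward it.
 *
 * @code
 *      // Somewhere inside an EMT-side handler (hypothetical):
 *      return DBGFR3PrgStep(pVCpu);    // propagates VINF_EM_DBG_STEP to EM
 * @endcode
 */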
2319
2320
2321/**
2322 * Inject an NMI into a running VM.
2323 *
2324 * @returns VBox status code.
2325 * @param pUVM The user mode VM structure.
2326 * @param idCpu The ID of the CPU to inject the NMI on.
2327 */
2328VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2329{
2330 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2331 PVM pVM = pUVM->pVM;
2332 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2333 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2334
2335 /** @todo Implement generic NMI injection. */
2336 /** @todo NEM: NMI injection */
2337 if (!HMIsEnabled(pVM))
2338 return VERR_NOT_SUP_BY_NEM;
2339
2340 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2341 return VINF_SUCCESS;
2342}
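
/*
 * Usage sketch (illustrative only): requesting an NMI on VCPU 0.  As noted
 * above this currently depends on HM, so the VERR_NOT_SUP_BY_NEM path is
 * worth handling.
 *
 * @code
 *      int rc = DBGFR3InjectNMI(pUVM, 0);
 *      if (rc == VERR_NOT_SUP_BY_NEM)
 *          LogRel(("NMI injection is not available with this execution engine\n"));
 * @endcode
 */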
2343