VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 91688

Last change on this file since 91688 was 90312, checked in by vboxsync, 3 years ago

VMM/DBGF: Stop all vCPUs when a breakpoint event happens [doxygen fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 76.2 KB
1/* $Id: DBGF.cpp 90312 2021-07-23 15:23:40Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface is working in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface has an asynchronous nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
33 * as the "ping thread" and the "pong thread", respectively. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger then will wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFEventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
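 * @section sec_dbgf_example Illustrative Example
 *
 * The sketch below (illustrative only, not part of the VirtualBox sources)
 * shows the rough shape of a debugger-side loop built on the public APIs
 * implemented in this file. The function name myDebuggerLoop and the minimal
 * event handling are assumptions made for the example; error handling is
 * trimmed.
 *
 * @code
 *  static int myDebuggerLoop(PUVM pUVM)
 *  {
 *      int rc = DBGFR3Attach(pUVM);        // only one debugger per VM
 *      AssertRCReturn(rc, rc);
 *      for (;;)
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          if (RT_FAILURE(rc))
 *              break;
 *          switch (Event.enmType)
 *          {
 *              case DBGFEVENT_HALT_DONE:
 *              case DBGFEVENT_BREAKPOINT:
 *                  // Inspect guest state here, then let the VM run again.
 *                  rc = DBGFR3Resume(pUVM, VMCPUID_ALL);
 *                  break;
 *              case DBGFEVENT_POWERING_OFF:
 *                  // The VM is going away; stop debugging.
 *                  return DBGFR3Detach(pUVM);
 *              default:
 *                  rc = DBGFR3Resume(pUVM, Event.idCpu);
 *                  break;
 *          }
 *      }
 *      DBGFR3Detach(pUVM);
 *      return rc;
 *  }
 * @endcode
 *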
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include "DBGFInternal.h"
79#include <VBox/vmm/vm.h>
80#include <VBox/vmm/uvm.h>
81#include <VBox/err.h>
82
83#include <VBox/log.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/asm.h>
87#include <iprt/time.h>
88#include <iprt/assert.h>
89#include <iprt/stream.h>
90#include <iprt/env.h>
91
92
93/*********************************************************************************************************************************
94* Structures and Typedefs *
95*********************************************************************************************************************************/
96/**
97 * Instruction type returned by dbgfStepGetCurInstrType.
98 */
99typedef enum DBGFSTEPINSTRTYPE
100{
101 DBGFSTEPINSTRTYPE_INVALID = 0,
102 DBGFSTEPINSTRTYPE_OTHER,
103 DBGFSTEPINSTRTYPE_RET,
104 DBGFSTEPINSTRTYPE_CALL,
105 DBGFSTEPINSTRTYPE_END,
106 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
107} DBGFSTEPINSTRTYPE;
108
109
110/*********************************************************************************************************************************
111* Internal Functions *
112*********************************************************************************************************************************/
113DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
114DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
115static int dbgfR3CpuWait(PVMCPU pVCpu);
116static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
117static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
118static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
119static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
120
121
122
123/**
124 * Initializes the DBGF.
125 *
126 * @returns VBox status code.
127 * @param pVM The cross context VM structure.
128 */
129VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
130{
131 PUVM pUVM = pVM->pUVM;
132 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
133 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
134
135 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
136
137 /*
138 * The usual sideways mountain climbing style of init:
139 */
140 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
141 if (RT_SUCCESS(rc))
142 {
143 rc = dbgfR3TraceInit(pVM);
144 if (RT_SUCCESS(rc))
145 {
146 rc = dbgfR3RegInit(pUVM);
147 if (RT_SUCCESS(rc))
148 {
149 rc = dbgfR3AsInit(pUVM);
150 if (RT_SUCCESS(rc))
151 {
152 rc = dbgfR3BpInit(pUVM);
153 if (RT_SUCCESS(rc))
154 {
155 rc = dbgfR3OSInit(pUVM);
156 if (RT_SUCCESS(rc))
157 {
158 rc = dbgfR3PlugInInit(pUVM);
159 if (RT_SUCCESS(rc))
160 {
161 rc = dbgfR3BugCheckInit(pVM);
162 if (RT_SUCCESS(rc))
163 {
164#ifdef VBOX_WITH_DBGF_TRACING
165 rc = dbgfR3TracerInit(pVM);
166#endif
167 if (RT_SUCCESS(rc))
168 {
169 return VINF_SUCCESS;
170 }
171 }
172 dbgfR3PlugInTerm(pUVM);
173 }
174 dbgfR3OSTermPart1(pUVM);
175 dbgfR3OSTermPart2(pUVM);
176 }
177 dbgfR3BpTerm(pUVM);
178 }
179 dbgfR3AsTerm(pUVM);
180 }
181 dbgfR3RegTerm(pUVM);
182 }
183 dbgfR3TraceTerm(pVM);
184 }
185 dbgfR3InfoTerm(pUVM);
186 }
187 return rc;
188}
189
190
191/**
192 * Terminates and cleans up resources allocated by the DBGF.
193 *
194 * @returns VBox status code.
195 * @param pVM The cross context VM structure.
196 */
197VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
198{
199 PUVM pUVM = pVM->pUVM;
200
201#ifdef VBOX_WITH_DBGF_TRACING
202 dbgfR3TracerTerm(pVM);
203#endif
204 dbgfR3OSTermPart1(pUVM);
205 dbgfR3PlugInTerm(pUVM);
206 dbgfR3OSTermPart2(pUVM);
207 dbgfR3BpTerm(pUVM);
208 dbgfR3AsTerm(pUVM);
209 dbgfR3RegTerm(pUVM);
210 dbgfR3TraceTerm(pVM);
211 dbgfR3InfoTerm(pUVM);
212
213 return VINF_SUCCESS;
214}
215
216
217/**
218 * This is for tstCFGM and others to avoid triggering leak detection.
219 *
220 * @returns VBox status code.
221 * @param pUVM The user mode VM structure.
222 */
223VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
224{
225 dbgfR3InfoTerm(pUVM);
226}
227
228
229/**
230 * Called when the VM is powered off to detach debuggers.
231 *
232 * @param pVM The cross context VM structure.
233 */
234VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
235{
236 /*
237 * Send a termination event to any attached debugger.
238 */
239 if (pVM->dbgf.s.fAttached)
240 {
241 PVMCPU pVCpu = VMMGetCpu(pVM);
242 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
243 AssertLogRelRC(rc);
244
245 /*
246 * Clear the FF so we won't get confused later on.
247 */
248 VM_FF_CLEAR(pVM, VM_FF_DBGF);
249 }
250}
251
252
253/**
254 * Applies relocations to data and code managed by this
255 * component. This function will be called at init and
256 * whenever the VMM needs to relocate itself inside the GC.
257 *
258 * @param pVM The cross context VM structure.
259 * @param offDelta Relocation delta relative to old location.
260 */
261VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
262{
263 dbgfR3TraceRelocate(pVM);
264 dbgfR3AsRelocate(pVM->pUVM, offDelta);
265}
266
267
268/**
269 * Waits a little while for a debugger to attach.
270 *
271 * @returns True if a debugger has attached.
272 * @param pVM The cross context VM structure.
273 * @param pVCpu The cross context per CPU structure.
274 * @param enmEvent Event.
275 *
276 * @thread EMT(pVCpu)
277 */
278bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
279{
280 /*
281 * First a message.
282 */
283#if !defined(DEBUG)
284 int cWait = 10;
285#else
286 int cWait = !VM_IS_RAW_MODE_ENABLED(pVM)
287 && ( enmEvent == DBGFEVENT_ASSERTION_HYPER
288 || enmEvent == DBGFEVENT_FATAL_ERROR)
289 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
290 ? 10
291 : 150;
292#endif
293 RTStrmPrintf(g_pStdErr, "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n",
294 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
295 RTStrmFlush(g_pStdErr);
296 while (cWait > 0)
297 {
298 RTThreadSleep(100);
299 if (pVM->dbgf.s.fAttached)
300 {
301 RTStrmPrintf(g_pStdErr, "Attached!\n");
302 RTStrmFlush(g_pStdErr);
303 return true;
304 }
305
306 /* Process rendezvous (debugger attaching involves such). */
307 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
308 {
309 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
310 if (rc != VINF_SUCCESS)
311 {
312 /** @todo Ignoring these could be bad. */
313 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
314 RTStrmFlush(g_pStdErr);
315 }
316 }
317
318 /* Process priority stuff. */
319 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
320 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
321 {
322 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
323 if (rc == VINF_SUCCESS)
324 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
325 if (rc != VINF_SUCCESS)
326 {
327 /** @todo Ignoring these could be bad. */
328 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
329 RTStrmFlush(g_pStdErr);
330 }
331 }
332
333 /* next */
334 if (!(cWait % 10))
335 {
336 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
337 RTStrmFlush(g_pStdErr);
338 }
339 cWait--;
340 }
341
342 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
343 RTStrmFlush(g_pStdErr);
344 return false;
345}
346
347
348/**
349 * Forced action callback.
350 *
351 * The VMM will call this from its main loop when either VM_FF_DBGF or
352 * VMCPU_FF_DBGF is set.
353 *
354 * The function checks for and executes pending commands from the debugger.
355 * Then it checks for pending debug events and serves these.
356 *
357 * @returns VINF_SUCCESS normally.
358 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
359 * @param pVM The cross context VM structure.
360 * @param pVCpu The cross context per CPU structure.
361 */
362VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
363{
364 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
365
366 /*
367 * Dispatch pending events.
368 */
369 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
370 {
371 if ( pVCpu->dbgf.s.cEvents > 0
372 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
373 {
374 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
375 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
376 }
377
378 /*
379 * Command pending? Process it.
380 */
381 PUVMCPU pUVCpu = pVCpu->pUVCpu;
382 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
383 {
384 bool fResumeExecution;
385 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
386 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
387 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
388 if (!fResumeExecution)
389 rcStrict2 = dbgfR3CpuWait(pVCpu);
390 if ( rcStrict2 != VINF_SUCCESS
391 && ( rcStrict == VINF_SUCCESS
392 || RT_FAILURE(rcStrict2)
393 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
394 rcStrict = rcStrict2;
395 }
396 }
397
398 return VBOXSTRICTRC_TODO(rcStrict);
399}
400
401
402/**
403 * Try to determine the event context.
404 *
405 * @returns debug event context.
406 * @param pVCpu The cross context vCPU structure.
407 */
408static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
409{
410 switch (EMGetState(pVCpu))
411 {
412 case EMSTATE_HM:
413 case EMSTATE_NEM:
414 case EMSTATE_DEBUG_GUEST_HM:
415 case EMSTATE_DEBUG_GUEST_NEM:
416 return DBGFEVENTCTX_HM;
417
418 case EMSTATE_IEM:
419 case EMSTATE_RAW:
420 case EMSTATE_IEM_THEN_REM:
421 case EMSTATE_DEBUG_GUEST_IEM:
422 case EMSTATE_DEBUG_GUEST_RAW:
423 return DBGFEVENTCTX_RAW;
424
425
426 case EMSTATE_REM:
427 case EMSTATE_DEBUG_GUEST_REM:
428 return DBGFEVENTCTX_REM;
429
430 case EMSTATE_DEBUG_HYPER:
431 case EMSTATE_GURU_MEDITATION:
432 return DBGFEVENTCTX_HYPER;
433
434 default:
435 return DBGFEVENTCTX_OTHER;
436 }
437}
438
439
440/**
441 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
442 *
443 * @returns VBox status code.
444 * @param pVM The cross context VM structure.
445 * @param pVCpu The CPU sending the event.
446 * @param enmType The event type to send.
447 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
448 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
449 * @param cbPayload The size of the event payload, optional.
450 */
451static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
452 void const *pvPayload, size_t cbPayload)
453{
454 PUVM pUVM = pVM->pUVM;
455 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
456
457 /*
458 * Massage the input a little.
459 */
460 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
461 if (enmCtx == DBGFEVENTCTX_INVALID)
462 enmCtx = dbgfR3FigureEventCtx(pVCpu);
463
464 /*
465 * Put the event into the ring buffer.
466 */
467 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
468
469 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
470 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
471 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
472 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
473
474 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
475
476#ifdef DEBUG
477 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
478#endif
479 pEvent->enmType = enmType;
480 pEvent->enmCtx = enmCtx;
481 pEvent->idCpu = pVCpu->idCpu;
482 pEvent->uReserved = 0;
483 if (cbPayload)
484 memcpy(&pEvent->u, pvPayload, cbPayload);
485
486 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
487
488 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
489
490 /*
491 * Signal the debugger.
492 */
493 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
494}
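
/*
 * Worked illustration (not part of the original file) of the ring indexing
 * used above: the indices are kept modulo cDbgEvtMax, which dbgfR3Attach()
 * sizes as cCpus * 5 + 10.  With a single vCPU that is 15 slots, so after
 * filling slot 14 the next write index becomes (14 + 1) % 15 = 0.  The ring
 * counts as empty when idxDbgEvtRead == idxDbgEvtWrite, which is what
 * DBGFR3EventWait() polls for; the completely-full case is still a to-do
 * (see the comment above).
 */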
495
496
497/**
498 * Send event and wait for the debugger to respond.
499 *
500 * @returns Strict VBox status code.
501 * @param pVM The cross context VM structure.
502 * @param pVCpu The CPU sending the event.
503 * @param enmType The event type to send.
504 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
505 */
506DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
507{
508 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
509 if (RT_SUCCESS(rc))
510 rc = dbgfR3CpuWait(pVCpu);
511 return rc;
512}
513
514
515/**
516 * Send event and wait for the debugger to respond, extended version.
517 *
518 * @returns Strict VBox status code.
519 * @param pVM The cross context VM structure.
520 * @param pVCpu The CPU sending the event.
521 * @param enmType The event type to send.
522 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
523 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
524 * @param cbPayload The size of the event payload, optional.
525 */
526DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
527 void const *pvPayload, size_t cbPayload)
528{
529 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
530 if (RT_SUCCESS(rc))
531 rc = dbgfR3CpuWait(pVCpu);
532 return rc;
533}
534
535
536/**
537 * Send event but do NOT wait for the debugger.
538 *
539 * Currently only used by dbgfR3CpuCmd().
540 *
541 * @param pVM The cross context VM structure.
542 * @param pVCpu The CPU sending the event.
543 * @param enmType The event type to send.
544 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
545 */
546DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
547{
548 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
549}
550
551
552/**
553 * The common event prologue code.
554 *
555 * It will make sure someone is attached, and perhaps process any high priority
556 * pending actions (none yet).
557 *
558 * @returns VBox status code.
559 * @param pVM The cross context VM structure.
560 * @param pVCpu The vCPU cross context structure.
561 * @param enmEvent The event to be sent.
562 */
563static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
564{
565 /*
566 * Check if a debugger is attached.
567 */
568 if ( !pVM->dbgf.s.fAttached
569 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
570 {
571 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
572 return VERR_DBGF_NOT_ATTACHED;
573 }
574
575 /*
576 * Look through pending commands and finish those which make sense now.
577 */
578 /** @todo Process/purge pending commands. */
579 //int rc = DBGFR3VMMForcedAction(pVM);
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Processes a pending event on the current CPU.
586 *
587 * This is called by EM in response to VINF_EM_DBG_EVENT.
588 *
589 * @returns Strict VBox status code.
590 * @param pVM The cross context VM structure.
591 * @param pVCpu The cross context per CPU structure.
592 *
593 * @thread EMT(pVCpu)
594 */
595VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
596{
597 VMCPU_ASSERT_EMT(pVCpu);
598 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
599
600 /*
601 * Check that we've got an event first.
602 */
603 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
604 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
605 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
606
607 /*
608 * Make sure we've got a debugger and are allowed to speak to it.
609 */
610 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
611 if (RT_FAILURE(rc))
612 {
613 /** @todo drop them events? */
614 return rc; /** @todo this will cause trouble if we're here via an FF! */
615 }
616
617 /*
618 * Send the event and mark it as ignored.
619 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
620 */
621 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
622 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
623 return rcStrict;
624}
625
626
627/**
628 * Send a generic debugger event which takes no data.
629 *
630 * @returns VBox status code.
631 * @param pVM The cross context VM structure.
632 * @param enmEvent The event to send.
633 * @internal
634 */
635VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
636{
637 PVMCPU pVCpu = VMMGetCpu(pVM);
638 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
639
640 /*
641 * Do stepping filtering.
642 */
643 /** @todo Would be better if we did some of this inside the execution
644 * engines. */
645 if ( enmEvent == DBGFEVENT_STEPPED
646 || enmEvent == DBGFEVENT_STEPPED_HYPER)
647 {
648 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
649 return VINF_EM_DBG_STEP;
650 }
651
652 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
653 if (RT_FAILURE(rc))
654 return rc;
655
656 /*
657 * Send the event and process the reply communication.
658 */
659 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
660}
661
662
663/**
664 * Send a debugger event which takes the full source file location.
665 *
666 * @returns VBox status code.
667 * @param pVM The cross context VM structure.
668 * @param enmEvent The event to send.
669 * @param pszFile Source file.
670 * @param uLine Line number in source file.
671 * @param pszFunction Function name.
672 * @param pszFormat Message which accompanies the event.
673 * @param ... Message arguments.
674 * @internal
675 */
676VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
677{
678 va_list args;
679 va_start(args, pszFormat);
680 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
681 va_end(args);
682 return rc;
683}
684
685
686/**
687 * Send a debugger event which takes the full source file location.
688 *
689 * @returns VBox status code.
690 * @param pVM The cross context VM structure.
691 * @param enmEvent The event to send.
692 * @param pszFile Source file.
693 * @param uLine Line number in source file.
694 * @param pszFunction Function name.
695 * @param pszFormat Message which accompanies the event.
696 * @param args Message arguments.
697 * @internal
698 */
699VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
700{
701 PVMCPU pVCpu = VMMGetCpu(pVM);
702 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
703
704 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
705 if (RT_FAILURE(rc))
706 return rc;
707
708 /*
709 * Format the message.
710 */
711 char *pszMessage = NULL;
712 char szMessage[8192];
713 if (pszFormat && *pszFormat)
714 {
715 pszMessage = &szMessage[0];
716 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
717 }
718
719 /*
720 * Send the event and process the reply communication.
721 */
722 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
723 DbgEvent.u.Src.pszFile = pszFile;
724 DbgEvent.u.Src.uLine = uLine;
725 DbgEvent.u.Src.pszFunction = pszFunction;
726 DbgEvent.u.Src.pszMessage = pszMessage;
727 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
728}
729
730
731/**
732 * Send a debugger event which takes the two assertion messages.
733 *
734 * @returns VBox status code.
735 * @param pVM The cross context VM structure.
736 * @param enmEvent The event to send.
737 * @param pszMsg1 First assertion message.
738 * @param pszMsg2 Second assertion message.
739 */
740VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
741{
742 PVMCPU pVCpu = VMMGetCpu(pVM);
743 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
744
745 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
746 if (RT_FAILURE(rc))
747 return rc;
748
749 /*
750 * Send the event and process the reply communication.
751 */
752 DBGFEVENT DbgEvent;
753 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
754 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
755 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
756}
757
758
759/**
760 * Breakpoint was hit somewhere.
761 * Figure out which breakpoint it is and notify the debugger.
762 *
763 * @returns VBox status code.
764 * @param pVM The cross context VM structure.
765 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
766 */
767VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
768{
769 PVMCPU pVCpu = VMMGetCpu(pVM);
770 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
771
772 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
773 if (RT_FAILURE(rc))
774 return rc;
775
776 /*
777 * Halt all other vCPUs to give the user the ability to inspect the other
778 * vCPU states as well.
779 */
780 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
781 if (RT_FAILURE(rc))
782 return rc;
783
784 /*
785 * Send the event and process the reply communication.
786 */
787 DBGFEVENT DbgEvent;
788 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
789 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
790 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
791 {
792 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
793 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
794 }
795
796 return VERR_DBGF_IPE_1;
797}
798
799
800/**
801 * Returns whether the given vCPU is waiting for the debugger.
802 *
803 * @returns Flag whether the vCPU is currently waiting for the debugger.
804 * @param pUVCpu The user mode vCPU structure.
805 */
806DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
807{
808 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
809}
810
811
812/**
813 * Checks whether the given vCPU is waiting in the debugger.
814 *
815 * @returns Flag whether the indicated vCPU is halted; when VMCPUID_ALL
816 * is given, true is returned when at least one vCPU is halted.
817 * @param pUVM The user mode VM structure.
818 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
819 */
820DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
821{
822 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
823
824 /* Check that either the given vCPU or all are actually halted. */
825 if (idCpu != VMCPUID_ALL)
826 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
827
828 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
829 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
830 return true;
831 return false;
832}
833
834
835/**
836 * Gets the pending debug command for this EMT/CPU, replacing it with
837 * DBGFCMD_NO_COMMAND.
838 *
839 * @returns Pending command.
840 * @param pUVCpu The user mode virtual CPU structure.
841 * @thread EMT(pUVCpu)
842 */
843DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
844{
845 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
846 Log2(("DBGF: Getting command: %d\n", enmCmd));
847 return enmCmd;
848}
849
850
851/**
852 * Send a debug command to a CPU, making sure to notify it.
853 *
854 * @returns VBox status code.
855 * @param pUVCpu The user mode virtual CPU structure.
856 * @param enmCmd The command to submit to the CPU.
857 */
858DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
859{
860 Log2(("DBGF: Setting command to %d\n", enmCmd));
861 Assert(enmCmd != DBGFCMD_NO_COMMAND);
862 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
863
864 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
865 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
866
867 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
868 return VINF_SUCCESS;
869}
870
871
872/**
873 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
874 */
875static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
876{
877 RT_NOREF(pvUser);
878
879 VMCPU_ASSERT_EMT(pVCpu);
880 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
881
882 PUVMCPU pUVCpu = pVCpu->pUVCpu;
883 if ( pVCpu != (PVMCPU)pvUser
884 && !dbgfR3CpuIsHalted(pUVCpu))
885 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
886
887 return VINF_SUCCESS;
888}
889
890
891/**
892 * Halts all vCPUs of the given VM except for the given one.
893 *
894 * @returns VBox status code.
895 * @param pVM The cross context VM structure.
896 * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
897 */
898static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
899{
900 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
901}
902
903
904/**
905 * Waits for the debugger to respond.
906 *
907 * @returns VBox status code. (clarify)
908 * @param pVCpu The cross context vCPU structure.
909 */
910static int dbgfR3CpuWait(PVMCPU pVCpu)
911{
912 PVM pVM = pVCpu->CTX_SUFF(pVM);
913 PUVMCPU pUVCpu = pVCpu->pUVCpu;
914
915 LogFlow(("dbgfR3CpuWait:\n"));
916 int rcRet = VINF_SUCCESS;
917
918 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
919
920 /*
921 * Waits for the debugger to reply (i.e. issue a command).
922 */
923 for (;;)
924 {
925 /*
926 * Wait.
927 */
928 for (;;)
929 {
930 /*
931 * Process forced flags before we go sleep.
932 */
933 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
934 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
935 {
936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
937 break;
938
939 int rc;
940 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
941 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
942 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
943 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
944 {
945 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
946 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
947 if (rc == VINF_SUCCESS)
948 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
949 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
950 }
951 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
952 {
953 VMSTATE enmState = VMR3GetState(pVM);
954 switch (enmState)
955 {
956 case VMSTATE_FATAL_ERROR:
957 case VMSTATE_FATAL_ERROR_LS:
958 case VMSTATE_GURU_MEDITATION:
959 case VMSTATE_GURU_MEDITATION_LS:
960 rc = VINF_EM_SUSPEND;
961 break;
962 case VMSTATE_DESTROYING:
963 rc = VINF_EM_TERMINATE;
964 break;
965 default:
966 rc = VERR_DBGF_IPE_1;
967 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
968 }
969 }
970 else
971 rc = VINF_SUCCESS;
972 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
973 {
974 switch (rc)
975 {
976 case VINF_EM_DBG_BREAKPOINT:
977 case VINF_EM_DBG_STEPPED:
978 case VINF_EM_DBG_STEP:
979 case VINF_EM_DBG_STOP:
980 case VINF_EM_DBG_EVENT:
981 AssertMsgFailed(("rc=%Rrc\n", rc));
982 break;
983
984 /* return straight away */
985 case VINF_EM_TERMINATE:
986 case VINF_EM_OFF:
987 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
988 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
989 return rc;
990
991 /* remember return code. */
992 default:
993 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
994 RT_FALL_THRU();
995 case VINF_EM_RESET:
996 case VINF_EM_SUSPEND:
997 case VINF_EM_HALT:
998 case VINF_EM_RESUME:
999 case VINF_EM_RESCHEDULE:
1000 case VINF_EM_RESCHEDULE_REM:
1001 case VINF_EM_RESCHEDULE_RAW:
1002 if (rc < rcRet || rcRet == VINF_SUCCESS)
1003 rcRet = rc;
1004 break;
1005 }
1006 }
1007 else if (RT_FAILURE(rc))
1008 {
1009 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1010 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1011 return rc;
1012 }
1013 }
1014 else if (pVM->dbgf.s.fAttached)
1015 {
1016 int rc = VMR3WaitU(pUVCpu);
1017 if (RT_FAILURE(rc))
1018 {
1019 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1020 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1021 return rc;
1022 }
1023 }
1024 else
1025 {
1026 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1027 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1028 return rcRet;
1029 }
1030 }
1031
1032 /*
1033 * Process the command.
1034 */
1035 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1036 bool fResumeExecution;
1037 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1038 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1039 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1040 if (fResumeExecution)
1041 {
1042 if (RT_FAILURE(rc))
1043 rcRet = rc;
1044 else if ( rc >= VINF_EM_FIRST
1045 && rc <= VINF_EM_LAST
1046 && (rc < rcRet || rcRet == VINF_SUCCESS))
1047 rcRet = rc;
1048 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1049 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1050 return rcRet;
1051 }
1052 }
1053}
1054
1055
1056/**
1057 * Executes command from debugger.
1058 *
1059 * The caller is responsible for waiting or resuming execution based on the
1060 * value returned in the *pfResumeExecution indicator.
1061 *
1062 * @returns VBox status code. (clarify!)
1063 * @param pVCpu The cross context vCPU structure.
1064 * @param enmCmd The command in question.
1065 * @param pCmdData Pointer to the command data.
1066 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1067 */
1068static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1069{
1070 RT_NOREF(pCmdData); /* for later */
1071
1072 /*
1073 * The cases in this switch return directly if there is no event to send.
1074 */
1075 DBGFEVENTTYPE enmEvent;
1076 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1077 switch (enmCmd)
1078 {
1079 /*
1080 * Halt is answered by an event saying that we've halted.
1081 */
1082 case DBGFCMD_HALT:
1083 {
1084 *pfResumeExecution = false;
1085 enmEvent = DBGFEVENT_HALT_DONE;
1086 break;
1087 }
1088
1089
1090 /*
1091 * Resume is not answered; we just resume execution.
1092 */
1093 case DBGFCMD_GO:
1094 {
1095 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1096 *pfResumeExecution = true;
1097 return VINF_SUCCESS;
1098 }
1099
1100 /** @todo implement (and define) the rest of the commands. */
1101
1102 /*
1103 * Single step, with trace into.
1104 */
1105 case DBGFCMD_SINGLE_STEP:
1106 {
1107 Log2(("Single step\n"));
1108 PVM pVM = pVCpu->CTX_SUFF(pVM);
1109 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1110 {
1111 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1112 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1113 }
1114 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1115 {
1116 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1117 *pfResumeExecution = true;
1118 return VINF_EM_DBG_STEP;
1119 }
1120 /* Stop after zero steps. Nonsense, but whatever. */
1121 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1122 *pfResumeExecution = false;
1123 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1124 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1125 break;
1126 }
1127
1128 /*
1129 * Default is to send an invalid command event.
1130 */
1131 default:
1132 {
1133 *pfResumeExecution = false;
1134 enmEvent = DBGFEVENT_INVALID_COMMAND;
1135 break;
1136 }
1137 }
1138
1139 /*
1140 * Send the pending event.
1141 */
1142 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1143 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1144 AssertRCStmt(rc, *pfResumeExecution = true);
1145 return rc;
1146}
1147
1148
1149/**
1150 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1151 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1152 */
1153static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1154{
1155 PUVM pUVM = pVM->pUVM;
1156 int *prcAttach = (int *)pvUser;
1157 RT_NOREF(pVCpu);
1158
1159 if (pVM->dbgf.s.fAttached)
1160 {
1161 Log(("dbgfR3Attach: Debugger already attached\n"));
1162 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1163 return VINF_SUCCESS;
1164 }
1165
1166 /*
1167 * The per-CPU bits.
1168 */
1169 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1170 {
1171 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1172
1173 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1174 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1175 }
1176
1177 /*
1178 * Init of the VM -> Debugger communication part living in the global VM structure.
1179 */
1180 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1181 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1182 pUVM->dbgf.s.idxDbgEvtRead = 0;
1183 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1184 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1185 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1186 int rc;
1187 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1188 if (pUVM->dbgf.s.paDbgEvts)
1189 {
1190 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1191 if (RT_SUCCESS(rc))
1192 {
1193 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1194 if (RT_SUCCESS(rc))
1195 {
1196 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1197 if (RT_SUCCESS(rc))
1198 {
1199 /*
1200 * At last, set the attached flag.
1201 */
1202 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1203 *prcAttach = VINF_SUCCESS;
1204 return VINF_SUCCESS;
1205 }
1206
1207 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1208 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1209 }
1210 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1211 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1212 }
1213 }
1214 else
1215 rc = VERR_NO_MEMORY;
1216
1217 *prcAttach = rc;
1218 return VINF_SUCCESS;
1219}
1220
1221
1222/**
1223 * Attaches a debugger to the specified VM.
1224 *
1225 * Only one debugger at a time.
1226 *
1227 * @returns VBox status code.
1228 * @param pUVM The user mode VM handle.
1229 */
1230VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1231{
1232 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1233 PVM pVM = pUVM->pVM;
1234 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1235
1236 /*
1237 * Call the VM, use EMT rendezvous for serialization.
1238 */
1239 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1240 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1241 if (RT_SUCCESS(rc))
1242 rc = rcAttach;
1243
1244 return rc;
1245}
1246
1247
1248/**
1249 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1250 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1251 */
1252static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1253{
1254 if (pVCpu->idCpu == 0)
1255 {
1256 PUVM pUVM = (PUVM)pvUser;
1257
1258 /*
1259 * Per-CPU cleanup.
1260 */
1261 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1262 {
1263 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1264
1265 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1266 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1267 }
1268
1269 /*
1270 * De-init of the VM -> Debugger communication part living in the global VM structure.
1271 */
1272 if (pUVM->dbgf.s.paDbgEvts)
1273 {
1274 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1275 pUVM->dbgf.s.paDbgEvts = NULL;
1276 }
1277
1278 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1279 {
1280 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1281 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1282 }
1283
1284 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1285 {
1286 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1287 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1288 }
1289
1290 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1291 {
1292 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1293 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1294 }
1295
1296 pUVM->dbgf.s.cDbgEvtMax = 0;
1297 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1298 pUVM->dbgf.s.idxDbgEvtRead = 0;
1299 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1300 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1301 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1302
1303 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1304 }
1305
1306 return VINF_SUCCESS;
1307}
1308
1309
1310/**
1311 * Detaches a debugger from the specified VM.
1312 *
1313 * Caller must be attached to the VM.
1314 *
1315 * @returns VBox status code.
1316 * @param pUVM The user mode VM handle.
1317 */
1318VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1319{
1320 LogFlow(("DBGFR3Detach:\n"));
1321
1322 /*
1323 * Validate input. The UVM handle shall be valid, the VM handle might be
1324 * in the process of being destroyed already, so deal quietly with that.
1325 */
1326 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1327 PVM pVM = pUVM->pVM;
1328 if (!VM_IS_VALID_EXT(pVM))
1329 return VERR_INVALID_VM_HANDLE;
1330
1331 /*
1332 * Check if attached.
1333 */
1334 if (!pVM->dbgf.s.fAttached)
1335 return VERR_DBGF_NOT_ATTACHED;
1336
1337 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1338}
1339
1340
1341/**
1342 * Wait for a debug event.
1343 *
1344 * @returns VBox status code. Will not return VERR_INTERRUPTED.
1345 * @param pUVM The user mode VM handle.
1346 * @param cMillies Number of millis to wait.
1347 * @param pEvent Where to store the event data.
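 *
 * @par Example
 *      A polling sketch (illustrative only, not from the original sources);
 *      pUVM is the user mode VM handle and myHandleEvent / myHandleFatalError
 *      are hypothetical helpers:
 * @code
 *      DBGFEVENT Event;
 *      int rc = DBGFR3EventWait(pUVM, 100, &Event);    // wait up to 100 ms
 *      if (RT_SUCCESS(rc))
 *          myHandleEvent(pUVM, &Event);
 *      else if (rc != VERR_TIMEOUT)
 *          myHandleFatalError(rc);                     // e.g. VERR_DBGF_NOT_ATTACHED
 * @endcode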
1348 */
1349VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1350{
1351 /*
1352 * Check state.
1353 */
1354 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1355 PVM pVM = pUVM->pVM;
1356 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1357 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1358
1359 RT_BZERO(pEvent, sizeof(*pEvent));
1360
1361 /*
1362 * Wait for an event to arrive if there are none.
1363 */
1364 int rc = VINF_SUCCESS;
1365 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1366 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1367 {
1368 do
1369 {
1370 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1371 } while ( RT_SUCCESS(rc)
1372 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1373 }
1374
1375 if (RT_SUCCESS(rc))
1376 {
1377 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1378
1379 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1380 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1381 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1382 }
1383
1384 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1385 return rc;
1386}
1387
1388
1389/**
1390 * Halts VM execution.
1391 *
1392 * After calling this the VM isn't actually halted until a DBGFEVENT_HALT_DONE event
1393 * arrives. Until that time it's not possible to issue any new commands.
1394 *
1395 * @returns VBox status code.
1396 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1397 * are halted.
1398 * @param pUVM The user mode VM handle.
1399 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
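 *
 * @par Example
 *      Sketch (illustrative only, not from the original sources) of halting
 *      everything and waiting for the halt to be acknowledged; other events
 *      are simply skipped here for brevity:
 * @code
 *      int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
 *      if (rc == VINF_SUCCESS)     // at least one vCPU was actually running
 *      {
 *          DBGFEVENT Event;
 *          do
 *              rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 *      }
 * @endcode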
1400 */
1401VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1402{
1403 /*
1404 * Check state.
1405 */
1406 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1407 PVM pVM = pUVM->pVM;
1408 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1409 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1410 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1411
1412 /*
1413 * Halt the requested CPUs as needed.
1414 */
1415 int rc;
1416 if (idCpu != VMCPUID_ALL)
1417 {
1418 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1419 if (!dbgfR3CpuIsHalted(pUVCpu))
1420 {
1421 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1422 rc = VINF_SUCCESS;
1423 }
1424 else
1425 rc = VWRN_DBGF_ALREADY_HALTED;
1426 }
1427 else
1428 {
1429 rc = VWRN_DBGF_ALREADY_HALTED;
1430 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1431 {
1432 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1433 if (!dbgfR3CpuIsHalted(pUVCpu))
1434 {
1435 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1436 rc = VINF_SUCCESS;
1437 }
1438 }
1439 }
1440
1441 return rc;
1442}
1443
1444
1445/**
1446 * Checks if any of the specified vCPUs have been halted by the debugger.
1447 *
1448 * @returns True if at least one of the specified vCPUs is halted.
1449 * @returns False if no vCPU is halted.
1450 * @param pUVM The user mode VM handle.
1451 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1452 * at least a single vCPU is halted in the debugger.
1453 */
1454VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1455{
1456 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1457 PVM pVM = pUVM->pVM;
1458 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1459 AssertReturn(pVM->dbgf.s.fAttached, false);
1460
1461 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1462}
1463
1464
1465/**
1466 * Checks if the debugger can wait for events or not.
1467 *
1468 * This function is only used by lazy, multiplexing debuggers. :-)
1469 *
1470 * @returns VBox status code.
1471 * @retval VINF_SUCCESS if waitable.
1472 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1473 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1474 * (not asserted) or if the handle is invalid (asserted).
1475 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1476 *
1477 * @param pUVM The user mode VM handle.
1478 */
1479VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1480{
1481 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1482
1483 /* Note! There is a slight race here, unfortunately. */
1484 PVM pVM = pUVM->pVM;
1485 if (!RT_VALID_PTR(pVM))
1486 return VERR_INVALID_VM_HANDLE;
1487 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1488 return VERR_INVALID_VM_HANDLE;
1489 if (!pVM->dbgf.s.fAttached)
1490 return VERR_DBGF_NOT_ATTACHED;
1491
1492 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1493 return VINF_SUCCESS;
1494}
1495
1496
1497/**
1498 * Resumes VM execution.
1499 *
1500 * There is no receipt event on this command.
1501 *
1502 * @returns VBox status code.
1503 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1504 * @param pUVM The user mode VM handle.
1505 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1506 */
1507VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1508{
1509 /*
1510 * Validate input and attachment state.
1511 */
1512 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1513 PVM pVM = pUVM->pVM;
1514 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1515 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1516
1517 /*
1518 * Ping the halted emulation threads, telling them to run.
1519 */
1520 int rc = VWRN_DBGF_ALREADY_RUNNING;
1521 if (idCpu != VMCPUID_ALL)
1522 {
1523 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1524 if (dbgfR3CpuIsHalted(pUVCpu))
1525 {
1526 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1527 AssertRC(rc);
1528 }
1529 }
1530 else
1531 {
1532 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1533 {
1534 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1535 if (dbgfR3CpuIsHalted(pUVCpu))
1536 {
1537 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1538 AssertRC(rc2);
1539 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1540 rc = rc2;
1541 }
1542 }
1543 }
1544
1545 return rc;
1546}
1547
1548
1549/**
1550 * Classifies the current instruction.
1551 *
1552 * @returns Type of instruction.
1553 * @param pVM The cross context VM structure.
1554 * @param pVCpu The current CPU.
1555 * @thread EMT(pVCpu)
1556 */
1557static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1558{
1559 /*
1560 * Read the instruction.
1561 */
1562 size_t cbRead = 0;
1563 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1564 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1565 if (RT_SUCCESS(rc))
1566 {
1567 /*
1568 * Do minimal parsing. No real need to involve the disassembler here.
1569 */
1570 uint8_t *pb = abOpcode;
1571 for (;;)
1572 {
1573 switch (*pb++)
1574 {
1575 default:
1576 return DBGFSTEPINSTRTYPE_OTHER;
1577
1578 case 0xe8: /* call rel16/32 */
1579 case 0x9a: /* call farptr */
1580 case 0xcc: /* int3 */
1581 case 0xcd: /* int xx */
1582 // case 0xce: /* into */
1583 return DBGFSTEPINSTRTYPE_CALL;
1584
1585 case 0xc2: /* ret xx */
1586 case 0xc3: /* ret */
1587 case 0xca: /* retf xx */
1588 case 0xcb: /* retf */
1589 case 0xcf: /* iret */
1590 return DBGFSTEPINSTRTYPE_RET;
1591
1592 case 0xff:
1593 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1594 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1595 return DBGFSTEPINSTRTYPE_CALL;
1596 return DBGFSTEPINSTRTYPE_OTHER;
1597
1598 case 0x0f:
1599 switch (*pb++)
1600 {
1601 case 0x05: /* syscall */
1602 case 0x34: /* sysenter */
1603 return DBGFSTEPINSTRTYPE_CALL;
1604 case 0x07: /* sysret */
1605 case 0x35: /* sysexit */
1606 return DBGFSTEPINSTRTYPE_RET;
1607 }
1608 break;
1609
1610 /* Must handle some REX prefixes. So we do all normal prefixes. */
1611 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1612 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1613 if (!CPUMIsGuestIn64BitCode(pVCpu))
1614 return DBGFSTEPINSTRTYPE_OTHER;
1615 break;
1616
1617 case 0x2e: /* CS */
1618 case 0x36: /* SS */
1619 case 0x3e: /* DS */
1620 case 0x26: /* ES */
1621 case 0x64: /* FS */
1622 case 0x65: /* GS */
1623 case 0x66: /* op size */
1624 case 0x67: /* addr size */
1625 case 0xf0: /* lock */
1626 case 0xf2: /* REPNZ */
1627 case 0xf3: /* REPZ */
1628 break;
1629 }
1630 }
1631 }
1632
1633 return DBGFSTEPINSTRTYPE_INVALID;
1634}
1635
1636
1637/**
1638 * Checks if the stepping has reached a stop point.
1639 *
1640 * Called when raising a stepped event.
1641 *
1642 * @returns true if the event should be raised, false if we should take one more
1643 * step first.
1644 * @param pVM The cross context VM structure.
1645 * @param pVCpu The cross context per CPU structure of the calling EMT.
1646 * @thread EMT(pVCpu)
1647 */
1648static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1649{
1650 /*
1651 * Check valid pVCpu and that it matches the CPU one stepping.
1652 */
1653 if (pVCpu)
1654 {
1655 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1656 {
1657 /*
1658 * Increase the number of steps and see if we've reached the max.
1659 */
1660 pVM->dbgf.s.SteppingFilter.cSteps++;
1661 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1662 {
1663 /*
1664 * Check PC and SP address filtering.
1665 */
1666 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1667 {
1668 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1669 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1670 return true;
1671 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1672 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1673 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1674 return true;
1675 }
1676
1677 /*
1678 * Do step-over filtering separate from the step-into one.
1679 */
1680 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1681 {
1682 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1683 switch (enmType)
1684 {
1685 default:
1686 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1687 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1688 break;
1689 return true;
1690 case DBGFSTEPINSTRTYPE_CALL:
1691 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1692 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1693 return true;
1694 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1695 break;
1696 case DBGFSTEPINSTRTYPE_RET:
1697 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1698 {
1699 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1700 return true;
1701 /* If after return, we use the cMaxStep limit to stop the next time. */
1702 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1703 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1704 }
1705 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1706 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1707 break;
1708 }
1709 return false;
1710 }
1711 /*
1712 * Filtered step-into.
1713 */
1714 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1715 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1716 {
1717 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1718 switch (enmType)
1719 {
1720 default:
1721 break;
1722 case DBGFSTEPINSTRTYPE_CALL:
1723 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1724 return true;
1725 break;
1726 case DBGFSTEPINSTRTYPE_RET:
1727 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1728 return true;
1729 /* If after return, we use the cMaxStep limit to stop the next time. */
1730 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1731 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1732 break;
1733 }
1734 return false;
1735 }
1736 }
1737 }
1738 }
1739
1740 return true;
1741}
1742
1743
1744/**
1745 * Step Into.
1746 *
1747 * A single step event is generated from this command.
1748 * The current implementation is not reliable, so don't rely on the event coming.
1749 *
1750 * @returns VBox status code.
1751 * @param pUVM The user mode VM handle.
1752 * @param idCpu The ID of the CPU to single step on.
1753 */
1754VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1755{
1756 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1757}
1758
1759
1760/**
1761 * Full fledged step.
1762 *
1763 * This extended stepping API allows for doing multiple steps before raising an
1764 * event, helping implement step over, step out and other more advanced
1765 * features.
1766 *
1767 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1768 * DBGFEVENT_STEPPED_HYPER event. However the stepping may be interrupted by other
1769 * events, which will abort the stepping.
1770 *
1771 * The stop on pop area feature is for safeguarding step out.
1772 *
1773 * Please note though, that it will always use stepping and never breakpoints.
1774 * While this allows for a much greater flexibility it can at times be rather
1775 * slow.
1776 *
1777 * @returns VBox status code.
1778 * @param pUVM The user mode VM handle.
1779 * @param idCpu The ID of the CPU to single step on.
1780 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1781 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1782 * always be specified.
1783 * @param pStopPcAddr Address to stop executing at. Completely ignored
1784 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1785 * @param pStopPopAddr Stack address that SP must be lower than when
1786 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1787 * @param cbStopPop The range starting at @a pStopPopAddr which is
1788 * considered to be within the same thread stack. Note
1789 * that the API allows @a pStopPopAddr and @a cbStopPop
1790 * to form an area that wraps around and it will
1791 * consider the part starting at 0 as included.
1792 * @param cMaxSteps The maximum number of steps to take. This is to
1793 * prevent stepping for ever, so passing UINT32_MAX is
1794 * not recommended.
1795 *
1796 * @remarks The two address arguments must be guest context virtual addresses,
1797 * or HMA. The code doesn't make much of a point out of HMA, though.
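 *
 * @par Example
 *      A step-over sketch (illustrative only, not from the original sources)
 *      for vCPU 0, capped at 10000 steps as a safety net:
 * @code
 *      int rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER, NULL, NULL, 0, 10000);
 *      // ...then wait for the resulting DBGFEVENT_STEPPED via DBGFR3EventWait().
 * @endcode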
1798 */
1799VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1800 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1801{
1802 /*
1803 * Check state.
1804 */
1805 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1806 PVM pVM = pUVM->pVM;
1807 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1808 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1809 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1810 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1811 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1812 {
1813 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1814 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1815 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1816 }
1817 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1818 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1819 {
1820 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1821 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1822 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1823 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1824 }
1825
1826 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1827 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1828 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1829 { /* likely */ }
1830 else
1831 return VERR_SEM_OUT_OF_TURN;
1832 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1833
1834 /*
1835 * Send the emulation thread a single-step command.
1836 */
1837 if (fFlags == DBGF_STEP_F_INTO)
1838 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1839 else
1840 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1841 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1842 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1843 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1844 else
1845 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1846 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1847 {
1848 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1849 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1850 }
1851 else
1852 {
1853 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1854 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1855 }
1856
1857 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1858 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1859 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1860
1861 Assert(dbgfR3CpuIsHalted(pUVCpu));
1862 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1863}
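
/*
 * Usage sketch: approximating a "step out" with the stop-on-stack-pop filter
 * described above.  uCurrentSp is assumed to be the flat stack pointer of the
 * halted vCPU at the call site, the 64 byte pop range and the 16K step cap are
 * arbitrary illustrative values, and DBGFR3AddrFromFlat() is assumed available
 * for building the flat DBGFADDRESS.
 *
 *      static int dbgfExampleStepOut(PUVM pUVM, VMCPUID idCpu, RTGCUINTPTR uCurrentSp)
 *      {
 *          DBGFADDRESS StackPopAddr;
 *          DBGFR3AddrFromFlat(pUVM, &StackPopAddr, uCurrentSp);
 *          RTGCUINTPTR const cbStopPop = 64;     // size of the pop detection range
 *          uint32_t    const cMaxSteps = 16384;  // safety cap, see cMaxSteps above
 *          return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP,
 *                              NULL, &StackPopAddr, cbStopPop, cMaxSteps);
 *      }
 */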
1864
1865
1866
1867/**
1868 * dbgfR3EventConfigEx argument packet.
1869 */
1870typedef struct DBGFR3EVENTCONFIGEXARGS
1871{
1872 PCDBGFEVENTCONFIG paConfigs;
1873 size_t cConfigs;
1874 int rc;
1875} DBGFR3EVENTCONFIGEXARGS;
1876/** Pointer to a dbgfR3EventConfigEx argument packet. */
1877typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1878
1879
1880/**
1881 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1882 */
1883static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1884{
1885 if (pVCpu->idCpu == 0)
1886 {
1887 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1888 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1889 size_t cConfigs = pArgs->cConfigs;
1890
1891 /*
1892 * Apply the changes.
1893 */
1894 unsigned cChanges = 0;
1895 for (uint32_t i = 0; i < cConfigs; i++)
1896 {
1897 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1898 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1899 if (paConfigs[i].fEnabled)
1900 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1901 else
1902 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1903 }
1904
1905 /*
1906 * Inform HM about changes.
1907 */
1908 if (cChanges > 0 && HMIsEnabled(pVM))
1909 {
1910 HMR3NotifyDebugEventChanged(pVM);
1911 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1912 }
1913 }
1914 else if (HMIsEnabled(pVM))
1915 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1916
1917 return VINF_SUCCESS;
1918}
1919
1920
1921/**
1922 * Configures (enables/disables) multiple selectable debug events.
1923 *
1924 * @returns VBox status code.
1925 * @param pUVM The user mode VM handle.
1926 * @param paConfigs The events to configure and their new state.
1927 * @param cConfigs Number of entries in @a paConfigs.
1928 */
1929VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1930{
1931 /*
1932 * Validate input.
1933 */
1934 size_t i = cConfigs;
1935 while (i-- > 0)
1936 {
1937 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1938 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1939 }
1940 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1941 PVM pVM = pUVM->pVM;
1942 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1943
1944 /*
1945 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1946 * can sync their data and execution with new debug state.
1947 */
1948 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1949 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1950 dbgfR3EventConfigEx, &Args);
1951 if (RT_SUCCESS(rc))
1952 rc = Args.rc;
1953 return rc;
1954}
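
/*
 * Usage sketch: batching two event state changes into a single rendezvous.
 * The event types are left as parameters since only values in the
 * [DBGFEVENT_FIRST_SELECTABLE, DBGFEVENT_END) range pass the validation above;
 * the helper name is illustrative only.
 *
 *      static int dbgfExampleEnableTwoEvents(PUVM pUVM, DBGFEVENTTYPE enmEvt1, DBGFEVENTTYPE enmEvt2)
 *      {
 *          DBGFEVENTCONFIG aCfgs[] =
 *          {
 *              { enmEvt1, true },
 *              { enmEvt2, true },
 *          };
 *          return DBGFR3EventConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 *      }
 */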
1955
1956
1957/**
1958 * Enables or disables a selectable debug event.
1959 *
1960 * @returns VBox status code.
1961 * @param pUVM The user mode VM handle.
1962 * @param enmEvent The selectable debug event.
1963 * @param fEnabled The new state.
1964 */
1965VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1966{
1967 /*
1968 * Convert to an array call.
1969 */
1970 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1971 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1972}
1973
1974
1975/**
1976 * Checks if the given selectable event is enabled.
1977 *
1978 * @returns true if enabled, false if not or invalid input.
1979 * @param pUVM The user mode VM handle.
1980 * @param enmEvent The selectable debug event.
1981 * @sa DBGFR3EventQuery
1982 */
1983VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
1984{
1985 /*
1986 * Validate input.
1987 */
1988 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
1989 && enmEvent < DBGFEVENT_END, false);
1990 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
1991 || enmEvent == DBGFEVENT_BREAKPOINT
1992 || enmEvent == DBGFEVENT_BREAKPOINT_IO
1993 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
1994
1995 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1996 PVM pVM = pUVM->pVM;
1997 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1998
1999 /*
2000 * Check the event status.
2001 */
2002 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2003}
2004
2005
2006/**
2007 * Queries the status of a set of events.
2008 *
2009 * @returns VBox status code.
2010 * @param pUVM The user mode VM handle.
2011 * @param paConfigs The events to query and where to return the state.
2012 * @param cConfigs The number of elements in @a paConfigs.
2013 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2014 */
2015VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2016{
2017 /*
2018 * Validate input.
2019 */
2020 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2021 PVM pVM = pUVM->pVM;
2022 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2023
2024 for (size_t i = 0; i < cConfigs; i++)
2025 {
2026 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2027 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2028 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2029 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2030 || enmType == DBGFEVENT_BREAKPOINT
2031 || enmType == DBGFEVENT_BREAKPOINT_IO
2032 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2033 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2034 }
2035
2036 return VINF_SUCCESS;
2037}
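
/*
 * Usage sketch: querying and logging the state of a single selectable event.
 * The event type must lie within the range checked above; LogRel() is the
 * regular VBox release logger.
 *
 *      static void dbgfExampleLogEventState(PUVM pUVM, DBGFEVENTTYPE enmEvent)
 *      {
 *          DBGFEVENTCONFIG Cfg = { enmEvent, false };
 *          int rc = DBGFR3EventQuery(pUVM, &Cfg, 1);
 *          if (RT_SUCCESS(rc))
 *              LogRel(("DBGF event %d is %s\n", (int)Cfg.enmType, Cfg.fEnabled ? "enabled" : "disabled"));
 *          else
 *              LogRel(("DBGFR3EventQuery failed: %Rrc\n", rc));
 *      }
 */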
2038
2039
2040/**
2041 * dbgfR3InterruptConfigEx argument packet.
2042 */
2043typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2044{
2045 PCDBGFINTERRUPTCONFIG paConfigs;
2046 size_t cConfigs;
2047 int rc;
2048} DBGFR3INTERRUPTCONFIGEXARGS;
2049/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2050typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2051
2052/**
2053 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2054 * Worker for DBGFR3InterruptConfigEx.}
2055 */
2056static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2057{
2058 if (pVCpu->idCpu == 0)
2059 {
2060 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2061 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2062 size_t cConfigs = pArgs->cConfigs;
2063
2064 /*
2065 * Apply the changes.
2066 */
2067 bool fChanged = false;
2068 bool fThis;
2069 for (uint32_t i = 0; i < cConfigs; i++)
2070 {
2071 /*
2072 * Hardware interrupts.
2073 */
2074 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2075 {
2076 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2077 if (fThis)
2078 {
2079 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2080 pVM->dbgf.s.cHardIntBreakpoints++;
2081 }
2082 }
2083 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2084 {
2085 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2086 if (fThis)
2087 {
2088 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2089 pVM->dbgf.s.cHardIntBreakpoints--;
2090 }
2091 }
2092
2093 /*
2094 * Software interrupts.
2095 */
2096 if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2097 {
2098 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2099 if (fThis)
2100 {
2101 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2102 pVM->dbgf.s.cSoftIntBreakpoints++;
2103 }
2104 }
2105 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2106 {
2107 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2108 if (fThis)
2109 {
2110 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2111 pVM->dbgf.s.cSoftIntBreakpoints--;
2112 }
2113 }
2114 }
2115
2116 /*
2117 * Update the event bitmap entries.
2118 */
2119 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2120 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2121 else
2122 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2123
2124 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2125 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2126 else
2127 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2128
2129 /*
2130 * Inform HM about changes.
2131 */
2132 if (fChanged && HMIsEnabled(pVM))
2133 {
2134 HMR3NotifyDebugEventChanged(pVM);
2135 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2136 }
2137 }
2138 else if (HMIsEnabled(pVM))
2139 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2140
2141 return VINF_SUCCESS;
2142}
2143
2144
2145/**
2146 * Changes the interception state of a set of hardware and software interrupts.
2147 *
2148 * @returns VBox status code.
2149 * @param pUVM The user mode VM handle.
2150 * @param paConfigs The interrupts to configure and their new states.
2151 * @param cConfigs The number of elements in @a paConfigs.
2152 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2153 */
2154VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2155{
2156 /*
2157 * Validate input.
2158 */
2159 size_t i = cConfigs;
2160 while (i-- > 0)
2161 {
2162 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2163 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2164 }
2165
2166 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2167 PVM pVM = pUVM->pVM;
2168 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2169
2170 /*
2171 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2172 * can sync their data and execution with new debug state.
2173 */
2174 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2175 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2176 dbgfR3InterruptConfigEx, &Args);
2177 if (RT_SUCCESS(rc))
2178 rc = Args.rc;
2179 return rc;
2180}
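
/*
 * Usage sketch: enabling interception of both the hardware and the software
 * flavour of one interrupt vector in a single call.  The vector number is a
 * parameter; the field order follows the DBGFINTERRUPTCONFIG initializers used
 * by the wrappers below (iInterrupt, hard state, soft state).
 *
 *      static int dbgfExampleTrapVector(PUVM pUVM, uint8_t iInterrupt)
 *      {
 *          DBGFINTERRUPTCONFIG aCfgs[] =
 *          {
 *              { iInterrupt, DBGFINTERRUPTSTATE_ENABLED, DBGFINTERRUPTSTATE_ENABLED },
 *          };
 *          return DBGFR3InterruptConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 *      }
 */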
2181
2182
2183/**
2184 * Configures interception of a hardware interrupt.
2185 *
2186 * @returns VBox status code.
2187 * @param pUVM The user mode VM handle.
2188 * @param iInterrupt The interrupt number.
2189 * @param fEnabled Whether interception is enabled or not.
2190 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2191 */
2192VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2193{
2194 /*
2195 * Convert to DBGFR3InterruptConfigEx call.
2196 */
2197 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2198 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2199}
2200
2201
2202/**
2203 * Configures interception of a software interrupt.
2204 *
2205 * @returns VBox status code.
2206 * @param pUVM The user mode VM handle.
2207 * @param iInterrupt The interrupt number.
2208 * @param fEnabled Whether interception is enabled or not.
2209 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2210 */
2211VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2212{
2213 /*
2214 * Convert to DBGFR3InterruptConfigEx call.
2215 */
2216 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2217 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2218}
2219
2220
2221/**
2222 * Checks whether interception is enabled for a hardware interrupt.
2223 *
2224 * @returns true if enabled, false if not or invalid input.
2225 * @param pUVM The user mode VM handle.
2226 * @param iInterrupt The interrupt number.
2227 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2228 * DBGF_IS_SOFTWARE_INT_ENABLED
2229 */
2230VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2231{
2232 /*
2233 * Validate input.
2234 */
2235 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2236 PVM pVM = pUVM->pVM;
2237 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2238
2239 /*
2240 * Check it.
2241 */
2242 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2243}
2244
2245
2246/**
2247 * Checks whether interception is enabled for a software interrupt.
2248 *
2249 * @returns true if enabled, false if not or invalid input.
2250 * @param pUVM The user mode VM handle.
2251 * @param iInterrupt The interrupt number.
2252 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2253 * DBGF_IS_HARDWARE_INT_ENABLED,
2254 */
2255VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2256{
2257 /*
2258 * Validate input.
2259 */
2260 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2261 PVM pVM = pUVM->pVM;
2262 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2263
2264 /*
2265 * Check it.
2266 */
2267 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2268}
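
/*
 * Usage sketch: toggling hardware interrupt interception for a vector and
 * reporting the state that ends up being active.  Purely illustrative; the
 * query helpers above return false for invalid input, which this sketch does
 * not try to distinguish from "disabled".
 *
 *      static bool dbgfExampleToggleHardInt(PUVM pUVM, uint8_t iInterrupt)
 *      {
 *          bool const fNew = !DBGFR3InterruptHardwareIsEnabled(pUVM, iInterrupt);
 *          int rc = DBGFR3InterruptHardwareConfig(pUVM, iInterrupt, fNew);
 *          if (RT_FAILURE(rc))
 *              LogRel(("DBGFR3InterruptHardwareConfig failed: %Rrc\n", rc));
 *          return RT_BOOL(DBGFR3InterruptHardwareIsEnabled(pUVM, iInterrupt));
 *      }
 */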
2269
2270
2271
2272/**
2273 * Call this to single step programmatically.
2274 *
2275 * You must pass down the return code to the EM loop! That's
2276 * where the actual single stepping takes place (at least in the
2277 * current implementation).
2278 *
2279 * @returns VINF_EM_DBG_STEP
2280 *
2281 * @param pVCpu The cross context virtual CPU structure.
2282 *
2283 * @thread VCpu EMT
2284 * @internal
2285 */
2286VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2287{
2288 VMCPU_ASSERT_EMT(pVCpu);
2289
2290 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2291 return VINF_EM_DBG_STEP;
2292}
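
/*
 * Usage sketch: a hypothetical EMT-side worker that decides to single step and
 * therefore must hand the status code back up to the EM loop, as noted above.
 * The condition and function name are illustrative only.
 *
 *      static int dbgfExampleEmtWorker(PVMCPU pVCpu, bool fWantSingleStep)
 *      {
 *          if (fWantSingleStep)
 *              return DBGFR3PrgStep(pVCpu); // VINF_EM_DBG_STEP must reach the EM loop
 *          return VINF_SUCCESS;
 *      }
 */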
2293
2294
2295/**
2296 * Inject an NMI into a running VM on the given vCPU.
2297 *
2298 * @returns VBox status code.
2299 * @param pUVM The user mode VM structure.
2300 * @param idCpu The ID of the CPU to inject the NMI on.
2301 */
2302VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2303{
2304 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2305 PVM pVM = pUVM->pVM;
2306 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2307 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2308
2309 /** @todo Implement generic NMI injection. */
2310 /** @todo NEM: NMI injection */
2311 if (!HMIsEnabled(pVM))
2312 return VERR_NOT_SUP_BY_NEM;
2313
2314 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2315 return VINF_SUCCESS;
2316}
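
/*
 * Usage sketch: asking DBGF to inject an NMI, e.g. from a debugger front-end.
 * Assumes an HM-enabled VM (the call currently fails for NEM, see above) and
 * uses vCPU 0 as an arbitrary target.
 *
 *      int rc = DBGFR3InjectNMI(pUVM, 0);  // idCpu 0
 *      if (RT_FAILURE(rc))
 *          LogRel(("DBGFR3InjectNMI failed: %Rrc\n", rc));
 */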
2317