VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 99739

1/* $Id: DBGF.cpp 99739 2023-05-11 01:01:08Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf DBGF - The Debugger Facility
30 *
31 * The purpose of the DBGF is to provide an interface for debuggers to
32 * manipulate the VMM without having to mess up the source code for each of
33 * them. The DBGF is always built in and will always work when a debugger
34 * attaches to the VM. The DBGF provides the basic debugger features, such as
35 * halting execution, handling breakpoints, single step execution, instruction
36 * disassembly, info querying, OS specific diggers, symbol and module
37 * management.
38 *
39 * The interface works in a manner similar to the win32, linux and os2
40 * debugger interfaces. The interface has an asynchronous nature. This comes
41 * from the fact that the VMM and the Debugger are running in different threads.
42 * They are referred to as the "emulation thread" and the "debugger thread", or
43 * as the "ping thread" and the "pong thread", respectively. (The last set of
44 * names comes from the use of the Ping-Pong synchronization construct from the
45 * RTSem API.)
46 *
47 * @see grp_dbgf
48 *
49 *
50 * @section sec_dbgf_scenario Usage Scenario
51 *
52 * The debugger starts by attaching to the VM. For practical reasons we limit the
53 * number of concurrently attached debuggers to 1 per VM. The action of
54 * attaching to the VM causes the VM to check and generate debug events.
55 *
56 * The debugger will then wait/poll for debug events and issue commands.
57 *
58 * The waiting and polling are done by the DBGFR3EventWait() function. It will wait
59 * for the emulation thread to send a ping, thus indicating that there is an
60 * event waiting to be processed.
61 *
62 * An event can be a response to a command issued previously, the hitting of a
63 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
64 * the ping and must respond to the event at hand - the VMM is waiting. This
65 * usually means that the user of the debugger must do something, but it doesn't
66 * have to. The debugger is free to call any DBGF function (nearly at least)
67 * while processing the event.
68 *
69 * Typically the user will issue a request for the execution to be resumed, so
70 * the debugger calls DBGFR3Resume() and goes back to waiting/polling for events.
71 *
72 * When the user eventually terminates the debugging session or selects another
73 * VM, the debugger detaches from the VM. This means that breakpoints are
74 * disabled and that the emulation thread no longer polls for debugger commands.
75 *
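 * As a rough sketch only (not lifted from any actual frontend, with error
 * handling and most event types glossed over), a debugger built on the APIs
 * in this file might drive the facility like this:
 *
 * @code
 *     int rc = DBGFR3Attach(pUVM);
 *     while (RT_SUCCESS(rc))
 *     {
 *         DBGFEVENT Event;
 *         rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... let the user inspect state, manage breakpoints, etc ...
 *         if (Event.enmType == DBGFEVENT_POWERING_OFF)
 *             break;
 *         rc = DBGFR3Resume(pUVM, Event.idCpu);
 *     }
 *     DBGFR3Detach(pUVM);
 * @endcode
 *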
76 */
77
78
79/*********************************************************************************************************************************
80* Header Files *
81*********************************************************************************************************************************/
82#define LOG_GROUP LOG_GROUP_DBGF
83#include <VBox/vmm/dbgf.h>
84#include <VBox/vmm/selm.h>
85#include <VBox/vmm/em.h>
86#include <VBox/vmm/hm.h>
87#include <VBox/vmm/mm.h>
88#include <VBox/vmm/nem.h>
89#include "DBGFInternal.h"
90#include <VBox/vmm/vm.h>
91#include <VBox/vmm/uvm.h>
92#include <VBox/err.h>
93
94#include <VBox/log.h>
95#include <iprt/semaphore.h>
96#include <iprt/thread.h>
97#include <iprt/asm.h>
98#include <iprt/time.h>
99#include <iprt/assert.h>
100#include <iprt/stream.h>
101#include <iprt/env.h>
102
103
104/*********************************************************************************************************************************
105* Structures and Typedefs *
106*********************************************************************************************************************************/
107/**
108 * Instruction type returned by dbgfStepGetCurInstrType.
109 */
110typedef enum DBGFSTEPINSTRTYPE
111{
112 DBGFSTEPINSTRTYPE_INVALID = 0,
113 DBGFSTEPINSTRTYPE_OTHER,
114 DBGFSTEPINSTRTYPE_RET,
115 DBGFSTEPINSTRTYPE_CALL,
116 DBGFSTEPINSTRTYPE_END,
117 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
118} DBGFSTEPINSTRTYPE;
119
120
121/*********************************************************************************************************************************
122* Internal Functions *
123*********************************************************************************************************************************/
124DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
125DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
126static int dbgfR3CpuWait(PVMCPU pVCpu);
127static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
128static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
129static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
130static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
131
132
133
134/**
135 * Initializes the DBGF.
136 *
137 * @returns VBox status code.
138 * @param pVM The cross context VM structure.
139 */
140VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
141{
142 PUVM pUVM = pVM->pUVM;
143 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
144 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
145
146 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
147
148 /*
149 * The usual sideways mountain climbing style of init:
150 */
151 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
152 if (RT_SUCCESS(rc))
153 {
154 rc = dbgfR3TraceInit(pVM);
155 if (RT_SUCCESS(rc))
156 {
157 rc = dbgfR3RegInit(pUVM);
158 if (RT_SUCCESS(rc))
159 {
160 rc = dbgfR3AsInit(pUVM);
161 if (RT_SUCCESS(rc))
162 {
163 rc = dbgfR3BpInit(pUVM);
164 if (RT_SUCCESS(rc))
165 {
166 rc = dbgfR3OSInit(pUVM);
167 if (RT_SUCCESS(rc))
168 {
169 rc = dbgfR3PlugInInit(pUVM);
170 if (RT_SUCCESS(rc))
171 {
172 rc = dbgfR3BugCheckInit(pVM);
173 if (RT_SUCCESS(rc))
174 {
175#ifdef VBOX_WITH_DBGF_TRACING
176 rc = dbgfR3TracerInit(pVM);
177#endif
178 if (RT_SUCCESS(rc))
179 {
180 return VINF_SUCCESS;
181 }
182 }
183 dbgfR3PlugInTerm(pUVM);
184 }
185 dbgfR3OSTermPart1(pUVM);
186 dbgfR3OSTermPart2(pUVM);
187 }
188 dbgfR3BpTerm(pUVM);
189 }
190 dbgfR3AsTerm(pUVM);
191 }
192 dbgfR3RegTerm(pUVM);
193 }
194 dbgfR3TraceTerm(pVM);
195 }
196 dbgfR3InfoTerm(pUVM);
197 }
198 return rc;
199}
200
201
202/**
203 * Terminates and cleans up resources allocated by the DBGF.
204 *
205 * @returns VBox status code.
206 * @param pVM The cross context VM structure.
207 */
208VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
209{
210 PUVM pUVM = pVM->pUVM;
211
212#ifdef VBOX_WITH_DBGF_TRACING
213 dbgfR3TracerTerm(pVM);
214#endif
215 dbgfR3OSTermPart1(pUVM);
216 dbgfR3PlugInTerm(pUVM);
217 dbgfR3OSTermPart2(pUVM);
218 dbgfR3BpTerm(pUVM);
219 dbgfR3AsTerm(pUVM);
220 dbgfR3RegTerm(pUVM);
221 dbgfR3TraceTerm(pVM);
222 dbgfR3InfoTerm(pUVM);
223
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * This is for tstCFGM and others to avoid triggering leak detection.
230 *
231 * @param pUVM The user mode VM structure.
232 */
233VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
234{
235 dbgfR3InfoTerm(pUVM);
236}
237
238
239/**
240 * Called when the VM is powered off to detach debuggers.
241 *
242 * @param pVM The cross context VM structure.
243 */
244VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
245{
246 /*
247 * Send a termination event to any attached debugger.
248 */
249 if (pVM->dbgf.s.fAttached)
250 {
251 PVMCPU pVCpu = VMMGetCpu(pVM);
252 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
253 AssertLogRelRC(rc);
254
255 /*
256 * Clear the FF so we won't get confused later on.
257 */
258 VM_FF_CLEAR(pVM, VM_FF_DBGF);
259 }
260}
261
262
263/**
264 * Applies relocations to data and code managed by this
265 * component. This function will be called at init and
266 * whenever the VMM needs to relocate itself inside the GC.
267 *
268 * @param pVM The cross context VM structure.
269 * @param offDelta Relocation delta relative to old location.
270 */
271VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
272{
273 dbgfR3TraceRelocate(pVM);
274 dbgfR3AsRelocate(pVM->pUVM, offDelta);
275}
276
277
278/**
279 * Waits a little while for a debugger to attach.
280 *
281 * @returns True if a debugger has attached.
282 * @param pVM The cross context VM structure.
283 * @param pVCpu The cross context per CPU structure.
284 * @param enmEvent Event.
285 *
286 * @thread EMT(pVCpu)
287 */
288bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
289{
290 /*
291 * First a message.
292 */
293#if !defined(DEBUG)
294 int cWait = 10;
295#else
296 int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
297 || ( ( enmEvent == DBGFEVENT_ASSERTION_HYPER
298 || enmEvent == DBGFEVENT_FATAL_ERROR)
299 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
300 ? 10
301 : 150;
302#endif
303 RTStrmPrintf(g_pStdErr,
304 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
305#ifdef DEBUG
306 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
307#endif
308 ,
309 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
310 RTStrmFlush(g_pStdErr);
311 while (cWait > 0)
312 {
313 RTThreadSleep(100);
314 if (pVM->dbgf.s.fAttached)
315 {
316 RTStrmPrintf(g_pStdErr, "Attached!\n");
317 RTStrmFlush(g_pStdErr);
318 return true;
319 }
320
321 /* Process rendezvous (debugger attaching involves such). */
322 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
323 {
324 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
325 if (rc != VINF_SUCCESS)
326 {
327 /** @todo Ignoring these could be bad. */
328 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
329 RTStrmFlush(g_pStdErr);
330 }
331 }
332
333 /* Process priority stuff. */
334 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
335 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
336 {
337 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
338 if (rc == VINF_SUCCESS)
339 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
340 if (rc != VINF_SUCCESS)
341 {
342 /** @todo Ignoring these could be bad. */
343 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
344 RTStrmFlush(g_pStdErr);
345 }
346 }
347
348 /* next */
349 if (!(cWait % 10))
350 {
351 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
352 RTStrmFlush(g_pStdErr);
353 }
354 cWait--;
355 }
356
357 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
358 RTStrmFlush(g_pStdErr);
359 return false;
360}
361
362
363/**
364 * Forced action callback.
365 *
366 * The VMM will call this from its main loop when either VM_FF_DBGF or
367 * VMCPU_FF_DBGF is set.
368 *
369 * The function checks for and executes pending commands from the debugger.
370 * Then it checks for pending debug events and serves these.
371 *
372 * @returns VINF_SUCCESS normally.
373 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
374 * @param pVM The cross context VM structure.
375 * @param pVCpu The cross context per CPU structure.
376 */
377VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
378{
379 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
380
381 /*
382 * Dispatch pending events.
383 */
384 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
385 {
386 if ( pVCpu->dbgf.s.cEvents > 0
387 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
388 {
389 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
390 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
391 }
392
393 /*
394 * Command pending? Process it.
395 */
396 PUVMCPU pUVCpu = pVCpu->pUVCpu;
397 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
398 {
399 bool fResumeExecution;
400 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
401 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
402 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
403 if (!fResumeExecution)
404 rcStrict2 = dbgfR3CpuWait(pVCpu);
405 if ( rcStrict2 != VINF_SUCCESS
406 && ( rcStrict == VINF_SUCCESS
407 || RT_FAILURE(rcStrict2)
408 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
409 rcStrict = rcStrict2;
410 }
411 }
412
413 return VBOXSTRICTRC_TODO(rcStrict);
414}
415
416
417/**
418 * Try to determine the event context.
419 *
420 * @returns debug event context.
421 * @param pVCpu The cross context vCPU structure.
422 */
423static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
424{
425 switch (EMGetState(pVCpu))
426 {
427 case EMSTATE_HM:
428 case EMSTATE_NEM:
429 case EMSTATE_DEBUG_GUEST_HM:
430 case EMSTATE_DEBUG_GUEST_NEM:
431 return DBGFEVENTCTX_HM;
432
433 case EMSTATE_IEM:
434 case EMSTATE_RAW:
435 case EMSTATE_IEM_THEN_REM:
436 case EMSTATE_DEBUG_GUEST_IEM:
437 case EMSTATE_DEBUG_GUEST_RAW:
438 return DBGFEVENTCTX_RAW;
439
440
441 case EMSTATE_REM:
442 case EMSTATE_DEBUG_GUEST_REM:
443 return DBGFEVENTCTX_REM;
444
445 case EMSTATE_DEBUG_HYPER:
446 case EMSTATE_GURU_MEDITATION:
447 return DBGFEVENTCTX_HYPER;
448
449 default:
450 return DBGFEVENTCTX_OTHER;
451 }
452}
453
454
455/**
456 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
457 *
458 * @returns VBox status code.
459 * @param pVM The cross context VM structure.
460 * @param pVCpu The CPU sending the event.
461 * @param enmType The event type to send.
462 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
463 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
464 * @param cbPayload The size of the event payload, optional.
465 */
466static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
467 void const *pvPayload, size_t cbPayload)
468{
469 PUVM pUVM = pVM->pUVM;
470 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
471
472 /*
473 * Massage the input a little.
474 */
475 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
476 if (enmCtx == DBGFEVENTCTX_INVALID)
477 enmCtx = dbgfR3FigureEventCtx(pVCpu);
478
479 /*
480 * Put the event into the ring buffer.
481 */
482 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
483
484 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
485 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
486 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
487 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
488
489 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
490
491#ifdef DEBUG
492 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
493#endif
494 pEvent->enmType = enmType;
495 pEvent->enmCtx = enmCtx;
496 pEvent->idCpu = pVCpu->idCpu;
497 pEvent->uReserved = 0;
498 if (cbPayload)
499 memcpy(&pEvent->u, pvPayload, cbPayload);
500
501 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
502
503 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
504
505 /*
506 * Signal the debugger.
507 */
508 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
509}
510
511
512/**
513 * Send event and wait for the debugger to respond.
514 *
515 * @returns Strict VBox status code.
516 * @param pVM The cross context VM structure.
517 * @param pVCpu The CPU sending the event.
518 * @param enmType The event type to send.
519 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
520 */
521DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
522{
523 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
524 if (RT_SUCCESS(rc))
525 rc = dbgfR3CpuWait(pVCpu);
526 return rc;
527}
528
529
530/**
531 * Send event and wait for the debugger to respond, extended version.
532 *
533 * @returns Strict VBox status code.
534 * @param pVM The cross context VM structure.
535 * @param pVCpu The CPU sending the event.
536 * @param enmType The event type to send.
537 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
538 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
539 * @param cbPayload The size of the event payload, optional.
540 */
541DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
542 void const *pvPayload, size_t cbPayload)
543{
544 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
545 if (RT_SUCCESS(rc))
546 rc = dbgfR3CpuWait(pVCpu);
547 return rc;
548}
549
550
551/**
552 * Send event but do NOT wait for the debugger.
553 *
554 * Currently only used by dbgfR3CpuCmd().
555 *
556 * @param pVM The cross context VM structure.
557 * @param pVCpu The CPU sending the event.
558 * @param enmType The event type to send.
559 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
560 */
561DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
562{
563 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
564}
565
566
567/**
568 * The common event prologue code.
569 *
570 * It will make sure someone is attached, and perhaps process any high priority
571 * pending actions (none yet).
572 *
573 * @returns VBox status code.
574 * @param pVM The cross context VM structure.
575 * @param pVCpu The vCPU cross context structure.
576 * @param enmEvent The event to be sent.
577 */
578static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
579{
580 /*
581 * Check if a debugger is attached.
582 */
583 if ( !pVM->dbgf.s.fAttached
584 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
585 {
586 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
587 return VERR_DBGF_NOT_ATTACHED;
588 }
589
590 /*
591 * Look thru pending commands and finish those which make sense now.
592 */
593 /** @todo Process/purge pending commands. */
594 //int rc = DBGFR3VMMForcedAction(pVM);
595 return VINF_SUCCESS;
596}
597
598
599/**
600 * Processes a pending event on the current CPU.
601 *
602 * This is called by EM in response to VINF_EM_DBG_EVENT.
603 *
604 * @returns Strict VBox status code.
605 * @param pVM The cross context VM structure.
606 * @param pVCpu The cross context per CPU structure.
607 *
608 * @thread EMT(pVCpu)
609 */
610VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
611{
612 VMCPU_ASSERT_EMT(pVCpu);
613 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
614
615 /*
616 * Check that we've got an event first.
617 */
618 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
619 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
620 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
621
622 /*
623 * Make sure we've got a debugger and are allowed to speak to it.
624 */
625 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
626 if (RT_FAILURE(rc))
627 {
628 /** @todo drop them events? */
629 return rc; /** @todo this will cause trouble if we're here via an FF! */
630 }
631
632 /*
633 * Send the event and mark it as ignore.
634 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
635 */
636 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
637 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
638 return rcStrict;
639}
640
641
642/**
643 * Send a generic debugger event which takes no data.
644 *
645 * @returns VBox status code.
646 * @param pVM The cross context VM structure.
647 * @param enmEvent The event to send.
648 * @internal
649 */
650VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
651{
652 PVMCPU pVCpu = VMMGetCpu(pVM);
653 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
654
655 /*
656 * Do stepping filtering.
657 */
658 /** @todo Would be better if we did some of this inside the execution
659 * engines. */
660 if ( enmEvent == DBGFEVENT_STEPPED
661 || enmEvent == DBGFEVENT_STEPPED_HYPER)
662 {
663 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
664 return VINF_EM_DBG_STEP;
665 }
666
667 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
668 if (RT_FAILURE(rc))
669 return rc;
670
671 /*
672 * Send the event and process the reply communication.
673 */
674 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
675}
676
677
678/**
679 * Send a debugger event which takes the full source file location.
680 *
681 * @returns VBox status code.
682 * @param pVM The cross context VM structure.
683 * @param enmEvent The event to send.
684 * @param pszFile Source file.
685 * @param uLine Line number in source file.
686 * @param pszFunction Function name.
687 * @param pszFormat Message which accompanies the event.
688 * @param ... Message arguments.
689 * @internal
690 */
691VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
692{
693 va_list args;
694 va_start(args, pszFormat);
695 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
696 va_end(args);
697 return rc;
698}
699
700
701/**
702 * Send a debugger event which takes the full source file location.
703 *
704 * @returns VBox status code.
705 * @param pVM The cross context VM structure.
706 * @param enmEvent The event to send.
707 * @param pszFile Source file.
708 * @param uLine Line number in source file.
709 * @param pszFunction Function name.
710 * @param pszFormat Message which accompanies the event.
711 * @param args Message arguments.
712 * @internal
713 */
714VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
715{
716 PVMCPU pVCpu = VMMGetCpu(pVM);
717 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
718
719 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
720 if (RT_FAILURE(rc))
721 return rc;
722
723 /*
724 * Format the message.
725 */
726 char *pszMessage = NULL;
727 char szMessage[8192];
728 if (pszFormat && *pszFormat)
729 {
730 pszMessage = &szMessage[0];
731 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
732 }
733
734 /*
735 * Send the event and process the reply communication.
736 */
737 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
738 DbgEvent.u.Src.pszFile = pszFile;
739 DbgEvent.u.Src.uLine = uLine;
740 DbgEvent.u.Src.pszFunction = pszFunction;
741 DbgEvent.u.Src.pszMessage = pszMessage;
742 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
743}
744
745
746/**
747 * Send a debugger event which takes the two assertion messages.
748 *
749 * @returns VBox status code.
750 * @param pVM The cross context VM structure.
751 * @param enmEvent The event to send.
752 * @param pszMsg1 First assertion message.
753 * @param pszMsg2 Second assertion message.
754 */
755VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
756{
757 PVMCPU pVCpu = VMMGetCpu(pVM);
758 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
759
760 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
761 if (RT_FAILURE(rc))
762 return rc;
763
764 /*
765 * Send the event and process the reply communication.
766 */
767 DBGFEVENT DbgEvent;
768 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
769 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
770 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
771}
772
773
774/**
775 * Breakpoint was hit somewhere.
776 * Figure out which breakpoint it is and notify the debugger.
777 *
778 * @returns VBox status code.
779 * @param pVM The cross context VM structure.
780 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
781 */
782VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
783{
784 PVMCPU pVCpu = VMMGetCpu(pVM);
785 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
786
787 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
788 if (RT_FAILURE(rc))
789 return rc;
790
791 /*
792 * Halt all other vCPUs as well, giving the user the ability to inspect the
793 * state of those vCPUs too.
794 */
795 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
796 if (RT_FAILURE(rc))
797 return rc;
798
799 /*
800 * Send the event and process the reply communication.
801 */
802 DBGFEVENT DbgEvent;
803 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
804 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
805 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
806 {
807 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
808 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
809 }
810
811 return VERR_DBGF_IPE_1;
812}
813
814
815/**
816 * Returns whether the given vCPU is waiting for the debugger.
817 *
818 * @returns Flag whether the vCPU is currently waiting for the debugger.
819 * @param pUVCpu The user mode vCPU structure.
820 */
821DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
822{
823 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
824}
825
826
827/**
828 * Checks whether the given vCPU is waiting in the debugger.
829 *
830 * @returns Flag whether the indicated vCPU is halted; when VMCPUID_ALL is
831 * given, true is returned when at least one vCPU is halted.
832 * @param pUVM The user mode VM structure.
833 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
834 */
835DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
836{
837 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
838
839 /* Check that either the given vCPU or all are actually halted. */
840 if (idCpu != VMCPUID_ALL)
841 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
842
843 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
844 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
845 return true;
846 return false;
847}
848
849
850/**
851 * Gets the pending debug command for this EMT/CPU, replacing it with
852 * DBGFCMD_NO_COMMAND.
853 *
854 * @returns Pending command.
855 * @param pUVCpu The user mode virtual CPU structure.
856 * @thread EMT(pUVCpu)
857 */
858DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
859{
860 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
861 Log2(("DBGF: Getting command: %d\n", enmCmd));
862 return enmCmd;
863}
864
865
866/**
867 * Send a debug command to a CPU, making sure to notify it.
868 *
869 * @returns VBox status code.
870 * @param pUVCpu The user mode virtual CPU structure.
871 * @param enmCmd The command to submit to the CPU.
872 */
873DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
874{
875 Log2(("DBGF: Setting command to %d\n", enmCmd));
876 Assert(enmCmd != DBGFCMD_NO_COMMAND);
877 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
878
879 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
880 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
881
882 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
883 return VINF_SUCCESS;
884}
885
886
887/**
888 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
889 */
890static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
891{
892 RT_NOREF(pvUser);
893
894 VMCPU_ASSERT_EMT(pVCpu);
895 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
896
897 PUVMCPU pUVCpu = pVCpu->pUVCpu;
898 if ( pVCpu != (PVMCPU)pvUser
899 && !dbgfR3CpuIsHalted(pUVCpu))
900 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
901
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Halts all vCPUs of the given VM except for the given one.
908 *
909 * @returns VBox status code.
910 * @param pVM The cross context VM structure.
911 * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
912 */
913static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
914{
915 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
916}
917
918
919/**
920 * Waits for the debugger to respond.
921 *
922 * @returns VBox status code. (clarify)
923 * @param pVCpu The cross context vCPU structure.
924 */
925static int dbgfR3CpuWait(PVMCPU pVCpu)
926{
927 PVM pVM = pVCpu->CTX_SUFF(pVM);
928 PUVMCPU pUVCpu = pVCpu->pUVCpu;
929
930 LogFlow(("dbgfR3CpuWait:\n"));
931 int rcRet = VINF_SUCCESS;
932
933 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
934
935 /*
936 * Waits for the debugger to reply (i.e. issue a command).
937 */
938 for (;;)
939 {
940 /*
941 * Wait.
942 */
943 for (;;)
944 {
945 /*
946 * Process forced flags before we go sleep.
947 */
948 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
949 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
950 {
951 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
952 break;
953
954 int rc;
955 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
956 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
957 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
958 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
959 {
960 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
961 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
962 if (rc == VINF_SUCCESS)
963 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
964 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
965 }
966 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
967 {
968 VMSTATE enmState = VMR3GetState(pVM);
969 switch (enmState)
970 {
971 case VMSTATE_FATAL_ERROR:
972 case VMSTATE_FATAL_ERROR_LS:
973 case VMSTATE_GURU_MEDITATION:
974 case VMSTATE_GURU_MEDITATION_LS:
975 rc = VINF_EM_SUSPEND;
976 break;
977 case VMSTATE_DESTROYING:
978 rc = VINF_EM_TERMINATE;
979 break;
980 default:
981 rc = VERR_DBGF_IPE_1;
982 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
983 }
984 }
985 else
986 rc = VINF_SUCCESS;
987 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
988 {
989 switch (rc)
990 {
991 case VINF_EM_DBG_BREAKPOINT:
992 case VINF_EM_DBG_STEPPED:
993 case VINF_EM_DBG_STEP:
994 case VINF_EM_DBG_STOP:
995 case VINF_EM_DBG_EVENT:
996 AssertMsgFailed(("rc=%Rrc\n", rc));
997 break;
998
999 /* return straight away */
1000 case VINF_EM_TERMINATE:
1001 case VINF_EM_OFF:
1002 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1003 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1004 return rc;
1005
1006 /* remember return code. */
1007 default:
1008 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
1009 RT_FALL_THRU();
1010 case VINF_EM_RESET:
1011 case VINF_EM_SUSPEND:
1012 case VINF_EM_HALT:
1013 case VINF_EM_RESUME:
1014 case VINF_EM_RESCHEDULE:
1015 case VINF_EM_RESCHEDULE_REM:
1016 case VINF_EM_RESCHEDULE_RAW:
1017 if (rc < rcRet || rcRet == VINF_SUCCESS)
1018 rcRet = rc;
1019 break;
1020 }
1021 }
1022 else if (RT_FAILURE(rc))
1023 {
1024 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1025 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1026 return rc;
1027 }
1028 }
1029 else if (pVM->dbgf.s.fAttached)
1030 {
1031 int rc = VMR3WaitU(pUVCpu);
1032 if (RT_FAILURE(rc))
1033 {
1034 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1035 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1036 return rc;
1037 }
1038 }
1039 else
1040 {
1041 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1042 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1043 return rcRet;
1044 }
1045 }
1046
1047 /*
1048 * Process the command.
1049 */
1050 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1051 bool fResumeExecution;
1052 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1053 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1054 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1055 if (fResumeExecution)
1056 {
1057 if (RT_FAILURE(rc))
1058 rcRet = rc;
1059 else if ( rc >= VINF_EM_FIRST
1060 && rc <= VINF_EM_LAST
1061 && (rc < rcRet || rcRet == VINF_SUCCESS))
1062 rcRet = rc;
1063 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1064 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1065 return rcRet;
1066 }
1067 }
1068}
1069
1070
1071/**
1072 * Executes command from debugger.
1073 *
1074 * The caller is responsible for waiting or resuming execution based on the
1075 * value returned in the *pfResumeExecution indicator.
1076 *
1077 * @returns VBox status code. (clarify!)
1078 * @param pVCpu The cross context vCPU structure.
1079 * @param enmCmd The command in question.
1080 * @param pCmdData Pointer to the command data.
1081 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1082 */
1083static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1084{
1085 RT_NOREF(pCmdData); /* for later */
1086
1087 /*
1088 * The cases in this switch return directly if there is no event to send.
1089 */
1090 DBGFEVENTTYPE enmEvent;
1091 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1092 switch (enmCmd)
1093 {
1094 /*
1095 * Halt is answered by an event saying that we've halted.
1096 */
1097 case DBGFCMD_HALT:
1098 {
1099 *pfResumeExecution = false;
1100 enmEvent = DBGFEVENT_HALT_DONE;
1101 break;
1102 }
1103
1104
1105 /*
1106 * Resume is not answered, we just resume execution.
1107 */
1108 case DBGFCMD_GO:
1109 {
1110 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1111 *pfResumeExecution = true;
1112 return VINF_SUCCESS;
1113 }
1114
1115 /** @todo implement (and define) the rest of the commands. */
1116
1117 /*
1118 * Single step, with trace into.
1119 */
1120 case DBGFCMD_SINGLE_STEP:
1121 {
1122 Log2(("Single step\n"));
1123 PVM pVM = pVCpu->CTX_SUFF(pVM);
1124 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1125 {
1126 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1127 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1128 }
1129 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1130 {
1131 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1132 *pfResumeExecution = true;
1133 return VINF_EM_DBG_STEP;
1134 }
1135 /* Stop after zero steps. Nonsense, but whatever. */
1136 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1137 *pfResumeExecution = false;
1138 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1139 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1140 break;
1141 }
1142
1143 /*
1144 * Default is to send an invalid command event.
1145 */
1146 default:
1147 {
1148 *pfResumeExecution = false;
1149 enmEvent = DBGFEVENT_INVALID_COMMAND;
1150 break;
1151 }
1152 }
1153
1154 /*
1155 * Send the pending event.
1156 */
1157 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1158 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1159 AssertRCStmt(rc, *pfResumeExecution = true);
1160 return rc;
1161}
1162
1163
1164/**
1165 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1166 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1167 */
1168static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1169{
1170 PUVM pUVM = pVM->pUVM;
1171 int *prcAttach = (int *)pvUser;
1172 RT_NOREF(pVCpu);
1173
1174 if (pVM->dbgf.s.fAttached)
1175 {
1176 Log(("dbgfR3Attach: Debugger already attached\n"));
1177 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1178 return VINF_SUCCESS;
1179 }
1180
1181 /*
1182 * The per-CPU bits.
1183 */
1184 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1185 {
1186 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1187
1188 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1189 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1190 }
1191
1192 /*
1193 * Init of the VM -> Debugger communication part living in the global VM structure.
1194 */
1195 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when it becomes full. */
1196 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1197 pUVM->dbgf.s.idxDbgEvtRead = 0;
1198 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1199 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1200 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1201 int rc;
1202 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1203 if (pUVM->dbgf.s.paDbgEvts)
1204 {
1205 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1206 if (RT_SUCCESS(rc))
1207 {
1208 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1209 if (RT_SUCCESS(rc))
1210 {
1211 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1212 if (RT_SUCCESS(rc))
1213 {
1214 /*
1215 * At last, set the attached flag.
1216 */
1217 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1218 *prcAttach = VINF_SUCCESS;
1219 return VINF_SUCCESS;
1220 }
1221
1222 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1223 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1224 }
1225 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1226 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1227 }
1228 }
1229 else
1230 rc = VERR_NO_MEMORY;
1231
1232 *prcAttach = rc;
1233 return VINF_SUCCESS;
1234}
1235
1236
1237/**
1238 * Attaches a debugger to the specified VM.
1239 *
1240 * Only one debugger at a time.
1241 *
1242 * @returns VBox status code.
1243 * @param pUVM The user mode VM handle.
1244 */
1245VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1246{
1247 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1248 PVM pVM = pUVM->pVM;
1249 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1250
1251 /*
1252 * Call the VM, use EMT rendezvous for serialization.
1253 */
1254 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1255 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1256 if (RT_SUCCESS(rc))
1257 rc = rcAttach;
1258
1259 return rc;
1260}
1261
1262
1263/**
1264 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1265 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1266 */
1267static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1268{
1269 if (pVCpu->idCpu == 0)
1270 {
1271 PUVM pUVM = (PUVM)pvUser;
1272
1273 /*
1274 * Per-CPU cleanup.
1275 */
1276 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1277 {
1278 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1279
1280 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1281 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1282 }
1283
1284 /*
1285 * De-init of the VM -> Debugger communication part living in the global VM structure.
1286 */
1287 if (pUVM->dbgf.s.paDbgEvts)
1288 {
1289 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1290 pUVM->dbgf.s.paDbgEvts = NULL;
1291 }
1292
1293 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1294 {
1295 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1296 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1297 }
1298
1299 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1300 {
1301 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1302 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1303 }
1304
1305 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1306 {
1307 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1308 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1309 }
1310
1311 pUVM->dbgf.s.cDbgEvtMax = 0;
1312 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1313 pUVM->dbgf.s.idxDbgEvtRead = 0;
1314 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1315 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1316 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1317
1318 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1319 }
1320
1321 return VINF_SUCCESS;
1322}
1323
1324
1325/**
1326 * Detaches a debugger from the specified VM.
1327 *
1328 * Caller must be attached to the VM.
1329 *
1330 * @returns VBox status code.
1331 * @param pUVM The user mode VM handle.
1332 */
1333VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1334{
1335 LogFlow(("DBGFR3Detach:\n"));
1336
1337 /*
1338 * Validate input. The UVM handle shall be valid, the VM handle might be
1339 * in the process of being destroyed already, so deal quietly with that.
1340 */
1341 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1342 PVM pVM = pUVM->pVM;
1343 if (!VM_IS_VALID_EXT(pVM))
1344 return VERR_INVALID_VM_HANDLE;
1345
1346 /*
1347 * Check if attached.
1348 */
1349 if (!pVM->dbgf.s.fAttached)
1350 return VERR_DBGF_NOT_ATTACHED;
1351
1352 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1353}
1354
1355
1356/**
1357 * Wait for a debug event.
1358 *
1359 * @returns VBox status code. Will not return VERR_INTERRUPTED.
1360 * @param pUVM The user mode VM handle.
1361 * @param cMillies Number of millis to wait.
1362 * @param pEvent Where to store the event data.
1363 */
1364VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1365{
1366 /*
1367 * Check state.
1368 */
1369 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1370 PVM pVM = pUVM->pVM;
1371 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1372 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1373
1374 RT_BZERO(pEvent, sizeof(*pEvent));
1375
1376 /*
1377 * Wait for an event to arrive if there are none.
1378 */
1379 int rc = VINF_SUCCESS;
1380 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1381 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1382 {
1383 do
1384 {
1385 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1386 } while ( RT_SUCCESS(rc)
1387 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1388 }
1389
1390 if (RT_SUCCESS(rc))
1391 {
1392 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1393
1394 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1395 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1396 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1397 }
1398
1399 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1400 return rc;
1401}
1402
1403
1404/**
1405 * Halts VM execution.
1406 *
1407 * After calling this the VM isn't actually halted till a DBGFEVENT_HALT_DONE
1408 * event arrives. Until that time it's not possible to issue any new commands.
1409 *
1410 * @returns VBox status code.
1411 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1412 * are halted.
1413 * @param pUVM The user mode VM handle.
1414 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
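 *
 * A minimal sketch of how a caller might pair this with DBGFR3EventWait()
 * (the variable names and the 30 second timeout are purely illustrative):
 *
 * @code
 *     int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
 *     if (RT_SUCCESS(rc) && rc != VWRN_DBGF_ALREADY_HALTED)
 *     {
 *         DBGFEVENT Event;
 *         do
 *             rc = DBGFR3EventWait(pUVM, 30000, &Event); // 30 second timeout
 *         while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 *     }
 * @endcode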
1415 */
1416VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1417{
1418 /*
1419 * Check state.
1420 */
1421 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1422 PVM pVM = pUVM->pVM;
1423 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1424 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1425 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1426
1427 /*
1428 * Halt the requested CPUs as needed.
1429 */
1430 int rc;
1431 if (idCpu != VMCPUID_ALL)
1432 {
1433 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1434 if (!dbgfR3CpuIsHalted(pUVCpu))
1435 {
1436 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1437 rc = VINF_SUCCESS;
1438 }
1439 else
1440 rc = VWRN_DBGF_ALREADY_HALTED;
1441 }
1442 else
1443 {
1444 rc = VWRN_DBGF_ALREADY_HALTED;
1445 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1446 {
1447 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1448 if (!dbgfR3CpuIsHalted(pUVCpu))
1449 {
1450 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1451 rc = VINF_SUCCESS;
1452 }
1453 }
1454 }
1455
1456 return rc;
1457}
1458
1459
1460/**
1461 * Checks if any of the specified vCPUs have been halted by the debugger.
1462 *
1463 * @returns True if at least one vCPU is halted.
1464 * @returns False if no vCPUs are halted.
1465 * @param pUVM The user mode VM handle.
1466 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1467 * at least a single vCPU is halted in the debugger.
1468 */
1469VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1470{
1471 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1472 PVM pVM = pUVM->pVM;
1473 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1474 AssertReturn(pVM->dbgf.s.fAttached, false);
1475
1476 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1477}
1478
1479
1480/**
1481 * Checks if the debugger can wait for events or not.
1482 *
1483 * This function is only used by lazy, multiplexing debuggers. :-)
1484 *
1485 * @returns VBox status code.
1486 * @retval VINF_SUCCESS if waitable.
1487 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1488 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1489 * (not asserted) or if the handle is invalid (asserted).
1490 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1491 *
1492 * @param pUVM The user mode VM handle.
1493 */
1494VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1495{
1496 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1497
1498 /* Note! There is a slight race here, unfortunately. */
1499 PVM pVM = pUVM->pVM;
1500 if (!RT_VALID_PTR(pVM))
1501 return VERR_INVALID_VM_HANDLE;
1502 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1503 return VERR_INVALID_VM_HANDLE;
1504 if (!pVM->dbgf.s.fAttached)
1505 return VERR_DBGF_NOT_ATTACHED;
1506
1507 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1508 return VINF_SUCCESS;
1509}
1510
1511
1512/**
1513 * Resumes VM execution.
1514 *
1515 * There is no receipt event on this command.
1516 *
1517 * @returns VBox status code.
1518 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1519 * @param pUVM The user mode VM handle.
1520 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1521 */
1522VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1523{
1524 /*
1525 * Validate input and attachment state.
1526 */
1527 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1528 PVM pVM = pUVM->pVM;
1529 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1530 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1531
1532 /*
1533 * Ping the halted emulation threads, telling them to run.
1534 */
1535 int rc = VWRN_DBGF_ALREADY_RUNNING;
1536 if (idCpu != VMCPUID_ALL)
1537 {
1538 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1539 if (dbgfR3CpuIsHalted(pUVCpu))
1540 {
1541 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1542 AssertRC(rc);
1543 }
1544 }
1545 else
1546 {
1547 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1548 {
1549 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1550 if (dbgfR3CpuIsHalted(pUVCpu))
1551 {
1552 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1553 AssertRC(rc2);
1554 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1555 rc = rc2;
1556 }
1557 }
1558 }
1559
1560 return rc;
1561}
1562
1563
1564/**
1565 * Classifies the current instruction.
1566 *
1567 * @returns Type of instruction.
1568 * @param pVM The cross context VM structure.
1569 * @param pVCpu The current CPU.
1570 * @thread EMT(pVCpu)
1571 */
1572static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1573{
1574 /*
1575 * Read the instruction.
1576 */
1577 size_t cbRead = 0;
1578 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1579 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1580 if (RT_SUCCESS(rc))
1581 {
1582 /*
1583 * Do minimal parsing. No real need to involve the disassembler here.
1584 */
1585 uint8_t *pb = abOpcode;
1586 for (;;)
1587 {
1588 switch (*pb++)
1589 {
1590 default:
1591 return DBGFSTEPINSTRTYPE_OTHER;
1592
1593 case 0xe8: /* call rel16/32 */
1594 case 0x9a: /* call farptr */
1595 case 0xcc: /* int3 */
1596 case 0xcd: /* int xx */
1597 // case 0xce: /* into */
1598 return DBGFSTEPINSTRTYPE_CALL;
1599
1600 case 0xc2: /* ret xx */
1601 case 0xc3: /* ret */
1602 case 0xca: /* retf xx */
1603 case 0xcb: /* retf */
1604 case 0xcf: /* iret */
1605 return DBGFSTEPINSTRTYPE_RET;
1606
1607 case 0xff:
1608 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1609 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1610 return DBGFSTEPINSTRTYPE_CALL;
1611 return DBGFSTEPINSTRTYPE_OTHER;
1612
1613 case 0x0f:
1614 switch (*pb++)
1615 {
1616 case 0x05: /* syscall */
1617 case 0x34: /* sysenter */
1618 return DBGFSTEPINSTRTYPE_CALL;
1619 case 0x07: /* sysret */
1620 case 0x35: /* sysexit */
1621 return DBGFSTEPINSTRTYPE_RET;
1622 }
1623 break;
1624
1625 /* Must handle some REX prefixes. So we do all normal prefixes. */
1626 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1627 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1628 if (!CPUMIsGuestIn64BitCode(pVCpu))
1629 return DBGFSTEPINSTRTYPE_OTHER;
1630 break;
1631
1632 case 0x2e: /* CS */
1633 case 0x36: /* SS */
1634 case 0x3e: /* DS */
1635 case 0x26: /* ES */
1636 case 0x64: /* FS */
1637 case 0x65: /* GS */
1638 case 0x66: /* op size */
1639 case 0x67: /* addr size */
1640 case 0xf0: /* lock */
1641 case 0xf2: /* REPNZ */
1642 case 0xf3: /* REPZ */
1643 break;
1644 }
1645 }
1646 }
1647
1648 return DBGFSTEPINSTRTYPE_INVALID;
1649}
1650
1651
1652/**
1653 * Checks if the stepping has reached a stop point.
1654 *
1655 * Called when raising a stepped event.
1656 *
1657 * @returns true if the event should be raised, false if we should take one more
1658 * step first.
1659 * @param pVM The cross context VM structure.
1660 * @param pVCpu The cross context per CPU structure of the calling EMT.
1661 * @thread EMT(pVCpu)
1662 */
1663static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1664{
1665 /*
1666 * Check valid pVCpu and that it matches the CPU one stepping.
1667 */
1668 if (pVCpu)
1669 {
1670 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1671 {
1672 /*
1673 * Increase the number of steps and see if we've reached the max.
1674 */
1675 pVM->dbgf.s.SteppingFilter.cSteps++;
1676 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1677 {
1678 /*
1679 * Check PC and SP address filtering.
1680 */
1681 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1682 {
1683 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1684 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1685 return true;
1686 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1687 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1688 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1689 return true;
1690 }
1691
1692 /*
1693 * Do step-over filtering separate from the step-into one.
1694 */
1695 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1696 {
1697 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1698 switch (enmType)
1699 {
1700 default:
1701 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1702 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1703 break;
1704 return true;
1705 case DBGFSTEPINSTRTYPE_CALL:
1706 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1707 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1708 return true;
1709 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1710 break;
1711 case DBGFSTEPINSTRTYPE_RET:
1712 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1713 {
1714 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1715 return true;
1716 /* If after return, we use the cMaxStep limit to stop the next time. */
1717 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1718 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1719 }
1720 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1721 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1722 break;
1723 }
1724 return false;
1725 }
1726 /*
1727 * Filtered step-into.
1728 */
1729 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1730 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1731 {
1732 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1733 switch (enmType)
1734 {
1735 default:
1736 break;
1737 case DBGFSTEPINSTRTYPE_CALL:
1738 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1739 return true;
1740 break;
1741 case DBGFSTEPINSTRTYPE_RET:
1742 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1743 return true;
1744 /* If after return, we use the cMaxStep limit to stop the next time. */
1745 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1746 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1747 break;
1748 }
1749 return false;
1750 }
1751 }
1752 }
1753 }
1754
1755 return true;
1756}
1757
1758
1759/**
1760 * Step Into.
1761 *
1762 * A single step event is generated from this command.
1763 * The current implementation is not reliable, so don't rely on the event coming.
1764 *
1765 * @returns VBox status code.
1766 * @param pUVM The user mode VM handle.
1767 * @param idCpu The ID of the CPU to single step on.
1768 */
1769VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1770{
1771 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1772}
1773
1774
1775/**
1776 * Full fledged step.
1777 *
1778 * This extended stepping API allows for doing multiple steps before raising an
1779 * event, helping implementing step over, step out and other more advanced
1780 * features.
1781 *
1782 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1783 * DBGFEVENT_STEPPED_EVENT. However the stepping may be interrupted by other
1784 * events, which will abort the stepping.
1785 *
1786 * The stop on pop area feature is for safeguarding step out.
1787 *
1788 * Please note though, that it will always use stepping and never breakpoints.
1789 * While this allows for a much greater flexibility it can at times be rather
1790 * slow.
1791 *
1792 * @returns VBox status code.
1793 * @param pUVM The user mode VM handle.
1794 * @param idCpu The ID of the CPU to single step on.
1795 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1796 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1797 * always be specified.
1798 * @param pStopPcAddr Address to stop executing at. Completely ignored
1799 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1800 * @param pStopPopAddr Stack address that SP must be lower than when
1801 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1802 * @param cbStopPop The range starting at @a pStopPopAddr which is
1803 * considered to be within the same thread stack. Note
1804 * that the API allows @a pStopPopAddr and @a cbStopPop
1805 * to form an area that wraps around and it will
1806 * consider the part starting at 0 as included.
1807 * @param cMaxSteps The maximum number of steps to take. This is to
1808 * prevent stepping for ever, so passing UINT32_MAX is
1809 * not recommended.
1810 *
1811 * @remarks The two address arguments must be guest context virtual addresses,
1812 * or HMA. The code doesn't make much of a point out of HMA, though.
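 *
 * As a hypothetical illustration of the wrap-around behaviour: with
 * @a pStopPopAddr at 0xfffff000 and @a cbStopPop set to 0x2000, the stop
 * area covers 0xfffff000 thru 0xffffffff as well as 0x0 thru 0xfff, since
 * the filter merely checks (SP - AddrStackPop) < cbStackPop using unsigned
 * arithmetic (see dbgfStepAreWeThereYet).
 *
 * A step-over request capped at 10000 steps could, for instance, look
 * like this:
 * @code
 *     rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER, NULL, NULL, 0, 10000); // step over, vCPU 0, max 10000 steps
 * @endcode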
1813 */
1814VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1815 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1816{
1817 /*
1818 * Check state.
1819 */
1820 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1821 PVM pVM = pUVM->pVM;
1822 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1823 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1824 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1825 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1826 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1827 {
1828 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1829 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1830 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1831 }
1832 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1833 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1834 {
1835 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1836 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1837 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1838 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1839 }
1840
1841 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1842 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1843 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1844 { /* likely */ }
1845 else
1846 return VERR_SEM_OUT_OF_TURN;
1847 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1848
1849 /*
1850 * Send the emulation thread a single-step command.
1851 */
1852 if (fFlags == DBGF_STEP_F_INTO)
1853 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1854 else
1855 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1856 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1857 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1858 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1859 else
1860 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1861 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1862 {
1863 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1864 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1865 }
1866 else
1867 {
1868 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1869 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1870 }
1871
1872 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1873 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1874 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1875
1876 Assert(dbgfR3CpuIsHalted(pUVCpu));
1877 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1878}
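
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * a debugger could build "step out" on top of DBGFR3StepEx() by stepping
 * over calls until SP pops out of the current frame.  The variables idCpu,
 * uCurrentSp and cbStackArea are hypothetical and must be supplied by the
 * caller from the halted CPU's register state.
 *
 *      DBGFADDRESS AddrStackPop;
 *      DBGFR3AddrFromFlat(pUVM, &AddrStackPop, uCurrentSp);   // current guest stack pointer
 *      int rc = DBGFR3StepEx(pUVM, idCpu,
 *                            DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP | DBGF_STEP_F_STOP_AFTER_RET,
 *                            NULL,           // no PC filtering (DBGF_STEP_F_STOP_ON_ADDRESS not given)
 *                            &AddrStackPop,  // stop once SP has popped above this address
 *                            cbStackArea,    // size of the area considered part of the same stack
 *                            _64K);          // cMaxSteps safety limit instead of UINT32_MAX
 */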
1879
1880
1881
1882/**
1883 * dbgfR3EventConfigEx argument packet.
1884 */
1885typedef struct DBGFR3EVENTCONFIGEXARGS
1886{
1887 PCDBGFEVENTCONFIG paConfigs;
1888 size_t cConfigs;
1889 int rc;
1890} DBGFR3EVENTCONFIGEXARGS;
1891/** Pointer to a dbgfR3EventConfigEx argument packet. */
1892typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1893
1894
1895/**
1896 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1897 */
1898static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1899{
1900 if (pVCpu->idCpu == 0)
1901 {
1902 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1903 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1904 size_t cConfigs = pArgs->cConfigs;
1905
1906 /*
1907 * Apply the changes.
1908 */
1909 unsigned cChanges = 0;
1910 for (uint32_t i = 0; i < cConfigs; i++)
1911 {
1912 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1913 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1914 if (paConfigs[i].fEnabled)
1915 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1916 else
1917 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1918 }
1919
1920 /*
1921 * Inform HM about changes.
1922 */
1923 if (cChanges > 0)
1924 {
1925 if (HMIsEnabled(pVM))
1926 {
1927 HMR3NotifyDebugEventChanged(pVM);
1928 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1929 }
1930 else if (VM_IS_NEM_ENABLED(pVM))
1931 {
1932 NEMR3NotifyDebugEventChanged(pVM);
1933 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1934 }
1935 }
1936 }
1937 else if (HMIsEnabled(pVM))
1938 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1939 else if (VM_IS_NEM_ENABLED(pVM))
1940 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1941
1942 return VINF_SUCCESS;
1943}
1944
1945
1946/**
1947 * Configures (enables/disables) multiple selectable debug events.
1948 *
1949 * @returns VBox status code.
1950 * @param pUVM The user mode VM handle.
1951 * @param   paConfigs   The events to configure and their new states.
1952 * @param cConfigs Number of entries in @a paConfigs.
1953 */
1954VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1955{
1956 /*
1957 * Validate input.
1958 */
1959 size_t i = cConfigs;
1960 while (i-- > 0)
1961 {
1962 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1963 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1964 }
1965 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1966 PVM pVM = pUVM->pVM;
1967 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1968
1969 /*
1970 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1971 * can sync their data and execution with new debug state.
1972 */
1973 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1974 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1975 dbgfR3EventConfigEx, &Args);
1976 if (RT_SUCCESS(rc))
1977 rc = Args.rc;
1978 return rc;
1979}
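
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * batch-configuring selectable events.  The two event types below are
 * assumptions chosen for illustration; any DBGFEVENTTYPE in the range
 * [DBGFEVENT_FIRST_SELECTABLE, DBGFEVENT_END) is accepted.
 *
 *      DBGFEVENTCONFIG aCfgs[] =
 *      {   // enmType,                     fEnabled
 *          { DBGFEVENT_TRIPLE_FAULT,       true  },   // start reporting guest triple faults
 *          { DBGFEVENT_INTERRUPT_HARDWARE, false },   // stop reporting hardware interrupts
 *      };
 *      int rc = DBGFR3EventConfigEx(pUVM, &aCfgs[0], RT_ELEMENTS(aCfgs));
 */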
1980
1981
1982/**
1983 * Enables or disables a selectable debug event.
1984 *
1985 * @returns VBox status code.
1986 * @param pUVM The user mode VM handle.
1987 * @param enmEvent The selectable debug event.
1988 * @param fEnabled The new state.
1989 */
1990VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1991{
1992 /*
1993 * Convert to an array call.
1994 */
1995 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1996 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1997}
1998
1999
2000/**
2001 * Checks if the given selectable event is enabled.
2002 *
2003 * @returns true if enabled, false if not or invalid input.
2004 * @param pUVM The user mode VM handle.
2005 * @param enmEvent The selectable debug event.
2006 * @sa DBGFR3EventQuery
2007 */
2008VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2009{
2010 /*
2011 * Validate input.
2012 */
2013 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2014 && enmEvent < DBGFEVENT_END, false);
2015 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2016 || enmEvent == DBGFEVENT_BREAKPOINT
2017 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2018 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2019
2020 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2021 PVM pVM = pUVM->pVM;
2022 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2023
2024 /*
2025 * Check the event status.
2026 */
2027 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2028}
2029
2030
2031/**
2032 * Queries the status of a set of events.
2033 *
2034 * @returns VBox status code.
2035 * @param pUVM The user mode VM handle.
2036 * @param paConfigs The events to query and where to return the state.
2037 * @param cConfigs The number of elements in @a paConfigs.
2038 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2039 */
2040VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2041{
2042 /*
2043 * Validate input.
2044 */
2045 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2046 PVM pVM = pUVM->pVM;
2047 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2048
2049 for (size_t i = 0; i < cConfigs; i++)
2050 {
2051 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2052 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2053 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2054 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2055 || enmType == DBGFEVENT_BREAKPOINT
2056 || enmType == DBGFEVENT_BREAKPOINT_IO
2057 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2058 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2059 }
2060
2061 return VINF_SUCCESS;
2062}
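
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * the same DBGFEVENTCONFIG array shape is used for querying; DBGFR3EventQuery()
 * fills in the fEnabled members, while DBGFR3EventIsEnabled() answers for a
 * single event.  DBGFEVENT_TRIPLE_FAULT is assumed to be a selectable event.
 *
 *      DBGFEVENTCONFIG Cfg = { DBGFEVENT_TRIPLE_FAULT, false };
 *      int rc = DBGFR3EventQuery(pUVM, &Cfg, 1);
 *      if (RT_SUCCESS(rc) && Cfg.fEnabled)
 *          LogRel(("Triple fault interception is enabled\n"));
 */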
2063
2064
2065/**
2066 * dbgfR3InterruptConfigEx argument packet.
2067 */
2068typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2069{
2070 PCDBGFINTERRUPTCONFIG paConfigs;
2071 size_t cConfigs;
2072 int rc;
2073} DBGFR3INTERRUPTCONFIGEXARGS;
2074/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2075typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2076
2077/**
2078 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2079 * Worker for DBGFR3InterruptConfigEx.}
2080 */
2081static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2082{
2083 if (pVCpu->idCpu == 0)
2084 {
2085 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2086 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2087 size_t cConfigs = pArgs->cConfigs;
2088
2089 /*
2090 * Apply the changes.
2091 */
2092 bool fChanged = false;
2093 bool fThis;
2094 for (uint32_t i = 0; i < cConfigs; i++)
2095 {
2096 /*
2097 * Hardware interrupts.
2098 */
2099 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2100 {
2101 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2102 if (fThis)
2103 {
2104 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2105 pVM->dbgf.s.cHardIntBreakpoints++;
2106 }
2107 }
2108 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2109 {
2110 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2111 if (fThis)
2112 {
2113 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2114 pVM->dbgf.s.cHardIntBreakpoints--;
2115 }
2116 }
2117
2118 /*
2119 * Software interrupts.
2120 */
2121            if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2122 {
2123 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2124 if (fThis)
2125 {
2126 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2127 pVM->dbgf.s.cSoftIntBreakpoints++;
2128 }
2129 }
2130 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2131 {
2132 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2133 if (fThis)
2134 {
2135 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2136 pVM->dbgf.s.cSoftIntBreakpoints--;
2137 }
2138 }
2139 }
2140
2141 /*
2142 * Update the event bitmap entries.
2143 */
2144 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2145 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2146 else
2147 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2148
2149 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2150 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2151 else
2152 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2153
2154 /*
2155 * Inform HM about changes.
2156 */
2157 if (fChanged)
2158 {
2159 if (HMIsEnabled(pVM))
2160 {
2161 HMR3NotifyDebugEventChanged(pVM);
2162 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2163 }
2164 else if (VM_IS_NEM_ENABLED(pVM))
2165 {
2166 NEMR3NotifyDebugEventChanged(pVM);
2167 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2168 }
2169 }
2170 }
2171 else if (HMIsEnabled(pVM))
2172 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2173 else if (VM_IS_NEM_ENABLED(pVM))
2174 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2175
2176 return VINF_SUCCESS;
2177}
2178
2179
2180/**
2181 * Changes the hardware and software interrupt interception configuration for
2182 * multiple interrupts.
2182 *
2183 * @returns VBox status code.
2184 * @param pUVM The user mode VM handle.
2185 * @param   paConfigs   The interrupts to configure and their new states.
2186 * @param cConfigs The number of elements in @a paConfigs.
2187 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2188 */
2189VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2190{
2191 /*
2192 * Validate input.
2193 */
2194 size_t i = cConfigs;
2195 while (i-- > 0)
2196 {
2197 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2198 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2199 }
2200
2201 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2202 PVM pVM = pUVM->pVM;
2203 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2204
2205 /*
2206 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2207 * can sync their data and execution with new debug state.
2208 */
2209 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2210 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2211 dbgfR3InterruptConfigEx, &Args);
2212 if (RT_SUCCESS(rc))
2213 rc = Args.rc;
2214 return rc;
2215}
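
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * enabling interception of one hardware and one software interrupt in a
 * single call.  The interrupt numbers are arbitrary examples;
 * DBGFINTERRUPTSTATE_DONT_TOUCH leaves the other interception type as-is.
 *
 *      DBGFINTERRUPTCONFIG aCfgs[] =
 *      {   // iInterrupt, enmHardState,                  enmSoftState
 *          { 0x0e,        DBGFINTERRUPTSTATE_ENABLED,    DBGFINTERRUPTSTATE_DONT_TOUCH },
 *          { 0x80,        DBGFINTERRUPTSTATE_DONT_TOUCH, DBGFINTERRUPTSTATE_ENABLED    },
 *      };
 *      int rc = DBGFR3InterruptConfigEx(pUVM, &aCfgs[0], RT_ELEMENTS(aCfgs));
 */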
2216
2217
2218/**
2219 * Configures interception of a hardware interrupt.
2220 *
2221 * @returns VBox status code.
2222 * @param pUVM The user mode VM handle.
2223 * @param iInterrupt The interrupt number.
2224 * @param fEnabled Whether interception is enabled or not.
2225 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2226 */
2227VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2228{
2229 /*
2230 * Convert to DBGFR3InterruptConfigEx call.
2231 */
2232 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2233 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2234}
2235
2236
2237/**
2238 * Configures interception of a software interrupt.
2239 *
2240 * @returns VBox status code.
2241 * @param pUVM The user mode VM handle.
2242 * @param iInterrupt The interrupt number.
2243 * @param fEnabled Whether interception is enabled or not.
2244 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2245 */
2246VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2247{
2248 /*
2249 * Convert to DBGFR3InterruptConfigEx call.
2250 */
2251 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2252 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2253}
2254
2255
2256/**
2257 * Checks whether interception is enabled for a hardware interrupt.
2258 *
2259 * @returns true if enabled, false if not or invalid input.
2260 * @param pUVM The user mode VM handle.
2261 * @param iInterrupt The interrupt number.
2262 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2263 * DBGF_IS_SOFTWARE_INT_ENABLED
2264 */
2265VMMR3DECL(bool) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2266{
2267 /*
2268 * Validate input.
2269 */
2270 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2271 PVM pVM = pUVM->pVM;
2272 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2273
2274 /*
2275 * Check it.
2276 */
2277 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2278}
2279
2280
2281/**
2282 * Checks whether interception is enabled for a software interrupt.
2283 *
2284 * @returns true if enabled, false if not or invalid input.
2285 * @param pUVM The user mode VM handle.
2286 * @param iInterrupt The interrupt number.
2287 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2288 * DBGF_IS_HARDWARE_INT_ENABLED,
2289 */
2290VMMR3DECL(bool) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2291{
2292 /*
2293 * Validate input.
2294 */
2295 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2296 PVM pVM = pUVM->pVM;
2297 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2298
2299 /*
2300 * Check it.
2301 */
2302 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2303}
2304
2305
2306
2307/**
2308 * Call this to single step programmatically.
2309 *
2310 * You must pass down the return code to the EM loop! That's
2311 * where the actual single stepping takes place (at least in the
2312 * current implementation).
2313 *
2314 * @returns VINF_EM_DBG_STEP
2315 *
2316 * @param pVCpu The cross context virtual CPU structure.
2317 *
2318 * @thread VCpu EMT
2319 * @internal
2320 */
2321VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2322{
2323 VMCPU_ASSERT_EMT(pVCpu);
2324
2325 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2326 return VINF_EM_DBG_STEP;
2327}
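
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * code running on the EMT that wants a programmatic single step simply hands
 * the status back up to the EM loop, e.g. from a hypothetical helper:
 *
 *      return DBGFR3PrgStep(pVCpu);    // EM performs the actual step on seeing VINF_EM_DBG_STEP
 */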
2328
2329
2330/**
2331 * Injects an NMI into a running VM on the specified virtual CPU.
2332 *
2333 * @returns VBox status code.
2334 * @param pUVM The user mode VM structure.
2335 * @param idCpu The ID of the CPU to inject the NMI on.
2336 */
2337VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2338{
2339 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2340 PVM pVM = pUVM->pVM;
2341 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2342 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2343
2344 /** @todo Implement generic NMI injection. */
2345 /** @todo NEM: NMI injection */
2346 if (!HMIsEnabled(pVM))
2347 return VERR_NOT_SUP_BY_NEM;
2348
2349 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2350 return VINF_SUCCESS;
2351}
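
/*
 * Usage sketch (illustrative only, not taken from the VirtualBox sources):
 * injecting an NMI on virtual CPU 0 of an HM-enabled VM:
 *
 *      int rc = DBGFR3InjectNMI(pUVM, 0);  // idCpu = 0
 *      AssertLogRelRC(rc);
 */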
2352