VirtualBox source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp @ 98103

1/* $Id: DBGF.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf DBGF - The Debugger Facility
30 *
31 * The purpose of the DBGF is to provide an interface for debuggers to
32 * manipulate the VMM without having to mess up the source code for each of
33 * them. The DBGF is always built in and will always work when a debugger
34 * attaches to the VM. The DBGF provides the basic debugger features, such as
35 * halting execution, handling breakpoints, single step execution, instruction
36 * disassembly, info querying, OS specific diggers, symbol and module
37 * management.
38 *
39 * The interface works in a manner similar to the win32, linux and os2
40 * debugger interfaces. The interface has an asynchronous nature. This comes
41 * from the fact that the VMM and the Debugger are running in different threads.
42 * They are referred to as the "emulation thread" and the "debugger thread", or
43 * as the "ping thread" and the "pong thread", respectively. (The last set of
44 * names comes from the use of the Ping-Pong synchronization construct from the
45 * RTSem API.)
46 *
47 * @see grp_dbgf
48 *
49 *
50 * @section sec_dbgf_scenario Usage Scenario
51 *
52 * The debugger starts by attaching to the VM. For practical reasons we limit the
53 * number of concurrently attached debuggers to 1 per VM. The action of
54 * attaching to the VM causes the VM to check and generate debug events.
55 *
56 * The debugger then will wait/poll for debug events and issue commands.
57 *
58 * The waiting and polling are done by the DBGFR3EventWait() function. It will wait
59 * for the emulation thread to send a ping, thus indicating that there is an
60 * event waiting to be processed.
61 *
62 * An event can be a response to a command issued previously, the hitting of a
63 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
64 * the ping and must respond to the event at hand - the VMM is waiting. This
65 * usually means that the user of the debugger must do something, but it doesn't
66 * have to. The debugger is free to call any DBGF function (nearly at least)
67 * while processing the event.
68 *
69 * Typically the user will issue a request for the execution to be resumed, so
70 * the debugger calls DBGFR3Resume() and goes back to waiting/polling for events.
71 *
72 * When the user eventually terminates the debugging session or selects another
73 * VM, the debugger detaches from the VM. This means that breakpoints are
74 * disabled and that the emulation thread no longer polls for debugger commands.
75 *
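 * Given the user mode VM handle pUVM, a minimal sketch of the debugger side of
 * this protocol might look like the following (illustrative only, not taken
 * from a real client; error handling and event-specific processing omitted):
 *
 * @code
 *      int rc = DBGFR3Attach(pUVM);
 *      while (RT_SUCCESS(rc))
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          if (RT_FAILURE(rc))
 *              break;
 *          // ... inspect guest state, adjust breakpoints, etc. ...
 *          if (Event.enmType == DBGFEVENT_POWERING_OFF)
 *              break;
 *          rc = DBGFR3Resume(pUVM, VMCPUID_ALL);
 *      }
 *      DBGFR3Detach(pUVM);
 * @endcode
 *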
76 */
77
78
79/*********************************************************************************************************************************
80* Header Files *
81*********************************************************************************************************************************/
82#define LOG_GROUP LOG_GROUP_DBGF
83#include <VBox/vmm/dbgf.h>
84#include <VBox/vmm/selm.h>
85#include <VBox/vmm/em.h>
86#include <VBox/vmm/hm.h>
87#include <VBox/vmm/mm.h>
88#include <VBox/vmm/nem.h>
89#include "DBGFInternal.h"
90#include <VBox/vmm/vm.h>
91#include <VBox/vmm/uvm.h>
92#include <VBox/err.h>
93
94#include <VBox/log.h>
95#include <iprt/semaphore.h>
96#include <iprt/thread.h>
97#include <iprt/asm.h>
98#include <iprt/time.h>
99#include <iprt/assert.h>
100#include <iprt/stream.h>
101#include <iprt/env.h>
102
103
104/*********************************************************************************************************************************
105* Structures and Typedefs *
106*********************************************************************************************************************************/
107/**
108 * Instruction type returned by dbgfStepGetCurInstrType.
109 */
110typedef enum DBGFSTEPINSTRTYPE
111{
112 DBGFSTEPINSTRTYPE_INVALID = 0,
113 DBGFSTEPINSTRTYPE_OTHER,
114 DBGFSTEPINSTRTYPE_RET,
115 DBGFSTEPINSTRTYPE_CALL,
116 DBGFSTEPINSTRTYPE_END,
117 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
118} DBGFSTEPINSTRTYPE;
119
120
121/*********************************************************************************************************************************
122* Internal Functions *
123*********************************************************************************************************************************/
124DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
125DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
126static int dbgfR3CpuWait(PVMCPU pVCpu);
127static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
128static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
129static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
130static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
131
132
133
134/**
135 * Initializes the DBGF.
136 *
137 * @returns VBox status code.
138 * @param pVM The cross context VM structure.
139 */
140VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
141{
142 PUVM pUVM = pVM->pUVM;
143 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
144 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
145
146 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
147
148 /*
149 * The usual sideways mountain climbing style of init:
150 */
151 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
152 if (RT_SUCCESS(rc))
153 {
154 rc = dbgfR3TraceInit(pVM);
155 if (RT_SUCCESS(rc))
156 {
157 rc = dbgfR3RegInit(pUVM);
158 if (RT_SUCCESS(rc))
159 {
160 rc = dbgfR3AsInit(pUVM);
161 if (RT_SUCCESS(rc))
162 {
163 rc = dbgfR3BpInit(pUVM);
164 if (RT_SUCCESS(rc))
165 {
166 rc = dbgfR3OSInit(pUVM);
167 if (RT_SUCCESS(rc))
168 {
169 rc = dbgfR3PlugInInit(pUVM);
170 if (RT_SUCCESS(rc))
171 {
172 rc = dbgfR3BugCheckInit(pVM);
173 if (RT_SUCCESS(rc))
174 {
175#ifdef VBOX_WITH_DBGF_TRACING
176 rc = dbgfR3TracerInit(pVM);
177#endif
178 if (RT_SUCCESS(rc))
179 {
180 return VINF_SUCCESS;
181 }
182 }
183 dbgfR3PlugInTerm(pUVM);
184 }
185 dbgfR3OSTermPart1(pUVM);
186 dbgfR3OSTermPart2(pUVM);
187 }
188 dbgfR3BpTerm(pUVM);
189 }
190 dbgfR3AsTerm(pUVM);
191 }
192 dbgfR3RegTerm(pUVM);
193 }
194 dbgfR3TraceTerm(pVM);
195 }
196 dbgfR3InfoTerm(pUVM);
197 }
198 return rc;
199}
200
201
202/**
203 * Terminates and cleans up resources allocated by the DBGF.
204 *
205 * @returns VBox status code.
206 * @param pVM The cross context VM structure.
207 */
208VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
209{
210 PUVM pUVM = pVM->pUVM;
211
212#ifdef VBOX_WITH_DBGF_TRACING
213 dbgfR3TracerTerm(pVM);
214#endif
215 dbgfR3OSTermPart1(pUVM);
216 dbgfR3PlugInTerm(pUVM);
217 dbgfR3OSTermPart2(pUVM);
218 dbgfR3BpTerm(pUVM);
219 dbgfR3AsTerm(pUVM);
220 dbgfR3RegTerm(pUVM);
221 dbgfR3TraceTerm(pVM);
222 dbgfR3InfoTerm(pUVM);
223
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * This is for tstCFGM and others to avoid triggering leak detection.
230 *
231 * @returns VBox status code.
232 * @param pUVM The user mode VM structure.
233 */
234VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
235{
236 dbgfR3InfoTerm(pUVM);
237}
238
239
240/**
241 * Called when the VM is powered off to detach debuggers.
242 *
243 * @param pVM The cross context VM structure.
244 */
245VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
246{
247 /*
248 * Send a termination event to any attached debugger.
249 */
250 if (pVM->dbgf.s.fAttached)
251 {
252 PVMCPU pVCpu = VMMGetCpu(pVM);
253 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
254 AssertLogRelRC(rc);
255
256 /*
257 * Clear the FF so we won't get confused later on.
258 */
259 VM_FF_CLEAR(pVM, VM_FF_DBGF);
260 }
261}
262
263
264/**
265 * Applies relocations to data and code managed by this
266 * component. This function will be called at init and
267 * whenever the VMM needs to relocate itself inside the GC.
268 *
269 * @param pVM The cross context VM structure.
270 * @param offDelta Relocation delta relative to old location.
271 */
272VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
273{
274 dbgfR3TraceRelocate(pVM);
275 dbgfR3AsRelocate(pVM->pUVM, offDelta);
276}
277
278
279/**
280 * Waits a little while for a debugger to attach.
281 *
282 * @returns True if a debugger has attached.
283 * @param pVM The cross context VM structure.
284 * @param pVCpu The cross context per CPU structure.
285 * @param enmEvent Event.
286 *
287 * @thread EMT(pVCpu)
288 */
289bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
290{
291 /*
292 * First a message.
293 */
294#if !defined(DEBUG)
295 int cWait = 10;
296#else
297 int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
298 || ( ( enmEvent == DBGFEVENT_ASSERTION_HYPER
299 || enmEvent == DBGFEVENT_FATAL_ERROR)
300 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
301 ? 10
302 : 150;
303#endif
304 RTStrmPrintf(g_pStdErr,
305 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
306#ifdef DEBUG
307 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
308#endif
309 ,
310 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
311 RTStrmFlush(g_pStdErr);
312 while (cWait > 0)
313 {
314 RTThreadSleep(100);
315 if (pVM->dbgf.s.fAttached)
316 {
317 RTStrmPrintf(g_pStdErr, "Attached!\n");
318 RTStrmFlush(g_pStdErr);
319 return true;
320 }
321
322 /* Process rendezvous (debugger attaching involves such). */
323 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
324 {
325 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
326 if (rc != VINF_SUCCESS)
327 {
328 /** @todo Ignoring these could be bad. */
329 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
330 RTStrmFlush(g_pStdErr);
331 }
332 }
333
334 /* Process priority stuff. */
335 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
336 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
337 {
338 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
339 if (rc == VINF_SUCCESS)
340 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
341 if (rc != VINF_SUCCESS)
342 {
343 /** @todo Ignoring these could be bad. */
344 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
345 RTStrmFlush(g_pStdErr);
346 }
347 }
348
349 /* next */
350 if (!(cWait % 10))
351 {
352 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
353 RTStrmFlush(g_pStdErr);
354 }
355 cWait--;
356 }
357
358 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
359 RTStrmFlush(g_pStdErr);
360 return false;
361}
362
363
364/**
365 * Forced action callback.
366 *
367 * The VMM will call this from its main loop when either VM_FF_DBGF or
368 * VMCPU_FF_DBGF are set.
369 *
370 * The function checks for and executes pending commands from the debugger.
371 * Then it checks for pending debug events and serves these.
372 *
373 * @returns VINF_SUCCESS normally.
374 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
375 * @param pVM The cross context VM structure.
376 * @param pVCpu The cross context per CPU structure.
377 */
378VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
379{
380 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
381
382 /*
383 * Dispatch pending events.
384 */
385 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
386 {
387 if ( pVCpu->dbgf.s.cEvents > 0
388 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
389 {
390 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
391 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
392 }
393
394 /*
395 * Command pending? Process it.
396 */
397 PUVMCPU pUVCpu = pVCpu->pUVCpu;
398 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
399 {
400 bool fResumeExecution;
401 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
402 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
403 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
404 if (!fResumeExecution)
405 rcStrict2 = dbgfR3CpuWait(pVCpu);
406 if ( rcStrict2 != VINF_SUCCESS
407 && ( rcStrict == VINF_SUCCESS
408 || RT_FAILURE(rcStrict2)
409 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
410 rcStrict = rcStrict2;
411 }
412 }
413
414 return VBOXSTRICTRC_TODO(rcStrict);
415}
416
417
418/**
419 * Try to determine the event context.
420 *
421 * @returns debug event context.
422 * @param pVCpu The cross context vCPU structure.
423 */
424static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
425{
426 switch (EMGetState(pVCpu))
427 {
428 case EMSTATE_HM:
429 case EMSTATE_NEM:
430 case EMSTATE_DEBUG_GUEST_HM:
431 case EMSTATE_DEBUG_GUEST_NEM:
432 return DBGFEVENTCTX_HM;
433
434 case EMSTATE_IEM:
435 case EMSTATE_RAW:
436 case EMSTATE_IEM_THEN_REM:
437 case EMSTATE_DEBUG_GUEST_IEM:
438 case EMSTATE_DEBUG_GUEST_RAW:
439 return DBGFEVENTCTX_RAW;
440
441
442 case EMSTATE_REM:
443 case EMSTATE_DEBUG_GUEST_REM:
444 return DBGFEVENTCTX_REM;
445
446 case EMSTATE_DEBUG_HYPER:
447 case EMSTATE_GURU_MEDITATION:
448 return DBGFEVENTCTX_HYPER;
449
450 default:
451 return DBGFEVENTCTX_OTHER;
452 }
453}
454
455
456/**
457 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
458 *
459 * @returns VBox status code.
460 * @param pVM The cross context VM structure.
461 * @param pVCpu The CPU sending the event.
462 * @param enmType The event type to send.
463 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
464 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
465 * @param cbPayload The size of the event payload, optional.
466 */
467static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
468 void const *pvPayload, size_t cbPayload)
469{
470 PUVM pUVM = pVM->pUVM;
471 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
472
473 /*
474 * Massage the input a little.
475 */
476 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
477 if (enmCtx == DBGFEVENTCTX_INVALID)
478 enmCtx = dbgfR3FigureEventCtx(pVCpu);
479
480 /*
481 * Put the event into the ring buffer.
482 */
483 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
484
485 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
486 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
487 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
488 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
489
490 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
491
492#ifdef DEBUG
493 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
494#endif
495 pEvent->enmType = enmType;
496 pEvent->enmCtx = enmCtx;
497 pEvent->idCpu = pVCpu->idCpu;
498 pEvent->uReserved = 0;
499 if (cbPayload)
500 memcpy(&pEvent->u, pvPayload, cbPayload);
501
502 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
503
504 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
505
506 /*
507 * Signal the debugger.
508 */
509 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
510}
511
512
513/**
514 * Send event and wait for the debugger to respond.
515 *
516 * @returns Strict VBox status code.
517 * @param pVM The cross context VM structure.
518 * @param pVCpu The CPU sending the event.
519 * @param enmType The event type to send.
520 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
521 */
522DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
523{
524 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
525 if (RT_SUCCESS(rc))
526 rc = dbgfR3CpuWait(pVCpu);
527 return rc;
528}
529
530
531/**
532 * Send event and wait for the debugger to respond, extended version.
533 *
534 * @returns Strict VBox status code.
535 * @param pVM The cross context VM structure.
536 * @param pVCpu The CPU sending the event.
537 * @param enmType The event type to send.
538 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
539 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
540 * @param cbPayload The size of the event payload, optional.
541 */
542DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
543 void const *pvPayload, size_t cbPayload)
544{
545 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
546 if (RT_SUCCESS(rc))
547 rc = dbgfR3CpuWait(pVCpu);
548 return rc;
549}
550
551
552/**
553 * Send event but do NOT wait for the debugger.
554 *
555 * Currently only used by dbgfR3CpuCmd().
556 *
557 * @param pVM The cross context VM structure.
558 * @param pVCpu The CPU sending the event.
559 * @param enmType The event type to send.
560 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
561 */
562DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
563{
564 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
565}
566
567
568/**
569 * The common event prologue code.
570 *
571 * It will make sure someone is attached, and perhaps process any high priority
572 * pending actions (none yet).
573 *
574 * @returns VBox status code.
575 * @param pVM The cross context VM structure.
576 * @param pVCpu The vCPU cross context structure.
577 * @param enmEvent The event to be sent.
578 */
579static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
580{
581 /*
582 * Check if a debugger is attached.
583 */
584 if ( !pVM->dbgf.s.fAttached
585 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
586 {
587 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
588 return VERR_DBGF_NOT_ATTACHED;
589 }
590
591 /*
592 * Look thru pending commands and finish those which make sense now.
593 */
594 /** @todo Process/purge pending commands. */
595 //int rc = DBGFR3VMMForcedAction(pVM);
596 return VINF_SUCCESS;
597}
598
599
600/**
601 * Processes a pending event on the current CPU.
602 *
603 * This is called by EM in response to VINF_EM_DBG_EVENT.
604 *
605 * @returns Strict VBox status code.
606 * @param pVM The cross context VM structure.
607 * @param pVCpu The cross context per CPU structure.
608 *
609 * @thread EMT(pVCpu)
610 */
611VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
612{
613 VMCPU_ASSERT_EMT(pVCpu);
614 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
615
616 /*
617 * Check that we've got an event first.
618 */
619 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
620 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
621 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
622
623 /*
624 * Make sure we've got a debugger and are allowed to speak to it.
625 */
626 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
627 if (RT_FAILURE(rc))
628 {
629 /** @todo drop them events? */
630 return rc; /** @todo this will cause trouble if we're here via an FF! */
631 }
632
633 /*
634 * Send the event and mark it as ignore.
635 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
636 */
637 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
638 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
639 return rcStrict;
640}
641
642
643/**
644 * Send a generic debugger event which takes no data.
645 *
646 * @returns VBox status code.
647 * @param pVM The cross context VM structure.
648 * @param enmEvent The event to send.
649 * @internal
650 */
651VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
652{
653 PVMCPU pVCpu = VMMGetCpu(pVM);
654 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
655
656 /*
657 * Do stepping filtering.
658 */
659 /** @todo Would be better if we did some of this inside the execution
660 * engines. */
661 if ( enmEvent == DBGFEVENT_STEPPED
662 || enmEvent == DBGFEVENT_STEPPED_HYPER)
663 {
664 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
665 return VINF_EM_DBG_STEP;
666 }
667
668 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
669 if (RT_FAILURE(rc))
670 return rc;
671
672 /*
673 * Send the event and process the reply communication.
674 */
675 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
676}
677
678
679/**
680 * Send a debugger event which takes the full source file location.
681 *
682 * @returns VBox status code.
683 * @param pVM The cross context VM structure.
684 * @param enmEvent The event to send.
685 * @param pszFile Source file.
686 * @param uLine Line number in source file.
687 * @param pszFunction Function name.
688 * @param pszFormat Message which accompanies the event.
689 * @param ... Message arguments.
690 * @internal
691 */
692VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
693{
694 va_list args;
695 va_start(args, pszFormat);
696 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
697 va_end(args);
698 return rc;
699}
700
701
702/**
703 * Send a debugger event which takes the full source file location.
704 *
705 * @returns VBox status code.
706 * @param pVM The cross context VM structure.
707 * @param enmEvent The event to send.
708 * @param pszFile Source file.
709 * @param uLine Line number in source file.
710 * @param pszFunction Function name.
711 * @param pszFormat Message which accompanies the event.
712 * @param args Message arguments.
713 * @internal
714 */
715VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
716{
717 PVMCPU pVCpu = VMMGetCpu(pVM);
718 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
719
720 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
721 if (RT_FAILURE(rc))
722 return rc;
723
724 /*
725 * Format the message.
726 */
727 char *pszMessage = NULL;
728 char szMessage[8192];
729 if (pszFormat && *pszFormat)
730 {
731 pszMessage = &szMessage[0];
732 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
733 }
734
735 /*
736 * Send the event and process the reply communication.
737 */
738 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
739 DbgEvent.u.Src.pszFile = pszFile;
740 DbgEvent.u.Src.uLine = uLine;
741 DbgEvent.u.Src.pszFunction = pszFunction;
742 DbgEvent.u.Src.pszMessage = pszMessage;
743 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
744}
745
746
747/**
748 * Send a debugger event which takes the two assertion messages.
749 *
750 * @returns VBox status code.
751 * @param pVM The cross context VM structure.
752 * @param enmEvent The event to send.
753 * @param pszMsg1 First assertion message.
754 * @param pszMsg2 Second assertion message.
755 */
756VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
757{
758 PVMCPU pVCpu = VMMGetCpu(pVM);
759 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
760
761 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
762 if (RT_FAILURE(rc))
763 return rc;
764
765 /*
766 * Send the event and process the reply communication.
767 */
768 DBGFEVENT DbgEvent;
769 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
770 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
771 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
772}
773
774
775/**
776 * Breakpoint was hit somewhere.
777 * Figure out which breakpoint it is and notify the debugger.
778 *
779 * @returns VBox status code.
780 * @param pVM The cross context VM structure.
781 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
782 */
783VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
784{
785 PVMCPU pVCpu = VMMGetCpu(pVM);
786 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
787
788 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
789 if (RT_FAILURE(rc))
790 return rc;
791
792 /*
793 * Halt all other vCPUs as well to give the user the ability to inspect
794 * other vCPU states too.
795 */
796 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
797 if (RT_FAILURE(rc))
798 return rc;
799
800 /*
801 * Send the event and process the reply communication.
802 */
803 DBGFEVENT DbgEvent;
804 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
805 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
806 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
807 {
808 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
809 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
810 }
811
812 return VERR_DBGF_IPE_1;
813}
814
815
816/**
817 * Returns whether the given vCPU is waiting for the debugger.
818 *
819 * @returns Flag indicating whether the vCPU is currently waiting for the debugger.
820 * @param pUVCpu The user mode vCPU structure.
821 */
822DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
823{
824 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
825}
826
827
828/**
829 * Checks whether the given vCPU is waiting in the debugger.
830 *
831 * @returns Flag indicating whether the indicated vCPU is halted; when VMCPUID_ALL
832 * is given, true is returned when at least one vCPU is halted.
833 * @param pUVM The user mode VM structure.
834 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
835 */
836DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
837{
838 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
839
840 /* Check that either the given vCPU or all are actually halted. */
841 if (idCpu != VMCPUID_ALL)
842 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
843
844 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
845 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
846 return true;
847 return false;
848}
849
850
851/**
852 * Gets the pending debug command for this EMT/CPU, replacing it with
853 * DBGFCMD_NO_COMMAND.
854 *
855 * @returns Pending command.
856 * @param pUVCpu The user mode virtual CPU structure.
857 * @thread EMT(pUVCpu)
858 */
859DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
860{
861 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
862 Log2(("DBGF: Getting command: %d\n", enmCmd));
863 return enmCmd;
864}
865
866
867/**
868 * Send a debug command to a CPU, making sure to notify it.
869 *
870 * @returns VBox status code.
871 * @param pUVCpu The user mode virtual CPU structure.
872 * @param enmCmd The command to submit to the CPU.
873 */
874DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
875{
876 Log2(("DBGF: Setting command to %d\n", enmCmd));
877 Assert(enmCmd != DBGFCMD_NO_COMMAND);
878 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
879
880 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
881 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
882
883 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
884 return VINF_SUCCESS;
885}
886
887
888/**
889 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
890 */
891static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
892{
893 RT_NOREF(pvUser);
894
895 VMCPU_ASSERT_EMT(pVCpu);
896 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
897
898 PUVMCPU pUVCpu = pVCpu->pUVCpu;
899 if ( pVCpu != (PVMCPU)pvUser
900 && !dbgfR3CpuIsHalted(pUVCpu))
901 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
902
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Halts all vCPUs of the given VM except for the given one.
909 *
910 * @returns VBox status code.
911 * @param pVM The cross context VM structure.
912 * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
913 */
914static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
915{
916 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
917}
918
919
920/**
921 * Waits for the debugger to respond.
922 *
923 * @returns VBox status code. (clarify)
924 * @param pVCpu The cross context vCPU structure.
925 */
926static int dbgfR3CpuWait(PVMCPU pVCpu)
927{
928 PVM pVM = pVCpu->CTX_SUFF(pVM);
929 PUVMCPU pUVCpu = pVCpu->pUVCpu;
930
931 LogFlow(("dbgfR3CpuWait:\n"));
932 int rcRet = VINF_SUCCESS;
933
934 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
935
936 /*
937 * Waits for the debugger to reply (i.e. issue a command).
938 */
939 for (;;)
940 {
941 /*
942 * Wait.
943 */
944 for (;;)
945 {
946 /*
947 * Process forced flags before we go sleep.
948 */
949 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
950 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
951 {
952 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
953 break;
954
955 int rc;
956 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
957 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
958 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
959 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
960 {
961 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
962 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
963 if (rc == VINF_SUCCESS)
964 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
965 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
966 }
967 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
968 {
969 VMSTATE enmState = VMR3GetState(pVM);
970 switch (enmState)
971 {
972 case VMSTATE_FATAL_ERROR:
973 case VMSTATE_FATAL_ERROR_LS:
974 case VMSTATE_GURU_MEDITATION:
975 case VMSTATE_GURU_MEDITATION_LS:
976 rc = VINF_EM_SUSPEND;
977 break;
978 case VMSTATE_DESTROYING:
979 rc = VINF_EM_TERMINATE;
980 break;
981 default:
982 rc = VERR_DBGF_IPE_1;
983 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
984 }
985 }
986 else
987 rc = VINF_SUCCESS;
988 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
989 {
990 switch (rc)
991 {
992 case VINF_EM_DBG_BREAKPOINT:
993 case VINF_EM_DBG_STEPPED:
994 case VINF_EM_DBG_STEP:
995 case VINF_EM_DBG_STOP:
996 case VINF_EM_DBG_EVENT:
997 AssertMsgFailed(("rc=%Rrc\n", rc));
998 break;
999
1000 /* return straight away */
1001 case VINF_EM_TERMINATE:
1002 case VINF_EM_OFF:
1003 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1004 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1005 return rc;
1006
1007 /* remember return code. */
1008 default:
1009 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
1010 RT_FALL_THRU();
1011 case VINF_EM_RESET:
1012 case VINF_EM_SUSPEND:
1013 case VINF_EM_HALT:
1014 case VINF_EM_RESUME:
1015 case VINF_EM_RESCHEDULE:
1016 case VINF_EM_RESCHEDULE_REM:
1017 case VINF_EM_RESCHEDULE_RAW:
1018 if (rc < rcRet || rcRet == VINF_SUCCESS)
1019 rcRet = rc;
1020 break;
1021 }
1022 }
1023 else if (RT_FAILURE(rc))
1024 {
1025 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1026 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1027 return rc;
1028 }
1029 }
1030 else if (pVM->dbgf.s.fAttached)
1031 {
1032 int rc = VMR3WaitU(pUVCpu);
1033 if (RT_FAILURE(rc))
1034 {
1035 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1036 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1037 return rc;
1038 }
1039 }
1040 else
1041 {
1042 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1043 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1044 return rcRet;
1045 }
1046 }
1047
1048 /*
1049 * Process the command.
1050 */
1051 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1052 bool fResumeExecution;
1053 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1054 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1055 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1056 if (fResumeExecution)
1057 {
1058 if (RT_FAILURE(rc))
1059 rcRet = rc;
1060 else if ( rc >= VINF_EM_FIRST
1061 && rc <= VINF_EM_LAST
1062 && (rc < rcRet || rcRet == VINF_SUCCESS))
1063 rcRet = rc;
1064 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1065 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1066 return rcRet;
1067 }
1068 }
1069}
1070
1071
1072/**
1073 * Executes a command from the debugger.
1074 *
1075 * The caller is responsible for waiting or resuming execution based on the
1076 * value returned in the *pfResumeExecution indicator.
1077 *
1078 * @returns VBox status code. (clarify!)
1079 * @param pVCpu The cross context vCPU structure.
1080 * @param enmCmd The command in question.
1081 * @param pCmdData Pointer to the command data.
1082 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1083 */
1084static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1085{
1086 RT_NOREF(pCmdData); /* for later */
1087
1088 /*
1089 * The cases in this switch return directly if there is no event to send.
1090 */
1091 DBGFEVENTTYPE enmEvent;
1092 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1093 switch (enmCmd)
1094 {
1095 /*
1096 * Halt is answered by an event saying that we've halted.
1097 */
1098 case DBGFCMD_HALT:
1099 {
1100 *pfResumeExecution = false;
1101 enmEvent = DBGFEVENT_HALT_DONE;
1102 break;
1103 }
1104
1105
1106 /*
1107 * Resume is not answered, we just resume execution.
1108 */
1109 case DBGFCMD_GO:
1110 {
1111 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1112 *pfResumeExecution = true;
1113 return VINF_SUCCESS;
1114 }
1115
1116 /** @todo implement (and define) the rest of the commands. */
1117
1118 /*
1119 * Single step, with trace into.
1120 */
1121 case DBGFCMD_SINGLE_STEP:
1122 {
1123 Log2(("Single step\n"));
1124 PVM pVM = pVCpu->CTX_SUFF(pVM);
1125 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1126 {
1127 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1128 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1129 }
1130 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1131 {
1132 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1133 *pfResumeExecution = true;
1134 return VINF_EM_DBG_STEP;
1135 }
1136 /* Stop after zero steps. Nonsense, but whatever. */
1137 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1138 *pfResumeExecution = false;
1139 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1140 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1141 break;
1142 }
1143
1144 /*
1145 * Default is to send an invalid command event.
1146 */
1147 default:
1148 {
1149 *pfResumeExecution = false;
1150 enmEvent = DBGFEVENT_INVALID_COMMAND;
1151 break;
1152 }
1153 }
1154
1155 /*
1156 * Send the pending event.
1157 */
1158 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1159 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1160 AssertRCStmt(rc, *pfResumeExecution = true);
1161 return rc;
1162}
1163
1164
1165/**
1166 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1167 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1168 */
1169static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1170{
1171 PUVM pUVM = pVM->pUVM;
1172 int *prcAttach = (int *)pvUser;
1173 RT_NOREF(pVCpu);
1174
1175 if (pVM->dbgf.s.fAttached)
1176 {
1177 Log(("dbgfR3Attach: Debugger already attached\n"));
1178 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1179 return VINF_SUCCESS;
1180 }
1181
1182 /*
1183 * The per-CPU bits.
1184 */
1185 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1186 {
1187 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1188
1189 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1190 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1191 }
1192
1193 /*
1194 * Init of the VM -> Debugger communication part living in the global VM structure.
1195 */
1196 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1197 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1198 pUVM->dbgf.s.idxDbgEvtRead = 0;
1199 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1200 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1201 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1202 int rc;
1203 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1204 if (pUVM->dbgf.s.paDbgEvts)
1205 {
1206 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1207 if (RT_SUCCESS(rc))
1208 {
1209 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1210 if (RT_SUCCESS(rc))
1211 {
1212 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1213 if (RT_SUCCESS(rc))
1214 {
1215 /*
1216 * At last, set the attached flag.
1217 */
1218 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1219 *prcAttach = VINF_SUCCESS;
1220 return VINF_SUCCESS;
1221 }
1222
1223 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1224 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1225 }
1226 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1227 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1228 }
1229 }
1230 else
1231 rc = VERR_NO_MEMORY;
1232
1233 *prcAttach = rc;
1234 return VINF_SUCCESS;
1235}
1236
1237
1238/**
1239 * Attaches a debugger to the specified VM.
1240 *
1241 * Only one debugger at a time.
1242 *
1243 * @returns VBox status code.
1244 * @param pUVM The user mode VM handle.
1245 */
1246VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1247{
1248 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1249 PVM pVM = pUVM->pVM;
1250 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1251
1252 /*
1253 * Call the VM, use EMT rendezvous for serialization.
1254 */
1255 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1256 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1257 if (RT_SUCCESS(rc))
1258 rc = rcAttach;
1259
1260 return rc;
1261}
1262
1263
1264/**
1265 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1266 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1267 */
1268static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1269{
1270 if (pVCpu->idCpu == 0)
1271 {
1272 PUVM pUVM = (PUVM)pvUser;
1273
1274 /*
1275 * Per-CPU cleanup.
1276 */
1277 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1278 {
1279 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1280
1281 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1282 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1283 }
1284
1285 /*
1286 * De-init of the VM -> Debugger communication part living in the global VM structure.
1287 */
1288 if (pUVM->dbgf.s.paDbgEvts)
1289 {
1290 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1291 pUVM->dbgf.s.paDbgEvts = NULL;
1292 }
1293
1294 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1295 {
1296 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1297 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1298 }
1299
1300 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1301 {
1302 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1303 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1304 }
1305
1306 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1307 {
1308 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1309 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1310 }
1311
1312 pUVM->dbgf.s.cDbgEvtMax = 0;
1313 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1314 pUVM->dbgf.s.idxDbgEvtRead = 0;
1315 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1316 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1317 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1318
1319 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1320 }
1321
1322 return VINF_SUCCESS;
1323}
1324
1325
1326/**
1327 * Detaches a debugger from the specified VM.
1328 *
1329 * Caller must be attached to the VM.
1330 *
1331 * @returns VBox status code.
1332 * @param pUVM The user mode VM handle.
1333 */
1334VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1335{
1336 LogFlow(("DBGFR3Detach:\n"));
1337
1338 /*
1339 * Validate input. The UVM handle shall be valid, the VM handle might be
1340 * in the process of being destroyed already, so deal quietly with that.
1341 */
1342 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1343 PVM pVM = pUVM->pVM;
1344 if (!VM_IS_VALID_EXT(pVM))
1345 return VERR_INVALID_VM_HANDLE;
1346
1347 /*
1348 * Check if attached.
1349 */
1350 if (!pVM->dbgf.s.fAttached)
1351 return VERR_DBGF_NOT_ATTACHED;
1352
1353 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1354}
1355
1356
1357/**
1358 * Wait for a debug event.
1359 *
1360 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1361 * @param pUVM The user mode VM handle.
1362 * @param cMillies Number of millis to wait.
1363 * @param pEvent Where to store the event data.
1364 */
1365VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1366{
1367 /*
1368 * Check state.
1369 */
1370 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1371 PVM pVM = pUVM->pVM;
1372 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1373 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1374
1375 RT_BZERO(pEvent, sizeof(*pEvent));
1376
1377 /*
1378 * Wait for an event to arrive if there is none.
1379 */
1380 int rc = VINF_SUCCESS;
1381 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1382 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1383 {
1384 do
1385 {
1386 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1387 } while ( RT_SUCCESS(rc)
1388 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1389 }
1390
1391 if (RT_SUCCESS(rc))
1392 {
1393 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1394
1395 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1396 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1397 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1398 }
1399
1400 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1401 return rc;
1402}
1403
1404
1405/**
1406 * Halts VM execution.
1407 *
1408 * After calling this the VM isn't actually halted till a DBGFEVENT_HALT_DONE event
1409 * arrives. Until that time it's not possible to issue any new commands.
1410 *
1411 * @returns VBox status code.
1412 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1413 * are halted.
1414 * @param pUVM The user mode VM handle.
1415 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
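 *
 * A hedged sketch of typical use from the debugger thread (not part of the
 * original sources): halt everything, then wait for the confirmation event.
 *
 * @code
 *      int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
 *      if (rc == VINF_SUCCESS) // VWRN_DBGF_ALREADY_HALTED means nothing was running.
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, 5000 /*ms*/, &Event);
 *          // On success, Event.enmType is expected to be DBGFEVENT_HALT_DONE.
 *      }
 * @endcode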
1416 */
1417VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1418{
1419 /*
1420 * Check state.
1421 */
1422 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1423 PVM pVM = pUVM->pVM;
1424 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1425 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1426 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1427
1428 /*
1429 * Halt the requested CPUs as needed.
1430 */
1431 int rc;
1432 if (idCpu != VMCPUID_ALL)
1433 {
1434 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1435 if (!dbgfR3CpuIsHalted(pUVCpu))
1436 {
1437 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1438 rc = VINF_SUCCESS;
1439 }
1440 else
1441 rc = VWRN_DBGF_ALREADY_HALTED;
1442 }
1443 else
1444 {
1445 rc = VWRN_DBGF_ALREADY_HALTED;
1446 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1447 {
1448 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1449 if (!dbgfR3CpuIsHalted(pUVCpu))
1450 {
1451 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1452 rc = VINF_SUCCESS;
1453 }
1454 }
1455 }
1456
1457 return rc;
1458}
1459
1460
1461/**
1462 * Checks if any of the specified vCPUs have been halted by the debugger.
1463 *
1464 * @returns True if at least one vCPU is halted.
1465 * @returns False if no vCPUs are halted.
1466 * @param pUVM The user mode VM handle.
1467 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1468 * at least a single vCPU is halted in the debugger.
1469 */
1470VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1471{
1472 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1473 PVM pVM = pUVM->pVM;
1474 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1475 AssertReturn(pVM->dbgf.s.fAttached, false);
1476
1477 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1478}
1479
1480
1481/**
1482 * Checks if the debugger can wait for events or not.
1483 *
1484 * This function is only used by lazy, multiplexing debuggers. :-)
1485 *
1486 * @returns VBox status code.
1487 * @retval VINF_SUCCESS if waitable.
1488 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1489 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1490 * (not asserted) or if the handle is invalid (asserted).
1491 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1492 *
1493 * @param pUVM The user mode VM handle.
1494 */
1495VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1496{
1497 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1498
1499 /* Note! There is a slight race here, unfortunately. */
1500 PVM pVM = pUVM->pVM;
1501 if (!RT_VALID_PTR(pVM))
1502 return VERR_INVALID_VM_HANDLE;
1503 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1504 return VERR_INVALID_VM_HANDLE;
1505 if (!pVM->dbgf.s.fAttached)
1506 return VERR_DBGF_NOT_ATTACHED;
1507
1508 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1509 return VINF_SUCCESS;
1510}
1511
1512
1513/**
1514 * Resumes VM execution.
1515 *
1516 * There is no receipt event on this command.
1517 *
1518 * @returns VBox status code.
1519 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1520 * @param pUVM The user mode VM handle.
1521 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1522 */
1523VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1524{
1525 /*
1526 * Validate input and attachment state.
1527 */
1528 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1529 PVM pVM = pUVM->pVM;
1530 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1531 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1532
1533 /*
1534 * Ping the halted emulation threads, telling them to run.
1535 */
1536 int rc = VWRN_DBGF_ALREADY_RUNNING;
1537 if (idCpu != VMCPUID_ALL)
1538 {
1539 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1540 if (dbgfR3CpuIsHalted(pUVCpu))
1541 {
1542 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1543 AssertRC(rc);
1544 }
1545 }
1546 else
1547 {
1548 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1549 {
1550 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1551 if (dbgfR3CpuIsHalted(pUVCpu))
1552 {
1553 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1554 AssertRC(rc2);
1555 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1556 rc = rc2;
1557 }
1558 }
1559 }
1560
1561 return rc;
1562}
1563
1564
1565/**
1566 * Classifies the current instruction.
1567 *
1568 * @returns Type of instruction.
1569 * @param pVM The cross context VM structure.
1570 * @param pVCpu The current CPU.
1571 * @thread EMT(pVCpu)
1572 */
1573static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1574{
1575 /*
1576 * Read the instruction.
1577 */
1578 size_t cbRead = 0;
1579 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1580 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1581 if (RT_SUCCESS(rc))
1582 {
1583 /*
1584 * Do minimal parsing. No real need to involve the disassembler here.
1585 */
1586 uint8_t *pb = abOpcode;
1587 for (;;)
1588 {
1589 switch (*pb++)
1590 {
1591 default:
1592 return DBGFSTEPINSTRTYPE_OTHER;
1593
1594 case 0xe8: /* call rel16/32 */
1595 case 0x9a: /* call farptr */
1596 case 0xcc: /* int3 */
1597 case 0xcd: /* int xx */
1598 // case 0xce: /* into */
1599 return DBGFSTEPINSTRTYPE_CALL;
1600
1601 case 0xc2: /* ret xx */
1602 case 0xc3: /* ret */
1603 case 0xca: /* retf xx */
1604 case 0xcb: /* retf */
1605 case 0xcf: /* iret */
1606 return DBGFSTEPINSTRTYPE_RET;
1607
1608 case 0xff:
1609 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1610 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1611 return DBGFSTEPINSTRTYPE_CALL;
1612 return DBGFSTEPINSTRTYPE_OTHER;
1613
1614 case 0x0f:
1615 switch (*pb++)
1616 {
1617 case 0x05: /* syscall */
1618 case 0x34: /* sysenter */
1619 return DBGFSTEPINSTRTYPE_CALL;
1620 case 0x07: /* sysret */
1621 case 0x35: /* sysexit */
1622 return DBGFSTEPINSTRTYPE_RET;
1623 }
1624 break;
1625
1626 /* Must handle some REX prefixes. So we do all normal prefixes. */
1627 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1628 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1629 if (!CPUMIsGuestIn64BitCode(pVCpu))
1630 return DBGFSTEPINSTRTYPE_OTHER;
1631 break;
1632
1633 case 0x2e: /* CS */
1634 case 0x36: /* SS */
1635 case 0x3e: /* DS */
1636 case 0x26: /* ES */
1637 case 0x64: /* FS */
1638 case 0x65: /* GS */
1639 case 0x66: /* op size */
1640 case 0x67: /* addr size */
1641 case 0xf0: /* lock */
1642 case 0xf2: /* REPNZ */
1643 case 0xf3: /* REPZ */
1644 break;
1645 }
1646 }
1647 }
1648
1649 return DBGFSTEPINSTRTYPE_INVALID;
1650}
1651
1652
1653/**
1654 * Checks if the stepping has reached a stop point.
1655 *
1656 * Called when raising a stepped event.
1657 *
1658 * @returns true if the event should be raised, false if we should take one more
1659 * step first.
1660 * @param pVM The cross context VM structure.
1661 * @param pVCpu The cross context per CPU structure of the calling EMT.
1662 * @thread EMT(pVCpu)
1663 */
1664static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1665{
1666 /*
1667 * Check that pVCpu is valid and that it matches the CPU that is stepping.
1668 */
1669 if (pVCpu)
1670 {
1671 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1672 {
1673 /*
1674 * Increase the number of steps and see if we've reached the max.
1675 */
1676 pVM->dbgf.s.SteppingFilter.cSteps++;
1677 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1678 {
1679 /*
1680 * Check PC and SP address filtering.
1681 */
1682 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1683 {
1684 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1685 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1686 return true;
1687 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1688 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1689 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1690 return true;
1691 }
1692
1693 /*
1694 * Do step-over filtering separately from the step-into one.
1695 */
1696 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1697 {
1698 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1699 switch (enmType)
1700 {
1701 default:
1702 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1703 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1704 break;
1705 return true;
1706 case DBGFSTEPINSTRTYPE_CALL:
1707 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1708 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1709 return true;
1710 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1711 break;
1712 case DBGFSTEPINSTRTYPE_RET:
1713 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1714 {
1715 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1716 return true;
1717 /* If after return, we use the cMaxStep limit to stop the next time. */
1718 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1719 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1720 }
1721 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1722 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1723 break;
1724 }
1725 return false;
1726 }
1727 /*
1728 * Filtered step-into.
1729 */
1730 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1731 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1732 {
1733 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1734 switch (enmType)
1735 {
1736 default:
1737 break;
1738 case DBGFSTEPINSTRTYPE_CALL:
1739 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1740 return true;
1741 break;
1742 case DBGFSTEPINSTRTYPE_RET:
1743 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1744 return true;
1745 /* If after return, we use the cMaxStep limit to stop the next time. */
1746 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1747 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1748 break;
1749 }
1750 return false;
1751 }
1752 }
1753 }
1754 }
1755
1756 return true;
1757}
1758
1759
1760/**
1761 * Step Into.
1762 *
1763 * A single step event is generated from this command.
1764 * The current implementation is not reliable, so don't rely on the event coming.
1765 *
1766 * @returns VBox status code.
1767 * @param pUVM The user mode VM handle.
1768 * @param idCpu The ID of the CPU to single step on.
1769 */
1770VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1771{
1772 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1773}
1774
1775
1776/**
1777 * Full fledged step.
1778 *
1779 * This extended stepping API allows for doing multiple steps before raising an
1780 * event, helping implement step over, step out and other more advanced
1781 * features.
1782 *
1783 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1784 * DBGFEVENT_STEPPED_HYPER event. However, the stepping may be interrupted by other
1785 * events, which will abort the stepping.
1786 *
1787 * The stop on pop area feature is for safeguarding step out.
1788 *
1789 * Please note though, that it will always use stepping and never breakpoints.
1790 * While this allows for a much greater flexibility it can at times be rather
1791 * slow.
1792 *
1793 * @returns VBox status code.
1794 * @param pUVM The user mode VM handle.
1795 * @param idCpu The ID of the CPU to single step on.
1796 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1797 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1798 * always be specified.
1799 * @param pStopPcAddr Address to stop executing at. Completely ignored
1800 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1801 * @param pStopPopAddr Stack address that SP must be lower than when
1802 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1803 * @param cbStopPop The range starting at @a pStopPopAddr which is
1804 * considered to be within the same thread stack. Note
1805 * that the API allows @a pStopPopAddr and @a cbStopPop
1806 * to form an area that wraps around and it will
1807 * consider the part starting at 0 as included.
1808 * @param cMaxSteps The maximum number of steps to take. This is to
1809 * prevent stepping forever, so passing UINT32_MAX is
1810 * not recommended.
1811 *
1812 * @remarks The two address arguments must be guest context virtual addresses,
1813 * or HMA. The code doesn't make much of a point out of HMA, though.
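 *
 *          A couple of hedged example invocations (sketches only, not from the
 *          original sources; the target vCPU must already be halted in the
 *          debugger or VERR_SEM_OUT_OF_TURN is returned):
 * @code
 *      // Step over the current instruction on vCPU 0, stepping through any
 *      // call it makes, but give up after at most 10000 steps:
 *      rc = DBGFR3StepEx(pUVM, 0 /*idCpu*/, DBGF_STEP_F_OVER, NULL, NULL, 0, 10000);
 *
 *      // Step until the current function returns (step-out style), giving up
 *      // after at most 100000 steps:
 *      rc = DBGFR3StepEx(pUVM, 0 /*idCpu*/, DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_AFTER_RET,
 *                        NULL, NULL, 0, 100000);
 * @endcode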
1814 */
1815VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1816 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1817{
1818 /*
1819 * Check state.
1820 */
1821 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1822 PVM pVM = pUVM->pVM;
1823 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1824 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1825 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1826 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1827 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1828 {
1829 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1830 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1831 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1832 }
1833 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1834 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1835 {
1836 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1837 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1838 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1839 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1840 }
1841
1842 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1843 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1844 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1845 { /* likely */ }
1846 else
1847 return VERR_SEM_OUT_OF_TURN;
1848 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1849
1850 /*
1851 * Send the emulation thread a single-step command.
1852 */
1853 if (fFlags == DBGF_STEP_F_INTO)
1854 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1855 else
1856 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1857 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1858 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1859 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1860 else
1861 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1862 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1863 {
1864 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1865 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1866 }
1867 else
1868 {
1869 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1870 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1871 }
1872
1873 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1874 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1875 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1876
1877 Assert(dbgfR3CpuIsHalted(pUVCpu));
1878 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1879}
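
/*
 * A sketch (not from the original source) of using DBGFR3StepEx() for "step
 * out": keep stepping over calls until SP pops above the current frame.  It
 * assumes DBGFR3AddrFromFlat() from the DBGF address API; uPopBoundary and
 * cbStackLeft are hypothetical inputs the caller derived from the register
 * and stack state.
 *
 * @code
 *     // uPopBoundary: the SP value just above the current frame.
 *     // cbStackLeft:  how much stack above it still belongs to this thread.
 *     static int dbgDemoStepOut(PUVM pUVM, VMCPUID idCpu, RTGCUINTPTR uPopBoundary, RTGCUINTPTR cbStackLeft)
 *     {
 *         DBGFADDRESS AddrStackPop;
 *         DBGFR3AddrFromFlat(pUVM, &AddrStackPop, uPopBoundary);   // assumed helper
 *
 *         // Step over calls, stop once SP pops into
 *         // [uPopBoundary, uPopBoundary + cbStackLeft), and cap the step count.
 *         return DBGFR3StepEx(pUVM, idCpu,
 *                             DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP,
 *                             NULL,                                // pStopPcAddr not used
 *                             &AddrStackPop, cbStackLeft,
 *                             _64K);                               // cMaxSteps cap
 *     }
 * @endcode
 */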
1880
1881
1882
1883/**
1884 * dbgfR3EventConfigEx argument packet.
1885 */
1886typedef struct DBGFR3EVENTCONFIGEXARGS
1887{
1888 PCDBGFEVENTCONFIG paConfigs;
1889 size_t cConfigs;
1890 int rc;
1891} DBGFR3EVENTCONFIGEXARGS;
1892/** Pointer to a dbgfR3EventConfigEx argument packet. */
1893typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1894
1895
1896/**
1897 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1898 */
1899static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1900{
1901 if (pVCpu->idCpu == 0)
1902 {
1903 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1904 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1905 size_t cConfigs = pArgs->cConfigs;
1906
1907 /*
1908 * Apply the changes.
1909 */
1910 unsigned cChanges = 0;
1911 for (uint32_t i = 0; i < cConfigs; i++)
1912 {
1913 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1914 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1915 if (paConfigs[i].fEnabled)
1916 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1917 else
1918 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1919 }
1920
1921 /*
1922 * Inform HM about changes.
1923 */
1924 if (cChanges > 0)
1925 {
1926 if (HMIsEnabled(pVM))
1927 {
1928 HMR3NotifyDebugEventChanged(pVM);
1929 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1930 }
1931 else if (VM_IS_NEM_ENABLED(pVM))
1932 {
1933 NEMR3NotifyDebugEventChanged(pVM);
1934 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1935 }
1936 }
1937 }
1938 else if (HMIsEnabled(pVM))
1939 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1940 else if (VM_IS_NEM_ENABLED(pVM))
1941 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1942
1943 return VINF_SUCCESS;
1944}
1945
1946
1947/**
1948 * Configures (enables/disables) multiple selectable debug events.
1949 *
1950 * @returns VBox status code.
1951 * @param pUVM The user mode VM handle.
1952 * @param paConfigs The events to configure and their new state.
1953 * @param cConfigs Number of entries in @a paConfigs.
1954 */
1955VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1956{
1957 /*
1958 * Validate input.
1959 */
1960 size_t i = cConfigs;
1961 while (i-- > 0)
1962 {
1963 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1964 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1965 }
1966 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1967 PVM pVM = pUVM->pVM;
1968 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1969
1970 /*
1971 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1972 * can sync their data and execution with new debug state.
1973 */
1974 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1975 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1976 dbgfR3EventConfigEx, &Args);
1977 if (RT_SUCCESS(rc))
1978 rc = Args.rc;
1979 return rc;
1980}
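
/*
 * A sketch (not from the original source) of a batched DBGFR3EventConfigEx()
 * call.  The two interrupt events are used purely as stand-ins for selectable
 * event types (they are normally driven indirectly by DBGFR3InterruptConfigEx()
 * further down); substitute whichever selectable events are needed.
 *
 * @code
 *     static int dbgDemoEnableIntEvents(PUVM pUVM)
 *     {
 *         DBGFEVENTCONFIG aCfgs[] =
 *         {
 *             { DBGFEVENT_INTERRUPT_HARDWARE, true },
 *             { DBGFEVENT_INTERRUPT_SOFTWARE, true },
 *         };
 *         return DBGFR3EventConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 *     }
 * @endcode
 */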
1981
1982
1983/**
1984 * Enables or disables a selectable debug event.
1985 *
1986 * @returns VBox status code.
1987 * @param pUVM The user mode VM handle.
1988 * @param enmEvent The selectable debug event.
1989 * @param fEnabled The new state.
1990 */
1991VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1992{
1993 /*
1994 * Convert to an array call.
1995 */
1996 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1997 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1998}
1999
2000
2001/**
2002 * Checks if the given selectable event is enabled.
2003 *
2004 * @returns true if enabled, false if not or invalid input.
2005 * @param pUVM The user mode VM handle.
2006 * @param enmEvent The selectable debug event.
2007 * @sa DBGFR3EventQuery
2008 */
2009VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2010{
2011 /*
2012 * Validate input.
2013 */
2014 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2015 && enmEvent < DBGFEVENT_END, false);
2016 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2017 || enmEvent == DBGFEVENT_BREAKPOINT
2018 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2019 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2020
2021 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2022 PVM pVM = pUVM->pVM;
2023 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2024
2025 /*
2026 * Check the event status.
2027 */
2028 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2029}
2030
2031
2032/**
2033 * Queries the status of a set of events.
2034 *
2035 * @returns VBox status code.
2036 * @param pUVM The user mode VM handle.
2037 * @param paConfigs The events to query and where to return the state.
2038 * @param cConfigs The number of elements in @a paConfigs.
2039 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2040 */
2041VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2042{
2043 /*
2044 * Validate input.
2045 */
2046 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2047 PVM pVM = pUVM->pVM;
2048 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2049
2050 for (size_t i = 0; i < cConfigs; i++)
2051 {
2052 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2053 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2054 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2055 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2056 || enmType == DBGFEVENT_BREAKPOINT
2057 || enmType == DBGFEVENT_BREAKPOINT_IO
2058 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2059 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2060 }
2061
2062 return VINF_SUCCESS;
2063}
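
/*
 * A sketch (not from the original source) of querying event state with
 * DBGFR3EventQuery(): fill in enmType, call the API, then read back fEnabled.
 * Assumes <iprt/stream.h> for RTPrintf; the event types are examples only.
 *
 * @code
 *     #include <iprt/stream.h>
 *
 *     static void dbgDemoDumpIntEventState(PUVM pUVM)
 *     {
 *         DBGFEVENTCONFIG aCfgs[] =
 *         {
 *             { DBGFEVENT_INTERRUPT_HARDWARE, false },
 *             { DBGFEVENT_INTERRUPT_SOFTWARE, false },
 *         };
 *         if (RT_SUCCESS(DBGFR3EventQuery(pUVM, aCfgs, RT_ELEMENTS(aCfgs))))
 *             for (size_t i = 0; i < RT_ELEMENTS(aCfgs); i++)
 *                 RTPrintf("event %d: %s\n", (int)aCfgs[i].enmType,
 *                          aCfgs[i].fEnabled ? "enabled" : "disabled");
 *     }
 * @endcode
 */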
2064
2065
2066/**
2067 * dbgfR3InterruptConfigEx argument packet.
2068 */
2069typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2070{
2071 PCDBGFINTERRUPTCONFIG paConfigs;
2072 size_t cConfigs;
2073 int rc;
2074} DBGFR3INTERRUPTCONFIGEXARGS;
2075/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2076typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2077
2078/**
2079 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2080 * Worker for DBGFR3InterruptConfigEx.}
2081 */
2082static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2083{
2084 if (pVCpu->idCpu == 0)
2085 {
2086 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2087 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2088 size_t cConfigs = pArgs->cConfigs;
2089
2090 /*
2091 * Apply the changes.
2092 */
2093 bool fChanged = false;
2094 bool fThis;
2095 for (uint32_t i = 0; i < cConfigs; i++)
2096 {
2097 /*
2098 * Hardware interrupts.
2099 */
2100 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2101 {
2102 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2103 if (fThis)
2104 {
2105 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2106 pVM->dbgf.s.cHardIntBreakpoints++;
2107 }
2108 }
2109 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2110 {
2111 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2112 if (fThis)
2113 {
2114 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2115 pVM->dbgf.s.cHardIntBreakpoints--;
2116 }
2117 }
2118
2119 /*
2120 * Software interrupts.
2121 */
2122 if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2123 {
2124 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2125 if (fThis)
2126 {
2127 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2128 pVM->dbgf.s.cSoftIntBreakpoints++;
2129 }
2130 }
2131 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2132 {
2133 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2134 if (fThis)
2135 {
2136 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2137 pVM->dbgf.s.cSoftIntBreakpoints--;
2138 }
2139 }
2140 }
2141
2142 /*
2143 * Update the event bitmap entries.
2144 */
2145 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2146 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2147 else
2148 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2149
2150 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2151 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2152 else
2153 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2154
2155 /*
2156 * Inform HM about changes.
2157 */
2158 if (fChanged)
2159 {
2160 if (HMIsEnabled(pVM))
2161 {
2162 HMR3NotifyDebugEventChanged(pVM);
2163 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2164 }
2165 else if (VM_IS_NEM_ENABLED(pVM))
2166 {
2167 NEMR3NotifyDebugEventChanged(pVM);
2168 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2169 }
2170 }
2171 }
2172 else if (HMIsEnabled(pVM))
2173 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2174 else if (VM_IS_NEM_ENABLED(pVM))
2175 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2176
2177 return VINF_SUCCESS;
2178}
2179
2180
2181/**
2182 * Changes the hardware and software interrupt interception configuration.
2183 *
2184 * @returns VBox status code.
2185 * @param pUVM The user mode VM handle.
2186 * @param paConfigs The interrupts to configure and their new states.
2187 * @param cConfigs The number of elements in @a paConfigs.
2188 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
2189 */
2190VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2191{
2192 /*
2193 * Validate input.
2194 */
2195 size_t i = cConfigs;
2196 while (i-- > 0)
2197 {
2198 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2199 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2200 }
2201
2202 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2203 PVM pVM = pUVM->pVM;
2204 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2205
2206 /*
2207 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2208 * can sync their data and execution with new debug state.
2209 */
2210 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2211 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2212 dbgfR3InterruptConfigEx, &Args);
2213 if (RT_SUCCESS(rc))
2214 rc = Args.rc;
2215 return rc;
2216}
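
/*
 * A sketch (not from the original source) of a batched
 * DBGFR3InterruptConfigEx() call: start intercepting hardware interrupt 0x21
 * and software interrupt 0x80 while leaving the other side of each entry
 * untouched.  The vector numbers are arbitrary examples.
 *
 * @code
 *     static int dbgDemoInterceptInts(PUVM pUVM)
 *     {
 *         DBGFINTERRUPTCONFIG aCfgs[] =
 *         {   // iInterrupt, enmHardState,                  enmSoftState
 *             { 0x21, DBGFINTERRUPTSTATE_ENABLED,    DBGFINTERRUPTSTATE_DONT_TOUCH },
 *             { 0x80, DBGFINTERRUPTSTATE_DONT_TOUCH, DBGFINTERRUPTSTATE_ENABLED    },
 *         };
 *         return DBGFR3InterruptConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
 *     }
 * @endcode
 */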
2217
2218
2219/**
2220 * Configures interception of a hardware interrupt.
2221 *
2222 * @returns VBox status code.
2223 * @param pUVM The user mode VM handle.
2224 * @param iInterrupt The interrupt number.
2225 * @param fEnabled Whether interception is enabled or not.
2226 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2227 */
2228VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2229{
2230 /*
2231 * Convert to DBGFR3InterruptConfigEx call.
2232 */
2233 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2234 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2235}
2236
2237
2238/**
2239 * Configures interception of a software interrupt.
2240 *
2241 * @returns VBox status code.
2242 * @param pUVM The user mode VM handle.
2243 * @param iInterrupt The interrupt number.
2244 * @param fEnabled Whether interception is enabled or not.
2245 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2246 */
2247VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2248{
2249 /*
2250 * Convert to DBGFR3InterruptConfigEx call.
2251 */
2252 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2253 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2254}
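
/*
 * The two wrappers above cover the common single-interrupt case.  A usage
 * sketch (not from the original source); the vector numbers are arbitrary
 * examples:
 *
 * @code
 *     int rc = DBGFR3InterruptHardwareConfig(pUVM, 0x21, true);    // start intercepting
 *     if (RT_SUCCESS(rc))
 *         rc = DBGFR3InterruptSoftwareConfig(pUVM, 0x80, false);   // stop intercepting
 * @endcode
 */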
2255
2256
2257/**
2258 * Checks whether interception is enabled for a hardware interrupt.
2259 *
2260 * @returns true if enabled, false if not or invalid input.
2261 * @param pUVM The user mode VM handle.
2262 * @param iInterrupt The interrupt number.
2263 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2264 * DBGF_IS_SOFTWARE_INT_ENABLED
2265 */
2266VMMR3DECL(bool) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2267{
2268 /*
2269 * Validate input.
2270 */
2271 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2272 PVM pVM = pUVM->pVM;
2273 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2274
2275 /*
2276 * Check it.
2277 */
2278 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2279}
2280
2281
2282/**
2283 * Checks whether interception is enabled for a software interrupt.
2284 *
2285 * @returns true if enabled, false if not or invalid input.
2286 * @param pUVM The user mode VM handle.
2287 * @param iInterrupt The interrupt number.
2288 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2289 * DBGF_IS_HARDWARE_INT_ENABLED
2290 */
2291VMMR3DECL(bool) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2292{
2293 /*
2294 * Validate input.
2295 */
2296 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2297 PVM pVM = pUVM->pVM;
2298 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2299
2300 /*
2301 * Check it.
2302 */
2303 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2304}
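
/*
 * A sketch (not from the original source) of checking interception state with
 * the two accessors above; the vector numbers are arbitrary examples.
 *
 * @code
 *     bool const fHardInt21 = DBGFR3InterruptHardwareIsEnabled(pUVM, 0x21);
 *     bool const fSoftInt80 = DBGFR3InterruptSoftwareIsEnabled(pUVM, 0x80);
 * @endcode
 */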
2305
2306
2307
2308/**
2309 * Call this to single step programmatically.
2310 *
2311 * You must pass down the return code to the EM loop! That's
2312 * where the actual single stepping takes place (at least in the
2313 * current implementation).
2314 *
2315 * @returns VINF_EM_DBG_STEP
2316 *
2317 * @param pVCpu The cross context virtual CPU structure.
2318 *
2319 * @thread VCpu EMT
2320 * @internal
2321 */
2322VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2323{
2324 VMCPU_ASSERT_EMT(pVCpu);
2325
2326 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2327 return VINF_EM_DBG_STEP;
2328}
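
/*
 * A sketch (not from the original source) of the intended calling pattern for
 * DBGFR3PrgStep(): the returned status must be propagated unaltered to the EM
 * loop, which performs the actual single step.  The caller below is purely
 * hypothetical.
 *
 * @code
 *     static int dbgDemoArmProgrammaticStep(PVMCPU pVCpu)
 *     {
 *         // VINF_EM_DBG_STEP must reach EM for the step to happen.
 *         return DBGFR3PrgStep(pVCpu);
 *     }
 * @endcode
 */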
2329
2330
2331/**
2332 * Inject an NMI into a running VM.
2333 *
2334 * @returns VBox status code.
2335 * @param pUVM The user mode VM structure.
2336 * @param idCpu The ID of the CPU to inject the NMI on.
2337 */
2338VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2339{
2340 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2341 PVM pVM = pUVM->pVM;
2342 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2343 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2344
2345 /** @todo Implement generic NMI injection. */
2346 /** @todo NEM: NMI injection */
2347 if (!HMIsEnabled(pVM))
2348 return VERR_NOT_SUP_BY_NEM;
2349
2350 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2351 return VINF_SUCCESS;
2352}
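
/*
 * A usage sketch (not from the original source); note that the code above
 * only supports HM-enabled VMs and returns VERR_NOT_SUP_BY_NEM otherwise.
 *
 * @code
 *     int rc = DBGFR3InjectNMI(pUVM, 0);   // inject an NMI on VCPU 0
 * @endcode
 */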
2353