VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 107044

Last change on this file since 107044 was 107030, checked in by vboxsync, 8 days ago

VMM/DBGF: Classify ARMv8 A64 control flow instructions properly for call/return stepping, bugref:10393

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 79.4 KB
Line 
1/* $Id: DBGF.cpp 107030 2024-11-18 14:58:29Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf DBGF - The Debugger Facility
30 *
31 * The purpose of the DBGF is to provide an interface for debuggers to
32 * manipulate the VMM without having to mess up the source code for each of
33 * them. The DBGF is always built in and will always work when a debugger
34 * attaches to the VM. The DBGF provides the basic debugger features, such as
35 * halting execution, handling breakpoints, single step execution, instruction
36 * disassembly, info querying, OS specific diggers, symbol and module
37 * management.
38 *
39 * The interface is working in a manner similar to the win32, linux and os2
40 * debugger interfaces. The interface has an asynchronous nature. This comes
41 * from the fact that the VMM and the Debugger are running in different threads.
42 * They are referred to as the "emulation thread" and the "debugger thread", or
43 * as the "ping thread" and the "pong thread", respectively. (The last set of
44 * names comes from the use of the Ping-Pong synchronization construct from the
45 * RTSem API.)
46 *
47 * @see grp_dbgf
48 *
49 *
50 * @section sec_dbgf_scenario Usage Scenario
51 *
52 * The debugger starts by attaching to the VM. For practical reasons we limit the
53 * number of concurrently attached debuggers to 1 per VM. The action of
54 * attaching to the VM causes the VM to check and generate debug events.
55 *
56 * The debugger then will wait/poll for debug events and issue commands.
57 *
58 * The waiting and polling is done by the DBGFEventWait() function. It will wait
59 * for the emulation thread to send a ping, thus indicating that there is an
60 * event waiting to be processed.
61 *
62 * An event can be a response to a command issued previously, the hitting of a
63 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
64 * the ping and must respond to the event at hand - the VMM is waiting. This
65 * usually means that the user of the debugger must do something, but it doesn't
66 * have to. The debugger is free to call any DBGF function (nearly at least)
67 * while processing the event.
68 *
69 * Typically the user will issue a request for the execution to be resumed, so
70 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
71 *
72 * When the user eventually terminates the debugging session or selects another
73 * VM, the debugger detaches from the VM. This means that breakpoints are
74 * disabled and that the emulation thread no longer polls for debugger commands.
75 *
76 */
77
78
79/*********************************************************************************************************************************
80* Header Files *
81*********************************************************************************************************************************/
82#define LOG_GROUP LOG_GROUP_DBGF
83#include <VBox/vmm/dbgf.h>
84#include <VBox/vmm/selm.h>
85#include <VBox/vmm/em.h>
86#include <VBox/vmm/hm.h>
87#include <VBox/vmm/mm.h>
88#include <VBox/vmm/nem.h>
89#include "DBGFInternal.h"
90#include <VBox/vmm/vm.h>
91#include <VBox/vmm/uvm.h>
92#include <VBox/err.h>
93
94#include <VBox/log.h>
95#include <iprt/semaphore.h>
96#include <iprt/thread.h>
97#include <iprt/asm.h>
98#include <iprt/time.h>
99#include <iprt/assert.h>
100#include <iprt/stream.h>
101#include <iprt/env.h>
102
103
104/*********************************************************************************************************************************
105* Structures and Typedefs *
106*********************************************************************************************************************************/
/**
 * Instruction type returned by dbgfStepGetCurInstrType.
 *
 * Used by the stepping filter (dbgfStepAreWeThereYet) to decide whether the
 * instruction at the current PC transfers control via a call or a return.
 */
typedef enum DBGFSTEPINSTRTYPE
{
    /** Customary invalid zero value. */
    DBGFSTEPINSTRTYPE_INVALID = 0,
    /** Any instruction that is neither a call nor a return. */
    DBGFSTEPINSTRTYPE_OTHER,
    /** A function return instruction. */
    DBGFSTEPINSTRTYPE_RET,
    /** A function call instruction. */
    DBGFSTEPINSTRTYPE_CALL,
    /** End of valid values (exclusive). */
    DBGFSTEPINSTRTYPE_END,
    /** Make sure the type is 32-bit wide. */
    DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
} DBGFSTEPINSTRTYPE;
119
120
/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
/* Forward declarations for internal workers that are defined later in this file. */
DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
static int dbgfR3CpuWait(PVMCPU pVCpu);
static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
132
133
/**
 * Initializes the DBGF.
 *
 * Each sub-component is initialized in sequence; on failure the components
 * that already succeeded are torn down again in reverse order before the
 * error is returned.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    /* The substructures must fit inside the padding reserved for them. */
    AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
    AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));

    /* No stepping filter active initially. */
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;

    /*
     * The usual sideways mountain climbing style of init:
     */
    int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
    if (RT_SUCCESS(rc))
    {
        rc = dbgfR3TraceInit(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = dbgfR3RegInit(pUVM);
            if (RT_SUCCESS(rc))
            {
                rc = dbgfR3AsInit(pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = dbgfR3BpInit(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = dbgfR3OSInit(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = dbgfR3PlugInInit(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = dbgfR3BugCheckInit(pVM);
                                if (RT_SUCCESS(rc))
                                {
#ifdef VBOX_WITH_DBGF_TRACING
                                    rc = dbgfR3TracerInit(pVM);
#endif
                                    if (RT_SUCCESS(rc))
                                    {
                                        return VINF_SUCCESS;
                                    }
                                }
                                dbgfR3PlugInTerm(pUVM);
                            }
                            dbgfR3OSTermPart1(pUVM);
                            dbgfR3OSTermPart2(pUVM);
                        }
                        dbgfR3BpTerm(pUVM);
                    }
                    dbgfR3AsTerm(pUVM);
                }
                dbgfR3RegTerm(pUVM);
            }
            dbgfR3TraceTerm(pVM);
        }
        dbgfR3InfoTerm(pUVM);
    }
    return rc;
}
200
201
/**
 * Terminates and cleans up resources allocated by the DBGF.
 *
 * Sub-components are torn down in (roughly) the reverse order of DBGFR3Init;
 * OS digger termination is split into two parts around plug-in termination.
 * The statement order below therefore matters - do not reorder.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;

#ifdef VBOX_WITH_DBGF_TRACING
    dbgfR3TracerTerm(pVM);
#endif
    dbgfR3OSTermPart1(pUVM);
    dbgfR3PlugInTerm(pUVM);
    dbgfR3OSTermPart2(pUVM);
    dbgfR3BpTerm(pUVM);
    dbgfR3AsTerm(pUVM);
    dbgfR3RegTerm(pUVM);
    dbgfR3TraceTerm(pVM);
    dbgfR3InfoTerm(pUVM);

    return VINF_SUCCESS;
}
226
227
/**
 * This is for tstCFGM and others to avoid trigger leak detection.
 *
 * Only tears down the info handler sub-component; used when no full VM was
 * created (user-mode VM structure only).
 *
 * @param   pUVM    The user mode VM structure.
 */
VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
{
    dbgfR3InfoTerm(pUVM);
}
237
238
239/**
240 * Called when the VM is powered off to detach debuggers.
241 *
242 * @param pVM The cross context VM structure.
243 */
244VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
245{
246 /*
247 * Send a termination event to any attached debugger.
248 */
249 if (pVM->dbgf.s.fAttached)
250 {
251 PVMCPU pVCpu = VMMGetCpu(pVM);
252 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
253 AssertLogRelRC(rc);
254
255 /*
256 * Clear the FF so we won't get confused later on.
257 */
258 VM_FF_CLEAR(pVM, VM_FF_DBGF);
259 }
260}
261
262
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * Only the tracing and address-space sub-components hold relocatable data.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    dbgfR3TraceRelocate(pVM);
    dbgfR3AsRelocate(pVM->pUVM, offDelta);
}
276
277
/**
 * Waits a little while for a debugger to attach.
 *
 * Polls in 100ms steps; while polling it also services rendezvous and
 * priority request FFs so an attaching debugger can actually make progress.
 *
 * @returns True if a debugger has attached.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context per CPU structure.
 * @param   enmEvent    Event.
 *
 * @thread EMT(pVCpu)
 */
static bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
{
    /*
     * First a message.
     */
#if !defined(DEBUG)
    int cWait = 10;     /* Release builds: 10 x 100ms = 1 second. */
#else
    /* Debug builds: 15 seconds by default; only 1 second for assertion/fatal
       events or when VBOX_DBGF_NO_WAIT_FOR_ATTACH is set (the env vars let
       the developer override either way). */
    int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
             || (   (   enmEvent == DBGFEVENT_ASSERTION_HYPER
                     || enmEvent == DBGFEVENT_FATAL_ERROR)
                 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
              ? 10
              : 150;
#endif
    RTStrmPrintf(g_pStdErr,
                 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
#ifdef DEBUG
                 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
#endif
                 ,
                 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
    RTStrmFlush(g_pStdErr);
    while (cWait > 0)
    {
        RTThreadSleep(100);
        if (pVM->dbgf.s.fAttached)
        {
            RTStrmPrintf(g_pStdErr, "Attached!\n");
            RTStrmFlush(g_pStdErr);
            return true;
        }

        /* Process rendezvous (debugger attaching involves such). */
        if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
        {
            int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* Process priority stuff. */
        if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
            || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
        {
            int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
            if (rc == VINF_SUCCESS)
                rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* next - print a countdown digit once per second. */
        if (!(cWait % 10))
        {
            RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
            RTStrmFlush(g_pStdErr);
        }
        cWait--;
    }

    RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
    RTStrmFlush(g_pStdErr);
    return false;
}
361
362
/**
 * Forced action callback.
 *
 * The VMM will call this from it's main loop when either VM_FF_DBGF or
 * VMCPU_FF_DBGF are set.
 *
 * The function checks for and executes pending commands from the debugger.
 * Then it checks for pending debug events and serves these.
 *
 * @returns VINF_SUCCESS normally.
 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /*
     * Dispatch pending events.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
    {
        /* Only the newest event can be in the CURRENT state and need dispatching. */
        if (   pVCpu->dbgf.s.cEvents > 0
            && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
        {
            rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
            /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
        }

        /*
         * Command pending? Process it.
         */
        PUVMCPU pUVCpu = pVCpu->pUVCpu;
        if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
        {
            bool fResumeExecution;
            /* Snapshot the command data before atomically consuming the command. */
            DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
            DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
            VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
            if (!fResumeExecution)
                rcStrict2 = dbgfR3CpuWait(pVCpu);
            /* Merge status codes: failures and stricter (lower) EM codes win. */
            if (   rcStrict2 != VINF_SUCCESS
                && (   rcStrict == VINF_SUCCESS
                    || RT_FAILURE(rcStrict2)
                    || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
                rcStrict = rcStrict2;
        }
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
415
416
/**
 * Try to determine the event context.
 *
 * Maps the EM execution state of the vCPU onto the coarser debug event
 * context classification.
 *
 * @returns debug event context.
 * @param   pVCpu   The cross context vCPU structure.
 */
static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
{
    switch (EMGetState(pVCpu))
    {
        case EMSTATE_HM:
        case EMSTATE_NEM:
        case EMSTATE_DEBUG_GUEST_HM:
        case EMSTATE_DEBUG_GUEST_NEM:
            return DBGFEVENTCTX_HM;

        case EMSTATE_IEM:
        case EMSTATE_DEBUG_GUEST_IEM:
        case EMSTATE_DEBUG_GUEST_RAW:
            return DBGFEVENTCTX_RAW;

        case EMSTATE_RECOMPILER:
        case EMSTATE_DEBUG_GUEST_RECOMPILER:
            return DBGFEVENTCTX_REM;

        case EMSTATE_DEBUG_HYPER:
        case EMSTATE_GURU_MEDITATION:
            return DBGFEVENTCTX_HYPER;

        default:
            return DBGFEVENTCTX_OTHER;
    }
}
451
452
/**
 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
 *
 * Serializes writers with hMtxDbgEvtWr, then signals hEvtWait so a debugger
 * blocked in the event-wait API wakes up.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 * @param   pvPayload   Event payload (DBGFEVENT::u data), optional.
 * @param   cbPayload   The size of the event payload, optional.
 */
static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
                                 void const *pvPayload, size_t cbPayload)
{
    PUVM pUVM = pVM->pUVM;
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */

    /*
     * Massage the input a little.
     */
    /* Clamp oversized payloads to the union size rather than overflowing. */
    AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
    if (enmCtx == DBGFEVENTCTX_INVALID)
        enmCtx = dbgfR3FigureEventCtx(pVCpu);

    /*
     * Put the event into the ring buffer.
     */
    RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);

    uint32_t const cDbgEvtMax     = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
    uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
    uint32_t const idxDbgEvtRead  = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);

    PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];

#ifdef DEBUG
    /* Poison the slot in debug builds so stale fields are easy to spot. */
    ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
#endif
    pEvent->enmType   = enmType;
    pEvent->enmCtx    = enmCtx;
    pEvent->idCpu     = pVCpu->idCpu;
    pEvent->uReserved = 0;
    if (cbPayload)
        memcpy(&pEvent->u, pvPayload, cbPayload);

    /* Publish the slot by advancing the write index (wraps via modulo). */
    ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);

    RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);

    /*
     * Signal the debugger.
     */
    return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
}
508
509
510/**
511 * Send event and wait for the debugger to respond.
512 *
513 * @returns Strict VBox status code.
514 * @param pVM The cross context VM structure.
515 * @param pVCpu The CPU sending the event.
516 * @param enmType The event type to send.
517 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
518 */
519DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
520{
521 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
522 if (RT_SUCCESS(rc))
523 rc = dbgfR3CpuWait(pVCpu);
524 return rc;
525}
526
527
528/**
529 * Send event and wait for the debugger to respond, extended version.
530 *
531 * @returns Strict VBox status code.
532 * @param pVM The cross context VM structure.
533 * @param pVCpu The CPU sending the event.
534 * @param enmType The event type to send.
535 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
536 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
537 * @param cbPayload The size of the event payload, optional.
538 */
539DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
540 void const *pvPayload, size_t cbPayload)
541{
542 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
543 if (RT_SUCCESS(rc))
544 rc = dbgfR3CpuWait(pVCpu);
545 return rc;
546}
547
548
/**
 * Send event but do NOT wait for the debugger.
 *
 * Currently only used by dbgfR3CpuCmd().
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 */
DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
{
    return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
}
563
564
565/**
566 * The common event prologue code.
567 *
568 * It will make sure someone is attached, and perhaps process any high priority
569 * pending actions (none yet).
570 *
571 * @returns VBox status code.
572 * @param pVM The cross context VM structure.
573 * @param pVCpu The vCPU cross context structure.
574 * @param enmEvent The event to be sent.
575 */
576static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
577{
578 /*
579 * Check if a debugger is attached.
580 */
581 if ( !pVM->dbgf.s.fAttached
582 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
583 {
584 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
585 return VERR_DBGF_NOT_ATTACHED;
586 }
587
588 /*
589 * Look thru pending commands and finish those which make sense now.
590 */
591 /** @todo Process/purge pending commands. */
592 //int rc = DBGFR3VMMForcedAction(pVM);
593 return VINF_SUCCESS;
594}
595
596
/**
 * Processes a pending event on the current CPU.
 *
 * This is called by EM in response to VINF_EM_DBG_EVENT.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 *
 * @thread EMT(pVCpu)
 */
VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);

    /*
     * Check that we've got an event first.
     */
    AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
    AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
    PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;

    /*
     * Make sure we've got a debugger and is allowed to speak to it.
     */
    int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
    if (RT_FAILURE(rc))
    {
        /** @todo drop them events? */
        return rc; /** @todo this will cause trouble if we're here via an FF! */
    }

    /*
     * Send the event and mark it as ignore.
     * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
     */
    VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
    pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
    return rcStrict;
}
638
639
640/**
641 * Send a generic debugger event which takes no data.
642 *
643 * @returns VBox status code.
644 * @param pVM The cross context VM structure.
645 * @param enmEvent The event to send.
646 * @internal
647 */
648VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
649{
650 PVMCPU pVCpu = VMMGetCpu(pVM);
651 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
652
653 /*
654 * Do stepping filtering.
655 */
656 /** @todo Would be better if we did some of this inside the execution
657 * engines. */
658 if ( enmEvent == DBGFEVENT_STEPPED
659 || enmEvent == DBGFEVENT_STEPPED_HYPER)
660 {
661 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
662 return VINF_EM_DBG_STEP;
663 }
664
665 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
666 if (RT_FAILURE(rc))
667 return rc;
668
669 /*
670 * Send the event and process the reply communication.
671 */
672 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
673}
674
675
/**
 * Send a debugger event which takes the full source file location.
 *
 * Variadic convenience wrapper around DBGFR3EventSrcV.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszFile     Source file.
 * @param   uLine       Line number in source file.
 * @param   pszFunction Function name.
 * @param   pszFormat   Message which accompanies the event.
 * @param   ...         Message arguments.
 * @internal
 */
VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
{
    va_list args;
    va_start(args, pszFormat);
    int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
    va_end(args);
    return rc;
}
697
698
/**
 * Send a debugger event which takes the full source file location.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszFile     Source file.
 * @param   uLine       Line number in source file.
 * @param   pszFunction Function name.
 * @param   pszFormat   Message which accompanies the event.
 * @param   args        Message arguments.
 * @internal
 */
VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Stop other CPUs for some messages so we can inspect the state across
     * all CPUs as best as possible.
     */
    /** @todo This isn't entirely sane as we'd need a wait to back out of this
     *        if the debugger goes fishing and such. */
    switch (enmEvent)
    {
        default:
            break;
        case DBGFEVENT_DEV_STOP:
            rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
            if (RT_SUCCESS(rc))
                break;
            return rc;
    }

    /*
     * Format the message.
     */
    /* pszMessage stays NULL for an empty/absent format string, so the
       debugger can distinguish "no message" from an empty one. */
    char *pszMessage = NULL;
    char szMessage[8192];
    if (pszFormat && *pszFormat)
    {
        pszMessage = &szMessage[0];
        RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
    }

    /*
     * Send the event and process the reply communication.
     */
    DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
    DbgEvent.u.Src.pszFile     = pszFile;
    DbgEvent.u.Src.uLine       = uLine;
    DbgEvent.u.Src.pszFunction = pszFunction;
    DbgEvent.u.Src.pszMessage  = pszMessage;
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
}
759
760
761/**
762 * Send a debugger event which takes the two assertion messages.
763 *
764 * @returns VBox status code.
765 * @param pVM The cross context VM structure.
766 * @param enmEvent The event to send.
767 * @param pszMsg1 First assertion message.
768 * @param pszMsg2 Second assertion message.
769 */
770VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
771{
772 PVMCPU pVCpu = VMMGetCpu(pVM);
773 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
774
775 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
776 if (RT_FAILURE(rc))
777 return rc;
778
779 /*
780 * Send the event and process the reply communication.
781 */
782 DBGFEVENT DbgEvent;
783 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
784 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
785 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
786}
787
788
789/**
790 * Breakpoint was hit somewhere.
791 * Figure out which breakpoint it is and notify the debugger.
792 *
793 * @returns VBox status code.
794 * @param pVM The cross context VM structure.
795 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
796 */
797VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
798{
799 PVMCPU pVCpu = VMMGetCpu(pVM);
800 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
801
802 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
803 if (RT_FAILURE(rc))
804 return rc;
805
806 /*
807 * Halt all other vCPUs as well to give the user the ability to inspect other
808 * vCPU states as well.
809 */
810 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
811 if (RT_FAILURE(rc))
812 return rc;
813
814 /*
815 * Send the event and process the reply communication.
816 */
817 DBGFEVENT DbgEvent;
818 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
819 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
820 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
821 {
822 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
823 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
824 }
825
826 return VERR_DBGF_IPE_1;
827}
828
829
/**
 * Returns whether the given vCPU is waiting for the debugger.
 *
 * Reads the fStopped flag that dbgfR3CpuWait sets/clears atomically.
 *
 * @returns Flags whether the vCPU is currently waiting for the debugger.
 * @param   pUVCpu  The user mode vCPU structure.
 */
DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
{
    return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
}
840
841
842/**
843 * Checks whether the given vCPU is waiting in the debugger.
844 *
845 * @returns Flag whether the indicated vCPU is halted, when VMCPUID_ALL
846 * is given true is returned when at least one vCPU is halted.
847 * @param pUVM The user mode VM structure.
848 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
849 */
850DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
851{
852 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
853
854 /* Check that either the given vCPU or all are actually halted. */
855 if (idCpu != VMCPUID_ALL)
856 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
857
858 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
859 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
860 return true;
861 return false;
862}
863
864
/**
 * Gets the pending debug command for this EMT/CPU, replacing it with
 * DBGFCMD_NO_COMMAND.
 *
 * The atomic exchange guarantees the command is consumed exactly once even
 * if the debugger thread is posting a new one concurrently.
 *
 * @returns Pending command.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @thread  EMT(pUVCpu)
 */
DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
{
    DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
    Log2(("DBGF: Getting command: %d\n", enmCmd));
    return enmCmd;
}
879
880
/**
 * Send a debug command to a CPU, making sure to notify it.
 *
 * The order matters: the command is stored first, then the per-CPU DBGF
 * forced-action flag is raised, and finally the (possibly sleeping) EMT is
 * poked so it notices the flag.
 *
 * @returns VBox status code.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @param   enmCmd  The command to submit to the CPU.
 */
DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
{
    Log2(("DBGF: Setting command to %d\n", enmCmd));
    Assert(enmCmd != DBGFCMD_NO_COMMAND);
    /* A previous command must have been consumed before a new one is posted. */
    AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));

    ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
    VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);

    VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
    return VINF_SUCCESS;
}
900
901
902/**
903 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
904 */
905static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
906{
907 RT_NOREF(pvUser);
908
909 VMCPU_ASSERT_EMT(pVCpu);
910 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
911
912 PUVMCPU pUVCpu = pVCpu->pUVCpu;
913 if ( pVCpu != (PVMCPU)pvUser
914 && !dbgfR3CpuIsHalted(pUVCpu))
915 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
916
917 return VINF_SUCCESS;
918}
919
920
/**
 * Halts all vCPUs of the given VM except for the given one.
 *
 * Runs dbgfR3EventHaltEmtWorker on every EMT at once via the rendezvous
 * mechanism; the excluded vCPU is passed through as the user argument.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpuExclude    The vCPU cross context structure of the vCPU to exclude.
 */
static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
{
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
}
932
933
/**
 * Waits for the debugger to respond.
 *
 * Marks this vCPU as stopped, then spins: servicing forced-action flags
 * (rendezvous, requests, VM state checks) while sleeping between rounds,
 * and executing debugger commands as they arrive.  Returns once a command
 * asks for execution to resume, a terminal EM status is seen, or the
 * debugger detaches.
 *
 * @returns VBox status code suitable for the EM loop.
 * @param   pVCpu   The cross context vCPU structure.
 */
static int dbgfR3CpuWait(PVMCPU pVCpu)
{
    PVM     pVM    = pVCpu->CTX_SUFF(pVM);
    PUVMCPU pUVCpu = pVCpu->pUVCpu;

    LogFlow(("dbgfR3CpuWait:\n"));
    int rcRet = VINF_SUCCESS;

    /* Advertise that this vCPU is now waiting for the debugger. */
    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);

    /*
     * Waits for the debugger to reply (i.e. issue an command).
     */
    for (;;)
    {
        /*
         * Wait.
         */
        for (;;)
        {
            /*
             * Process forced flags before we go sleep.
             */
            /* NOTE(review): VMCPU_FF_REQUEST inside a VM_FF_* mask looks
               suspicious - verify this shouldn't be VM_FF_REQUEST. */
            if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
                || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
            {
                /* A DBGF FF means a command arrived - leave the wait loop. */
                if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
                    break;

                int rc;
                if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
                    rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
                else if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
                         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
                {
                    LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
                    rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                    if (rc == VINF_SUCCESS)
                        rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
                    LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
                }
                else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
                {
                    /* Map terminal VM states onto the matching EM statuses. */
                    VMSTATE enmState = VMR3GetState(pVM);
                    switch (enmState)
                    {
                        case VMSTATE_FATAL_ERROR:
                        case VMSTATE_FATAL_ERROR_LS:
                        case VMSTATE_GURU_MEDITATION:
                        case VMSTATE_GURU_MEDITATION_LS:
                            rc = VINF_EM_SUSPEND;
                            break;
                        case VMSTATE_DESTROYING:
                            rc = VINF_EM_TERMINATE;
                            break;
                        default:
                            rc = VERR_DBGF_IPE_1;
                            AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
                    }
                }
                else
                    rc = VINF_SUCCESS;
                if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                {
                    switch (rc)
                    {
                        /* Debug statuses must not surface here - assert and ignore. */
                        case VINF_EM_DBG_BREAKPOINT:
                        case VINF_EM_DBG_STEPPED:
                        case VINF_EM_DBG_STEP:
                        case VINF_EM_DBG_STOP:
                        case VINF_EM_DBG_EVENT:
                            AssertMsgFailed(("rc=%Rrc\n", rc));
                            break;

                        /* return straight away */
                        case VINF_EM_TERMINATE:
                        case VINF_EM_OFF:
                            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                            return rc;

                        /* remember return code - lower (stricter) EM codes win. */
                        default:
                            AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
                            RT_FALL_THRU();
                        case VINF_EM_RESET:
                        case VINF_EM_SUSPEND:
                        case VINF_EM_HALT:
                        case VINF_EM_RESUME:
                        case VINF_EM_RESCHEDULE:
                        case VINF_EM_RESCHEDULE_REM:
                            if (rc < rcRet || rcRet == VINF_SUCCESS)
                                rcRet = rc;
                            break;
                    }
                }
                else if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else if (pVM->dbgf.s.fAttached)
            {
                /* No FFs pending - block until poked (see dbgfR3CpuSetCmdAndNotify). */
                int rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else
            {
                /* Debugger went away - resume normal execution. */
                LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
                ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                return rcRet;
            }
        }

        /*
         * Process the command.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
        bool fResumeExecution;
        /* Snapshot the command data before atomically consuming the command. */
        DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
        DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
        int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
        if (fResumeExecution)
        {
            /* Merge the command status into rcRet: failures override,
               otherwise the stricter (lower) EM status wins. */
            if (RT_FAILURE(rc))
                rcRet = rc;
            else if (   rc >= VINF_EM_FIRST
                     && rc <= VINF_EM_LAST
                     && (rc < rcRet || rcRet == VINF_SUCCESS))
                rcRet = rc;
            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
            return rcRet;
        }
    }
}
1083
1084
1085/**
1086 * Executes command from debugger.
1087 *
1088 * The caller is responsible for waiting or resuming execution based on the
1089 * value returned in the *pfResumeExecution indicator.
1090 *
1091 * @returns VBox status code. (clearify!)
1092 * @param pVCpu The cross context vCPU structure.
1093 * @param enmCmd The command in question.
1094 * @param pCmdData Pointer to the command data.
1095 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1096 */
1097static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1098{
1099 RT_NOREF(pCmdData); /* for later */
1100
1101 /*
1102 * The cases in this switch returns directly if no event to send.
1103 */
1104 DBGFEVENTTYPE enmEvent;
1105 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1106 switch (enmCmd)
1107 {
1108 /*
1109 * Halt is answered by an event say that we've halted.
1110 */
1111 case DBGFCMD_HALT:
1112 {
1113 *pfResumeExecution = false;
1114 enmEvent = DBGFEVENT_HALT_DONE;
1115 break;
1116 }
1117
1118
1119 /*
1120 * Resume is not answered, we just resume execution.
1121 */
1122 case DBGFCMD_GO:
1123 {
1124 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1125 *pfResumeExecution = true;
1126 return VINF_SUCCESS;
1127 }
1128
1129 /** @todo implement (and define) the rest of the commands. */
1130
1131 /*
1132 * Single step, with trace into.
1133 */
1134 case DBGFCMD_SINGLE_STEP:
1135 {
1136 Log2(("Single step\n"));
1137 PVM pVM = pVCpu->CTX_SUFF(pVM);
1138 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1139 {
1140 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1141 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1142 }
1143 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1144 {
1145 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1146 *pfResumeExecution = true;
1147 return VINF_EM_DBG_STEP;
1148 }
1149 /* Stop after zero steps. Nonsense, but whatever. */
1150 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1151 *pfResumeExecution = false;
1152 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1153 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1154 break;
1155 }
1156
1157 /*
1158 * Default is to send an invalid command event.
1159 */
1160 default:
1161 {
1162 *pfResumeExecution = false;
1163 enmEvent = DBGFEVENT_INVALID_COMMAND;
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * Send the pending event.
1170 */
1171 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1172 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1173 AssertRCStmt(rc, *pfResumeExecution = true);
1174 return rc;
1175}
1176
1177
1178/**
1179 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1180 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1181 */
1182static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1183{
1184 PUVM pUVM = pVM->pUVM;
1185 int *prcAttach = (int *)pvUser;
1186 RT_NOREF(pVCpu);
1187
1188 if (pVM->dbgf.s.fAttached)
1189 {
1190 Log(("dbgfR3Attach: Debugger already attached\n"));
1191 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1192 return VINF_SUCCESS;
1193 }
1194
1195 /*
1196 * The per-CPU bits.
1197 */
1198 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1199 {
1200 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1201
1202 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1203 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1204 }
1205
1206 /*
1207 * Init of the VM -> Debugger communication part living in the global VM structure.
1208 */
1209 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1210 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1211 pUVM->dbgf.s.idxDbgEvtRead = 0;
1212 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1213 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1214 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1215 int rc;
1216 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1217 if (pUVM->dbgf.s.paDbgEvts)
1218 {
1219 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1220 if (RT_SUCCESS(rc))
1221 {
1222 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1223 if (RT_SUCCESS(rc))
1224 {
1225 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1226 if (RT_SUCCESS(rc))
1227 {
1228 /*
1229 * At last, set the attached flag.
1230 */
1231 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1232 *prcAttach = VINF_SUCCESS;
1233 return VINF_SUCCESS;
1234 }
1235
1236 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1237 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1238 }
1239 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1240 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1241 }
1242 }
1243 else
1244 rc = VERR_NO_MEMORY;
1245
1246 *prcAttach = rc;
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Attaches a debugger to the specified VM.
1253 *
1254 * Only one debugger at a time.
1255 *
1256 * @returns VBox status code.
1257 * @param pUVM The user mode VM handle.
1258 */
1259VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1260{
1261 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1262 PVM pVM = pUVM->pVM;
1263 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1264
1265 /*
1266 * Call the VM, use EMT rendezvous for serialization.
1267 */
1268 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1269 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1270 if (RT_SUCCESS(rc))
1271 rc = rcAttach;
1272
1273 return rc;
1274}
1275
1276
1277/**
1278 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1279 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1280 */
1281static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1282{
1283 if (pVCpu->idCpu == 0)
1284 {
1285 PUVM pUVM = (PUVM)pvUser;
1286
1287 /*
1288 * Per-CPU cleanup.
1289 */
1290 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1291 {
1292 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1293
1294 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1295 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1296 }
1297
1298 /*
1299 * De-init of the VM -> Debugger communication part living in the global VM structure.
1300 */
1301 if (pUVM->dbgf.s.paDbgEvts)
1302 {
1303 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1304 pUVM->dbgf.s.paDbgEvts = NULL;
1305 }
1306
1307 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1308 {
1309 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1310 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1311 }
1312
1313 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1314 {
1315 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1316 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1317 }
1318
1319 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1320 {
1321 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1322 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1323 }
1324
1325 pUVM->dbgf.s.cDbgEvtMax = 0;
1326 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1327 pUVM->dbgf.s.idxDbgEvtRead = 0;
1328 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1329 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1330 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1331
1332 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1333 }
1334
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Detaches a debugger from the specified VM.
1341 *
1342 * Caller must be attached to the VM.
1343 *
1344 * @returns VBox status code.
1345 * @param pUVM The user mode VM handle.
1346 */
1347VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1348{
1349 LogFlow(("DBGFR3Detach:\n"));
1350
1351 /*
1352 * Validate input. The UVM handle shall be valid, the VM handle might be
1353 * in the processes of being destroyed already, so deal quietly with that.
1354 */
1355 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1356 PVM pVM = pUVM->pVM;
1357 if (!VM_IS_VALID_EXT(pVM))
1358 return VERR_INVALID_VM_HANDLE;
1359
1360 /*
1361 * Check if attached.
1362 */
1363 if (!pVM->dbgf.s.fAttached)
1364 return VERR_DBGF_NOT_ATTACHED;
1365
1366 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1367}
1368
1369
/**
 * Wait for a debug event.
 *
 * Consumer side of the single-producer/single-consumer event ring buffer set
 * up by dbgfR3Attach.  Intended to be called by the (single) debugger thread.
 *
 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
 * @param   pUVM        The user mode VM handle.
 * @param   cMillies    Number of millis to wait.
 * @param   pEvent      Where to store the event data.
 */
VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
{
    /*
     * Check state.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);

    /* Make sure the caller sees a clean event even on timeout/failure. */
    RT_BZERO(pEvent, sizeof(*pEvent));

    /*
     * Wait for an event to arrive if there are none.
     * Ring is empty when the read index has caught up with the write index;
     * re-check after every wakeup since the semaphore may be signalled early.
     */
    int rc = VINF_SUCCESS;
    uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
    {
        do
        {
            rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
        } while (   RT_SUCCESS(rc)
                 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
    }

    if (RT_SUCCESS(rc))
    {
        Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));

        /* RT_MAX guards against a division by zero should cDbgEvtMax be zero
           (e.g. racing a detach). */
        uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
        memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
        /* Publish the consumed slot only after the copy is complete. */
        ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
    }

    Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
    return rc;
}
1416
1417
1418/**
1419 * Halts VM execution.
1420 *
1421 * After calling this the VM isn't actually halted till an DBGFEVENT_HALT_DONE
1422 * arrives. Until that time it's not possible to issue any new commands.
1423 *
1424 * @returns VBox status code.
1425 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1426 * are halted.
1427 * @param pUVM The user mode VM handle.
1428 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1429 */
1430VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1431{
1432 /*
1433 * Check state.
1434 */
1435 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1436 PVM pVM = pUVM->pVM;
1437 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1438 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1439 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1440
1441 /*
1442 * Halt the requested CPUs as needed.
1443 */
1444 int rc;
1445 if (idCpu != VMCPUID_ALL)
1446 {
1447 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1448 if (!dbgfR3CpuIsHalted(pUVCpu))
1449 {
1450 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1451 rc = VINF_SUCCESS;
1452 }
1453 else
1454 rc = VWRN_DBGF_ALREADY_HALTED;
1455 }
1456 else
1457 {
1458 rc = VWRN_DBGF_ALREADY_HALTED;
1459 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1460 {
1461 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1462 if (!dbgfR3CpuIsHalted(pUVCpu))
1463 {
1464 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1465 rc = VINF_SUCCESS;
1466 }
1467 }
1468 }
1469
1470 return rc;
1471}
1472
1473
1474/**
1475 * Checks if any of the specified vCPUs have been halted by the debugger.
1476 *
1477 * @returns True if at least one halted vCPUs.
1478 * @returns False if no halted vCPUs.
1479 * @param pUVM The user mode VM handle.
1480 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1481 * at least a single vCPU is halted in the debugger.
1482 */
1483VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1484{
1485 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1486 PVM pVM = pUVM->pVM;
1487 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1488 AssertReturn(pVM->dbgf.s.fAttached, false);
1489
1490 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1491}
1492
1493
1494/**
1495 * Checks if the debugger can wait for events or not.
1496 *
1497 * This function is only used by lazy, multiplexing debuggers. :-)
1498 *
1499 * @returns VBox status code.
1500 * @retval VINF_SUCCESS if waitable.
1501 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1502 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1503 * (not asserted) or if the handle is invalid (asserted).
1504 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1505 *
1506 * @param pUVM The user mode VM handle.
1507 */
1508VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1509{
1510 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1511
1512 /* Note! There is a slight race here, unfortunately. */
1513 PVM pVM = pUVM->pVM;
1514 if (!RT_VALID_PTR(pVM))
1515 return VERR_INVALID_VM_HANDLE;
1516 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1517 return VERR_INVALID_VM_HANDLE;
1518 if (!pVM->dbgf.s.fAttached)
1519 return VERR_DBGF_NOT_ATTACHED;
1520
1521 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1522 return VINF_SUCCESS;
1523}
1524
1525
1526/**
1527 * Resumes VM execution.
1528 *
1529 * There is no receipt event on this command.
1530 *
1531 * @returns VBox status code.
1532 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1533 * @param pUVM The user mode VM handle.
1534 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1535 */
1536VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1537{
1538 /*
1539 * Validate input and attachment state.
1540 */
1541 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1542 PVM pVM = pUVM->pVM;
1543 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1544 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1545
1546 /*
1547 * Ping the halted emulation threads, telling them to run.
1548 */
1549 int rc = VWRN_DBGF_ALREADY_RUNNING;
1550 if (idCpu != VMCPUID_ALL)
1551 {
1552 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1553 if (dbgfR3CpuIsHalted(pUVCpu))
1554 {
1555 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1556 AssertRC(rc);
1557 }
1558 }
1559 else
1560 {
1561 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1562 {
1563 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1564 if (dbgfR3CpuIsHalted(pUVCpu))
1565 {
1566 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1567 AssertRC(rc2);
1568 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1569 rc = rc2;
1570 }
1571 }
1572 }
1573
1574 return rc;
1575}
1576
1577
1578/**
1579 * Classifies the current instruction.
1580 *
1581 * @returns Type of instruction.
1582 * @param pVM The cross context VM structure.
1583 * @param pVCpu The current CPU.
1584 * @thread EMT(pVCpu)
1585 */
1586static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1587{
1588#ifdef VBOX_VMM_TARGET_ARMV8
1589 /*
1590 * Read the instruction, this ASSUMES running in A64 mode.
1591 */
1592 size_t cbRead = 0;
1593 uint32_t u32Insn = 0;
1594 int rc = PGMR3DbgReadGCPtr(pVM, &u32Insn, CPUMGetGuestFlatPC(pVCpu), u32Insn, 0 /*fFlags*/, &cbRead);
1595 if (RT_SUCCESS(rc))
1596 {
1597 /*
1598 * Do minimal parsing. No real need to involve the disassembler here.
1599 */
1600 if ( (u32Insn & 0xfffffc1f) == 0xd65f0000 /* RET */
1601 || (u32Insn & 0xfffffc1f) == 0xd65f081f /* RETAA */
1602 || (u32Insn & 0xfffffc1f) == 0xd65f0c1f /* RETAB */
1603 || (u32Insn & 0xffffffff) == 0xd69f03e0 /* ERET */
1604 || (u32Insn & 0xffffffff) == 0xd69f0bff /* ERETAA */
1605 || (u32Insn & 0xffffffff) == 0xd69f0fff /* ERETAB */)
1606 return DBGFSTEPINSTRTYPE_RET;
1607 else if ( (u32Insn & 0xfffffc1f) == 0xd63f0000 /* BLR */
1608 || (u32Insn & 0xfffffc1f) == 0xd63f081f /* BLRAAZ */
1609 || (u32Insn & 0xfffffc1f) == 0xd63f0c1f /* BLRABZ */
1610 || (u32Insn & 0xfffffc00) == 0xd73f0800 /* BLRAA */
1611 || (u32Insn & 0xfffffc00) == 0xd73f0c00 /* BLRAB */
1612 || (u32Insn & 0xfc000000) == 0x14000000 /* BL */
1613 || (u32Insn & 0xffe0001f) == 0xd4000001 /* SVC */
1614 || (u32Insn & 0xffe0001f) == 0xd4000002 /* HVC */
1615 || (u32Insn & 0xffe0001f) == 0xd4000003 /* SMC */
1616 || (u32Insn & 0xffe0001f) == 0xd4200000 /* BRK */
1617 || (u32Insn & 0xffe0001f) == 0xd4400000 /* HLT */)
1618 return DBGFSTEPINSTRTYPE_CALL;
1619 else
1620 return DBGFSTEPINSTRTYPE_OTHER;
1621 }
1622#else
1623 /*
1624 * Read the instruction.
1625 */
1626 size_t cbRead = 0;
1627 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1628 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1629 if (RT_SUCCESS(rc))
1630 {
1631 /*
1632 * Do minimal parsing. No real need to involve the disassembler here.
1633 */
1634 uint8_t *pb = abOpcode;
1635 for (;;)
1636 {
1637 switch (*pb++)
1638 {
1639 default:
1640 return DBGFSTEPINSTRTYPE_OTHER;
1641
1642 case 0xe8: /* call rel16/32 */
1643 case 0x9a: /* call farptr */
1644 case 0xcc: /* int3 */
1645 case 0xcd: /* int xx */
1646 // case 0xce: /* into */
1647 return DBGFSTEPINSTRTYPE_CALL;
1648
1649 case 0xc2: /* ret xx */
1650 case 0xc3: /* ret */
1651 case 0xca: /* retf xx */
1652 case 0xcb: /* retf */
1653 case 0xcf: /* iret */
1654 return DBGFSTEPINSTRTYPE_RET;
1655
1656 case 0xff:
1657 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1658 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1659 return DBGFSTEPINSTRTYPE_CALL;
1660 return DBGFSTEPINSTRTYPE_OTHER;
1661
1662 case 0x0f:
1663 switch (*pb++)
1664 {
1665 case 0x05: /* syscall */
1666 case 0x34: /* sysenter */
1667 return DBGFSTEPINSTRTYPE_CALL;
1668 case 0x07: /* sysret */
1669 case 0x35: /* sysexit */
1670 return DBGFSTEPINSTRTYPE_RET;
1671 }
1672 break;
1673
1674 /* Must handle some REX prefixes. So we do all normal prefixes. */
1675 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1676 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1677 if (!CPUMIsGuestIn64BitCode(pVCpu))
1678 return DBGFSTEPINSTRTYPE_OTHER;
1679 break;
1680
1681 case 0x2e: /* CS */
1682 case 0x36: /* SS */
1683 case 0x3e: /* DS */
1684 case 0x26: /* ES */
1685 case 0x64: /* FS */
1686 case 0x65: /* GS */
1687 case 0x66: /* op size */
1688 case 0x67: /* addr size */
1689 case 0xf0: /* lock */
1690 case 0xf2: /* REPNZ */
1691 case 0xf3: /* REPZ */
1692 break;
1693 }
1694 }
1695 }
1696#endif
1697
1698 return DBGFSTEPINSTRTYPE_INVALID;
1699}
1700
1701
/**
 * Checks if the stepping has reached a stop point.
 *
 * Called when raising a stepped event.
 *
 * @returns true if the event should be raised, false if we should take one more
 *          step first.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure of the calling EMT.
 * @thread  EMT(pVCpu)
 */
static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Check valid pVCpu and that it matches the CPU one stepping.
     * Any mismatch (or exhausted step budget) falls through to 'return true'
     * at the bottom, i.e. the event is raised.
     */
    if (pVCpu)
    {
        if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
        {
            /*
             * Increase the number of steps and see if we've reached the max.
             */
            pVM->dbgf.s.SteppingFilter.cSteps++;
            if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
            {
                /*
                 * Check PC and SP address filtering.
                 */
                if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
                {
                    if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
                        && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
                        return true;
                    /* Unsigned wrap-around makes this an in-range test of SP
                       against [AddrStackPop, AddrStackPop + cbStackPop). */
                    if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
                        &&   CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
                           < pVM->dbgf.s.SteppingFilter.cbStackPop)
                        return true;
                }

                /*
                 * Do step-over filtering separate from the step-into one.
                 * uCallDepth tracks how deep into nested calls we currently
                 * are; 0 means we're back at the level stepping started on.
                 */
                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
                {
                    DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
                    switch (enmType)
                    {
                        default:
                            /* Ordinary instruction at the original call level
                               with no extra stop filters -> stop here. */
                            if (   pVM->dbgf.s.SteppingFilter.uCallDepth != 0
                                || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
                                break;
                            return true;
                        case DBGFSTEPINSTRTYPE_CALL:
                            if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
                                && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
                                return true;
                            pVM->dbgf.s.SteppingFilter.uCallDepth++;
                            break;
                        case DBGFSTEPINSTRTYPE_RET:
                            if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
                            {
                                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
                                    return true;
                                /* If after return, we use the cMaxStep limit to stop the next time. */
                                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
                                    pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
                            }
                            else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
                                pVM->dbgf.s.SteppingFilter.uCallDepth--;
                            break;
                    }
                    return false;
                }
                /*
                 * Filtered step-into: only call/ret style events stop us.
                 */
                else if (  pVM->dbgf.s.SteppingFilter.fFlags
                         & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
                {
                    DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
                    switch (enmType)
                    {
                        default:
                            break;
                        case DBGFSTEPINSTRTYPE_CALL:
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
                                return true;
                            break;
                        case DBGFSTEPINSTRTYPE_RET:
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
                                return true;
                            /* If after return, we use the cMaxStep limit to stop the next time. */
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
                                pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
                            break;
                    }
                    return false;
                }
            }
        }
    }

    return true;
}
1807
1808
1809/**
1810 * Step Into.
1811 *
1812 * A single step event is generated from this command.
1813 * The current implementation is not reliable, so don't rely on the event coming.
1814 *
1815 * @returns VBox status code.
1816 * @param pUVM The user mode VM handle.
1817 * @param idCpu The ID of the CPU to single step on.
1818 */
1819VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1820{
1821 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1822}
1823
1824
/**
 * Full fleged step.
 *
 * This extended stepping API allows for doing multiple steps before raising an
 * event, helping implementing step over, step out and other more advanced
 * features.
 *
 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
 * DBGFEVENT_STEPPED_EVENT.  However the stepping may be interrupted by other
 * events, which will abort the stepping.
 *
 * The stop on pop area feature is for safeguarding step out.
 *
 * Please note though, that it will always use stepping and never breakpoints.
 * While this allows for a much greater flexibility it can at times be rather
 * slow.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the CPU to single step on.
 * @param   fFlags          Flags controlling the stepping, DBGF_STEP_F_XXX.
 *                          Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
 *                          always be specified.
 * @param   pStopPcAddr     Address to stop executing at.  Completely ignored
 *                          unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
 * @param   pStopPopAddr    Stack address that SP must be lower than when
 *                          performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
 * @param   cbStopPop       The range starting at @a pStopPopAddr which is
 *                          considered to be within the same thread stack. Note
 *                          that the API allows @a pStopPopAddr and @a cbStopPop
 *                          to form an area that wraps around and it will
 *                          consider the part starting at 0 as included.
 * @param   cMaxSteps       The maximum number of steps to take.  This is to
 *                          prevent stepping for ever, so passing UINT32_MAX is
 *                          not recommended.
 *
 * @remarks The two address arguments must be guest context virtual addresses,
 *          or HMA.  The code doesn't make much of a point of out HMA, though.
 */
VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
                            PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
{
    /*
     * Check state.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
    /* Exactly one of INTO/OVER must be given. */
    AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
    if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
    {
        AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
        AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
        AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
    }
    AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
    if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
    {
        AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
        AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
        AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
        AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
    }

    AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
    PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
    /* The target vCPU must be halted in the debugger for stepping to make sense. */
    if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
    { /* likely */ }
    else
        return VERR_SEM_OUT_OF_TURN;
    Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);

    /*
     * Send the emulation thread a single-step command.
     * SteppingFilter.idCpu == NIL_VMCPUID disables filtering entirely (plain
     * step-into), otherwise dbgfStepAreWeThereYet applies the filter on idCpu.
     */
    if (fFlags == DBGF_STEP_F_INTO)
        pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
    else
        pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
    pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
    if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
        pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
    else
        pVM->dbgf.s.SteppingFilter.AddrPc = 0;
    if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
    {
        pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
        pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
    }
    else
    {
        pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
        pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
    }

    pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
    pVM->dbgf.s.SteppingFilter.cSteps = 0;
    pVM->dbgf.s.SteppingFilter.uCallDepth = 0;

    Assert(dbgfR3CpuIsHalted(pUVCpu));
    return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
}
1929
1930
1931
/**
 * dbgfR3EventConfigEx argument packet.
 */
typedef struct DBGFR3EVENTCONFIGEXARGS
{
    /** The event configuration entries to apply (read-only for the callback). */
    PCDBGFEVENTCONFIG paConfigs;
    /** Number of entries in paConfigs. */
    size_t cConfigs;
    /** Where the rendezvous callback deposits its status code. */
    int rc;
} DBGFR3EVENTCONFIGEXARGS;
/** Pointer to a dbgfR3EventConfigEx argument packet. */
typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1943
1944
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
 *
 * EMT(0) applies the configuration changes; every EMT (including EMT(0))
 * then notifies its execution engine so per-CPU debug state gets resynced.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    if (pVCpu->idCpu == 0)
    {
        PDBGFR3EVENTCONFIGEXARGS        pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
        DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
        size_t                          cConfigs = pArgs->cConfigs;

        /*
         * Apply the changes to the selected-events bitmap, counting how many
         * bits actually flipped.
         */
        unsigned cChanges = 0;
        for (uint32_t i = 0; i < cConfigs; i++)
        {
            DBGFEVENTTYPE enmType = paConfigs[i].enmType;
            AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
            if (paConfigs[i].fEnabled)
                cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
            else
                cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
        }

        /*
         * Inform HM about changes.
         */
        if (cChanges > 0)
        {
            if (HMIsEnabled(pVM))
            {
                HMR3NotifyDebugEventChanged(pVM);
                HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
            }
            else if (VM_IS_NEM_ENABLED(pVM))
            {
                NEMR3NotifyDebugEventChanged(pVM);
                NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
            }
        }
    }
    /* The other EMTs only need the per-CPU notification. */
    else if (HMIsEnabled(pVM))
        HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
    else if (VM_IS_NEM_ENABLED(pVM))
        NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);

    return VINF_SUCCESS;
}
1994
1995
1996/**
1997 * Configures (enables/disables) multiple selectable debug events.
1998 *
1999 * @returns VBox status code.
2000 * @param pUVM The user mode VM handle.
2001 * @param paConfigs The event to configure and their new state.
2002 * @param cConfigs Number of entries in @a paConfigs.
2003 */
2004VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2005{
2006 /*
2007 * Validate input.
2008 */
2009 size_t i = cConfigs;
2010 while (i-- > 0)
2011 {
2012 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
2013 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2014 }
2015 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2016 PVM pVM = pUVM->pVM;
2017 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2018
2019 /*
2020 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2021 * can sync their data and execution with new debug state.
2022 */
2023 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2024 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2025 dbgfR3EventConfigEx, &Args);
2026 if (RT_SUCCESS(rc))
2027 rc = Args.rc;
2028 return rc;
2029}
2030
2031
2032/**
2033 * Enables or disables a selectable debug event.
2034 *
2035 * @returns VBox status code.
2036 * @param pUVM The user mode VM handle.
2037 * @param enmEvent The selectable debug event.
2038 * @param fEnabled The new state.
2039 */
2040VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
2041{
2042 /*
2043 * Convert to an array call.
2044 */
2045 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
2046 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
2047}
2048
2049
2050/**
2051 * Checks if the given selectable event is enabled.
2052 *
2053 * @returns true if enabled, false if not or invalid input.
2054 * @param pUVM The user mode VM handle.
2055 * @param enmEvent The selectable debug event.
2056 * @sa DBGFR3EventQuery
2057 */
2058VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2059{
2060 /*
2061 * Validate input.
2062 */
2063 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2064 && enmEvent < DBGFEVENT_END, false);
2065 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2066 || enmEvent == DBGFEVENT_BREAKPOINT
2067 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2068 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2069
2070 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2071 PVM pVM = pUVM->pVM;
2072 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2073
2074 /*
2075 * Check the event status.
2076 */
2077 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2078}
2079
2080
2081/**
2082 * Queries the status of a set of events.
2083 *
2084 * @returns VBox status code.
2085 * @param pUVM The user mode VM handle.
2086 * @param paConfigs The events to query and where to return the state.
2087 * @param cConfigs The number of elements in @a paConfigs.
2088 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2089 */
2090VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2091{
2092 /*
2093 * Validate input.
2094 */
2095 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2096 PVM pVM = pUVM->pVM;
2097 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2098
2099 for (size_t i = 0; i < cConfigs; i++)
2100 {
2101 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2102 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2103 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2104 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2105 || enmType == DBGFEVENT_BREAKPOINT
2106 || enmType == DBGFEVENT_BREAKPOINT_IO
2107 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2108 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2109 }
2110
2111 return VINF_SUCCESS;
2112}
2113
2114
/**
 * dbgfR3InterruptConfigEx argument packet.
 *
 * Carries the caller's configuration array across the EMT rendezvous into
 * the dbgfR3InterruptConfigEx worker.
 */
typedef struct DBGFR3INTERRUPTCONFIGEXARGS
{
    /** The interrupt configuration entries to apply (caller owned, read-only). */
    PCDBGFINTERRUPTCONFIG   paConfigs;
    /** Number of entries in paConfigs. */
    size_t                  cConfigs;
    /** Status code to hand back to the caller (set by the worker). */
    int                     rc;
} DBGFR3INTERRUPTCONFIGEXARGS;
/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2126
2127/**
2128 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2129 * Worker for DBGFR3InterruptConfigEx.}
2130 */
2131static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2132{
2133 if (pVCpu->idCpu == 0)
2134 {
2135 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2136 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2137 size_t cConfigs = pArgs->cConfigs;
2138
2139 /*
2140 * Apply the changes.
2141 */
2142 bool fChanged = false;
2143 bool fThis;
2144 for (uint32_t i = 0; i < cConfigs; i++)
2145 {
2146 /*
2147 * Hardware interrupts.
2148 */
2149 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2150 {
2151 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2152 if (fThis)
2153 {
2154 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2155 pVM->dbgf.s.cHardIntBreakpoints++;
2156 }
2157 }
2158 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2159 {
2160 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2161 if (fThis)
2162 {
2163 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2164 pVM->dbgf.s.cHardIntBreakpoints--;
2165 }
2166 }
2167
2168 /*
2169 * Software interrupts.
2170 */
2171 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2172 {
2173 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2174 if (fThis)
2175 {
2176 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2177 pVM->dbgf.s.cSoftIntBreakpoints++;
2178 }
2179 }
2180 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2181 {
2182 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2183 if (fThis)
2184 {
2185 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2186 pVM->dbgf.s.cSoftIntBreakpoints--;
2187 }
2188 }
2189 }
2190
2191 /*
2192 * Update the event bitmap entries.
2193 */
2194 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2195 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2196 else
2197 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2198
2199 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2200 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2201 else
2202 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2203
2204 /*
2205 * Inform HM about changes.
2206 */
2207 if (fChanged)
2208 {
2209 if (HMIsEnabled(pVM))
2210 {
2211 HMR3NotifyDebugEventChanged(pVM);
2212 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2213 }
2214 else if (VM_IS_NEM_ENABLED(pVM))
2215 {
2216 NEMR3NotifyDebugEventChanged(pVM);
2217 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2218 }
2219 }
2220 }
2221 else if (HMIsEnabled(pVM))
2222 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2223 else if (VM_IS_NEM_ENABLED(pVM))
2224 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2225
2226 return VINF_SUCCESS;
2227}
2228
2229
2230/**
2231 * Changes
2232 *
2233 * @returns VBox status code.
2234 * @param pUVM The user mode VM handle.
2235 * @param paConfigs The events to query and where to return the state.
2236 * @param cConfigs The number of elements in @a paConfigs.
2237 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2238 */
2239VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2240{
2241 /*
2242 * Validate input.
2243 */
2244 size_t i = cConfigs;
2245 while (i-- > 0)
2246 {
2247 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2248 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2249 }
2250
2251 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2252 PVM pVM = pUVM->pVM;
2253 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2254
2255 /*
2256 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2257 * can sync their data and execution with new debug state.
2258 */
2259 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2260 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2261 dbgfR3InterruptConfigEx, &Args);
2262 if (RT_SUCCESS(rc))
2263 rc = Args.rc;
2264 return rc;
2265}
2266
2267
/**
 * Configures interception of a hardware interrupt.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   iInterrupt  The interrupt number.
 * @param   fEnabled    Whether interception is enabled or not.
 * @sa      DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
 */
VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
{
    /*
     * Convert to DBGFR3InterruptConfigEx call.
     *
     * NOTE(review): the (uint8_t)fEnabled cast feeds the enmHardState field
     * directly, which assumes DBGFINTERRUPTSTATE_DISABLED == 0 and
     * DBGFINTERRUPTSTATE_ENABLED == 1 -- verify against the enum declaration
     * in dbgf.h; an explicit fEnabled ? ENABLED : DISABLED would be safer.
     */
    DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
    return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
}
2285
2286
/**
 * Configures interception of a software interrupt.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   iInterrupt  The interrupt number.
 * @param   fEnabled    Whether interception is enabled or not.
 * @sa      DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
 */
VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
{
    /*
     * Convert to DBGFR3InterruptConfigEx call.
     *
     * NOTE(review): the (uint8_t)fEnabled cast feeds the enmSoftState field
     * directly, which assumes DBGFINTERRUPTSTATE_DISABLED == 0 and
     * DBGFINTERRUPTSTATE_ENABLED == 1 -- verify against the enum declaration
     * in dbgf.h (same concern as DBGFR3InterruptHardwareConfig).
     */
    DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
    return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
}
2304
2305
2306/**
2307 * Checks whether interception is enabled for a hardware interrupt.
2308 *
2309 * @returns true if enabled, false if not or invalid input.
2310 * @param pUVM The user mode VM handle.
2311 * @param iInterrupt The interrupt number.
2312 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2313 * DBGF_IS_SOFTWARE_INT_ENABLED
2314 */
2315VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2316{
2317 /*
2318 * Validate input.
2319 */
2320 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2321 PVM pVM = pUVM->pVM;
2322 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2323
2324 /*
2325 * Check it.
2326 */
2327 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2328}
2329
2330
2331/**
2332 * Checks whether interception is enabled for a software interrupt.
2333 *
2334 * @returns true if enabled, false if not or invalid input.
2335 * @param pUVM The user mode VM handle.
2336 * @param iInterrupt The interrupt number.
2337 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2338 * DBGF_IS_HARDWARE_INT_ENABLED,
2339 */
2340VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2341{
2342 /*
2343 * Validate input.
2344 */
2345 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2346 PVM pVM = pUVM->pVM;
2347 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2348
2349 /*
2350 * Check it.
2351 */
2352 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2353}
2354
2355
2356
/**
 * Call this to single step programmatically.
 *
 * You must pass down the return code to the EM loop! That's
 * where the actual single stepping take place (at least in the
 * current implementation).
 *
 * @returns VINF_EM_DBG_STEP
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 *
 * @thread  VCpu EMT
 * @internal
 */
VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Flag the raw single-step request; EM acts on it when it sees the
       VINF_EM_DBG_STEP status we return. */
    pVCpu->dbgf.s.fSingleSteppingRaw = true;
    return VINF_EM_DBG_STEP;
}
2378
2379
/**
 * Inject an NMI into a running VM (only VCPU 0!)
 *
 * NOTE(review): the "(only VCPU 0!)" remark looks stale -- the function
 * validates and uses the @a idCpu parameter for any CPU; confirm and update.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM structure.
 * @param   idCpu       The ID of the CPU to inject the NMI on.
 */
VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo Implement generic NMI injection. */
    /** @todo NEM: NMI injection   */
    /* Only the HM execution engine handles the NMI force-flag today; any
       other engine (NEM included) gets VERR_NOT_SUP_BY_NEM. */
    if (!HMIsEnabled(pVM))
        return VERR_NOT_SUP_BY_NEM;

    /* Raise the NMI force-action flag on the target CPU; HM picks it up on
       the next VM-entry. */
    VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
    return VINF_SUCCESS;
}
2402
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette