VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp@ 97441

Last change on this file since 97441 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 14.5 KB
Line 
1/* $Id: DBGFAll.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, All Context Code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DBGF
33#include <VBox/vmm/dbgf.h>
34#include "DBGFInternal.h"
35#include <VBox/vmm/vmcc.h>
36#include <VBox/err.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/stdarg.h>
40
41
/*
 * Compile-time layout checks: each read-only mirror member in VM::dbgf.ro must
 * have the same size and offset as its writable counterpart in VM::dbgf.s
 * (presumably two views of the same storage — these asserts guarantee the
 * layouts cannot silently drift apart).
 */
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmSoftIntBreakpoints,  VM, dbgf.ro.bmSoftIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmHardIntBreakpoints,  VM, dbgf.ro.bmHardIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmSelectedEvents,      VM, dbgf.ro.bmSelectedEvents);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cHardIntBreakpoints,   VM, dbgf.ro.cHardIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cSoftIntBreakpoints,   VM, dbgf.ro.cSoftIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cSelectedEvents,       VM, dbgf.ro.cSelectedEvents);
51
52
53/**
54 * Gets the hardware breakpoint configuration as DR7.
55 *
56 * @returns DR7 from the DBGF point of view.
57 * @param pVM The cross context VM structure.
58 */
59VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR7(PVM pVM)
60{
61 RTGCUINTREG uDr7 = X86_DR7_GD | X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
62 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
63 {
64 if ( pVM->dbgf.s.aHwBreakpoints[i].fEnabled
65 && pVM->dbgf.s.aHwBreakpoints[i].hBp != NIL_DBGFBP)
66 {
67 static const uint8_t s_au8Sizes[8] =
68 {
69 X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_WORD, X86_DR7_LEN_BYTE,
70 X86_DR7_LEN_DWORD,X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_QWORD
71 };
72 uDr7 |= X86_DR7_G(i)
73 | X86_DR7_RW(i, pVM->dbgf.s.aHwBreakpoints[i].fType)
74 | X86_DR7_LEN(i, s_au8Sizes[pVM->dbgf.s.aHwBreakpoints[i].cb]);
75 }
76 }
77 return uDr7;
78}
79
80
81/**
82 * Gets the address of the hardware breakpoint number 0.
83 *
84 * @returns DR0 from the DBGF point of view.
85 * @param pVM The cross context VM structure.
86 */
87VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM)
88{
89 return pVM->dbgf.s.aHwBreakpoints[0].GCPtr;
90}
91
92
93/**
94 * Gets the address of the hardware breakpoint number 1.
95 *
96 * @returns DR1 from the DBGF point of view.
97 * @param pVM The cross context VM structure.
98 */
99VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM)
100{
101 return pVM->dbgf.s.aHwBreakpoints[1].GCPtr;
102}
103
104
105/**
106 * Gets the address of the hardware breakpoint number 2.
107 *
108 * @returns DR2 from the DBGF point of view.
109 * @param pVM The cross context VM structure.
110 */
111VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM)
112{
113 return pVM->dbgf.s.aHwBreakpoints[2].GCPtr;
114}
115
116
117/**
118 * Gets the address of the hardware breakpoint number 3.
119 *
120 * @returns DR3 from the DBGF point of view.
121 * @param pVM The cross context VM structure.
122 */
123VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM)
124{
125 return pVM->dbgf.s.aHwBreakpoints[3].GCPtr;
126}
127
128
129/**
130 * Checks if any of the hardware breakpoints are armed.
131 *
132 * @returns true if armed, false if not.
133 * @param pVM The cross context VM structure.
134 * @remarks Don't call this from CPUMRecalcHyperDRx!
135 */
136VMM_INT_DECL(bool) DBGFBpIsHwArmed(PVM pVM)
137{
138 return pVM->dbgf.s.cEnabledHwBreakpoints > 0;
139}
140
141
142/**
143 * Checks if any of the hardware I/O breakpoints are armed.
144 *
145 * @returns true if armed, false if not.
146 * @param pVM The cross context VM structure.
147 * @remarks Don't call this from CPUMRecalcHyperDRx!
148 */
149VMM_INT_DECL(bool) DBGFBpIsHwIoArmed(PVM pVM)
150{
151 return pVM->dbgf.s.cEnabledHwIoBreakpoints > 0;
152}
153
154
155/**
156 * Checks if any INT3 breakpoints are armed.
157 *
158 * @returns true if armed, false if not.
159 * @param pVM The cross context VM structure.
160 * @remarks Don't call this from CPUMRecalcHyperDRx!
161 */
162VMM_INT_DECL(bool) DBGFBpIsInt3Armed(PVM pVM)
163{
164 /** @todo There was a todo here and returning false when I (bird) removed
165 * VBOX_WITH_LOTS_OF_DBGF_BPS, so this might not be correct. */
166 return pVM->dbgf.s.cEnabledInt3Breakpoints > 0;
167}
168
169
/**
 * Checks I/O access for guest or hypervisor breakpoints.
 *
 * @returns Strict VBox status code
 * @retval  VINF_SUCCESS no breakpoint.
 * @retval  VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
 * @retval  VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
 *          been updated appropriately.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        The CPU context for the calling EMT.
 * @param   uIoPort     The I/O port being accessed.
 * @param   cbValue     The size/width of the access, in bytes.
 */
VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
{
    /* The accessed port range is [uIoPortFirst, uIoPortLast], inclusive. */
    uint32_t const uIoPortFirst = uIoPort;
    uint32_t const uIoPortLast = uIoPortFirst + cbValue - 1;

    /*
     * Check hyper breakpoints first as the VMM debugger has priority over
     * the guest.
     */
    if (pVM->dbgf.s.cEnabledHwIoBreakpoints > 0)
    {
        for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
        {
            if (   pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO
                && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
                && pVM->dbgf.s.aHwBreakpoints[iBp].hBp != NIL_DBGFBP)
            {
                /* Align the breakpoint port down to its size (cb is a power of
                   two) and test the resulting inclusive range for overlap with
                   the accessed range. */
                uint8_t  cbReg      = pVM->dbgf.s.aHwBreakpoints[iBp].cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
                uint64_t uDrXFirst  = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
                uint64_t uDrXLast   = uDrXFirst + cbReg - 1;
                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
                {
                    /* (See also DBGFRZTrap01Handler.) */
                    pVCpu->dbgf.s.hBpActive = pVM->dbgf.s.aHwBreakpoints[iBp].hBp;
                    pVCpu->dbgf.s.fSingleSteppingRaw = false;

                    LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                             iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                    return VINF_EM_DBG_BREAKPOINT;
                }
            }
        }
    }

    /*
     * Check the guest.  Guest I/O breakpoints only apply when an R/W=I/O
     * encoding is present and CR4.DE is set (debugging extensions enabled).
     */
    uint32_t const uDr7 = pCtx->dr[7];
    if (   (uDr7 & X86_DR7_ENABLED_MASK)
        && X86_DR7_ANY_RW_IO(uDr7)
        && (pCtx->cr4 & X86_CR4_DE) )
    {
        for (unsigned iBp = 0; iBp < 4; iBp++)
        {
            if (   (uDr7 & X86_DR7_L_G(iBp))
                && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
            {
                /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
                /* s_abInvAlign maps the DR7 LEN encoding to (size - 1), used
                   both as an alignment mask and as the inclusive range length. */
                static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
                uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
                uint64_t uDrXFirst  = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
                uint64_t uDrXLast   = uDrXFirst + cbInvAlign;

                /* Inclusive range-overlap test against the accessed ports. */
                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
                {
                    /*
                     * Update DR6 and DR7.
                     *
                     * See "AMD64 Architecture Programmer's Manual Volume 2",
                     * chapter 13.1.1.3 for details on DR6 bits.  The basics is
                     * that the B0..B3 bits are always cleared while the others
                     * must be cleared by software.
                     *
                     * The following sub chapters says the GD bit is always
                     * cleared when generating a #DB so the handler can safely
                     * access the debug registers.
                     */
                    pCtx->dr[6] &= ~X86_DR6_B_MASK;
                    pCtx->dr[6] |= X86_DR6_B(iBp);
                    pCtx->dr[7] &= ~X86_DR7_GD;
                    LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                             iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                    return VINF_EM_RAW_GUEST_TRAP;
                }
            }
        }
    }
    return VINF_SUCCESS;
}
264
265
266/**
267 * Returns the single stepping state for a virtual CPU.
268 *
269 * @returns stepping (true) or not (false).
270 *
271 * @param pVCpu The cross context virtual CPU structure.
272 */
273VMM_INT_DECL(bool) DBGFIsStepping(PVMCPU pVCpu)
274{
275 return pVCpu->dbgf.s.fSingleSteppingRaw;
276}
277
278
279/**
280 * Checks if the specified generic event is enabled or not.
281 *
282 * @returns true / false.
283 * @param pVM The cross context VM structure.
284 * @param enmEvent The generic event being raised.
285 * @param uEventArg The argument of that event.
286 */
287DECLINLINE(bool) dbgfEventIsGenericWithArgEnabled(PVM pVM, DBGFEVENTTYPE enmEvent, uint64_t uEventArg)
288{
289 if (DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
290 {
291 switch (enmEvent)
292 {
293 case DBGFEVENT_INTERRUPT_HARDWARE:
294 AssertReturn(uEventArg < 256, false);
295 return ASMBitTest(pVM->dbgf.s.bmHardIntBreakpoints, (uint32_t)uEventArg);
296
297 case DBGFEVENT_INTERRUPT_SOFTWARE:
298 AssertReturn(uEventArg < 256, false);
299 return ASMBitTest(pVM->dbgf.s.bmSoftIntBreakpoints, (uint32_t)uEventArg);
300
301 default:
302 return true;
303
304 }
305 }
306 return false;
307}
308
309
310/**
311 * Raises a generic debug event if enabled and not being ignored.
312 *
313 * @returns Strict VBox status code.
314 * @retval VINF_EM_DBG_EVENT if the event was raised and the caller should
315 * return ASAP to the debugger (via EM). We set VMCPU_FF_DBGF so, it
316 * is okay not to pass this along in some situations.
317 * @retval VINF_SUCCESS if the event was disabled or ignored.
318 *
319 * @param pVM The cross context VM structure.
320 * @param pVCpu The cross context virtual CPU structure.
321 * @param enmEvent The generic event being raised.
322 * @param enmCtx The context in which this event is being raised.
323 * @param cArgs Number of arguments (0 - 6).
324 * @param ... Event arguments.
325 *
326 * @thread EMT(pVCpu)
327 */
328VMM_INT_DECL(VBOXSTRICTRC) DBGFEventGenericWithArgs(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent, DBGFEVENTCTX enmCtx,
329 unsigned cArgs, ...)
330{
331 Assert(cArgs < RT_ELEMENTS(pVCpu->dbgf.s.aEvents[0].Event.u.Generic.auArgs));
332
333 /*
334 * Is it enabled.
335 */
336 va_list va;
337 va_start(va, cArgs);
338 uint64_t uEventArg0 = cArgs ? va_arg(va, uint64_t) : 0;
339 if (dbgfEventIsGenericWithArgEnabled(pVM, enmEvent, uEventArg0))
340 {
341 /*
342 * Any events on the stack. Should the incoming event be ignored?
343 */
344 uint64_t const rip = CPUMGetGuestRIP(pVCpu);
345 uint32_t i = pVCpu->dbgf.s.cEvents;
346 if (i > 0)
347 {
348 while (i-- > 0)
349 {
350 if ( pVCpu->dbgf.s.aEvents[i].Event.enmType == enmEvent
351 && pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE
352 && pVCpu->dbgf.s.aEvents[i].rip == rip)
353 {
354 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_RESTORABLE;
355 va_end(va);
356 return VINF_SUCCESS;
357 }
358 Assert(pVCpu->dbgf.s.aEvents[i].enmState != DBGFEVENTSTATE_CURRENT);
359 }
360
361 /*
362 * Trim the event stack.
363 */
364 i = pVCpu->dbgf.s.cEvents;
365 while (i-- > 0)
366 {
367 if ( pVCpu->dbgf.s.aEvents[i].rip == rip
368 && ( pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_RESTORABLE
369 || pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE) )
370 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_IGNORE;
371 else
372 {
373 if (i + 1 != pVCpu->dbgf.s.cEvents)
374 memmove(&pVCpu->dbgf.s.aEvents[i], &pVCpu->dbgf.s.aEvents[i + 1],
375 (pVCpu->dbgf.s.cEvents - i) * sizeof(pVCpu->dbgf.s.aEvents));
376 pVCpu->dbgf.s.cEvents--;
377 }
378 }
379
380 i = pVCpu->dbgf.s.cEvents;
381 AssertStmt(i < RT_ELEMENTS(pVCpu->dbgf.s.aEvents), i = RT_ELEMENTS(pVCpu->dbgf.s.aEvents) - 1);
382 }
383
384 /*
385 * Push the event.
386 */
387 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_CURRENT;
388 pVCpu->dbgf.s.aEvents[i].rip = rip;
389 pVCpu->dbgf.s.aEvents[i].Event.enmType = enmEvent;
390 pVCpu->dbgf.s.aEvents[i].Event.enmCtx = enmCtx;
391 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.cArgs = cArgs;
392 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs[0] = uEventArg0;
393 if (cArgs > 1)
394 {
395 AssertStmt(cArgs < RT_ELEMENTS(pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs),
396 cArgs = RT_ELEMENTS(pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs));
397 for (unsigned iArg = 1; iArg < cArgs; iArg++)
398 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs[iArg] = va_arg(va, uint64_t);
399 }
400 pVCpu->dbgf.s.cEvents = i + 1;
401
402 VMCPU_FF_SET(pVCpu, VMCPU_FF_DBGF);
403 va_end(va);
404 return VINF_EM_DBG_EVENT;
405 }
406
407 va_end(va);
408 return VINF_SUCCESS;
409}
410
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette