VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp@ 87436

Last change on this file since 87436 was 86667, checked in by vboxsync, 4 years ago

include/VBox,VMM,DBGF: Some boilerplate for the new breakpoint manager which is disabled by default (can be built with VBOX_WITH_LOTS_OF_DBGF_BPS), bugref:9837 [scm fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 16.6 KB
Line 
1/* $Id: DBGFAll.cpp 86667 2020-10-21 15:19:41Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, All Context Code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include "DBGFInternal.h"
25#include <VBox/vmm/vmcc.h>
26#include <VBox/err.h>
27#include <iprt/assert.h>
28#include <iprt/asm.h>
29#include <iprt/stdarg.h>
30
31
/*
 * Check the read-only VM members.
 *
 * Compile-time asserts that the writable DBGF members (dbgf.s.*) and their
 * read-only aliases (dbgf.ro.*) have identical size and offset within VM,
 * so the two views refer to the very same storage.
 */
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmSoftIntBreakpoints, VM, dbgf.ro.bmSoftIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmHardIntBreakpoints, VM, dbgf.ro.bmHardIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.bmSelectedEvents,     VM, dbgf.ro.bmSelectedEvents);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cHardIntBreakpoints,  VM, dbgf.ro.cHardIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cSoftIntBreakpoints,  VM, dbgf.ro.cSoftIntBreakpoints);
AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cSelectedEvents,      VM, dbgf.ro.cSelectedEvents);
41
42
43/**
44 * Gets the hardware breakpoint configuration as DR7.
45 *
46 * @returns DR7 from the DBGF point of view.
47 * @param pVM The cross context VM structure.
48 */
49VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR7(PVM pVM)
50{
51 RTGCUINTREG uDr7 = X86_DR7_GD | X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
52#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
53 PDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[0];
54 unsigned cLeft = RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints);
55 while (cLeft-- > 0)
56 {
57 if ( pBp->enmType == DBGFBPTYPE_REG
58 && pBp->fEnabled)
59 {
60 static const uint8_t s_au8Sizes[8] =
61 {
62 X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_WORD, X86_DR7_LEN_BYTE,
63 X86_DR7_LEN_DWORD,X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_QWORD
64 };
65 uDr7 |= X86_DR7_G(pBp->u.Reg.iReg)
66 | X86_DR7_RW(pBp->u.Reg.iReg, pBp->u.Reg.fType)
67 | X86_DR7_LEN(pBp->u.Reg.iReg, s_au8Sizes[pBp->u.Reg.cb]);
68 }
69 pBp++;
70 }
71#else
72 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
73 {
74 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[i];
75
76 if ( pBp->hBp != NIL_DBGFBP
77 && pBp->fEnabled)
78 {
79 static const uint8_t s_au8Sizes[8] =
80 {
81 X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_WORD, X86_DR7_LEN_BYTE,
82 X86_DR7_LEN_DWORD,X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_QWORD
83 };
84 uDr7 |= X86_DR7_G(i)
85 | X86_DR7_RW(i, pBp->fType)
86 | X86_DR7_LEN(i, s_au8Sizes[pBp->cb]);
87 }
88 pBp++;
89 }
90#endif
91 return uDr7;
92}
93
94
95/**
96 * Gets the address of the hardware breakpoint number 0.
97 *
98 * @returns DR0 from the DBGF point of view.
99 * @param pVM The cross context VM structure.
100 */
101VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM)
102{
103#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
104 PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[0];
105 Assert(pBp->u.Reg.iReg == 0);
106 return pBp->u.Reg.GCPtr;
107#else
108 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[0];
109 return pBp->GCPtr;
110#endif
111}
112
113
114/**
115 * Gets the address of the hardware breakpoint number 1.
116 *
117 * @returns DR1 from the DBGF point of view.
118 * @param pVM The cross context VM structure.
119 */
120VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM)
121{
122#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
123 PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[1];
124 Assert(pBp->u.Reg.iReg == 1);
125 return pBp->u.Reg.GCPtr;
126#else
127 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[1];
128 return pBp->GCPtr;
129#endif
130}
131
132
133/**
134 * Gets the address of the hardware breakpoint number 2.
135 *
136 * @returns DR2 from the DBGF point of view.
137 * @param pVM The cross context VM structure.
138 */
139VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM)
140{
141#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
142 PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[2];
143 Assert(pBp->u.Reg.iReg == 2);
144 return pBp->u.Reg.GCPtr;
145#else
146 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[2];
147 return pBp->GCPtr;
148#endif
149}
150
151
152/**
153 * Gets the address of the hardware breakpoint number 3.
154 *
155 * @returns DR3 from the DBGF point of view.
156 * @param pVM The cross context VM structure.
157 */
158VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM)
159{
160#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
161 PCDBGFBP pBp = &pVM->dbgf.s.aHwBreakpoints[3];
162 Assert(pBp->u.Reg.iReg == 3);
163 return pBp->u.Reg.GCPtr;
164#else
165 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[3];
166 return pBp->GCPtr;
167#endif
168}
169
170
171/**
172 * Checks if any of the hardware breakpoints are armed.
173 *
174 * @returns true if armed, false if not.
175 * @param pVM The cross context VM structure.
176 * @remarks Don't call this from CPUMRecalcHyperDRx!
177 */
178VMM_INT_DECL(bool) DBGFBpIsHwArmed(PVM pVM)
179{
180 return pVM->dbgf.s.cEnabledHwBreakpoints > 0;
181}
182
183
184/**
185 * Checks if any of the hardware I/O breakpoints are armed.
186 *
187 * @returns true if armed, false if not.
188 * @param pVM The cross context VM structure.
189 * @remarks Don't call this from CPUMRecalcHyperDRx!
190 */
191VMM_INT_DECL(bool) DBGFBpIsHwIoArmed(PVM pVM)
192{
193 return pVM->dbgf.s.cEnabledHwIoBreakpoints > 0;
194}
195
196
197/**
198 * Checks if any INT3 breakpoints are armed.
199 *
200 * @returns true if armed, false if not.
201 * @param pVM The cross context VM structure.
202 * @remarks Don't call this from CPUMRecalcHyperDRx!
203 */
204VMM_INT_DECL(bool) DBGFBpIsInt3Armed(PVM pVM)
205{
206#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
207 return pVM->dbgf.s.cEnabledInt3Breakpoints > 0;
208#else
209 RT_NOREF(pVM);
210 return false; /** @todo */
211#endif
212}
213
214
215/**
216 * Checks I/O access for guest or hypervisor breakpoints.
217 *
218 * @returns Strict VBox status code
219 * @retval VINF_SUCCESS no breakpoint.
220 * @retval VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
221 * @retval VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
222 * been updated appropriately.
223 *
224 * @param pVM The cross context VM structure.
225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
226 * @param pCtx The CPU context for the calling EMT.
227 * @param uIoPort The I/O port being accessed.
228 * @param cbValue The size/width of the access, in bytes.
229 */
230VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
231{
232 uint32_t const uIoPortFirst = uIoPort;
233 uint32_t const uIoPortLast = uIoPortFirst + cbValue - 1;
234
235 /*
236 * Check hyper breakpoints first as the VMM debugger has priority over
237 * the guest.
238 */
239 if (pVM->dbgf.s.cEnabledHwIoBreakpoints > 0)
240 {
241 for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
242 {
243#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
244 if ( pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
245 && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
246 && pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG )
247 {
248 uint8_t cbReg = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
249 uint64_t uDrXFirst = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.GCPtr & ~(uint64_t)(cbReg - 1);
250 uint64_t uDrXLast = uDrXFirst + cbReg - 1;
251 if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
252 {
253 /* (See also DBGFRZTrap01Handler.) */
254 pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
255 pVCpu->dbgf.s.fSingleSteppingRaw = false;
256
257 LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
258 pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
259 return VINF_EM_DBG_BREAKPOINT;
260 }
261 }
262#else
263 PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[iBp];
264
265 if ( pBp->fType == X86_DR7_RW_IO
266 && pBp->hBp == NIL_DBGFBP
267 && pBp->fEnabled)
268 {
269 uint8_t cbReg = pBp->cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
270 uint64_t uDrXFirst = pBp->GCPtr & ~(uint64_t)(cbReg - 1);
271 uint64_t uDrXLast = uDrXFirst + cbReg - 1;
272 if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
273 {
274 /* (See also DBGFRZTrap01Handler.) */
275 pVCpu->dbgf.s.hBpActive = pBp->hBp;
276 pVCpu->dbgf.s.fSingleSteppingRaw = false;
277
278 LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
279 iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
280 return VINF_EM_DBG_BREAKPOINT;
281 }
282 }
283#endif
284 }
285 }
286
287 /*
288 * Check the guest.
289 */
290 uint32_t const uDr7 = pCtx->dr[7];
291 if ( (uDr7 & X86_DR7_ENABLED_MASK)
292 && X86_DR7_ANY_RW_IO(uDr7)
293 && (pCtx->cr4 & X86_CR4_DE) )
294 {
295 for (unsigned iBp = 0; iBp < 4; iBp++)
296 {
297 if ( (uDr7 & X86_DR7_L_G(iBp))
298 && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
299 {
300 /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
301 static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
302 uint8_t cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
303 uint64_t uDrXFirst = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
304 uint64_t uDrXLast = uDrXFirst + cbInvAlign;
305
306 if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
307 {
308 /*
309 * Update DR6 and DR7.
310 *
311 * See "AMD64 Architecture Programmer's Manual Volume 2",
312 * chapter 13.1.1.3 for details on DR6 bits. The basics is
313 * that the B0..B3 bits are always cleared while the others
314 * must be cleared by software.
315 *
316 * The following sub chapters says the GD bit is always
317 * cleared when generating a #DB so the handler can safely
318 * access the debug registers.
319 */
320 pCtx->dr[6] &= ~X86_DR6_B_MASK;
321 pCtx->dr[6] |= X86_DR6_B(iBp);
322 pCtx->dr[7] &= ~X86_DR7_GD;
323 LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
324 iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
325 return VINF_EM_RAW_GUEST_TRAP;
326 }
327 }
328 }
329 }
330 return VINF_SUCCESS;
331}
332
333
334/**
335 * Returns the single stepping state for a virtual CPU.
336 *
337 * @returns stepping (true) or not (false).
338 *
339 * @param pVCpu The cross context virtual CPU structure.
340 */
341VMM_INT_DECL(bool) DBGFIsStepping(PVMCPU pVCpu)
342{
343 return pVCpu->dbgf.s.fSingleSteppingRaw;
344}
345
346
347/**
348 * Checks if the specified generic event is enabled or not.
349 *
350 * @returns true / false.
351 * @param pVM The cross context VM structure.
352 * @param enmEvent The generic event being raised.
353 * @param uEventArg The argument of that event.
354 */
355DECLINLINE(bool) dbgfEventIsGenericWithArgEnabled(PVM pVM, DBGFEVENTTYPE enmEvent, uint64_t uEventArg)
356{
357 if (DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
358 {
359 switch (enmEvent)
360 {
361 case DBGFEVENT_INTERRUPT_HARDWARE:
362 AssertReturn(uEventArg < 256, false);
363 return ASMBitTest(pVM->dbgf.s.bmHardIntBreakpoints, (uint32_t)uEventArg);
364
365 case DBGFEVENT_INTERRUPT_SOFTWARE:
366 AssertReturn(uEventArg < 256, false);
367 return ASMBitTest(pVM->dbgf.s.bmSoftIntBreakpoints, (uint32_t)uEventArg);
368
369 default:
370 return true;
371
372 }
373 }
374 return false;
375}
376
377
378/**
379 * Raises a generic debug event if enabled and not being ignored.
380 *
381 * @returns Strict VBox status code.
382 * @retval VINF_EM_DBG_EVENT if the event was raised and the caller should
383 * return ASAP to the debugger (via EM). We set VMCPU_FF_DBGF so, it
384 * is okay not to pass this along in some situations.
385 * @retval VINF_SUCCESS if the event was disabled or ignored.
386 *
387 * @param pVM The cross context VM structure.
388 * @param pVCpu The cross context virtual CPU structure.
389 * @param enmEvent The generic event being raised.
390 * @param enmCtx The context in which this event is being raised.
391 * @param cArgs Number of arguments (0 - 6).
392 * @param ... Event arguments.
393 *
394 * @thread EMT(pVCpu)
395 */
396VMM_INT_DECL(VBOXSTRICTRC) DBGFEventGenericWithArgs(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent, DBGFEVENTCTX enmCtx,
397 unsigned cArgs, ...)
398{
399 Assert(cArgs < RT_ELEMENTS(pVCpu->dbgf.s.aEvents[0].Event.u.Generic.auArgs));
400
401 /*
402 * Is it enabled.
403 */
404 va_list va;
405 va_start(va, cArgs);
406 uint64_t uEventArg0 = cArgs ? va_arg(va, uint64_t) : 0;
407 if (dbgfEventIsGenericWithArgEnabled(pVM, enmEvent, uEventArg0))
408 {
409 /*
410 * Any events on the stack. Should the incoming event be ignored?
411 */
412 uint64_t const rip = CPUMGetGuestRIP(pVCpu);
413 uint32_t i = pVCpu->dbgf.s.cEvents;
414 if (i > 0)
415 {
416 while (i-- > 0)
417 {
418 if ( pVCpu->dbgf.s.aEvents[i].Event.enmType == enmEvent
419 && pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE
420 && pVCpu->dbgf.s.aEvents[i].rip == rip)
421 {
422 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_RESTORABLE;
423 va_end(va);
424 return VINF_SUCCESS;
425 }
426 Assert(pVCpu->dbgf.s.aEvents[i].enmState != DBGFEVENTSTATE_CURRENT);
427 }
428
429 /*
430 * Trim the event stack.
431 */
432 i = pVCpu->dbgf.s.cEvents;
433 while (i-- > 0)
434 {
435 if ( pVCpu->dbgf.s.aEvents[i].rip == rip
436 && ( pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_RESTORABLE
437 || pVCpu->dbgf.s.aEvents[i].enmState == DBGFEVENTSTATE_IGNORE) )
438 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_IGNORE;
439 else
440 {
441 if (i + 1 != pVCpu->dbgf.s.cEvents)
442 memmove(&pVCpu->dbgf.s.aEvents[i], &pVCpu->dbgf.s.aEvents[i + 1],
443 (pVCpu->dbgf.s.cEvents - i) * sizeof(pVCpu->dbgf.s.aEvents));
444 pVCpu->dbgf.s.cEvents--;
445 }
446 }
447
448 i = pVCpu->dbgf.s.cEvents;
449 AssertStmt(i < RT_ELEMENTS(pVCpu->dbgf.s.aEvents), i = RT_ELEMENTS(pVCpu->dbgf.s.aEvents) - 1);
450 }
451
452 /*
453 * Push the event.
454 */
455 pVCpu->dbgf.s.aEvents[i].enmState = DBGFEVENTSTATE_CURRENT;
456 pVCpu->dbgf.s.aEvents[i].rip = rip;
457 pVCpu->dbgf.s.aEvents[i].Event.enmType = enmEvent;
458 pVCpu->dbgf.s.aEvents[i].Event.enmCtx = enmCtx;
459 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.cArgs = cArgs;
460 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs[0] = uEventArg0;
461 if (cArgs > 1)
462 {
463 AssertStmt(cArgs < RT_ELEMENTS(pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs),
464 cArgs = RT_ELEMENTS(pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs));
465 for (unsigned iArg = 1; iArg < cArgs; iArg++)
466 pVCpu->dbgf.s.aEvents[i].Event.u.Generic.auArgs[iArg] = va_arg(va, uint64_t);
467 }
468 pVCpu->dbgf.s.cEvents = i + 1;
469
470 VMCPU_FF_SET(pVCpu, VMCPU_FF_DBGF);
471 va_end(va);
472 return VINF_EM_DBG_EVENT;
473 }
474
475 va_end(va);
476 return VINF_SUCCESS;
477}
478
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette