VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/DBGFAllBp.cpp@ 106443

Last change on this file since 106443 was 106369, checked in by vboxsync, 5 weeks ago

VMM/DBGF: Prepare DBGF to support ARMv8/A64 style breakpoints for the VMM debugger. This converts the x86 centric int3 naming to software breakpoint, bugref:10393 [x86 build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
/* $Id: DBGFAllBp.cpp 106369 2024-10-16 13:19:45Z vboxsync $ */
/** @file
 * DBGF - Debugger Facility, All Context breakpoint management part.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DBGF
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/log.h>
#include "DBGFInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <iprt/assert.h>

#include "DBGFInline.h"


#ifdef IN_RC
# error "You lucky person have the pleasure to implement the raw mode part for this!"
#endif


/**
 * Returns the internal breakpoint state for the given handle.
 *
 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
 * @param   pVM         The cross context VM structure pointer.
 * @param   hBp         The breakpoint handle to resolve.
 * @param   ppBpR0      Where to store the pointer to the ring-0 only part of the breakpoint
 *                      on success, optional.
 */
#ifdef IN_RING0
DECLINLINE(PDBGFBPINT) dbgfBpGetByHnd(PVMCC pVM, DBGFBP hBp, PDBGFBPINTR0 *ppBpR0)
#else
DECLINLINE(PDBGFBPINT) dbgfBpGetByHnd(PVMCC pVM, DBGFBP hBp)
#endif
{
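    /* A breakpoint handle encodes the chunk ID and the entry index within that chunk. */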
    uint32_t idChunk  = DBGF_BP_HND_GET_CHUNK_ID(hBp);
    uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);

    AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
    AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);

#ifdef IN_RING0
    PDBGFBPCHUNKR0 pBpChunk = &pVM->dbgfr0.s.aBpChunks[idChunk];
    AssertPtrReturn(pBpChunk->CTX_SUFF(paBpBaseShared), NULL);

    if (ppBpR0)
        *ppBpR0 = &pBpChunk->paBpBaseR0Only[idxEntry];
    return &pBpChunk->CTX_SUFF(paBpBaseShared)[idxEntry];

#elif defined(IN_RING3)
    PUVM pUVM = pVM->pUVM;
    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
    AssertPtrReturn(pBpChunk->CTX_SUFF(pBpBase), NULL);

    return &pBpChunk->CTX_SUFF(pBpBase)[idxEntry];

#else
# error "Unsupported context"
#endif
}


/**
 * Returns the pointer to the L2 table entry from the given index.
 *
 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
 * @param   pVM     The cross context VM structure.
 * @param   idxL2   The L2 table index to resolve.
 *
 * @note The content of the resolved L2 table entry is not validated!
 */
DECLINLINE(PCDBGFBPL2ENTRY) dbgfBpL2GetByIdx(PVMCC pVM, uint32_t idxL2)
{
    uint32_t idChunk  = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
    uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);

    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
    AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);

#ifdef IN_RING0
    PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pVM->dbgfr0.s.aBpL2TblChunks[idChunk];
    AssertPtrReturn(pL2Chunk->CTX_SUFF(paBpL2TblBaseShared), NULL);

    return &pL2Chunk->CTX_SUFF(paBpL2TblBaseShared)[idxEntry];
#elif defined(IN_RING3)
    PUVM pUVM = pVM->pUVM;
    PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
    AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
    AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);

    return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
#endif
}


#ifdef IN_RING0
/**
 * Returns the internal breakpoint owner state for the given handle.
 *
 * @returns Pointer to the internal ring-0 breakpoint owner state or NULL if the handle is invalid.
 * @param   pVM         The cross context VM structure.
 * @param   hBpOwner    The breakpoint owner handle to resolve.
 */
DECLINLINE(PCDBGFBPOWNERINTR0) dbgfR0BpOwnerGetByHnd(PVMCC pVM, DBGFBPOWNER hBpOwner)
{
    if (hBpOwner == NIL_DBGFBPOWNER)
        return NULL;

    AssertReturn(hBpOwner < DBGF_BP_OWNER_COUNT_MAX, NULL);

    PCDBGFBPOWNERINTR0 pBpOwnerR0 = &pVM->dbgfr0.s.paBpOwnersR0[hBpOwner];
    AssertReturn(pBpOwnerR0->cRefs > 1, NULL);

    return pBpOwnerR0;
}
#endif


/**
 * Executes the actions associated with the given breakpoint.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the register context for the CPU.
 * @param   hBp     The breakpoint handle which hit.
 * @param   pBp     The shared breakpoint state.
 * @param   pBpR0   The ring-0 only breakpoint state.
 */
#ifdef IN_RING0
DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, DBGFBP hBp, PDBGFBPINT pBp, PDBGFBPINTR0 pBpR0)
#else
DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, DBGFBP hBp, PDBGFBPINT pBp)
#endif
{
    uint64_t cHits = ASMAtomicIncU64(&pBp->Pub.cHits); RT_NOREF(cHits);

    RT_NOREF(pCtx);
#ifdef VBOX_VMM_TARGET_ARMV8
    LogFlow(("dbgfBpHit: hit breakpoint %u at %RGv cHits=0x%RX64\n", hBp, pCtx->Pc.u64, cHits));
#else
    LogFlow(("dbgfBpHit: hit breakpoint %u at %04x:%RGv cHits=0x%RX64\n", hBp, pCtx->cs.Sel, pCtx->rip, cHits));
#endif

    int rc = VINF_EM_DBG_BREAKPOINT;
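    /* Default to VINF_EM_DBG_BREAKPOINT; the breakpoint owner handling below may adjust the status. */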
#ifdef IN_RING0
    PCDBGFBPOWNERINTR0 pBpOwnerR0 = dbgfR0BpOwnerGetByHnd(pVM,
                                                          pBpR0->fInUse
                                                          ? pBpR0->hOwner
                                                          : NIL_DBGFBPOWNER);
    if (pBpOwnerR0)
    {
        AssertReturn(pBpOwnerR0->pfnBpHitR0, VERR_DBGF_BP_IPE_1);

        VBOXSTRICTRC rcStrict = VINF_SUCCESS;

        if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
            rcStrict = pBpOwnerR0->pfnBpHitR0(pVM, pVCpu->idCpu, pBpR0->pvUserR0, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
        if (rcStrict == VINF_SUCCESS)
        {
# ifdef VBOX_VMM_TARGET_ARMV8
            /** @todo Requires instruction interpreter. */
            AssertFailed();
# else
            uint8_t abInstr[DBGF_BP_INSN_MAX];
            RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
            rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                /* Replace the int3 with the original instruction byte. */
                abInstr[0] = pBp->Pub.u.Sw.Arch.x86.bOrg;
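                /* Let IEM execute the original instruction from the prefetched buffer so the
                   breakpoint patch in guest memory can stay in place. */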
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
                if (   rcStrict == VINF_SUCCESS
                    && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
                {
                    rcStrict = pBpOwnerR0->pfnBpHitR0(pVM, pVCpu->idCpu, pBpR0->pvUserR0, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_AFTER);
                    if (rcStrict == VINF_SUCCESS)
                        rc = VINF_SUCCESS;
                    else if (   rcStrict == VINF_DBGF_BP_HALT
                             || rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
                    {
                        pVCpu->dbgf.s.hBpActive = hBp;
                        if (rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
                            pVCpu->dbgf.s.fBpInvokeOwnerCallback = true;
                        else
                            pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
                    }
                    else /* Guru meditation. */
                        rc = VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
                }
                else
                    rc = VBOXSTRICTRC_VAL(rcStrict);
# endif
            }
        }
        else if (   rcStrict == VINF_DBGF_BP_HALT
                 || rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
        {
            pVCpu->dbgf.s.hBpActive = hBp;
            if (rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
                pVCpu->dbgf.s.fBpInvokeOwnerCallback = true;
            else
                pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
        }
        else /* Guru meditation. */
            rc = VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
    }
    else
    {
        pVCpu->dbgf.s.fBpInvokeOwnerCallback = true; /* Need to check this for ring-3 only owners. */
        pVCpu->dbgf.s.hBpActive = hBp;
    }
#else
    RT_NOREF(pVM);
    pVCpu->dbgf.s.fBpInvokeOwnerCallback = true;
    pVCpu->dbgf.s.hBpActive = hBp;
#endif

    return rc;
}


/**
 * Executes the actions associated with the given port I/O breakpoint.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fBefore     Flag whether the check is done before the access is carried out,
 *                      false if it is done after the access.
 * @param   fAccess     Access flags, see DBGFBPIOACCESS_XXX.
 * @param   uAddr       The address of the access, for port I/O this will hold the port number.
 * @param   uValue      The value read or written (the value for reads is only valid when DBGF_BP_F_HIT_EXEC_AFTER is set).
 * @param   hBp         The breakpoint handle which hit.
 * @param   pBp         The shared breakpoint state.
 * @param   pBpR0       The ring-0 only breakpoint state.
 */
#ifdef IN_RING0
DECLINLINE(VBOXSTRICTRC) dbgfBpPortIoHit(PVMCC pVM, PVMCPU pVCpu, bool fBefore, uint32_t fAccess, uint64_t uAddr, uint64_t uValue,
                                         DBGFBP hBp, PDBGFBPINT pBp, PDBGFBPINTR0 pBpR0)
#else
DECLINLINE(VBOXSTRICTRC) dbgfBpPortIoHit(PVMCC pVM, PVMCPU pVCpu, bool fBefore, uint32_t fAccess, uint64_t uAddr, uint64_t uValue,
                                         DBGFBP hBp, PDBGFBPINT pBp)
#endif
{
    ASMAtomicIncU64(&pBp->Pub.cHits);

    VBOXSTRICTRC rcStrict = VINF_EM_DBG_BREAKPOINT;
#ifdef IN_RING0
    PCDBGFBPOWNERINTR0 pBpOwnerR0 = dbgfR0BpOwnerGetByHnd(pVM,
                                                          pBpR0->fInUse
                                                          ? pBpR0->hOwner
                                                          : NIL_DBGFBPOWNER);
    if (pBpOwnerR0)
    {
        AssertReturn(pBpOwnerR0->pfnBpIoHitR0, VERR_DBGF_BP_IPE_1);
        rcStrict = pBpOwnerR0->pfnBpIoHitR0(pVM, pVCpu->idCpu, pBpR0->pvUserR0, hBp, &pBp->Pub,
                                            fBefore
                                            ? DBGF_BP_F_HIT_EXEC_BEFORE
                                            : DBGF_BP_F_HIT_EXEC_AFTER,
                                            fAccess, uAddr, uValue);
    }
    else
    {
        pVCpu->dbgf.s.fBpInvokeOwnerCallback = true; /* Need to check this for ring-3 only owners. */
        pVCpu->dbgf.s.hBpActive   = hBp;
        pVCpu->dbgf.s.fBpIoActive = true;
        pVCpu->dbgf.s.fBpIoBefore = fBefore;
        pVCpu->dbgf.s.uBpIoAddress = uAddr;
        pVCpu->dbgf.s.fBpIoAccess  = fAccess;
        pVCpu->dbgf.s.uBpIoValue   = uValue;
    }
#else
    /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
    if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
    {
        PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
        if (pBpOwner)
        {
            AssertReturn(pBpOwner->pfnBpIoHitR3, VERR_DBGF_BP_IPE_1);
            rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
                                              fBefore
                                              ? DBGF_BP_F_HIT_EXEC_BEFORE
                                              : DBGF_BP_F_HIT_EXEC_AFTER,
                                              fAccess, uAddr, uValue);
        }
    }
#endif
    if (   rcStrict == VINF_DBGF_BP_HALT
        || rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
    {
        pVCpu->dbgf.s.hBpActive = hBp;
        if (rcStrict == VINF_DBGF_R3_BP_OWNER_DEFER)
            pVCpu->dbgf.s.fBpInvokeOwnerCallback = true;
        else
            pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
        rcStrict = VINF_EM_DBG_BREAKPOINT;
    }
    else if (   rcStrict != VINF_SUCCESS
             && rcStrict != VINF_EM_DBG_BREAKPOINT)
        rcStrict = VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS; /* Guru meditation. */

    return rcStrict;
}


/**
 * Walks the L2 table starting at the given root index searching for the given key.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the register context for the CPU.
 * @param   idxL2Root   L2 table index of the table root.
 * @param   GCPtrKey    The key to search for.
 */
static int dbgfBpL2Walk(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, uint32_t idxL2Root, RTGCUINTPTR GCPtrKey)
{
    /** @todo We don't use the depth right now but abort the walk after a fixed number of levels. */
    uint8_t iDepth = 32;
    PCDBGFBPL2ENTRY pL2Entry = dbgfBpL2GetByIdx(pVM, idxL2Root);

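    /* The L2 table forms a binary search tree keyed on the guest breakpoint address;
       walk it iteratively, giving up after a fixed number of levels. */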
    while (RT_LIKELY(   iDepth-- > 0
                     && pL2Entry))
    {
        /* Make a copy of the entry before verification. */
        DBGFBPL2ENTRY L2Entry;
        L2Entry.u64GCPtrKeyAndBpHnd1       = ASMAtomicReadU64((volatile uint64_t *)&pL2Entry->u64GCPtrKeyAndBpHnd1);
        L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64((volatile uint64_t *)&pL2Entry->u64LeftRightIdxDepthBpHnd2);

        RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
        if (GCPtrKey == GCPtrL2Entry)
        {
            DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(L2Entry.u64GCPtrKeyAndBpHnd1, L2Entry.u64LeftRightIdxDepthBpHnd2);

            /* Query the internal breakpoint state from the handle. */
#ifdef IN_RING3
            PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp);
#else
            PDBGFBPINTR0 pBpR0 = NULL;
            PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
#endif
            if (   pBp
                && DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE)
#ifdef IN_RING3
                return dbgfBpHit(pVM, pVCpu, pCtx, hBp, pBp);
#else
                return dbgfBpHit(pVM, pVCpu, pCtx, hBp, pBp, pBpR0);
#endif

            /* The entry got corrupted, just abort. */
            return VERR_DBGF_BP_L2_LOOKUP_FAILED;
        }

        /* Not found, get to the next level. */
        uint32_t idxL2Next = (GCPtrKey < GCPtrL2Entry)
                           ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
                           : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
        /* Reaching the end of the tree means there is no breakpoint for this address, so it is a genuine guest trap. */
        if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
            return VINF_EM_RAW_GUEST_TRAP;

        pL2Entry = dbgfBpL2GetByIdx(pVM, idxL2Next);
    }

    return VERR_DBGF_BP_L2_LOOKUP_FAILED;
}


/**
 * Checks whether there is a port I/O breakpoint for the given range and access size.
 *
 * @returns VBox status code.
 * @retval  VINF_EM_DBG_BREAKPOINT means there is a breakpoint pending.
 * @retval  VINF_SUCCESS means everything is fine to continue.
 * @retval  anything else means a fatal error causing a guru meditation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uIoPort     The I/O port being accessed.
 * @param   fAccess     Appropriate DBGFBPIOACCESS_XXX.
 * @param   uValue      The value being written to or read from the device
 *                      (The value is only valid for a read when the
 *                      call is made after the access, writes are always valid).
 * @param   fBefore     Flag whether the check is done before the access is carried out,
 *                      false if it is done after the access.
 */
VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckPortIo(PVMCC pVM, PVMCPU pVCpu, RTIOPORT uIoPort,
                                             uint32_t fAccess, uint32_t uValue, bool fBefore)
{
    RT_NOREF(uValue); /** @todo Trigger only on specific values. */

    /* Don't trigger in single stepping mode. */
    if (pVCpu->dbgf.s.fSingleSteppingRaw)
        return VINF_SUCCESS;

#if defined(IN_RING0)
    uint32_t volatile *paBpLocPortIo = pVM->dbgfr0.s.CTX_SUFF(paBpLocPortIo);
#elif defined(IN_RING3)
    PUVM pUVM = pVM->pUVM;
    uint32_t volatile *paBpLocPortIo = pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo);
#else
# error "Unsupported host context"
#endif
    if (paBpLocPortIo)
    {
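        /* The port I/O L1 table has one entry per I/O port, so the port number is used as a direct index. */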
        const uint32_t u32Entry = ASMAtomicReadU32(&paBpLocPortIo[uIoPort]);
        if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
        {
            uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
            if (RT_LIKELY(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND))
            {
                DBGFBP hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);

                /* Query the internal breakpoint state from the handle. */
#ifdef IN_RING3
                PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp);
#else
                PDBGFBPINTR0 pBpR0 = NULL;
                PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
#endif
                if (   pBp
                    && DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO)
                {
                    if (   uIoPort >= pBp->Pub.u.PortIo.uPort
                        && uIoPort < pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts
                        && pBp->Pub.u.PortIo.fAccess & fAccess
                        && (   (   fBefore
                                && DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
                            || (   !fBefore
                                && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))))
#ifdef IN_RING3
                        return dbgfBpPortIoHit(pVM, pVCpu, fBefore, fAccess, uIoPort, uValue, hBp, pBp);
#else
                        return dbgfBpPortIoHit(pVM, pVCpu, fBefore, fAccess, uIoPort, uValue, hBp, pBp, pBpR0);
#endif
                    /* else: No matching port I/O breakpoint. */
                }
                else /* Invalid breakpoint handle or not a port I/O breakpoint. */
                    return VERR_DBGF_BP_L1_LOOKUP_FAILED;
            }
            else /* Some invalid type. */
                return VERR_DBGF_BP_L1_LOOKUP_FAILED;
        }
    }

    return VINF_SUCCESS;
}


#ifndef VBOX_VMM_TARGET_ARMV8 /** @todo for hardware break-/watchpoints */
/**
 * \#DB (Debug event) handler.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes are passed back to the host context.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pCtx            Pointer to the register context for the CPU.
 * @param   uDr6            The DR6 hypervisor register value.
 * @param   fAltStepping    Alternative stepping indicator.
 */
VMM_INT_DECL(int) DBGFTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCUINTREG uDr6, bool fAltStepping)
{
    /** @todo Intel docs say that X86_DR6_BS has the highest priority... */
    RT_NOREF(pCtx);

    /*
     * A breakpoint?
     */
    AssertCompile(X86_DR6_B0 == 1 && X86_DR6_B1 == 2 && X86_DR6_B2 == 4 && X86_DR6_B3 == 8);
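    /* DR6.B0..B3 flag which of the four hardware breakpoint slots triggered; the bit index matches
       the aHwBreakpoints entry, so uDr6 can be tested with RT_BIT_32(iBp) below. */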
    if (   (uDr6 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3))
        && pVM->dbgf.s.cEnabledHwBreakpoints > 0)
    {
        for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
            if (   ((uint32_t)uDr6 & RT_BIT_32(iBp))
                && pVM->dbgf.s.aHwBreakpoints[iBp].hBp != NIL_DBGFBP)
            {
                pVCpu->dbgf.s.hBpActive = pVM->dbgf.s.aHwBreakpoints[iBp].hBp;
                pVCpu->dbgf.s.fSingleSteppingRaw = false;
                LogFlow(("DBGFRZTrap01Handler: hit hw breakpoint %x at %04x:%RGv\n",
                         pVM->dbgf.s.aHwBreakpoints[iBp].hBp, pCtx->cs.Sel, pCtx->rip));

                return VINF_EM_DBG_BREAKPOINT;
            }
    }

    /*
     * Single step?
     * Are we single stepping or is it the guest?
     */
    if (   (uDr6 & X86_DR6_BS)
        && (pVCpu->dbgf.s.fSingleSteppingRaw || fAltStepping))
    {
        pVCpu->dbgf.s.fSingleSteppingRaw = false;
        LogFlow(("DBGFRZTrap01Handler: single step at %04x:%RGv\n", pCtx->cs.Sel, pCtx->rip));
        return VINF_EM_DBG_STEPPED;
    }

    LogFlow(("DBGFRZTrap01Handler: guest debug event %#x at %04x:%RGv!\n", (uint32_t)uDr6, pCtx->cs.Sel, pCtx->rip));
    return VINF_EM_RAW_GUEST_TRAP;
}
#endif


/**
 * \#BP (Breakpoint) handler.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes are passed back to the host context.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the register context for the CPU.
 */
VMM_INT_DECL(VBOXSTRICTRC) DBGFTrap03Handler(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
#if defined(IN_RING0)
    uint32_t volatile *paBpLocL1 = pVM->dbgfr0.s.CTX_SUFF(paBpLocL1);
#elif defined(IN_RING3)
    PUVM pUVM = pVM->pUVM;
    uint32_t volatile *paBpLocL1 = pUVM->dbgf.s.CTX_SUFF(paBpLocL1);
#else
# error "Unsupported host context"
#endif
    if (paBpLocL1)
    {
        RTGCPTR GCPtrBp;
#ifdef VBOX_VMM_TARGET_ARMV8
        int rc = VINF_SUCCESS;
        GCPtrBp = pCtx->Pc.u64;
#else
        int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtx->eflags.u, pCtx->ss.Sel, pCtx->cs.Sel, &pCtx->cs,
                                              pCtx->rip /* no -1 outside non-rawmode */, &GCPtrBp);
        AssertRCReturn(rc, rc);
#endif

        const uint16_t idxL1       = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrBp);
        const uint32_t u32L1Entry  = ASMAtomicReadU32(&paBpLocL1[idxL1]);

        LogFlowFunc(("GCPtrBp=%RGv idxL1=%u u32L1Entry=%#x\n", GCPtrBp, idxL1, u32L1Entry));
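        /* An L1 entry either holds a breakpoint handle directly or points to the root of an L2
           search tree when several breakpoints share the same L1 slot. */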
        if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
        {
            uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
            if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
            {
                DBGFBP hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);

                /* Query the internal breakpoint state from the handle. */
#ifdef IN_RING3
                PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp);
#else
                PDBGFBPINTR0 pBpR0 = NULL;
                PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
#endif
                if (   pBp
                    && DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE)
                {
                    if (pBp->Pub.u.Sw.GCPtr == (RTGCUINTPTR)GCPtrBp)
#ifdef IN_RING3
                        rc = dbgfBpHit(pVM, pVCpu, pCtx, hBp, pBp);
#else
                        rc = dbgfBpHit(pVM, pVCpu, pCtx, hBp, pBp, pBpR0);
#endif
                    else
                        rc = VINF_EM_RAW_GUEST_TRAP; /* Genuine guest trap. */
                }
                else /* Invalid breakpoint handle or not an int3 breakpoint. */
                    rc = VERR_DBGF_BP_L1_LOOKUP_FAILED;
            }
            else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
                rc = dbgfBpL2Walk(pVM, pVCpu, pCtx, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry),
                                  DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR((RTGCUINTPTR)GCPtrBp));
            else /* Some invalid type. */
                rc = VERR_DBGF_BP_L1_LOOKUP_FAILED;
        }
        else
            rc = VINF_EM_RAW_GUEST_TRAP; /* Genuine guest trap. */

        return rc;
    }

    return VINF_EM_RAW_GUEST_TRAP;
}
