VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@95421

Last change on this file since 95421 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: DBGFR3Bp.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
20 *
21 * The purpose of the debugger facility's breakpoint manager is to efficiently manage
22 * large numbers of breakpoints for various use cases such as DTrace-like operations
23 * or execution flow tracing. Especially execution flow tracing can
24 * require thousands of breakpoints, which need to be managed efficiently so as not to slow
25 * down guest operation too much. Before the rewrite started at the end of 2020, DBGF could
26 * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
27 * manager is supposed to be able to handle up to one million breakpoints.
28 *
29 * @see grp_dbgf
30 *
31 *
32 * @section sec_dbgf_bp_owner Breakpoint owners
33 *
34 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
35 * callback assigned, which are called whenever a breakpoint with the owner assigned is hit.
36 * The common part of the owner is managed by a single table mapped into both ring-0
37 * and ring-3, with the handle being the index into the table. This allows resolving
38 * the handle to the internal structure efficiently. Searching for a free entry is
39 * done using a bitmap indicating free and occupied entries. For the optional
40 * ring-0 owner part there is a separate ring-0 only table for security reasons.
41 *
42 * The callback of the owner can be used to gather and log guest state information
43 * and decide whether to continue guest execution or stop and drop into the debugger.
44 * Breakpoints which don't have an owner assigned will always drop the VM right into
45 * the debugger.
46 *
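 * The sketch below illustrates the idea of an owner callback; it is deliberately
 * simplified and uses hypothetical names (the authoritative ring-3 prototype is
 * FNDBGFBPHIT in VBox/vmm/dbgf.h, which returns a strict status code telling DBGF
 * whether to continue or halt rather than a plain bool):
 * @code
 *  // Hypothetical, simplified owner callback: log the hit and decide whether
 *  // the guest should keep running or the VM should drop into the debugger.
 *  static bool exampleBpHitCallback(uint32_t idCpu, uint64_t GCPtrBp, void *pvUser)
 *  {
 *      uint64_t *pcHitsSeen = (uint64_t *)pvUser; // opaque user argument supplied when arming the breakpoint
 *      *pcHitsSeen += 1;
 *      LogRel(("Breakpoint hit on VCPU %u at %#RX64 (%RU64 hits so far)\n", idCpu, GCPtrBp, *pcHitsSeen));
 *      return *pcHitsSeen < 100; // true = continue guest execution, false = enter the debugger
 *  }
 * @endcode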
47 *
48 * @section sec_dbgf_bp_bps Breakpoints
49 *
50 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
51 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint,
52 * like trigger conditions, type, owner, etc. If an owner is given, an optional opaque user argument
53 * can be supplied which is passed to the respective owner callback. For owners with ring-0 callbacks
54 * a dedicated ring-0 table is kept which holds the optional ring-0 user arguments.
55 *
56 * To keep memory consumption under control while still supporting large numbers of
57 * breakpoints, the table is split into fixed-size chunks; the chunk index and the index
58 * into the chunk can be derived from the handle with only a few logical operations.
59 *
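 * A minimal, self-contained sketch of that decomposition (the names and the 64K
 * chunk size below are illustrative only; the real code uses the DBGF_BP_HND_*
 * macros and DBGF_BP_COUNT_PER_CHUNK from DBGFInternal.h):
 * @code
 *  // Assumed handle layout: low 16 bits = entry index inside the chunk,
 *  // remaining upper bits = chunk index (65536 breakpoints per chunk).
 *  static inline uint32_t exampleBpHndGetChunkId(uint32_t hBp) { return hBp >> 16; }
 *  static inline uint32_t exampleBpHndGetEntry(uint32_t hBp)   { return hBp & 0xffff; }
 * @endcode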
60 *
61 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
62 *
63 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
64 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
65 * as possible. The following scheme is employed to achieve this:
66 *
67 * @verbatim
68 *           7   6   5   4   3   2   1   0
69 *         +---+---+---+---+---+---+---+---+
70 *         |   |   |   |   |   |   |   |   |  BP address (bytes)
71 *         +---+---+---+---+---+---+---+---+
72 *          \_____________________/ \_____/
73 *                     |               |
74 *  upper 6 bytes:     |               |  lowest 16 bits:
75 *  (L2 BST search key)|               |  (index into the L1 table)
76 *                     v               v
77 *
78 *    L1 table (65536 entries, 4 bytes each)
79 *   +-----+---------------------------------------------+
80 *   |  0  | empty         -> forward event to the guest |
81 *   |  1  | direct handle -> BP table entry, e.g. hBp 1 |
82 *   |  2  | idxL2         -> root of an L2 idx BST      |
83 *   | ... | ...                                         |
84 *   +-----+---------------------------------------------+
85 *
86 *    L2 idx BST (keyed on the upper 6 bytes of the address)
87 *            +--------+
88 *            |  root  | ------> BP table entry, e.g. hBp <m>
89 *            +--------+
90 *             /      \
91 *     +--------+    +--------+
92 *     |  leaf  |    |  leaf  | ----> BP table entry, e.g. hBp <n>
93 *     +--------+    +--------+
94 * @endverbatim
95 *
96 * -# Take the lowest 16 bits of the breakpoint address and use them as a direct index
97 * into the L1 table. The L1 table is contiguous and consists of 4 byte entries,
98 * resulting in 256KiB of memory used. The topmost 4 bits indicate how to proceed
99 * and the meaning of the remaining 28 bits depends on them:
100 * - A type value of 0 means no breakpoint is registered for the matching lowest 16 bits,
101 * so the event is forwarded to the guest.
102 * - A 1 in the topmost 4 bits means that the remaining 28 bits directly denote a breakpoint
103 * handle which can be resolved by extracting the chunk index and the index into the chunk
104 * of the global breakpoint table. If the address matches, the breakpoint is processed
105 * according to its configuration; otherwise the event is forwarded to the guest.
106 * - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
107 * for the matching lowest 16 bits and the search must continue in the L2 table, with the
108 * remaining 28 bits acting as an index into the L2 table denoting the search root (see the sketch below).
109 * -# The L2 table consists of multiple index-based binary search trees, one for each reference
110 * from the L1 table. The key for each tree is the upper 6 bytes of the breakpoint address.
111 * A tree is traversed until either a matching address is found and the breakpoint is
112 * processed, or the event is forwarded to the guest if the search is unsuccessful.
113 * Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory usage.
114 *
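 * The fragment below sketches the fast-path decision described above. It is
 * illustrative only and uses hypothetical helper names and an assumed entry
 * layout (bits 31:28 = type, bits 27:0 = payload); the real decoding macros
 * live in DBGFInternal.h:
 * @code
 *  // Returns the payload of the matching L1 entry, or 0 if the event must be
 *  // forwarded to the guest. paBpLocL1 is the 65536-entry L1 table.
 *  static uint32_t exampleResolveL1(uint32_t const *paBpLocL1, uint64_t GCPtrBp)
 *  {
 *      uint32_t const u32Entry = paBpLocL1[(uint16_t)GCPtrBp]; // 1. lowest 16 bits index the L1 table
 *      switch (u32Entry >> 28)                                 // 2. topmost 4 bits select how to proceed
 *      {
 *          case 1:  return u32Entry & 0x0fffffff;              //    direct breakpoint handle
 *          case 2:  return u32Entry & 0x0fffffff;              //    L2 BST root index, walk the tree next
 *          default: return 0;                                  //    nothing registered, forward to the guest
 *      }
 *  }
 * @endcode
 *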
115 * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
116 *
117 * Because of the limited number of I/O ports available (65536), a single table with 65536 entries,
118 * each 4 bytes big, will be allocated. This amounts to an additional 256KiB of memory being used as soon as
119 * an I/O breakpoint is enabled. The entries contain the breakpoint handle directly, allowing only one breakpoint
120 * per port, which is a limitation we accept for now to keep things relatively simple.
121 * When there is at least one I/O breakpoint active, IOM will be notified and will afterwards call the DBGF API
122 * whenever the guest does an I/O port access to decide whether a breakpoint was hit. This keeps the overhead small
123 * when no I/O port breakpoint is enabled.
124 *
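 * Since an entry holds the breakpoint handle directly, the check on an I/O access
 * boils down to a single array read; an illustrative (hypothetical) form:
 * @code
 *  // Illustrative only: an entry value of 0 is assumed to mean "no breakpoint
 *  // armed for this port", anything else encodes the breakpoint handle.
 *  static bool examplePortIoHasBp(uint32_t const *paBpLocPortIo, uint16_t uPort)
 *  {
 *      return paBpLocPortIo[uPort] != 0;
 *  }
 * @endcode
 *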
125 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
126 *
127 * - The assumption for this approach is that the lowest 16 bits of the breakpoint address are
128 * hopefully the most varying ones across breakpoints, so the traversal
129 * can skip the L2 table in most cases. Even if the L2 table must be consulted, the
130 * individual trees should be quite shallow, resulting in low overhead when walking them
131 * (though only real world testing can confirm this assumption).
132 * - Index-based tables and trees are used instead of pointers because the tables
133 * are always mapped into ring-0 and ring-3 with different base addresses.
134 * - Efficient breakpoint allocation is done by having a global bitmap indicating free
135 * and occupied breakpoint entries. The same applies to the L2 BST table.
136 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
137 * might still access them (want to try a lockless approach first using
138 * atomic updates, have to resort to locking if that turns out to be too difficult).
139 * - Each BP entry is supposed to be 64 bytes big and each chunk should contain 65536
140 * breakpoints, which results in 4MiB for each chunk plus the allocation bitmap.
141 * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
142 * and must do strict bounds checking before accessing anything. The L1 and L2 tables
143 * are written to from ring-3 only. The same goes for the breakpoint table, with the
144 * exception being the opaque user argument for ring-0 which is stored in ring-0 only
145 * memory.
146 */
147
148
149/*********************************************************************************************************************************
150* Header Files *
151*********************************************************************************************************************************/
152#define LOG_GROUP LOG_GROUP_DBGF
153#define VMCPU_INCL_CPUM_GST_CTX
154#include <VBox/vmm/dbgf.h>
155#include <VBox/vmm/selm.h>
156#include <VBox/vmm/iem.h>
157#include <VBox/vmm/mm.h>
158#include <VBox/vmm/iom.h>
159#include <VBox/vmm/hm.h>
160#include "DBGFInternal.h"
161#include <VBox/vmm/vm.h>
162#include <VBox/vmm/uvm.h>
163
164#include <VBox/err.h>
165#include <VBox/log.h>
166#include <iprt/assert.h>
167#include <iprt/mem.h>
168
169#include "DBGFInline.h"
170
171
172/*********************************************************************************************************************************
173* Structures and Typedefs *
174*********************************************************************************************************************************/
175
176
177/*********************************************************************************************************************************
178* Internal Functions *
179*********************************************************************************************************************************/
180RT_C_DECLS_BEGIN
181RT_C_DECLS_END
182
183
184/**
185 * Initializes the breakpoint management.
186 *
187 * @returns VBox status code.
188 * @param pUVM The user mode VM handle.
189 */
190DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
191{
192 PVM pVM = pUVM->pVM;
193
194 //pUVM->dbgf.s.paBpOwnersR3 = NULL;
195 //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
196
197 /* Init hardware breakpoint states. */
198 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
199 {
200 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
201
202 AssertCompileSize(DBGFBP, sizeof(uint32_t));
203 pHwBp->hBp = NIL_DBGFBP;
204 //pHwBp->fEnabled = false;
205 }
206
207 /* Now the global breakpoint table chunks. */
208 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
209 {
210 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
211
212 //pBpChunk->pBpBaseR3 = NULL;
213 //pBpChunk->pbmAlloc = NULL;
214 //pBpChunk->cBpsFree = 0;
215 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
216 }
217
218 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
219 {
220 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
221
222 //pL2Chunk->pL2BaseR3 = NULL;
223 //pL2Chunk->pbmAlloc = NULL;
224 //pL2Chunk->cFree = 0;
225 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
226 }
227
228 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
229 //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
230 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
231 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
232}
233
234
235/**
236 * Terminates the breakpoint management.
237 *
238 * @returns VBox status code.
239 * @param pUVM The user mode VM handle.
240 */
241DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
242{
243 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
244 {
245 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
246 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
247 }
248
249 /* Free all allocated chunk bitmaps (the chunks itself are destroyed during ring-0 VM destruction). */
250 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
251 {
252 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
253
254 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
255 {
256 AssertPtr(pBpChunk->pbmAlloc);
257 RTMemFree((void *)pBpChunk->pbmAlloc);
258 pBpChunk->pbmAlloc = NULL;
259 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
260 }
261 }
262
263 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
264 {
265 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
266
267 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
268 {
269 AssertPtr(pL2Chunk->pbmAlloc);
270 RTMemFree((void *)pL2Chunk->pbmAlloc);
271 pL2Chunk->pbmAlloc = NULL;
272 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
273 }
274 }
275
276 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
277 {
278 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
279 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
280 }
281
282 return VINF_SUCCESS;
283}
284
285
286/**
287 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
288 */
289static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
290{
291 RT_NOREF(pvUser);
292
293 VMCPU_ASSERT_EMT(pVCpu);
294 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
295
296 /*
297 * The initialization will be done on EMT(0). It is possible that multiple
298 * initialization attempts are done because dbgfR3BpEnsureInit() can be called
299 * from racing non EMT threads when trying to set a breakpoint for the first time.
300 * Just fake success if the L1 is already present which means that a previous rendezvous
301 * successfully initialized the breakpoint manager.
302 */
303 PUVM pUVM = pVM->pUVM;
304 if ( pVCpu->idCpu == 0
305 && !pUVM->dbgf.s.paBpLocL1R3)
306 {
307 if (!SUPR3IsDriverless())
308 {
309 DBGFBPINITREQ Req;
310 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
311 Req.Hdr.cbReq = sizeof(Req);
312 Req.paBpLocL1R3 = NULL;
313 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
314 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
315 pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
316 }
317 else
318 {
319 /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
320 uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
321 pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
322 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
323 }
324 }
325
326 return VINF_SUCCESS;
327}
328
329
330/**
331 * Ensures that the breakpoint manager is fully initialized.
332 *
333 * @returns VBox status code.
334 * @param pUVM The user mode VM handle.
335 *
336 * @thread Any thread.
337 */
338static int dbgfR3BpEnsureInit(PUVM pUVM)
339{
340 /* If the L1 lookup table is allocated initialization succeeded before. */
341 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
342 return VINF_SUCCESS;
343
344 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
345 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
346}
347
348
349/**
350 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
351 */
352static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
353{
354 RT_NOREF(pvUser);
355
356 VMCPU_ASSERT_EMT(pVCpu);
357 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
358
359 /*
360 * The initialization will be done on EMT(0). It is possible that multiple
361 * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
362 * from racing non EMT threads when trying to set a breakpoint for the first time.
363 * Just fake success if the L1 is already present which means that a previous rendezvous
364 * successfully initialized the breakpoint manager.
365 */
366 PUVM pUVM = pVM->pUVM;
367 if ( pVCpu->idCpu == 0
368 && !pUVM->dbgf.s.paBpLocPortIoR3)
369 {
370 if (!SUPR3IsDriverless())
371 {
372 DBGFBPINITREQ Req;
373 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
374 Req.Hdr.cbReq = sizeof(Req);
375 Req.paBpLocL1R3 = NULL;
376 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
377 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
378 pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
379 }
380 else
381 {
382 /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
383 uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
384 pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
385 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
386 }
387 }
388
389 return VINF_SUCCESS;
390}
391
392
393/**
394 * Ensures that the breakpoint manager is initialized to handle I/O port breakpoints.
395 *
396 * @returns VBox status code.
397 * @param pUVM The user mode VM handle.
398 *
399 * @thread Any thread.
400 */
401static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
402{
403 /* If the L1 lookup table is allocated initialization succeeded before. */
404 if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
405 return VINF_SUCCESS;
406
407 /* Ensure that the breakpoint manager is initialized. */
408 int rc = dbgfR3BpEnsureInit(pUVM);
409 if (RT_FAILURE(rc))
410 return rc;
411
412 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
413 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
414}
415
416
417/**
418 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
419 */
420static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
421{
422 RT_NOREF(pvUser);
423
424 VMCPU_ASSERT_EMT(pVCpu);
425 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
426
427 /*
428 * The initialization will be done on EMT(0). It is possible that multiple
429 * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
430 * from racing non EMT threads when trying to create a breakpoint owner for the first time.
431 * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
432 * successfully initialized the breakpoint owner table.
433 */
434 int rc = VINF_SUCCESS;
435 PUVM pUVM = pVM->pUVM;
436 if ( pVCpu->idCpu == 0
437 && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
438 {
439 AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
440 pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
441 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
442 {
443 if (!SUPR3IsDriverless())
444 {
445 DBGFBPOWNERINITREQ Req;
446 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
447 Req.Hdr.cbReq = sizeof(Req);
448 Req.paBpOwnerR3 = NULL;
449 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
450 if (RT_SUCCESS(rc))
451 {
452 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
453 return VINF_SUCCESS;
454 }
455 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
456 }
457 else
458 {
459 /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
460 uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
461 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)RTMemPageAllocZ(cbBpOwnerR3);
462 if (pUVM->dbgf.s.paBpOwnersR3)
463 return VINF_SUCCESS;
464 AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
465 rc = VERR_NO_PAGE_MEMORY;
466 }
467
468 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
469 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
470 }
471 else
472 rc = VERR_NO_MEMORY;
473 }
474
475 return rc;
476}
477
478
479/**
480 * Ensures that the breakpoint owner table is fully initialized.
481 *
482 * @returns VBox status code.
483 * @param pUVM The user mode VM handle.
484 *
485 * @thread Any thread.
486 */
487static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
488{
489 /* If the allocation bitmap is allocated initialization succeeded before. */
490 if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
491 return VINF_SUCCESS;
492
493 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
494 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
495}
496
497
498/**
499 * Retains the given breakpoint owner handle for use.
500 *
501 * @returns VBox status code.
502 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
503 * @param pUVM The user mode VM handle.
504 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFBPOWNER is accepted without doing anything.
505 * @param fIo Flag whether the owner must have the I/O handler set because it is used by an I/O breakpoint.
506 */
507DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
508{
509 if (hBpOwner == NIL_DBGFBPOWNER)
510 return VINF_SUCCESS;
511
512 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
513 if (pBpOwner)
514 {
515 AssertReturn ( ( fIo
516 && pBpOwner->pfnBpIoHitR3)
517 || ( !fIo
518 && pBpOwner->pfnBpHitR3),
519 VERR_INVALID_HANDLE);
520 ASMAtomicIncU32(&pBpOwner->cRefs);
521 return VINF_SUCCESS;
522 }
523
524 return VERR_INVALID_HANDLE;
525}
526
527
528/**
529 * Releases the given breakpoint owner handle.
530 *
531 * @returns VBox status code.
532 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
533 * @param pUVM The user mode VM handle.
534 * @param hBpOwner The breakpoint owner handle to release, NIL_DBGFBPOWNER is accepted without doing anything.
535 */
536DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
537{
538 if (hBpOwner == NIL_DBGFBPOWNER)
539 return VINF_SUCCESS;
540
541 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
542 if (pBpOwner)
543 {
544 Assert(pBpOwner->cRefs > 1);
545 ASMAtomicDecU32(&pBpOwner->cRefs);
546 return VINF_SUCCESS;
547 }
548
549 return VERR_INVALID_HANDLE;
550}
551
552
553/**
554 * Returns the internal breakpoint state for the given handle.
555 *
556 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
557 * @param pUVM The user mode VM handle.
558 * @param hBp The breakpoint handle to resolve.
559 */
560DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
561{
562 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
563 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
564
565 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
566 AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
567
568 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
569 AssertReturn(pBpChunk->idChunk == idChunk, NULL);
570 AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
571 AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
572
573 return &pBpChunk->pBpBaseR3[idxEntry];
574}
575
576
577/**
578 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
579 */
580static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
581{
582 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
583
584 VMCPU_ASSERT_EMT(pVCpu);
585 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
586
587 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
588
589 PUVM pUVM = pVM->pUVM;
590 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
591
592 AssertReturn( pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
593 || pBpChunk->idChunk == idChunk,
594 VERR_DBGF_BP_IPE_2);
595
596 /*
597 * The initialization will be done on EMT(0). It is possible that multiple
598 * allocation attempts are done when multiple racing non EMT threads try to
599 * allocate a breakpoint and a new chunk needs to be allocated.
600 * Ignore the request and succeed if the chunk is allocated meaning that a
601 * previous rendezvous successfully allocated the chunk.
602 */
603 int rc = VINF_SUCCESS;
604 if ( pVCpu->idCpu == 0
605 && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
606 {
607 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
608 AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
609 void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
610 if (RT_LIKELY(pbmAlloc))
611 {
612 if (!SUPR3IsDriverless())
613 {
614 DBGFBPCHUNKALLOCREQ Req;
615 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
616 Req.Hdr.cbReq = sizeof(Req);
617 Req.idChunk = idChunk;
618 Req.pChunkBaseR3 = NULL;
619 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
620 if (RT_SUCCESS(rc))
621 pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
622 else
623 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
624 }
625 else
626 {
627 /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
628 uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
629 pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
630 AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
631 }
632 if (RT_SUCCESS(rc))
633 {
634 pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
635 pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
636 pBpChunk->idChunk = idChunk;
637 return VINF_SUCCESS;
638 }
639
640 RTMemFree(pbmAlloc);
641 }
642 else
643 rc = VERR_NO_MEMORY;
644 }
645
646 return rc;
647}
648
649
650/**
651 * Tries to allocate the given chunk which requires an EMT rendezvous.
652 *
653 * @returns VBox status code.
654 * @param pUVM The user mode VM handle.
655 * @param idChunk The chunk to allocate.
656 *
657 * @thread Any thread.
658 */
659DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
660{
661 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
662}
663
664
665/**
666 * Tries to allocate a new breakpoint of the given type.
667 *
668 * @returns VBox status code.
669 * @param pUVM The user mode VM handle.
670 * @param hOwner The owner handle, NIL_DBGFBPOWNER if none assigned.
671 * @param pvUser Opaque user data passed in the owner callback.
672 * @param enmType Breakpoint type to allocate.
673 * @param fFlags Flags associated with the allocated breakpoint.
674 * @param iHitTrigger The hit count at which the breakpoint start triggering.
675 * Use 0 (or 1) if it's gonna trigger at once.
676 * @param iHitDisable The hit count which disables the breakpoint.
677 * Use ~(uint64_t)0 if it's never gonna be disabled.
678 * @param phBp Where to return the opaque breakpoint handle on success.
679 * @param ppBp Where to return the pointer to the internal breakpoint state on success.
680 *
681 * @thread Any thread.
682 */
683static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
684 uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
685 PDBGFBPINT *ppBp)
686{
687 bool fIo = enmType == DBGFBPTYPE_PORT_IO
688 || enmType == DBGFBPTYPE_MMIO;
689 int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
690 if (RT_FAILURE(rc))
691 return rc;
692
693 /*
694 * Search for a chunk having a free entry, allocating new chunks
695 * if the encountered ones are full.
696 *
697 * This can be called from multiple threads at the same time so special care
698 * has to be taken to not require any locking here.
699 */
700 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
701 {
702 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
703
704 uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
705 if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
706 {
707 rc = dbgfR3BpChunkAlloc(pUVM, i);
708 if (RT_FAILURE(rc))
709 {
710 LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
711 break;
712 }
713
714 idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
715 Assert(idChunk == i);
716 }
717
718 /** @todo Optimize with some hinting if this turns out to be too slow. */
719 for (;;)
720 {
721 uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
722 if (cBpsFree)
723 {
724 /*
725 * Scan the associated bitmap for a free entry, if none can be found another thread
726 * raced us and we go to the next chunk.
727 */
728 int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
729 if (iClr != -1)
730 {
731 /*
732 * Try to allocate, we could get raced here as well. In that case
733 * we try again.
734 */
735 if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
736 {
737 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
738 ASMAtomicDecU32(&pBpChunk->cBpsFree);
739
740 PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
741 pBp->Pub.cHits = 0;
742 pBp->Pub.iHitTrigger = iHitTrigger;
743 pBp->Pub.iHitDisable = iHitDisable;
744 pBp->Pub.hOwner = hOwner;
745 pBp->Pub.u16Type = DBGF_BP_PUB_MAKE_TYPE(enmType);
746 pBp->Pub.fFlags = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
747 pBp->pvUserR3 = pvUser;
748
749 /** @todo Owner handling (reference and call ring-0 if it has an ring-0 callback). */
750
751 *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
752 *ppBp = pBp;
753 return VINF_SUCCESS;
754 }
755 /* else Retry with another spot. */
756 }
757 else /* no free entry in bitmap, go to the next chunk */
758 break;
759 }
760 else /* !cBpsFree, go to the next chunk */
761 break;
762 }
763 }
764
765 rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
766 return VERR_DBGF_NO_MORE_BP_SLOTS;
767}
768
769
770/**
771 * Frees the given breakpoint handle.
772 *
773 * @returns nothing.
774 * @param pUVM The user mode VM handle.
775 * @param hBp The breakpoint handle to free.
776 * @param pBp The internal breakpoint state pointer.
777 */
778static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
779{
780 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
781 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
782
783 AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
784 AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);
785
786 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
787 AssertPtrReturnVoid(pBpChunk->pbmAlloc);
788 AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));
789
790 /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
791 int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
792 memset(pBp, 0, sizeof(*pBp));
793
794 ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
795 ASMAtomicIncU32(&pBpChunk->cBpsFree);
796}
797
798
799/**
800 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
801 */
802static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
803{
804 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
805
806 VMCPU_ASSERT_EMT(pVCpu);
807 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
808
809 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
810
811 PUVM pUVM = pVM->pUVM;
812 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
813
814 AssertReturn( pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
815 || pL2Chunk->idChunk == idChunk,
816 VERR_DBGF_BP_IPE_2);
817
818 /*
819 * The initialization will be done on EMT(0). It is possible that multiple
820 * allocation attempts are done when multiple racing non EMT threads try to
821 * allocate a breakpoint and a new chunk needs to be allocated.
822 * Ignore the request and succeed if the chunk is allocated meaning that a
823 * previous rendezvous successfully allocated the chunk.
824 */
825 int rc = VINF_SUCCESS;
826 if ( pVCpu->idCpu == 0
827 && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
828 {
829 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
830 AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
831 void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
832 if (RT_LIKELY(pbmAlloc))
833 {
834 if (!SUPR3IsDriverless())
835 {
836 DBGFBPL2TBLCHUNKALLOCREQ Req;
837 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
838 Req.Hdr.cbReq = sizeof(Req);
839 Req.idChunk = idChunk;
840 Req.pChunkBaseR3 = NULL;
841 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
842 if (RT_SUCCESS(rc))
843 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
844 else
845 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
846 }
847 else
848 {
849 /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
850 uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
851 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
852 AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
853 }
854 if (RT_SUCCESS(rc))
855 {
856 pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
857 pL2Chunk->cFree = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
858 pL2Chunk->idChunk = idChunk;
859 return VINF_SUCCESS;
860 }
861
862 RTMemFree(pbmAlloc);
863 }
864 else
865 rc = VERR_NO_MEMORY;
866 }
867
868 return rc;
869}
870
871
872/**
873 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
874 *
875 * @returns VBox status code.
876 * @param pUVM The user mode VM handle.
877 * @param idChunk The chunk to allocate.
878 *
879 * @thread Any thread.
880 */
881DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
882{
883 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
884}
885
886
887/**
888 * Tries to allocate a new L2 table entry.
889 *
890 * @returns VBox status code.
891 * @param pUVM The user mode VM handle.
892 * @param pidxL2Tbl Where to return the L2 table entry index on success.
893 * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
894 *
895 * @thread Any thread.
896 */
897static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
898{
899 /*
900 * Search for a chunk having a free entry, allocating new chunks
901 * if the encountered ones are full.
902 *
903 * This can be called from multiple threads at the same time so special care
904 * has to be taken to not require any locking here.
905 */
906 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
907 {
908 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
909
910 uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
911 if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
912 {
913 int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
914 if (RT_FAILURE(rc))
915 {
916 LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
917 break;
918 }
919
920 idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
921 Assert(idChunk == i);
922 }
923
924 /** @todo Optimize with some hinting if this turns out to be too slow. */
925 for (;;)
926 {
927 uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
928 if (cFree)
929 {
930 /*
931 * Scan the associated bitmap for a free entry, if none can be found another thread
932 * raced us and we go to the next chunk.
933 */
934 int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
935 if (iClr != -1)
936 {
937 /*
938 * Try to allocate, we could get raced here as well. In that case
939 * we try again.
940 */
941 if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
942 {
943 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
944 ASMAtomicDecU32(&pL2Chunk->cFree);
945
946 PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
947
948 *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
949 *ppL2TblEntry = pL2Entry;
950 return VINF_SUCCESS;
951 }
952 /* else Retry with another spot. */
953 }
954 else /* no free entry in bitmap, go to the next chunk */
955 break;
956 }
957 else /* !cFree, go to the next chunk */
958 break;
959 }
960 }
961
962 return VERR_DBGF_NO_MORE_BP_SLOTS;
963}
964
965
966/**
967 * Frees the given L2 table entry.
968 *
969 * @returns nothing.
970 * @param pUVM The user mode VM handle.
971 * @param idxL2Tbl The L2 table index to free.
972 * @param pL2TblEntry The L2 table entry pointer to free.
973 */
974static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
975{
976 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
977 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
978
979 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
980 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
981
982 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
983 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
984 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
985
986 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
987
988 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
989 ASMAtomicIncU32(&pL2Chunk->cFree);
990}
991
992
993/**
994 * Sets the enabled flag of the given breakpoint to the given value.
995 *
996 * @returns nothing.
997 * @param pBp The breakpoint to set the state.
998 * @param fEnabled Enabled status.
999 */
1000DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
1001{
1002 if (fEnabled)
1003 pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
1004 else
1005 pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
1006}
1007
1008
1009/**
1010 * Assigns a hardware breakpoint state to the given register breakpoint.
1011 *
1012 * @returns VBox status code.
1013 * @param pVM The cross-context VM structure pointer.
1014 * @param hBp The breakpoint handle to assign.
1015 * @param pBp The internal breakpoint state.
1016 *
1017 * @thread Any thread.
1018 */
1019static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1020{
1021 AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
1022
1023 for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1024 {
1025 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1026
1027 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1028 if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
1029 {
1030 pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
1031 pHwBp->fType = pBp->Pub.u.Reg.fType;
1032 pHwBp->cb = pBp->Pub.u.Reg.cb;
1033 pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);
1034
1035 pBp->Pub.u.Reg.iReg = i;
1036 return VINF_SUCCESS;
1037 }
1038 }
1039
1040 return VERR_DBGF_NO_MORE_BP_SLOTS;
1041}
1042
1043
1044/**
1045 * Removes the assigned hardware breakpoint state from the given register breakpoint.
1046 *
1047 * @returns VBox status code.
1048 * @param pVM The cross-context VM structure pointer.
1049 * @param hBp The breakpoint handle to remove.
1050 * @param pBp The internal breakpoint state.
1051 *
1052 * @thread Any thread.
1053 */
1054static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1055{
1056 AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
1057
1058 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1059 AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
1060 AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
1061
1062 pHwBp->GCPtr = 0;
1063 pHwBp->fType = 0;
1064 pHwBp->cb = 0;
1065 ASMCompilerBarrier();
1066
1067 ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
1068 return VINF_SUCCESS;
1069}
1070
1071
1072/**
1073 * Returns the pointer to the L2 table entry from the given index.
1074 *
1075 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
1076 * @param pUVM The user mode VM handle.
1077 * @param idxL2 The L2 table index to resolve.
1078 *
1079 * @note The content of the resolved L2 table entry is not validated!
1080 */
1081DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
1082{
1083 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
1084 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
1085
1086 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
1087 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
1088
1089 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1090 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
1091 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
1092
1093 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
1094}
1095
1096
1097/**
1098 * Creates a binary search tree with the given root and leaf nodes.
1099 *
1100 * @returns VBox status code.
1101 * @param pUVM The user mode VM handle.
1102 * @param idxL1 The index into the L1 table into which the created tree should be linked.
1103 * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
1104 * @param hBpRoot The root node DBGF handle to assign.
1105 * @param GCPtrRoot The root node's GC pointer to use as a key.
1106 * @param hBpLeaf The leaf node's DBGF handle to assign.
1107 * @param GCPtrLeaf The leaf node's GC pointer to use as a key.
1108 */
1109static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
1110 DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
1111 DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
1112{
1113 AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
1114 Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
1115
1116 /* Allocate two nodes. */
1117 uint32_t idxL2Root = 0;
1118 PDBGFBPL2ENTRY pL2Root = NULL;
1119 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
1120 if (RT_SUCCESS(rc))
1121 {
1122 uint32_t idxL2Leaf = 0;
1123 PDBGFBPL2ENTRY pL2Leaf = NULL;
1124 rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
1125 if (RT_SUCCESS(rc))
1126 {
1127 dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1128 if (GCPtrLeaf < GCPtrRoot)
1129 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1130 else
1131 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
1132
1133 uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
1134 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
1135 return VINF_SUCCESS;
1136
1137 /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
1138 dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
1139 rc = VINF_TRY_AGAIN;
1140 }
1141
1142 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
1143 }
1144
1145 return rc;
1146}
1147
1148
1149/**
1150 * Inserts the given breakpoint handle into an existing binary search tree.
1151 *
1152 * @returns VBox status code.
1153 * @param pUVM The user mode VM handle.
1154 * @param idxL2Root The index of the tree root in the L2 table.
1155 * @param hBp The node DBGF handle to insert.
1156 * @param GCPtr The node's GC pointer to use as a key.
1157 */
1158static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1159{
1160 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1161
1162 /* Allocate a new node first. */
1163 uint32_t idxL2Nd = 0;
1164 PDBGFBPL2ENTRY pL2Nd = NULL;
1165 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
1166 if (RT_SUCCESS(rc))
1167 {
1168 /* Walk the tree and find the correct node to insert to. */
1169 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1170 while (RT_LIKELY(pL2Entry))
1171 {
1172 /* Make a copy of the entry. */
1173 DBGFBPL2ENTRY L2Entry;
1174 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
1175 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
1176
1177 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
1178 AssertBreak(GCPtr != GCPtrL2Entry);
1179
1180 /* Not found, get to the next level. */
1181 uint32_t idxL2Next = GCPtr < GCPtrL2Entry
1182 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
1183 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
1184 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1185 {
1186 /* Insert the new node here. */
1187 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1188 if (GCPtr < GCPtrL2Entry)
1189 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1190 else
1191 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1192 return VINF_SUCCESS;
1193 }
1194
1195 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1196 }
1197
1198 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1199 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
1200 }
1201
1202 return rc;
1203}
1204
1205
1206/**
1207 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
1208 * possibly creating a new tree.
1209 *
1210 * @returns VBox status code.
1211 * @param pUVM The user mode VM handle.
1212 * @param idxL1 The index into the L1 table the breakpoint uses.
1213 * @param hBp The breakpoint handle which is to be added.
1214 * @param GCPtr The GC pointer the breakpoint is keyed with.
1215 */
1216static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1217{
1218 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1219
1220 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1221 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1222 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1223 {
1224 /* Create a new search tree, gather the necessary information first. */
1225 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1226 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1227 AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1228 if (RT_SUCCESS(rc))
1229 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Int3.GCPtr);
1230 }
1231 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1232 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1233
1234 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1235 return rc;
1236}
1237
1238
1239/**
1240 * Gets the leftmost entry starting from the given tree node index.
1241 *
1242 * @returns VBox status code.
1243 * @param pUVM The user mode VM handle.
1244 * @param idxL2Start The start index to walk from.
1245 * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
1246 * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
1247 * @param pidxL2NdLeftParent Where to store the L2 table index of the leftmost entry's parent.
1248 * @param ppL2NdLeftParent Where to store the pointer to the leftmost L2 table entry's parent.
1249 */
1250static int dbgfR33BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
1251 uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
1252 uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
1253{
1254 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1255 PDBGFBPL2ENTRY pL2NdParent = NULL;
1256
1257 for (;;)
1258 {
1259 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
1260 AssertPtr(pL2Entry);
1261
1262 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1263 if (idxL2Left == DBGF_BP_L2_ENTRY_IDX_END)
1264 {
1265 *pidxL2Leftmost = idxL2Start;
1266 *ppL2NdLeftmost = pL2Entry;
1267 *pidxL2NdLeftParent = idxL2Parent;
1268 *ppL2NdLeftParent = pL2NdParent;
1269 break;
1270 }
1271
1272 idxL2Parent = idxL2Start;
1273 idxL2Start = idxL2Left;
1274 pL2NdParent = pL2Entry;
1275 }
1276
1277 return VINF_SUCCESS;
1278}
1279
1280
1281/**
1282 * Removes the given node rearranging the tree.
1283 *
1284 * @returns VBox status code.
1285 * @param pUVM The user mode VM handle.
1286 * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
1287 * @param idxL2Root The L2 table index where the tree root is located.
1288 * @param idxL2Nd The node index to remove.
1289 * @param pL2Nd The L2 table entry to remove.
1290 * @param idxL2NdParent The parent's index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
1291 * @param pL2NdParent The parent's L2 table entry, can be NULL if the root is about to be removed.
1292 * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
1293 */
1294static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
1295 uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
1296 uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
1297 bool fLeftChild)
1298{
1299 /*
1300 * If there are only two nodes remaining the tree will get destroyed and the
1301 * L1 entry will be converted to the direct handle type.
1302 */
1303 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1304 uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1305
1306 Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
1307 uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
1308 if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
1309 idxL2ParentNew = idxL2Left;
1310 else
1311 {
1312 /* Find the leftmost entry of the right subtree and move it to the to-be-removed node's location in the tree. */
1313 PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
1314 PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
1315 uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
1316 uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
1317 int rc = dbgfR33BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost ,&pL2NdLeftmost,
1318 &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
1319 AssertRCReturn(rc, rc);
1320
1321 if (pL2NdLeftmostParent)
1322 {
1323 /* Rearrange the leftmost entry's parent pointer. */
1324 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
1325 dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
1326 }
1327
1328 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
1329
1330 /* Update the removed node's parent to point to the new node. */
1331 idxL2ParentNew = idxL2Leftmost;
1332 }
1333
1334 if (pL2NdParent)
1335 {
1336 /* Assign the new L2 index to the proper parent's left or right pointer. */
1337 if (fLeftChild)
1338 dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1339 else
1340 dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1341 }
1342 else
1343 {
1344 /* The root node is removed, set the new root in the L1 table. */
1345 Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
1346 idxL2Root = idxL2ParentNew;
1347 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2ParentNew));
1348 }
1349
1350 /* Free the node. */
1351 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1352
1353 /*
1354 * Check whether the old/new root is the only node remaining and convert the L1
1355 * table entry to a direct breakpoint handle one in that case.
1356 */
1357 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1358 AssertPtr(pL2Nd);
1359 if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
1360 && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
1361 {
1362 DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1363 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
1364 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
1365 }
1366
1367 return VINF_SUCCESS;
1368}
1369
1370
1371/**
1372 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1373 * pointed to by the given L2 root index.
1374 *
1375 * @returns VBox status code.
1376 * @param pUVM The user mode VM handle.
1377 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1378 * @param idxL2Root The L2 table index where the tree root is located.
1379 * @param hBp The breakpoint handle which is to be removed.
1380 * @param GCPtr The GC pointer the breakpoint is keyed with.
1381 */
1382static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1383{
1384 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1385
1386 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1387
1388 uint32_t idxL2Cur = idxL2Root;
1389 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1390 bool fLeftChild = false;
1391 PDBGFBPL2ENTRY pL2EntryParent = NULL;
1392 for (;;)
1393 {
1394 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
1395 AssertPtr(pL2Entry);
1396
1397 /* Check whether this node is to be removed. */
1398 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
1399 if (GCPtrL2Entry == GCPtr)
1400 {
1401 Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
1402
1403 rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
1404 break;
1405 }
1406
1407 pL2EntryParent = pL2Entry;
1408 idxL2Parent = idxL2Cur;
1409
1410 if (GCPtr < GCPtrL2Entry)
1411 {
1412 fLeftChild = true;
1413 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1414 }
1415 else
1416 {
1417 fLeftChild = false;
1418 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1419 }
1420
1421 AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
1422 }
1423
1424 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1425
1426 return rc;
1427}
1428
1429
1430/**
1431 * Adds the given int3 breakpoint to the appropriate lookup tables.
1432 *
1433 * @returns VBox status code.
1434 * @param pUVM The user mode VM handle.
1435 * @param hBp The breakpoint handle to add.
1436 * @param pBp The internal breakpoint state.
1437 */
1438static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1439{
1440 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1441
1442 int rc = VINF_SUCCESS;
1443 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1444 uint8_t cTries = 16;
1445
1446 while (cTries--)
1447 {
1448 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1449 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1450 {
1451 /*
1452 * No breakpoint assigned so far for this entry, create an entry containing
1453 * the direct breakpoint handle and try to exchange it atomically.
1454 */
1455 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1456 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1457 break;
1458 }
1459 else
1460 {
1461 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Int3.GCPtr);
1462 if (rc != VINF_TRY_AGAIN)
1463 break;
1464 }
1465 }
1466
1467 if ( RT_SUCCESS(rc)
1468 && !cTries) /* Too much contention, abort with an error. */
1469 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1470
1471 return rc;
1472}
1473
1474
1475/**
1476 * Adds the given port I/O breakpoint to the appropriate lookup tables.
1477 *
1478 * @returns VBox status code.
1479 * @param pUVM The user mode VM handle.
1480 * @param hBp The breakpoint handle to add.
1481 * @param pBp The internal breakpoint state.
1482 */
1483static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1484{
1485 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1486
1487 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1488 uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1489 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1490 {
1491 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
1492 if (!fXchg)
1493 {
1494 /* Something raced us, so roll back the other registrations. */
1495 while (idxPort-- > pBp->Pub.u.PortIo.uPort) /* post-decrement: idxPort steps back over the ports registered above */
1496 {
1497 fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1498 Assert(fXchg); RT_NOREF(fXchg);
1499 }
1500
1501 return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
1502 }
1503 }
1504
1505 return VINF_SUCCESS;
1506}
1507
1508
1509/**
1510 * Get a breakpoint given by address.
1511 *
1512 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1513 * @param pUVM The user mode VM handle.
1514 * @param enmType The breakpoint type.
1515 * @param GCPtr The breakpoint address.
1516 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1517 */
1518static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
1519{
1520 DBGFBP hBp = NIL_DBGFBP;
1521
1522 switch (enmType)
1523 {
1524 case DBGFBPTYPE_REG:
1525 {
1526 PVM pVM = pUVM->pVM;
1527 VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
1528
1529 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1530 {
1531 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1532
1533 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1534 DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
1535 if ( pHwBp->GCPtr == GCPtr
1536 && hBpTmp != NIL_DBGFBP)
1537 {
1538 hBp = hBpTmp;
1539 break;
1540 }
1541 }
1542 break;
1543 }
1544
1545 case DBGFBPTYPE_INT3:
1546 {
1547 const uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
1548 const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);
1549
1550 if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1551 {
1552 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
1553 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1554 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
1555 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1556 {
1557 RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1558 PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));
1559
1560 for (;;)
1561 {
1562 AssertPtr(pL2Nd);
1563
1564 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
1565 if (GCPtrKey == GCPtrL2Entry)
1566 {
1567 hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1568 break;
1569 }
1570
1571 /* Not found, get to the next level. */
1572 uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
1573 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
1574 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1575 /* Address not found if the entry denotes the end. */
1576 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1577 break;
1578
1579 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1580 }
1581 }
1582 }
1583 break;
1584 }
1585
1586 default:
1587 AssertMsgFailed(("enmType=%d\n", enmType));
1588 break;
1589 }
1590
1591 if ( hBp != NIL_DBGFBP
1592 && ppBp)
1593 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1594 return hBp;
1595}
1596
1597
1598/**
1599 * Get a port I/O breakpoint given by the range.
1600 *
1601 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1602 * @param pUVM The user mode VM handle.
1603 * @param uPort First port in the range.
1604 * @param cPorts Number of ports in the range.
1605 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1606 */
1607static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
1608{
1609 DBGFBP hBp = NIL_DBGFBP;
1610
1611 for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
1612 {
1613 const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
1614 if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1615 {
1616 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1617 break;
1618 }
1619 }
1620
1621 if ( hBp != NIL_DBGFBP
1622 && ppBp)
1623 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1624 return hBp;
1625}
1626
1627
1628/**
1629 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1630 */
1631static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1632{
1633 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1634
1635 VMCPU_ASSERT_EMT(pVCpu);
1636 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1637
1638 PUVM pUVM = pVM->pUVM;
1639 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1640 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1641
1642 int rc = VINF_SUCCESS;
1643 if (pVCpu->idCpu == 0)
1644 {
1645 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1646 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1647 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1648
1649 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1650 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1651 {
1652 /* Single breakpoint, just exchange atomically with the null value. */
1653 if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
1654 {
1655 /*
1656                 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type; re-read
1657                 * the entry and remove the node from the created binary search tree.
1658 *
1659 * This works because after the entry was converted to an L2 index it can only be converted back
1660 * to a direct handle by removing one or more nodes which always goes through the fast mutex
1661 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
1662 * so there is serialization here and the node can be removed safely without having to worry about
1663 * concurrent tree modifications.
1664 */
1665 u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1666 AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
1667
1668 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1669 hBp, pBp->Pub.u.Int3.GCPtr);
1670 }
1671 }
1672 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1673 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1674 hBp, pBp->Pub.u.Int3.GCPtr);
1675 }
1676
1677 return rc;
1678}
1679
1680
1681/**
1682 * Removes the given int3 breakpoint from all lookup tables.
1683 *
1684 * @returns VBox status code.
1685 * @param pUVM The user mode VM handle.
1686 * @param hBp The breakpoint handle to remove.
1687 * @param pBp The internal breakpoint state.
1688 */
1689static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1690{
1691 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1692
1693 /*
1694     * This has to be done by an EMT rendezvous in order to not have an EMT traversing
1695     * any L2 trees while the breakpoint is being removed.
1696 */
1697 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1698}
1699
1700
1701/**
1702 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1703 */
1704static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1705{
1706 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1707
1708 VMCPU_ASSERT_EMT(pVCpu);
1709 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1710
1711 PUVM pUVM = pVM->pUVM;
1712 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1713 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1714
1715 int rc = VINF_SUCCESS;
1716 if (pVCpu->idCpu == 0)
1717 {
1718 /*
1719         * Remove the whole range; there shouldn't be any other breakpoint configured for this range as overlapping
1720         * ranges are not allowed right now.
1721 */
1722 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1723 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1724 {
1725 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
1726 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1727
1728 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1729 AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
1730
1731 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1732 Assert(fXchg); RT_NOREF(fXchg);
1733 }
1734 }
1735
1736 return rc;
1737}
1738
1739
1740/**
1741 * Removes the given port I/O breakpoint from all lookup tables.
1742 *
1743 * @returns VBox status code.
1744 * @param pUVM The user mode VM handle.
1745 * @param hBp The breakpoint handle to remove.
1746 * @param pBp The internal breakpoint state.
1747 */
1748static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1749{
1750 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1751
1752 /*
1753 * This has to be done by an EMT rendezvous in order to not have an EMT accessing
1754     * the breakpoint while it is being removed.
1755 */
1756 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
1757}
1758
1759
1760/**
1761 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1762 */
1763static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
1764{
1765 RT_NOREF(pvUser);
1766
1767 /*
1768 * CPU 0 updates the enabled hardware breakpoint counts.
1769 */
1770 if (pVCpu->idCpu == 0)
1771 {
1772 pVM->dbgf.s.cEnabledHwBreakpoints = 0;
1773 pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
1774
1775 for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
1776 {
1777 if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
1778 {
1779 pVM->dbgf.s.cEnabledHwBreakpoints += 1;
1780 pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
1781 }
1782 }
1783 }
1784
1785 return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
1786}
1787
1788
1789/**
1790 * Arms the given breakpoint.
1791 *
1792 * @returns VBox status code.
1793 * @param pUVM The user mode VM handle.
1794 * @param hBp The breakpoint handle to arm.
1795 * @param pBp The internal breakpoint state pointer for the handle.
1796 *
1797 * @thread Any thread.
1798 */
1799static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1800{
1801 int rc;
1802 PVM pVM = pUVM->pVM;
1803
1804 Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1805 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1806 {
1807 case DBGFBPTYPE_REG:
1808 {
1809 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1810 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1811 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1812
1813 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1814 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1815 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1816 if (RT_FAILURE(rc))
1817 {
1818 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1819 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1820 }
1821 break;
1822 }
1823 case DBGFBPTYPE_INT3:
1824 {
1825 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1826
1827 /** @todo When we enable the first int3 breakpoint we should do this in an EMT rendezvous
1828 * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
1829 * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
1830 /*
1831 * Save current byte and write the int3 instruction byte.
1832 */
1833 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Int3.bOrg, pBp->Pub.u.Int3.PhysAddr, sizeof(pBp->Pub.u.Int3.bOrg));
1834 if (RT_SUCCESS(rc))
1835 {
1836 static const uint8_t s_bInt3 = 0xcc;
1837 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &s_bInt3, sizeof(s_bInt3));
1838 if (RT_SUCCESS(rc))
1839 {
1840 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1841 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1842 }
1843 }
1844
1845 if (RT_FAILURE(rc))
1846 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1847
1848 break;
1849 }
1850 case DBGFBPTYPE_PORT_IO:
1851 {
1852 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1853 ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
1854 IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
1855 rc = VINF_SUCCESS;
1856 break;
1857 }
1858 case DBGFBPTYPE_MMIO:
1859 rc = VERR_NOT_IMPLEMENTED;
1860 break;
1861 default:
1862 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1863 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1864 }
1865
1866 return rc;
1867}
1868
1869
1870/**
1871 * Disarms the given breakpoint.
1872 *
1873 * @returns VBox status code.
1874 * @param pUVM The user mode VM handle.
1875 * @param hBp The breakpoint handle to disarm.
1876 * @param pBp The internal breakpoint state pointer for the handle.
1877 *
1878 * @thread Any thread.
1879 */
1880static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1881{
1882 int rc;
1883 PVM pVM = pUVM->pVM;
1884
1885 Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1886 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1887 {
1888 case DBGFBPTYPE_REG:
1889 {
1890 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1891 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1892 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1893
1894 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1895 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1896 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1897 if (RT_FAILURE(rc))
1898 {
1899 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1900 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1901 }
1902 break;
1903 }
1904 case DBGFBPTYPE_INT3:
1905 {
1906 /*
1907 * Check that the current byte is the int3 instruction, and restore the original one.
1908 * We currently ignore invalid bytes.
1909 */
1910 uint8_t bCurrent = 0;
1911 rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Int3.PhysAddr, sizeof(bCurrent));
1912 if ( RT_SUCCESS(rc)
1913 && bCurrent == 0xcc)
1914 {
1915 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &pBp->Pub.u.Int3.bOrg, sizeof(pBp->Pub.u.Int3.bOrg));
1916 if (RT_SUCCESS(rc))
1917 {
1918 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1919 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1920 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1921 }
1922 }
1923 break;
1924 }
1925 case DBGFBPTYPE_PORT_IO:
1926 {
1927 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1928 uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
1929 if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
1930 IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
1931 rc = VINF_SUCCESS;
1932 break;
1933 }
1934 case DBGFBPTYPE_MMIO:
1935 rc = VERR_NOT_IMPLEMENTED;
1936 break;
1937 default:
1938 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1939 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1940 }
1941
1942 return rc;
1943}
1944
1945
1946/**
1947 * Worker for DBGFR3BpHit() differentiating on the breakpoint type.
1948 *
1949 * @returns Strict VBox status code.
1950 * @param pVM The cross context VM structure.
1951 * @param pVCpu The vCPU the breakpoint event happened on.
1952 * @param hBp The breakpoint handle.
1953 * @param pBp The breakpoint data.
1954 * @param pBpOwner The breakpoint owner data.
1955 *
1956 * @thread EMT
1957 */
1958static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
1959{
1960 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1961
1962 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1963 {
1964 case DBGFBPTYPE_REG:
1965 case DBGFBPTYPE_INT3:
1966 {
1967 if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
1968 rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
1969 if (rcStrict == VINF_SUCCESS)
1970 {
1971 uint8_t abInstr[DBGF_BP_INSN_MAX];
1972 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
1973 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
1974 AssertRC(rc);
1975 if (RT_SUCCESS(rc))
1976 {
1977 /* Replace the int3 with the original instruction byte. */
1978 abInstr[0] = pBp->Pub.u.Int3.bOrg;
1979 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr,
1980 &abInstr[0], sizeof(abInstr));
1981 if ( rcStrict == VINF_SUCCESS
1982 && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
1983 {
1984 VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
1985 DBGF_BP_F_HIT_EXEC_AFTER);
1986 if (rcStrict2 == VINF_SUCCESS)
1987 return VBOXSTRICTRC_VAL(rcStrict);
1988 if (rcStrict2 != VINF_DBGF_BP_HALT)
1989 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
1990 }
1991 else
1992 return VBOXSTRICTRC_VAL(rcStrict);
1993 }
1994 }
1995 break;
1996 }
1997 case DBGFBPTYPE_PORT_IO:
1998 case DBGFBPTYPE_MMIO:
1999 {
2000 pVCpu->dbgf.s.fBpIoActive = false;
2001 rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2002 pVCpu->dbgf.s.fBpIoBefore
2003 ? DBGF_BP_F_HIT_EXEC_BEFORE
2004 : DBGF_BP_F_HIT_EXEC_AFTER,
2005 pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
2006 pVCpu->dbgf.s.uBpIoValue);
2007
2008 break;
2009 }
2010 default:
2011 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
2012 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
2013 }
2014
2015 return rcStrict;
2016}
2017
2018
2019/**
2020 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
2021 *
2022 * @returns VBox status code.
2023 * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
2024 * @param pUVM The user mode VM handle.
2025 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
2026 * @param pfnBpIoHit The R3 callback which is called when an I/O breakpoint with the owner handle is hit.
2027 * @param phBpOwner Where to store the owner handle on success.
2028 *
2029 * @thread Any thread but might defer work to EMT on the first call.
2030 */
2031VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
2032{
2033 /*
2034 * Validate the input.
2035 */
2036 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2037 AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
2038 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
2039
2040 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2041    AssertRCReturn(rc, rc);
2042
2043 /* Try to find a free entry in the owner table. */
2044 for (;;)
2045 {
2046 /* Scan the associated bitmap for a free entry. */
2047 int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
2048 if (iClr != -1)
2049 {
2050 /*
2051 * Try to allocate, we could get raced here as well. In that case
2052 * we try again.
2053 */
2054 if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
2055 {
2056 PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
2057 pBpOwner->cRefs = 1;
2058 pBpOwner->pfnBpHitR3 = pfnBpHit;
2059 pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
2060
2061 *phBpOwner = (DBGFBPOWNER)iClr;
2062 return VINF_SUCCESS;
2063 }
2064 /* else Retry with another spot. */
2065 }
2066 else /* no free entry in bitmap, out of entries. */
2067 {
2068 rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
2069 break;
2070 }
2071 }
2072
2073 return rc;
2074}
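
/*
 * A minimal usage sketch for the owner API.  The callback prototype below is an
 * assumption inferred from the pfnBpHitR3 dispatch in dbgfR3BpHit() above; see
 * the FNDBGFBPHIT typedef in VBox/vmm/dbgf.h for the authoritative declaration.
 * Returning VINF_SUCCESS resumes the guest, VINF_DBGF_BP_HALT drops into the
 * debugger.
 *
 * @code
 *    static DECLCALLBACK(VBOXSTRICTRC) myBpHit(PVM pVM, VMCPUID idCpu, void *pvUserBp,
 *                                              DBGFBP hBp, PCDBGFBPPUB pBpPub, uint16_t fFlags)
 *    {
 *        RT_NOREF(pVM, pvUserBp, fFlags);
 *        LogRel(("bp %#x hit on vCPU %u: type %d, %RU64 hits\n",
 *                hBp, idCpu, DBGF_BP_PUB_GET_TYPE(pBpPub), pBpPub->cHits));
 *        return VINF_SUCCESS;
 *    }
 *
 *    DBGFBPOWNER hBpOwner = NIL_DBGFBPOWNER;
 *    int rc = DBGFR3BpOwnerCreate(pUVM, myBpHit, NULL, &hBpOwner); // no port I/O / MMIO hit callback
 * @endcode
 */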
2075
2076
2077/**
2078 * Destroys the owner identified by the given handle.
2079 *
2080 * @returns VBox status code.
2081 * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
2082 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
2083 * @param pUVM The user mode VM handle.
2084 * @param hBpOwner The breakpoint owner handle to destroy.
2085 */
2086VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
2087{
2088 /*
2089 * Validate the input.
2090 */
2091 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2092 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2093
2094 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2095    AssertRCReturn(rc, rc);
2096
2097 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
2098 if (RT_LIKELY(pBpOwner))
2099 {
2100 if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
2101 {
2102 pBpOwner->pfnBpHitR3 = NULL;
2103 ASMAtomicDecU32(&pBpOwner->cRefs);
2104 ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
2105 }
2106 else
2107 rc = VERR_DBGF_OWNER_BUSY;
2108 }
2109 else
2110 rc = VERR_INVALID_HANDLE;
2111
2112 return rc;
2113}
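
/*
 * Teardown sketch: every breakpoint still referencing the owner must be cleared
 * first, otherwise the call fails with VERR_DBGF_OWNER_BUSY.  Here hBpOwner is
 * the handle from the DBGFR3BpOwnerCreate() sketch above and hBp stands for any
 * breakpoint handle that was created with it.
 *
 * @code
 *    int rc = DBGFR3BpClear(pUVM, hBp);          // drop the breakpoint(s) using hBpOwner
 *    AssertRC(rc);
 *    rc = DBGFR3BpOwnerDestroy(pUVM, hBpOwner);  // now the owner handle can be released
 *    AssertRC(rc);
 * @endcode
 */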
2114
2115
2116/**
2117 * Sets a breakpoint (int 3 based).
2118 *
2119 * @returns VBox status code.
2120 * @param pUVM The user mode VM handle.
2121 * @param idSrcCpu The ID of the virtual CPU used for the
2122 * breakpoint address resolution.
2123 * @param pAddress The address of the breakpoint.
2124 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2125 * Use 0 (or 1) if it's gonna trigger at once.
2126 * @param iHitDisable The hit count which disables the breakpoint.
2127 * Use ~(uint64_t)0 if it's never gonna be disabled.
2128 * @param phBp Where to store the breakpoint handle on success.
2129 *
2130 * @thread Any thread.
2131 */
2132VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
2133 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2134{
2135 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
2136 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2137}
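
/*
 * A minimal sketch for planting a software breakpoint on a flat guest address.
 * DBGFR3AddrFromFlat() is assumed for building the DBGFADDRESS; uGuestRip is a
 * placeholder for the address of interest.
 *
 * @code
 *    DBGFADDRESS Addr;
 *    DBGFR3AddrFromFlat(pUVM, &Addr, uGuestRip);
 *    DBGFBP hBp = NIL_DBGFBP;
 *    int rc = DBGFR3BpSetInt3(pUVM, 0, &Addr, 0, UINT64_MAX, &hBp);
 *    // idSrcCpu = 0 for the address translation, trigger at once, never auto-disable.
 *    // Toggle it later with DBGFR3BpEnable()/DBGFR3BpDisable(), drop it with DBGFR3BpClear().
 * @endcode
 */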
2138
2139
2140/**
2141 * Sets a breakpoint (int 3 based) - extended version.
2142 *
2143 * @returns VBox status code.
2144 * @param pUVM The user mode VM handle.
2145 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2146 * @param pvUser Opaque user data to pass in the owner callback.
2147 * @param idSrcCpu The ID of the virtual CPU used for the
2148 * breakpoint address resolution.
2149 * @param pAddress The address of the breakpoint.
2150 * @param fFlags Combination of DBGF_BP_F_XXX.
2151 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2152 * Use 0 (or 1) if it's gonna trigger at once.
2153 * @param iHitDisable The hit count which disables the breakpoint.
2154 * Use ~(uint64_t)0 if it's never gonna be disabled.
2155 * @param phBp Where to store the breakpoint handle on success.
2156 *
2157 * @thread Any thread.
2158 */
2159VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2160 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
2161 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2162{
2163 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2164 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2165 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2166 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2167 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2168
2169 int rc = dbgfR3BpEnsureInit(pUVM);
2170 AssertRCReturn(rc, rc);
2171
2172 /*
2173 * Translate & save the breakpoint address into a guest-physical address.
2174 */
2175 RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
2176 rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
2177 if (RT_SUCCESS(rc))
2178 {
2179 /*
2180         * The physical address from DBGFR3AddrToPhys() is the start of the page;
2181         * we need the exact byte offset into the page when writing to it in dbgfR3BpInt3Arm().
2182 */
2183 GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
2184
2185 PDBGFBPINT pBp = NULL;
2186 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_INT3, pAddress->FlatPtr, &pBp);
2187 if ( hBp != NIL_DBGFBP
2188 && pBp->Pub.u.Int3.PhysAddr == GCPhysBpAddr)
2189 {
2190 rc = VINF_SUCCESS;
2191 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2192 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2193 if (RT_SUCCESS(rc))
2194 {
2195 rc = VINF_DBGF_BP_ALREADY_EXIST;
2196 if (phBp)
2197 *phBp = hBp;
2198 }
2199 return rc;
2200 }
2201
2202 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_INT3, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2203 if (RT_SUCCESS(rc))
2204 {
2205 pBp->Pub.u.Int3.PhysAddr = GCPhysBpAddr;
2206 pBp->Pub.u.Int3.GCPtr = pAddress->FlatPtr;
2207
2208 /* Add the breakpoint to the lookup tables. */
2209 rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
2210 if (RT_SUCCESS(rc))
2211 {
2212 /* Enable the breakpoint if requested. */
2213 if (fFlags & DBGF_BP_F_ENABLED)
2214 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2215 if (RT_SUCCESS(rc))
2216 {
2217 *phBp = hBp;
2218 return VINF_SUCCESS;
2219 }
2220
2221 int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
2222 }
2223
2224 dbgfR3BpFree(pUVM, hBp, pBp);
2225 }
2226 }
2227
2228 return rc;
2229}
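
/*
 * Sketch of the extended variant with an owner attached.  The flag combination
 * is an assumption based on the DBGF_BP_F_XXX names used in this file; hBpOwner
 * comes from the DBGFR3BpOwnerCreate() sketch, Addr from the DBGFR3BpSetInt3()
 * sketch, and pMyCtx is a placeholder user argument handed back to the hit
 * callback.
 *
 * @code
 *    DBGFBP hBp = NIL_DBGFBP;
 *    int rc = DBGFR3BpSetInt3Ex(pUVM, hBpOwner, pMyCtx, 0, &Addr,
 *                               DBGF_BP_F_ENABLED | DBGF_BP_F_HIT_EXEC_BEFORE,
 *                               0, UINT64_MAX, &hBp);
 *    if (rc == VINF_DBGF_BP_ALREADY_EXIST)
 *        LogRel(("Reusing existing breakpoint %#x\n", hBp));
 * @endcode
 */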
2230
2231
2232/**
2233 * Sets a register breakpoint.
2234 *
2235 * @returns VBox status code.
2236 * @param pUVM The user mode VM handle.
2237 * @param pAddress The address of the breakpoint.
2238 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2239 * Use 0 (or 1) if it's gonna trigger at once.
2240 * @param iHitDisable The hit count which disables the breakpoint.
2241 * Use ~(uint64_t)0 if it's never gonna be disabled.
2242 * @param fType The access type (one of the X86_DR7_RW_* defines).
2243 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
2244 * Must be 1 if fType is X86_DR7_RW_EO.
2245 * @param phBp Where to store the breakpoint handle.
2246 *
2247 * @thread Any thread.
2248 */
2249VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2250 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
2251{
2252 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
2253 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
2254}
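
/*
 * Sketch of a hardware (DRx) data watchpoint: break on any read or write of a
 * 4 byte variable at a flat guest address.  uGuestVarAddr is a placeholder and
 * DBGFR3AddrFromFlat() is assumed for building the address.
 *
 * @code
 *    DBGFADDRESS Addr;
 *    DBGFR3AddrFromFlat(pUVM, &Addr, uGuestVarAddr);
 *    DBGFBP hBp = NIL_DBGFBP;
 *    int rc = DBGFR3BpSetReg(pUVM, &Addr, 0, UINT64_MAX, X86_DR7_RW_RW, 4, &hBp);
 * @endcode
 *
 * Only the few DR0-DR3 slots are available, so this can fail even when plenty
 * of int3 breakpoints are still free.
 */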
2255
2256
2257/**
2258 * Sets a register breakpoint - extended version.
2259 *
2260 * @returns VBox status code.
2261 * @param pUVM The user mode VM handle.
2262 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2263 * @param pvUser Opaque user data to pass in the owner callback.
2264 * @param pAddress The address of the breakpoint.
2265 * @param fFlags Combination of DBGF_BP_F_XXX.
2266 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2267 * Use 0 (or 1) if it's gonna trigger at once.
2268 * @param iHitDisable The hit count which disables the breakpoint.
2269 * Use ~(uint64_t)0 if it's never gonna be disabled.
2270 * @param fType The access type (one of the X86_DR7_RW_* defines).
2271 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
2272 * Must be 1 if fType is X86_DR7_RW_EO.
2273 * @param phBp Where to store the breakpoint handle.
2274 *
2275 * @thread Any thread.
2276 */
2277VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2278 PCDBGFADDRESS pAddress, uint16_t fFlags,
2279 uint64_t iHitTrigger, uint64_t iHitDisable,
2280 uint8_t fType, uint8_t cb, PDBGFBP phBp)
2281{
2282 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2283 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2284 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2285 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2286 AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
2287 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2288 switch (fType)
2289 {
2290 case X86_DR7_RW_EO:
2291 if (cb == 1)
2292 break;
2293 AssertMsgFailedReturn(("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
2294 case X86_DR7_RW_IO:
2295 case X86_DR7_RW_RW:
2296 case X86_DR7_RW_WO:
2297 break;
2298 default:
2299 AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
2300 }
2301
2302 int rc = dbgfR3BpEnsureInit(pUVM);
2303 AssertRCReturn(rc, rc);
2304
2305 PDBGFBPINT pBp = NULL;
2306 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
2307 if ( hBp != NIL_DBGFBP
2308 && pBp->Pub.u.Reg.cb == cb
2309 && pBp->Pub.u.Reg.fType == fType)
2310 {
2311 rc = VINF_SUCCESS;
2312 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2313 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2314 if (RT_SUCCESS(rc))
2315 {
2316 rc = VINF_DBGF_BP_ALREADY_EXIST;
2317 if (phBp)
2318 *phBp = hBp;
2319 }
2320 return rc;
2321 }
2322
2323 /* Allocate new breakpoint. */
2324 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags,
2325 iHitTrigger, iHitDisable, &hBp, &pBp);
2326 if (RT_SUCCESS(rc))
2327 {
2328 pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
2329 pBp->Pub.u.Reg.fType = fType;
2330 pBp->Pub.u.Reg.cb = cb;
2331 pBp->Pub.u.Reg.iReg = UINT8_MAX;
2332 ASMCompilerBarrier();
2333
2334 /* Assign the proper hardware breakpoint. */
2335 rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
2336 if (RT_SUCCESS(rc))
2337 {
2338 /* Arm the breakpoint. */
2339 if (fFlags & DBGF_BP_F_ENABLED)
2340 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2341 if (RT_SUCCESS(rc))
2342 {
2343 if (phBp)
2344 *phBp = hBp;
2345 return VINF_SUCCESS;
2346 }
2347
2348 int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2349 AssertRC(rc2); RT_NOREF(rc2);
2350 }
2351
2352 dbgfR3BpFree(pUVM, hBp, pBp);
2353 }
2354
2355 return rc;
2356}
2357
2358
2359/**
2360 * This is only kept for now so as not to disturb the debugger implementation at this point;
2361 * recompiler breakpoints are not supported anymore (IEM has some API, but it isn't implemented
2362 * and should probably be merged with the DBGF breakpoints).
2363 */
2364VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2365 uint64_t iHitDisable, PDBGFBP phBp)
2366{
2367 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
2368 return VERR_NOT_SUPPORTED;
2369}
2370
2371
2372/**
2373 * Sets an I/O port breakpoint.
2374 *
2375 * @returns VBox status code.
2376 * @param pUVM The user mode VM handle.
2377 * @param uPort The first I/O port.
2378 * @param cPorts The number of I/O ports in the range.
2379 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
2380 * @param iHitTrigger The hit count at which the breakpoint starts
2381 * triggering. Use 0 (or 1) if it's gonna trigger at
2382 * once.
2383 * @param iHitDisable The hit count which disables the breakpoint.
2384 * Use ~(uint64_t)0 if it's never gonna be disabled.
2385 * @param phBp Where to store the breakpoint handle.
2386 *
2387 * @thread Any thread.
2388 */
2389VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2390 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2391{
2392 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
2393 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2394}
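
/*
 * Sketch of an I/O port breakpoint covering a single port.  The access flag
 * names here are assumptions; check the DBGFBPIOACCESS_XXX defines in
 * VBox/vmm/dbgf.h for the exact read/write/size combinations.
 *
 * @code
 *    DBGFBP hBp = NIL_DBGFBP;
 *    int rc = DBGFR3BpSetPortIo(pUVM, 0x60, 1, DBGFBPIOACCESS_READ_BYTE | DBGFBPIOACCESS_WRITE_BYTE,
 *                               0, UINT64_MAX, &hBp); // port 0x60, one port, byte reads and writes
 * @endcode
 */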
2395
2396
2397/**
2398 * Sets an I/O port breakpoint - extended version.
2399 *
2400 * @returns VBox status code.
2401 * @param pUVM The user mode VM handle.
2402 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2403 * @param pvUser Opaque user data to pass in the owner callback.
2404 * @param uPort The first I/O port.
2405 * @param cPorts The number of I/O ports in the range.
2406 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
2407 * @param fFlags Combination of DBGF_BP_F_XXX.
2408 * @param iHitTrigger The hit count at which the breakpoint starts
2409 * triggering. Use 0 (or 1) if it's gonna trigger at
2410 * once.
2411 * @param iHitDisable The hit count which disables the breakpoint.
2412 * Use ~(uint64_t)0 if it's never gonna be disabled.
2413 * @param phBp Where to store the breakpoint handle.
2414 *
2415 * @thread Any thread.
2416 */
2417VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2418 RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2419 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2420{
2421 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2422 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2423 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
2424 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2425 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2426 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2427 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2428 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2429 AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
2430 AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);
2431
2432 int rc = dbgfR3BpPortIoEnsureInit(pUVM);
2433 AssertRCReturn(rc, rc);
2434
2435 PDBGFBPINT pBp = NULL;
2436 DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
2437 if ( hBp != NIL_DBGFBP
2438 && pBp->Pub.u.PortIo.uPort == uPort
2439 && pBp->Pub.u.PortIo.cPorts == cPorts
2440 && pBp->Pub.u.PortIo.fAccess == fAccess)
2441 {
2442 rc = VINF_SUCCESS;
2443 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2444 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2445 if (RT_SUCCESS(rc))
2446 {
2447 rc = VINF_DBGF_BP_ALREADY_EXIST;
2448 if (phBp)
2449 *phBp = hBp;
2450 }
2451 return rc;
2452 }
2453
2454 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2455 if (RT_SUCCESS(rc))
2456 {
2457 pBp->Pub.u.PortIo.uPort = uPort;
2458 pBp->Pub.u.PortIo.cPorts = cPorts;
2459 pBp->Pub.u.PortIo.fAccess = fAccess;
2460
2461 /* Add the breakpoint to the lookup tables. */
2462 rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
2463 if (RT_SUCCESS(rc))
2464 {
2465 /* Enable the breakpoint if requested. */
2466 if (fFlags & DBGF_BP_F_ENABLED)
2467 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2468 if (RT_SUCCESS(rc))
2469 {
2470 *phBp = hBp;
2471 return VINF_SUCCESS;
2472 }
2473
2474 int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
2475 }
2476
2477 dbgfR3BpFree(pUVM, hBp, pBp);
2478 }
2479
2480 return rc;
2481}
2482
2483
2484/**
2485 * Sets a memory mapped I/O breakpoint.
2486 *
2487 * @returns VBox status code.
2488 * @param pUVM The user mode VM handle.
2489 * @param GCPhys The first MMIO address.
2490 * @param cb The size of the MMIO range to break on.
2491 * @param fAccess The access we want to break on.
2492 * @param iHitTrigger The hit count at which the breakpoint starts
2493 * triggering. Use 0 (or 1) if it's gonna trigger at
2494 * once.
2495 * @param iHitDisable The hit count which disables the breakpoint.
2496 * Use ~(uint64_t)0 if it's never gonna be disabled.
2497 * @param phBp Where to store the breakpoint handle.
2498 *
2499 * @thread Any thread.
2500 */
2501VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2502 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2503{
2504 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
2505 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2506}
2507
2508
2509/**
2510 * Sets a memory mapped I/O breakpoint - extended version.
2511 *
2512 * @returns VBox status code.
2513 * @param pUVM The user mode VM handle.
2514 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2515 * @param pvUser Opaque user data to pass in the owner callback.
2516 * @param GCPhys The first MMIO address.
2517 * @param cb The size of the MMIO range to break on.
2518 * @param fAccess The access we want to break on.
2519 * @param fFlags Combination of DBGF_BP_F_XXX.
2520 * @param iHitTrigger The hit count at which the breakpoint starts
2521 * triggering. Use 0 (or 1) if it's gonna trigger at
2522 * once.
2523 * @param iHitDisable The hit count which disables the breakpoint.
2524 * Use ~(uint64_t)0 if it's never gonna be disabled.
2525 * @param phBp Where to store the breakpoint handle.
2526 *
2527 * @thread Any thread.
2528 */
2529VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2530 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2531 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2532{
2533 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2534 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2535 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
2536 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2537 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2538 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2539 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2540 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2541 AssertReturn(cb, VERR_OUT_OF_RANGE);
2542    AssertReturn(GCPhys + cb > GCPhys, VERR_OUT_OF_RANGE); /* no wrap-around */
2543
2544 int rc = dbgfR3BpEnsureInit(pUVM);
2545 AssertRCReturn(rc, rc);
2546
2547 return VERR_NOT_IMPLEMENTED;
2548}
2549
2550
2551/**
2552 * Clears a breakpoint.
2553 *
2554 * @returns VBox status code.
2555 * @param pUVM The user mode VM handle.
2556 * @param hBp The handle of the breakpoint which should be removed (cleared).
2557 *
2558 * @thread Any thread.
2559 */
2560VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
2561{
2562 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2563    AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2564
2565 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2566 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2567
2568 /* Disarm the breakpoint when it is enabled. */
2569 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2570 {
2571 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2572 AssertRC(rc);
2573 }
2574
2575 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2576 {
2577 case DBGFBPTYPE_REG:
2578 {
2579 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2580 AssertRC(rc);
2581 break;
2582 }
2583 case DBGFBPTYPE_INT3:
2584 {
2585 int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
2586 AssertRC(rc);
2587 break;
2588 }
2589 case DBGFBPTYPE_PORT_IO:
2590 {
2591 int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
2592 AssertRC(rc);
2593 break;
2594 }
2595 default:
2596 break;
2597 }
2598
2599 dbgfR3BpFree(pUVM, hBp, pBp);
2600 return VINF_SUCCESS;
2601}
2602
2603
2604/**
2605 * Enables a breakpoint.
2606 *
2607 * @returns VBox status code.
2608 * @param pUVM The user mode VM handle.
2609 * @param hBp The handle of the breakpoint which should be enabled.
2610 *
2611 * @thread Any thread.
2612 */
2613VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
2614{
2615 /*
2616 * Validate the input.
2617 */
2618 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2619    AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2620
2621 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2622 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2623
2624 int rc;
2625 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2626 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2627 else
2628 rc = VINF_DBGF_BP_ALREADY_ENABLED;
2629
2630 return rc;
2631}
2632
2633
2634/**
2635 * Disables a breakpoint.
2636 *
2637 * @returns VBox status code.
2638 * @param pUVM The user mode VM handle.
2639 * @param hBp The handle of the breakpoint which should be disabled.
2640 *
2641 * @thread Any thread.
2642 */
2643VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
2644{
2645 /*
2646 * Validate the input.
2647 */
2648 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2649    AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2650
2651 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2652 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2653
2654 int rc;
2655 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2656 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2657 else
2658 rc = VINF_DBGF_BP_ALREADY_DISABLED;
2659
2660 return rc;
2661}
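
/*
 * Lifecycle sketch for an existing handle (hBp is a placeholder from one of the
 * DBGFR3BpSetXxx() sketches above); toggling an already enabled or already
 * disabled breakpoint just returns the corresponding informational status.
 *
 * @code
 *    int rc = DBGFR3BpDisable(pUVM, hBp); // disarm; VINF_DBGF_BP_ALREADY_DISABLED if it was off
 *    ...
 *    rc = DBGFR3BpEnable(pUVM, hBp);      // re-arm; VINF_DBGF_BP_ALREADY_ENABLED if it was on
 *    ...
 *    rc = DBGFR3BpClear(pUVM, hBp);       // disarm if necessary and free the handle
 * @endcode
 */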
2662
2663
2664/**
2665 * Enumerate the breakpoints.
2666 *
2667 * @returns VBox status code.
2668 * @param pUVM The user mode VM handle.
2669 * @param pfnCallback The callback function.
2670 * @param pvUser The user argument to pass to the callback.
2671 *
2672 * @thread Any thread.
2673 */
2674VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
2675{
2676 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2677
2678 for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
2679 {
2680 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
2681
2682 if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
2683            break; /* Stop here as the first non-allocated chunk means none are allocated afterwards either. */
2684
2685 if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
2686 {
2687 /* Scan the bitmap for allocated entries. */
2688 int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
2689 if (iAlloc != -1)
2690 {
2691 do
2692 {
2693 DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
2694 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2695
2696 /* Make a copy of the breakpoints public data to have a consistent view. */
2697 DBGFBPPUB BpPub;
2698 BpPub.cHits = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
2699 BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
2700 BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
2701 BpPub.hOwner = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
2702 BpPub.u16Type = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
2703 BpPub.fFlags = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
2704 memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */
2705
2706 /* Check if a removal raced us. */
2707 if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
2708 {
2709 int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
2710 if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
2711 return rc;
2712 }
2713
2714 iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
2715 } while (iAlloc != -1);
2716 }
2717 }
2718 }
2719
2720 return VINF_SUCCESS;
2721}
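
/*
 * Enumeration sketch.  The callback parameters are inferred from the invocation
 * above; see the FNDBGFBPENUM typedef in VBox/vmm/dbgf.h for the authoritative
 * prototype.  Returning VINF_CALLBACK_RETURN stops the enumeration early.
 *
 * @code
 *    static DECLCALLBACK(int) myBpEnum(PUVM pUVM, void *pvUser, DBGFBP hBp, PCDBGFBPPUB pBpPub)
 *    {
 *        RT_NOREF(pUVM, pvUser);
 *        LogRel(("bp %#x: type %d, flags %#x, owner %#x, %RU64 hits\n", hBp,
 *                DBGF_BP_PUB_GET_TYPE(pBpPub), pBpPub->fFlags, pBpPub->hOwner, pBpPub->cHits));
 *        return VINF_SUCCESS;
 *    }
 *
 *    int rc = DBGFR3BpEnum(pUVM, myBpEnum, NULL);
 * @endcode
 */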
2722
2723
2724/**
2725 * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
2726 *
2727 * @returns VBox status code.
2728 * @param pVM The cross context VM structure.
2729 * @param pVCpu The vCPU the breakpoint event happened on.
2730 *
2731 * @thread EMT
2732 */
2733VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
2734{
2735    /* Send it straight into the debugger? */
2736 if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
2737 {
2738 DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
2739 pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
2740
2741 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
2742 AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
2743
2744 /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
2745 if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
2746 {
2747 PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
2748 if (pBpOwner)
2749 {
2750 VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
2751 if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
2752 {
2753 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
2754 return VINF_SUCCESS;
2755 }
2756 if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
2757 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2758 /* else: Halt in the debugger. */
2759 }
2760 }
2761 }
2762
2763 return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
2764}
2765