source: vbox/trunk/src/VBox/VMM/VMMR0/DBGFR0Bp.cpp@87758
(last change: r87600 by vboxsync, "VMM/DBGF: comment fix. bugref:9837")

/* $Id: DBGFR0Bp.cpp 87600 2021-02-04 00:14:51Z vboxsync $ */
/** @file
 * DBGF - Debugger Facility, R0 breakpoint management part.
 */

/*
 * Copyright (C) 2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DBGF
#include "DBGFInternal.h"
#include <VBox/vmm/gvm.h>
#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/vmm.h>

#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/errcore.h>
#include <iprt/ctype.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/process.h>
#include <iprt/string.h>

#include "dtrace/VBoxVMM.h"


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/

/**
 * Used by DBGFR0InitPerVM() to initialize the breakpoint manager.
 *
 * @returns nothing.
 * @param   pGVM    The global (ring-0) VM structure.
 */
DECLHIDDEN(void) dbgfR0BpInit(PGVM pGVM)
{
    pGVM->dbgfr0.s.hMemObjBpOwners = NIL_RTR0MEMOBJ;
    pGVM->dbgfr0.s.hMapObjBpOwners = NIL_RTR0MEMOBJ;
    //pGVM->dbgfr0.s.paBpOwnersR0    = NULL;

    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpChunks); i++)
    {
        PDBGFBPCHUNKR0 pBpChunk = &pGVM->dbgfr0.s.aBpChunks[i];

        pBpChunk->hMemObj = NIL_RTR0MEMOBJ;
        pBpChunk->hMapObj = NIL_RTR0MEMOBJ;
        //pBpChunk->paBpBaseSharedR0 = NULL;
        //pBpChunk->paBpBaseR0Only   = NULL;
    }

    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
    {
        PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];

        pL2Chunk->hMemObj = NIL_RTR0MEMOBJ;
        pL2Chunk->hMapObj = NIL_RTR0MEMOBJ;
        //pL2Chunk->paBpL2TblBaseSharedR0 = NULL;
    }

    pGVM->dbgfr0.s.hMemObjBpLocL1 = NIL_RTR0MEMOBJ;
    //pGVM->dbgfr0.s.paBpLocL1R0    = NULL;
    //pGVM->dbgfr0.s.fInit          = false;
}


/**
 * Used by DBGFR0CleanupVM to destroy the breakpoint manager.
 *
 * This is done during VM cleanup so that we're sure there are no active threads
 * using the breakpoint code.
 *
 * @param   pGVM    The global (ring-0) VM structure.
 */
DECLHIDDEN(void) dbgfR0BpDestroy(PGVM pGVM)
{
    if (pGVM->dbgfr0.s.hMemObjBpOwners != NIL_RTR0MEMOBJ)
    {
        Assert(pGVM->dbgfr0.s.hMapObjBpOwners != NIL_RTR0MEMOBJ);
        AssertPtr(pGVM->dbgfr0.s.paBpOwnersR0);

        RTR0MEMOBJ hMemObj = pGVM->dbgfr0.s.hMapObjBpOwners;
        pGVM->dbgfr0.s.hMapObjBpOwners = NIL_RTR0MEMOBJ;
        RTR0MemObjFree(hMemObj, true);

        hMemObj = pGVM->dbgfr0.s.hMemObjBpOwners;
        pGVM->dbgfr0.s.hMemObjBpOwners = NIL_RTR0MEMOBJ;
        RTR0MemObjFree(hMemObj, true);
    }

    if (pGVM->dbgfr0.s.fInit)
    {
        Assert(pGVM->dbgfr0.s.hMemObjBpLocL1 != NIL_RTR0MEMOBJ);
        AssertPtr(pGVM->dbgfr0.s.paBpLocL1R0);

        /*
         * Free all allocated memory and ring-3 mapping objects.
         */
        RTR0MEMOBJ hMemObj = pGVM->dbgfr0.s.hMemObjBpLocL1;
        pGVM->dbgfr0.s.hMemObjBpLocL1 = NIL_RTR0MEMOBJ;
        pGVM->dbgfr0.s.paBpLocL1R0    = NULL;
        RTR0MemObjFree(hMemObj, true);

        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpChunks); i++)
        {
            PDBGFBPCHUNKR0 pBpChunk = &pGVM->dbgfr0.s.aBpChunks[i];

            if (pBpChunk->hMemObj != NIL_RTR0MEMOBJ)
            {
                Assert(pBpChunk->hMapObj != NIL_RTR0MEMOBJ);

                pBpChunk->paBpBaseSharedR0 = NULL;
                pBpChunk->paBpBaseR0Only   = NULL;

                hMemObj = pBpChunk->hMapObj;
                pBpChunk->hMapObj = NIL_RTR0MEMOBJ;
                RTR0MemObjFree(hMemObj, true);

                hMemObj = pBpChunk->hMemObj;
                pBpChunk->hMemObj = NIL_RTR0MEMOBJ;
                RTR0MemObjFree(hMemObj, true);
            }
        }

        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
        {
            PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];

            if (pL2Chunk->hMemObj != NIL_RTR0MEMOBJ)
            {
                Assert(pL2Chunk->hMapObj != NIL_RTR0MEMOBJ);

                pL2Chunk->paBpL2TblBaseSharedR0 = NULL;

                hMemObj = pL2Chunk->hMapObj;
                pL2Chunk->hMapObj = NIL_RTR0MEMOBJ;
                RTR0MemObjFree(hMemObj, true);

                hMemObj = pL2Chunk->hMemObj;
                pL2Chunk->hMemObj = NIL_RTR0MEMOBJ;
                RTR0MemObjFree(hMemObj, true);
            }
        }

        pGVM->dbgfr0.s.fInit = false;
    }
#ifdef RT_STRICT
    else
    {
        Assert(pGVM->dbgfr0.s.hMemObjBpLocL1 == NIL_RTR0MEMOBJ);
        Assert(!pGVM->dbgfr0.s.paBpLocL1R0);

        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpChunks); i++)
        {
            PDBGFBPCHUNKR0 pBpChunk = &pGVM->dbgfr0.s.aBpChunks[i];

            Assert(pBpChunk->hMemObj == NIL_RTR0MEMOBJ);
            Assert(pBpChunk->hMapObj == NIL_RTR0MEMOBJ);
            Assert(!pBpChunk->paBpBaseSharedR0);
            Assert(!pBpChunk->paBpBaseR0Only);
        }

        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
        {
            PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];

            Assert(pL2Chunk->hMemObj == NIL_RTR0MEMOBJ);
            Assert(pL2Chunk->hMapObj == NIL_RTR0MEMOBJ);
            Assert(!pL2Chunk->paBpL2TblBaseSharedR0);
        }
    }
#endif
}


/**
 * Worker for DBGFR0BpInitReqHandler() that does the actual initialization.
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
 * @param   ppaBpLocL1R3    Where to return the ring-3 L1 lookup table address on success.
 * @thread  EMT(0)
 */
static int dbgfR0BpInitWorker(PGVM pGVM, R3PTRTYPE(volatile uint32_t *) *ppaBpLocL1R3)
{
    /*
     * Figure out how much memory we need for the L1 lookup table and allocate it.
     */
    uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), PAGE_SIZE);

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbL1Loc, false /*fExecutable*/);
    if (RT_FAILURE(rc))
        return rc;
    RT_BZERO(RTR0MemObjAddress(hMemObj), cbL1Loc);

    /* Map it. */
    RTR0MEMOBJ hMapObj;
    rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf(),
                             0 /*offSub*/, cbL1Loc);
    if (RT_SUCCESS(rc))
    {
        pGVM->dbgfr0.s.hMemObjBpLocL1 = hMemObj;
        pGVM->dbgfr0.s.hMapObjBpLocL1 = hMapObj;
        pGVM->dbgfr0.s.paBpLocL1R0    = (volatile uint32_t *)RTR0MemObjAddress(hMemObj);

        /*
         * We're done.
         */
        *ppaBpLocL1R3 = RTR0MemObjAddressR3(hMapObj);
        pGVM->dbgfr0.s.fInit = true;
        return rc;
    }

    RTR0MemObjFree(hMemObj, true);
    return rc;
}
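
/*
 * Illustrative sketch (not part of the VirtualBox sources): the L1 table
 * sizing above in concrete numbers.  The only assumption is the common
 * 4 KiB PAGE_SIZE; the formula is the one used in dbgfR0BpInitWorker().
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const cbPage  = 4096;                                  /* assumed PAGE_SIZE */
    uint32_t const cbRaw   = UINT16_MAX * sizeof(uint32_t);         /* 65535 * 4 = 262140 bytes */
    uint32_t const cbL1Loc = (cbRaw + cbPage - 1) & ~(cbPage - 1);  /* what RT_ALIGN_32 computes */

    /* Prints: 262140 raw -> 262144 aligned (64 pages) */
    printf("%u raw -> %u aligned (%u pages)\n", cbRaw, cbL1Loc, cbL1Loc / cbPage);
    return 0;
}
#endif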


/**
 * Worker for DBGFR0BpOwnerInitReqHandler() that does the actual initialization.
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
 * @param   ppaBpOwnerR3    Where to return the ring-3 breakpoint owner table base address on success.
 * @thread  EMT(0)
 */
static int dbgfR0BpOwnerInitWorker(PGVM pGVM, R3PTRTYPE(void *) *ppaBpOwnerR3)
{
    /*
     * Figure out how much memory we need for the owner tables and allocate it.
     */
    uint32_t const cbBpOwnerR0 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINTR0), PAGE_SIZE);
    uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), PAGE_SIZE);
    uint32_t const cbTotal     = RT_ALIGN_32(cbBpOwnerR0 + cbBpOwnerR3, PAGE_SIZE);

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbTotal, false /*fExecutable*/);
    if (RT_FAILURE(rc))
        return rc;
    RT_BZERO(RTR0MemObjAddress(hMemObj), cbTotal);

    /* Map it. */
    RTR0MEMOBJ hMapObj;
    rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf(),
                             cbBpOwnerR0 /*offSub*/, cbBpOwnerR3);
    if (RT_SUCCESS(rc))
    {
        pGVM->dbgfr0.s.hMemObjBpOwners = hMemObj;
        pGVM->dbgfr0.s.hMapObjBpOwners = hMapObj;
        pGVM->dbgfr0.s.paBpOwnersR0    = (PDBGFBPOWNERINTR0)RTR0MemObjAddress(hMemObj);

        /*
         * We're done.
         */
        *ppaBpOwnerR3 = RTR0MemObjAddressR3(hMapObj);
        return rc;
    }

    RTR0MemObjFree(hMemObj, true);
    return rc;
}
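
/*
 * Illustrative sketch (not part of the VirtualBox sources): why ring-3 never
 * sees the ring-0 owner records.  The element sizes are made-up stand-ins;
 * only the offset arithmetic mirrors dbgfR0BpOwnerInitWorker() above.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

# define OWNER_COUNT_MAX  512u  /* stand-in for DBGF_BP_OWNER_COUNT_MAX */
# define CB_OWNER_R0       16u  /* stand-in for sizeof(DBGFBPOWNERINTR0) */
# define CB_OWNER_SHARED    8u  /* stand-in for sizeof(DBGFBPOWNERINT) */

static uint32_t alignPage(uint32_t cb) { return (cb + 4095u) & ~4095u; }

int main(void)
{
    uint32_t const cbBpOwnerR0 = alignPage(OWNER_COUNT_MAX * CB_OWNER_R0);
    uint32_t const cbBpOwnerR3 = alignPage(OWNER_COUNT_MAX * CB_OWNER_SHARED);

    /* Ring-0 maps the whole object; the user mapping is created with
       offSub = cbBpOwnerR0 and a size of cbBpOwnerR3, so the ring-3 view
       starts past the ring-0-only records and cannot reach them. */
    printf("ring-0 only : [0x%x..0x%x)\n", 0u, cbBpOwnerR0);
    printf("ring-3 view : [0x%x..0x%x)\n", cbBpOwnerR0, cbBpOwnerR0 + cbBpOwnerR3);
    return 0;
}
#endif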


/**
 * Worker for DBGFR0BpChunkAllocReqHandler() that does the actual chunk allocation.
 *
 * Allocates a memory object and divides it up as follows:
 * @verbatim
   --------------------------------------
   ring-0 chunk data
   --------------------------------------
   page alignment padding
   --------------------------------------
   shared chunk data
   --------------------------------------
   @endverbatim
 *
 * @returns VBox status code.
 * @param   pGVM                The global (ring-0) VM structure.
 * @param   idChunk             The chunk ID to allocate.
 * @param   ppBpChunkBaseR3     Where to return the ring-3 chunk base address on success.
 * @thread  EMT(0)
 */
static int dbgfR0BpChunkAllocWorker(PGVM pGVM, uint32_t idChunk, R3PTRTYPE(void *) *ppBpChunkBaseR3)
{
    /*
     * Figure out how much memory we need for the chunk and allocate it.
     */
    uint32_t const cbRing0  = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINTR0), PAGE_SIZE);
    uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), PAGE_SIZE);
    uint32_t const cbTotal  = cbRing0 + cbShared;

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbTotal, false /*fExecutable*/);
    if (RT_FAILURE(rc))
        return rc;
    RT_BZERO(RTR0MemObjAddress(hMemObj), cbTotal);

    /* Map it. */
    RTR0MEMOBJ hMapObj;
    rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf(),
                             cbRing0 /*offSub*/, cbTotal - cbRing0);
    if (RT_SUCCESS(rc))
    {
        PDBGFBPCHUNKR0 pBpChunkR0 = &pGVM->dbgfr0.s.aBpChunks[idChunk];

        pBpChunkR0->hMemObj          = hMemObj;
        pBpChunkR0->hMapObj          = hMapObj;
        pBpChunkR0->paBpBaseR0Only   = (PDBGFBPINTR0)RTR0MemObjAddress(hMemObj);
        pBpChunkR0->paBpBaseSharedR0 = (PDBGFBPINT)&pBpChunkR0->paBpBaseR0Only[DBGF_BP_COUNT_PER_CHUNK];

        /*
         * We're done.
         */
        *ppBpChunkBaseR3 = RTR0MemObjAddressR3(hMapObj);
        return rc;
    }

    RTR0MemObjFree(hMemObj, true);
    return rc;
}
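
/*
 * Illustrative sketch (not part of the VirtualBox sources): how one
 * allocation yields the two base pointers above.  The types and the count
 * are stand-ins (the real DBGFBPINTR0/DBGFBPINT live in DBGFInternal.h),
 * and the page-alignment padding between the halves is left out here.
 */
#if 0
# include <stdint.h>
# include <stdlib.h>

# define BP_COUNT_PER_CHUNK  4096u               /* stand-in for DBGF_BP_COUNT_PER_CHUNK */
typedef struct { uint64_t u64Dummy; } BPINTR0;   /* stand-in for DBGFBPINTR0 */
typedef struct { uint64_t au64[4]; } BPINT;      /* stand-in for DBGFBPINT */

int main(void)
{
    /* One contiguous block: [ ring-0 only | shared ], as in the @verbatim
       diagram in the function comment above. */
    void *pvChunk = calloc(1, BP_COUNT_PER_CHUNK * (sizeof(BPINTR0) + sizeof(BPINT)));
    if (!pvChunk)
        return 1;
    BPINTR0 *paBpBaseR0Only = (BPINTR0 *)pvChunk;
    BPINT   *paBpBaseShared = (BPINT *)&paBpBaseR0Only[BP_COUNT_PER_CHUNK];

    /* Entry i has a private ring-0 half and a half shared with ring-3. */
    paBpBaseR0Only[0].u64Dummy = 42;   /* never mapped into ring-3 */
    paBpBaseShared[0].au64[0]  = 42;   /* visible through the user mapping */

    free(pvChunk);
    return 0;
}
#endif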


/**
 * Worker for DBGFR0BpL2TblChunkAllocReqHandler() that does the actual chunk allocation.
 *
 * @returns VBox status code.
 * @param   pGVM                The global (ring-0) VM structure.
 * @param   idChunk             The chunk ID to allocate.
 * @param   ppL2ChunkBaseR3     Where to return the ring-3 chunk base address on success.
 * @thread  EMT(0)
 */
static int dbgfR0BpL2TblChunkAllocWorker(PGVM pGVM, uint32_t idChunk, R3PTRTYPE(void *) *ppL2ChunkBaseR3)
{
    /*
     * Figure out how much memory we need for the chunk and allocate it.
     */
    uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), PAGE_SIZE);

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbTotal, false /*fExecutable*/);
    if (RT_FAILURE(rc))
        return rc;
    RT_BZERO(RTR0MemObjAddress(hMemObj), cbTotal);

    /* Map it. */
    RTR0MEMOBJ hMapObj;
    rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf(),
                             0 /*offSub*/, cbTotal);
    if (RT_SUCCESS(rc))
    {
        PDBGFBPL2TBLCHUNKR0 pL2ChunkR0 = &pGVM->dbgfr0.s.aBpL2TblChunks[idChunk];

        pL2ChunkR0->hMemObj               = hMemObj;
        pL2ChunkR0->hMapObj               = hMapObj;
        pL2ChunkR0->paBpL2TblBaseSharedR0 = (PDBGFBPL2ENTRY)RTR0MemObjAddress(hMemObj);

        /*
         * We're done.
         */
        *ppL2ChunkBaseR3 = RTR0MemObjAddressR3(hMapObj);
        return rc;
    }

    RTR0MemObjFree(hMemObj, true);
    return rc;
}


/**
 * Used by ring-3 DBGF to fully initialize the breakpoint manager for operation.
 *
 * @returns VBox status code.
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pReq    Pointer to the request buffer.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) DBGFR0BpInitReqHandler(PGVM pGVM, PDBGFBPINITREQ pReq)
{
    LogFlow(("DBGFR0BpInitReqHandler:\n"));

    /*
     * Validate the request.
     */
    AssertReturn(pReq->Hdr.cbReq == sizeof(*pReq), VERR_INVALID_PARAMETER);

    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    AssertRCReturn(rc, rc);

    AssertReturn(!pGVM->dbgfr0.s.fInit, VERR_WRONG_ORDER);

    return dbgfR0BpInitWorker(pGVM, &pReq->paBpLocL1R3);
}
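
/*
 * Illustrative sketch (not part of this file): roughly how the ring-3 side
 * issues this request.  The real caller lives in DBGFR3Bp.cpp; the sequence
 * below is an assumption based on the usual VMMR0 request conventions, and
 * the same pattern applies to the three request handlers that follow.
 */
#if 0
    DBGFBPINITREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);             /* checked by the handler above */
    Req.paBpLocL1R3  = NIL_RTR3PTR;
    int rc = VMMR3CallR0Emt(pVM, pVCpu /* EMT(0) */, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Req.paBpLocL1R3 now holds the ring-3 mapping of the L1 table. */
    }
#endif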


/**
 * Used by ring-3 DBGF to initialize the breakpoint owner table for operation.
 *
 * @returns VBox status code.
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pReq    Pointer to the request buffer.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) DBGFR0BpOwnerInitReqHandler(PGVM pGVM, PDBGFBPOWNERINITREQ pReq)
{
    LogFlow(("DBGFR0BpOwnerInitReqHandler:\n"));

    /*
     * Validate the request.
     */
    AssertReturn(pReq->Hdr.cbReq == sizeof(*pReq), VERR_INVALID_PARAMETER);

    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    AssertRCReturn(rc, rc);

    AssertReturn(!pGVM->dbgfr0.s.paBpOwnersR0, VERR_WRONG_ORDER);

    return dbgfR0BpOwnerInitWorker(pGVM, &pReq->paBpOwnerR3);
}


/**
 * Used by ring-3 DBGF to allocate a given chunk in the global breakpoint table.
 *
 * @returns VBox status code.
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pReq    Pointer to the request buffer.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) DBGFR0BpChunkAllocReqHandler(PGVM pGVM, PDBGFBPCHUNKALLOCREQ pReq)
{
    LogFlow(("DBGFR0BpChunkAllocReqHandler:\n"));

    /*
     * Validate the request.
     */
    AssertReturn(pReq->Hdr.cbReq == sizeof(*pReq), VERR_INVALID_PARAMETER);

    uint32_t const idChunk = pReq->idChunk;
    AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_INVALID_PARAMETER);

    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    AssertRCReturn(rc, rc);

    AssertReturn(pGVM->dbgfr0.s.fInit, VERR_WRONG_ORDER);
    AssertReturn(pGVM->dbgfr0.s.aBpChunks[idChunk].hMemObj == NIL_RTR0MEMOBJ, VERR_INVALID_PARAMETER);

    return dbgfR0BpChunkAllocWorker(pGVM, idChunk, &pReq->pChunkBaseR3);
}


/**
 * Used by ring-3 DBGF to allocate a given chunk in the global L2 lookup table.
 *
 * @returns VBox status code.
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pReq    Pointer to the request buffer.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) DBGFR0BpL2TblChunkAllocReqHandler(PGVM pGVM, PDBGFBPL2TBLCHUNKALLOCREQ pReq)
{
    LogFlow(("DBGFR0BpL2TblChunkAllocReqHandler:\n"));

    /*
     * Validate the request.
     */
    AssertReturn(pReq->Hdr.cbReq == sizeof(*pReq), VERR_INVALID_PARAMETER);

    uint32_t const idChunk = pReq->idChunk;
    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_INVALID_PARAMETER);

    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    AssertRCReturn(rc, rc);

    AssertReturn(pGVM->dbgfr0.s.fInit, VERR_WRONG_ORDER);
    AssertReturn(pGVM->dbgfr0.s.aBpL2TblChunks[idChunk].hMemObj == NIL_RTR0MEMOBJ, VERR_INVALID_PARAMETER);

    return dbgfR0BpL2TblChunkAllocWorker(pGVM, idChunk, &pReq->pChunkBaseR3);
}