VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp@ 105745

Last change on this file since 105745 was 104840, checked in by vboxsync, 6 months ago

VMM/PGM: Refactored RAM ranges, MMIO2 ranges and ROM ranges and added MMIO ranges (to PGM) so we can safely access RAM ranges at runtime w/o fear of them ever being freed up. It is now only possible to create these during VM creation and loading, and they will live till VM destruction (except for MMIO2 which could be destroyed during loading (PCNet fun)). The lookup handling is by table instead of pointer tree. No more ring-0 pointers in shared data. bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.1 KB
Line 
1/* $Id: PGMSharedPage.cpp 104840 2024-06-05 00:59:51Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Shared page handling
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_SHARED
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/stam.h>
36#include <VBox/vmm/uvm.h>
37#include "PGMInternal.h"
38#include <VBox/vmm/vmcc.h>
39#include <VBox/sup.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <VBox/log.h>
43#include <VBox/VMMDev.h>
44#include <iprt/asm.h>
45#include <iprt/assert.h>
46#include <iprt/mem.h>
47#include <iprt/string.h>
48
49#include "PGMInline.h"
50
51
52#ifdef VBOX_WITH_PAGE_SHARING
53
54
55/*********************************************************************************************************************************
56* Global Variables *
57*********************************************************************************************************************************/
# ifdef VBOX_STRICT
/** Keep a copy of all registered shared modules for the .pgmcheckduppages debugger command.
 * NULL entries are free slots; unregistration punches NULL holes, so live
 * entries are not necessarily contiguous at the front of the table. */
static PGMMREGISTERSHAREDMODULEREQ g_apSharedModules[512] = {0};
/** Number of live (non-NULL) entries in g_apSharedModules. */
static unsigned g_cSharedModules = 0;
# endif /* VBOX_STRICT */
63
64
65/**
66 * Registers a new shared module for the VM
67 *
68 * @returns VBox status code.
69 * @param pVM The cross context VM structure.
70 * @param enmGuestOS Guest OS type.
71 * @param pszModuleName Module name.
72 * @param pszVersion Module version.
73 * @param GCBaseAddr Module base address.
74 * @param cbModule Module size.
75 * @param cRegions Number of shared region descriptors.
76 * @param paRegions Shared region(s).
77 *
78 * @todo This should be a GMMR3 call. No need to involve GMM here.
79 */
80VMMR3DECL(int) PGMR3SharedModuleRegister(PVM pVM, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
81 RTGCPTR GCBaseAddr, uint32_t cbModule, uint32_t cRegions,
82 VMMDEVSHAREDREGIONDESC const *paRegions)
83{
84 Log(("PGMR3SharedModuleRegister family=%d name=%s version=%s base=%RGv size=%x cRegions=%d\n",
85 enmGuestOS, pszModuleName, pszVersion, GCBaseAddr, cbModule, cRegions));
86
87 /*
88 * Sanity check.
89 */
90 AssertReturn(cRegions <= VMMDEVSHAREDREGIONDESC_MAX, VERR_INVALID_PARAMETER);
91 if (!pVM->pgm.s.fPageFusionAllowed)
92 return VERR_NOT_SUPPORTED;
93
94 /*
95 * Allocate and initialize a GMM request.
96 */
97 PGMMREGISTERSHAREDMODULEREQ pReq;
98 pReq = (PGMMREGISTERSHAREDMODULEREQ)RTMemAllocZ(RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]));
99 AssertReturn(pReq, VERR_NO_MEMORY);
100
101 pReq->enmGuestOS = enmGuestOS;
102 pReq->GCBaseAddr = GCBaseAddr;
103 pReq->cbModule = cbModule;
104 pReq->cRegions = cRegions;
105 for (uint32_t i = 0; i < cRegions; i++)
106 pReq->aRegions[i] = paRegions[i];
107
108 int rc = RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName);
109 if (RT_SUCCESS(rc))
110 {
111 rc = RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion);
112 if (RT_SUCCESS(rc))
113 {
114 /*
115 * Issue the request. In strict builds, do some local tracking.
116 */
117 pgmR3PhysAssertSharedPageChecksums(pVM);
118 rc = GMMR3RegisterSharedModule(pVM, pReq);
119 if (RT_SUCCESS(rc))
120 rc = pReq->rc;
121 AssertMsg(rc == VINF_SUCCESS || rc == VINF_GMM_SHARED_MODULE_ALREADY_REGISTERED, ("%Rrc\n", rc));
122
123# ifdef VBOX_STRICT
124 if ( rc == VINF_SUCCESS
125 && g_cSharedModules < RT_ELEMENTS(g_apSharedModules))
126 {
127 unsigned i;
128 for (i = 0; i < RT_ELEMENTS(g_apSharedModules); i++)
129 if (g_apSharedModules[i] == NULL)
130 {
131
132 size_t const cbSharedModule = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]);
133 g_apSharedModules[i] = (PGMMREGISTERSHAREDMODULEREQ)RTMemDup(pReq, cbSharedModule);
134 g_cSharedModules++;
135 break;
136 }
137 Assert(i < RT_ELEMENTS(g_apSharedModules));
138 }
139# endif /* VBOX_STRICT */
140 if (RT_SUCCESS(rc))
141 rc = VINF_SUCCESS;
142 }
143 }
144
145 RTMemFree(pReq);
146 return rc;
147}
148
149
150/**
151 * Unregisters a shared module for the VM
152 *
153 * @returns VBox status code.
154 * @param pVM The cross context VM structure.
155 * @param pszModuleName Module name.
156 * @param pszVersion Module version.
157 * @param GCBaseAddr Module base address.
158 * @param cbModule Module size.
159 *
160 * @todo This should be a GMMR3 call. No need to involve GMM here.
161 */
162VMMR3DECL(int) PGMR3SharedModuleUnregister(PVM pVM, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
163{
164 Log(("PGMR3SharedModuleUnregister name=%s version=%s base=%RGv size=%x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
165
166 AssertMsgReturn(cbModule > 0 && cbModule < _1G, ("%u\n", cbModule), VERR_OUT_OF_RANGE);
167 if (!pVM->pgm.s.fPageFusionAllowed)
168 return VERR_NOT_SUPPORTED;
169
170 /*
171 * Forward the request to GMM (ring-0).
172 */
173 PGMMUNREGISTERSHAREDMODULEREQ pReq = (PGMMUNREGISTERSHAREDMODULEREQ)RTMemAlloc(sizeof(*pReq));
174 AssertReturn(pReq, VERR_NO_MEMORY);
175
176 pReq->GCBaseAddr = GCBaseAddr;
177 pReq->u32Alignment = 0;
178 pReq->cbModule = cbModule;
179
180 int rc = RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName);
181 if (RT_SUCCESS(rc))
182 {
183 rc = RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion);
184 if (RT_SUCCESS(rc))
185 {
186 pgmR3PhysAssertSharedPageChecksums(pVM);
187 rc = GMMR3UnregisterSharedModule(pVM, pReq);
188 pgmR3PhysAssertSharedPageChecksums(pVM);
189
190# ifdef VBOX_STRICT
191 /*
192 * Update our local tracking.
193 */
194 for (unsigned i = 0; i < g_cSharedModules; i++)
195 {
196 if ( g_apSharedModules[i]
197 && !strcmp(g_apSharedModules[i]->szName, pszModuleName)
198 && !strcmp(g_apSharedModules[i]->szVersion, pszVersion))
199 {
200 RTMemFree(g_apSharedModules[i]);
201 g_apSharedModules[i] = NULL;
202 g_cSharedModules--;
203 break;
204 }
205 }
206# endif /* VBOX_STRICT */
207 }
208 }
209
210 RTMemFree(pReq);
211 return rc;
212}
213
214
215/**
216 * Rendezvous callback that will be called once.
217 *
218 * @returns VBox strict status code.
219 * @param pVM The cross context VM structure.
220 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
221 * @param pvUser Pointer to a VMCPUID with the requester's ID.
222 */
223static DECLCALLBACK(VBOXSTRICTRC) pgmR3SharedModuleRegRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
224{
225 VMCPUID idCpu = *(VMCPUID *)pvUser;
226
227 /* Execute on the VCPU that issued the original request to make sure we're in the right cr3 context. */
228 if (pVCpu->idCpu != idCpu)
229 {
230 Assert(pVM->cCpus > 1);
231 return VINF_SUCCESS;
232 }
233
234
235 /* Flush all pending handy page operations before changing any shared page assignments. */
236 int rc = PGMR3PhysAllocateHandyPages(pVM);
237 AssertRC(rc);
238
239 /*
240 * Lock it here as we can't deal with busy locks in this ring-0 path.
241 */
242 LogFlow(("pgmR3SharedModuleRegRendezvous: start (%d)\n", pVM->pgm.s.cSharedPages));
243
244 PGM_LOCK_VOID(pVM);
245 pgmR3PhysAssertSharedPageChecksums(pVM);
246 rc = GMMR3CheckSharedModules(pVM);
247 pgmR3PhysAssertSharedPageChecksums(pVM);
248 PGM_UNLOCK(pVM);
249 AssertLogRelRC(rc);
250
251 LogFlow(("pgmR3SharedModuleRegRendezvous: done (%d)\n", pVM->pgm.s.cSharedPages));
252 return rc;
253}
254
255/**
256 * Shared module check helper (called on the way out).
257 *
258 * @param pVM The cross context VM structure.
259 * @param idCpu VCPU id.
260 */
261static DECLCALLBACK(void) pgmR3CheckSharedModulesHelper(PVM pVM, VMCPUID idCpu)
262{
263 /* We must stall other VCPUs as we'd otherwise have to send IPI flush commands for every single change we make. */
264 STAM_REL_PROFILE_START(&pVM->pgm.s.StatShModCheck, a);
265 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, pgmR3SharedModuleRegRendezvous, &idCpu);
266 AssertRCSuccess(rc);
267 STAM_REL_PROFILE_STOP(&pVM->pgm.s.StatShModCheck, a);
268}
269
270
271/**
272 * Check all registered modules for changes.
273 *
274 * @returns VBox status code.
275 * @param pVM The cross context VM structure.
276 */
277VMMR3DECL(int) PGMR3SharedModuleCheckAll(PVM pVM)
278{
279 if (!pVM->pgm.s.fPageFusionAllowed)
280 return VERR_NOT_SUPPORTED;
281
282 /* Queue the actual registration as we are under the IOM lock right now. Perform this operation on the way out. */
283 return VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3CheckSharedModulesHelper, 2, pVM, VMMGetCpuId(pVM));
284}
285
286
287# ifdef DEBUG
288/**
289 * Query the state of a page in a shared module
290 *
291 * @returns VBox status code.
292 * @param pVM The cross context VM structure.
293 * @param GCPtrPage Page address.
294 * @param pfShared Shared status (out).
295 * @param pfPageFlags Page flags (out).
296 */
297VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
298{
299 /* Debug only API for the page fusion testcase. */
300 PGMPTWALK Walk;
301
302 PGM_LOCK_VOID(pVM);
303
304 int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &Walk);
305 switch (rc)
306 {
307 case VINF_SUCCESS:
308 {
309 PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
310 if (pPage)
311 {
312 *pfShared = PGM_PAGE_IS_SHARED(pPage);
313 *pfPageFlags = Walk.fEffective;
314 }
315 else
316 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
317 break;
318 }
319
320 case VERR_PAGE_NOT_PRESENT:
321 case VERR_PAGE_TABLE_NOT_PRESENT:
322 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
323 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
324 *pfShared = false;
325 *pfPageFlags = 0;
326 rc = VINF_SUCCESS;
327 break;
328
329 default:
330 break;
331 }
332
333 PGM_UNLOCK(pVM);
334 return rc;
335}
336# endif /* DEBUG */
337
338# ifdef VBOX_STRICT
339
/**
 * @callback_method_impl{FNDBGCCMD, The '.pgmcheckduppages' command.}
 *
 * Walks every RAM range and classifies each RAM page by state, additionally
 * probing allocated pages for all-zero content and GMM-level duplicates.
 * Prints a per-category summary (page counts and MB, assuming 4K pages).
 */
DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    /* Per-state page counters accumulated over all RAM ranges. */
    unsigned cBallooned = 0;
    unsigned cShared = 0;
    unsigned cZero = 0;
    unsigned cUnique = 0;
    unsigned cDuplicate = 0;
    unsigned cAllocZero = 0;
    unsigned cPages = 0;
    NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PGM_LOCK_VOID(pVM);

    /* Iterate the RAM range lookup table by ID; entries may be NULL (only ID 0, per the assertion below). */
    uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
    for (uint32_t idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
    {
        PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
        Assert(pRam || idRamRange == 0);
        if (!pRam) continue;
        Assert(pRam->idRange == idRamRange);

        /* Walk every guest page in this range. */
        PPGMPAGE pPage = &pRam->aPages[0];
        RTGCPHYS GCPhys = pRam->GCPhys;
        uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
        while (cLeft-- > 0)
        {
            /* Only RAM pages are classified; MMIO/ROM/etc. are skipped. */
            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
            {
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ZERO:
                        cZero++;
                        break;

                    case PGM_PAGE_STATE_BALLOONED:
                        cBallooned++;
                        break;

                    case PGM_PAGE_STATE_SHARED:
                        cShared++;
                        break;

                    case PGM_PAGE_STATE_ALLOCATED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                    {
                        /* Check if the page was allocated, but completely zero. */
                        PGMPAGEMAPLOCK PgMpLck;
                        const void *pvPage;
                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
                        if ( RT_SUCCESS(rc)
                            && ASMMemIsZero(pvPage, GUEST_PAGE_SIZE))
                            cAllocZero++;
                        /* NOTE(review): on mapping failure this falls through to the
                           duplicate/unique classification rather than skipping the page. */
                        else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
                            cDuplicate++;
                        else
                            cUnique++;
                        if (RT_SUCCESS(rc))
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        break;
                    }

                    default:
                        AssertFailed();
                        break;
                }
            }

            /* next */
            pPage++;
            GCPhys += GUEST_PAGE_SIZE;
            cPages++;
            /* Give some feedback for every processed megabyte. */
            /* NOTE(review): the 0x7f mask fires every 128 pages (512 KiB with 4K
               pages), not every megabyte as stated above — confirm which was meant. */
            if ((cPages & 0x7f) == 0)
                pCmdHlp->pfnPrintf(pCmdHlp, NULL, ".");
        }
    }

    PGM_UNLOCK(pVM);

    /* Summary; the /256 converts a 4K-page count into megabytes. */
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "\nNumber of zero pages %08x (%d MB)\n", cZero, cZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of alloczero pages %08x (%d MB)\n", cAllocZero, cAllocZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of ballooned pages %08x (%d MB)\n", cBallooned, cBallooned / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of shared pages %08x (%d MB)\n", cShared, cShared / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of unique pages %08x (%d MB)\n", cUnique, cUnique / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of duplicate pages %08x (%d MB)\n", cDuplicate, cDuplicate / 256);
    return VINF_SUCCESS;
}
432
433
434/**
435 * @callback_method_impl{FNDBGCCMD, The '.pgmsharedmodules' command.}
436 */
437DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
438{
439 NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
440 PVM pVM = pUVM->pVM;
441 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
442
443 PGM_LOCK_VOID(pVM);
444 for (unsigned i = 0; i < RT_ELEMENTS(g_apSharedModules); i++)
445 {
446 if (g_apSharedModules[i])
447 {
448 pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Shared module %s (%s):\n", g_apSharedModules[i]->szName, g_apSharedModules[i]->szVersion);
449 for (unsigned j = 0; j < g_apSharedModules[i]->cRegions; j++)
450 pCmdHlp->pfnPrintf(pCmdHlp, NULL, "--- Region %d: base %RGv size %x\n", j, g_apSharedModules[i]->aRegions[j].GCRegionAddr, g_apSharedModules[i]->aRegions[j].cbRegion);
451 }
452 }
453 PGM_UNLOCK(pVM);
454
455 return VINF_SUCCESS;
456}
457
458# endif /* VBOX_STRICT*/
459#endif /* VBOX_WITH_PAGE_SHARING */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette