VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMBth.h@18927

Last change on this file was r18927, checked in by vboxsync, 16 years ago (2009-04-16):

Big step to separate VMM data structures for guest SMP. (pgm, em)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 10.8 KB
/* $Id: PGMBth.h 18927 2009-04-16 11:41:38Z vboxsync $ */
/** @file
 * VBox - Page Manager / Monitor, Shadow+Guest Paging Template.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */
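
/*
 * Note: This header is a code template.  It is #included once for every
 * shadow/guest paging mode combination, with PGM_SHW_TYPE, PGM_GST_TYPE and
 * the PGM_BTH_DECL/PGM_BTH_NAME macros predefined by the including PGM source
 * file, so each inclusion emits a uniquely named set of the functions declared
 * below.  Illustrative instantiation only; the exact defines live in the
 * including source files:
 *
 *     #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *     #define PGM_GST_TYPE        PGM_TYPE_REAL
 *     #define PGM_BTH_NAME(name)  pgmBth32BitReal##name
 *     #include "PGMBth.h"
 */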


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_BTH_DECL(int, Enter)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, Relocate)(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta);

PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
PGM_BTH_DECL(int, SyncCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
PGM_BTH_DECL(int, SyncPage)(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
PGM_BTH_DECL(int, MapCR3)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, UnmapCR3)(PVM pVM, PVMCPU pVCpu);
__END_DECLS


/**
 * Initializes the "both" (shadow+guest) part of the paging mode data.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pModeData       The paging mode data to initialize.
 * @param   fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
 *                          This is used early in the init process to avoid trouble with PDM
 *                          not being initialized yet.
 */
PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
{
    Assert(pModeData->uShwType == PGM_SHW_TYPE); Assert(pModeData->uGstType == PGM_GST_TYPE);

    /* Ring 3 */
    pModeData->pfnR3BthRelocate             = PGM_BTH_NAME(Relocate);
    pModeData->pfnR3BthSyncCR3              = PGM_BTH_NAME(SyncCR3);
    pModeData->pfnR3BthInvalidatePage       = PGM_BTH_NAME(InvalidatePage);
    pModeData->pfnR3BthSyncPage             = PGM_BTH_NAME(SyncPage);
    pModeData->pfnR3BthPrefetchPage         = PGM_BTH_NAME(PrefetchPage);
    pModeData->pfnR3BthVerifyAccessSyncPage = PGM_BTH_NAME(VerifyAccessSyncPage);
#ifdef VBOX_STRICT
    pModeData->pfnR3BthAssertCR3            = PGM_BTH_NAME(AssertCR3);
#endif
    pModeData->pfnR3BthMapCR3               = PGM_BTH_NAME(MapCR3);
    pModeData->pfnR3BthUnmapCR3             = PGM_BTH_NAME(UnmapCR3);

    if (fResolveGCAndR0)
    {
        int rc;

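        /* The raw-mode context (RC) and ring-0 (R0) parts of the VMM live in
         * separate images, so the addresses of their instantiations of these
         * functions are resolved by symbol name through the PDM loader. */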
#if PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
        /* GC */
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(Trap0eHandler),        &pModeData->pfnRCBthTrap0eHandler);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(Trap0eHandler), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(InvalidatePage),       &pModeData->pfnRCBthInvalidatePage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncCR3),              &pModeData->pfnRCBthSyncCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncPage),             &pModeData->pfnRCBthSyncPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(PrefetchPage),         &pModeData->pfnRCBthPrefetchPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), &pModeData->pfnRCBthVerifyAccessSyncPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), rc), rc);
# ifdef VBOX_STRICT
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(AssertCR3),            &pModeData->pfnRCBthAssertCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(AssertCR3), rc), rc);
# endif
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(MapCR3),               &pModeData->pfnRCBthMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(UnmapCR3),             &pModeData->pfnRCBthUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(UnmapCR3), rc), rc);
#endif /* Not AMD64 shadow paging. */

        /* Ring 0 */
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(Trap0eHandler),        &pModeData->pfnR0BthTrap0eHandler);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(Trap0eHandler), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(InvalidatePage),       &pModeData->pfnR0BthInvalidatePage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(InvalidatePage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(SyncCR3),              &pModeData->pfnR0BthSyncCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(SyncPage),             &pModeData->pfnR0BthSyncPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(PrefetchPage),         &pModeData->pfnR0BthPrefetchPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(PrefetchPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(VerifyAccessSyncPage), &pModeData->pfnR0BthVerifyAccessSyncPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(VerifyAccessSyncPage), rc), rc);
#ifdef VBOX_STRICT
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(AssertCR3),            &pModeData->pfnR0BthAssertCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(AssertCR3), rc), rc);
#endif
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(MapCR3),               &pModeData->pfnR0BthMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(UnmapCR3),             &pModeData->pfnR0BthUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(UnmapCR3), rc), rc);
    }
    return VINF_SUCCESS;
}


/**
 * Enters the shadow+guest mode.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_BTH_DECL(int, Enter)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
{
    /* Here we deal with allocation of the root shadow page table for real and protected mode during mode switches;
     * other modes rely on MapCR3/UnmapCR3 to set up the shadow root page tables.
     */
#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
          || PGM_SHW_TYPE == PGM_TYPE_PAE \
          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
      && (   PGM_GST_TYPE == PGM_TYPE_REAL \
          || PGM_GST_TYPE == PGM_TYPE_PROT))

    Assert(!HWACCMIsNestedPagingActive(pVM));
    /* Note: we only really need shadow paging in real and protected mode for VT-x and AMD-V (excluding nested paging/EPT modes),
     * but any calls to GC need a proper shadow page setup as well.
     */
    /* Free the previous root mapping if still active. */
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        Assert(pVCpu->pgm.s.pShwPageCR3R3->enmKind != PGMPOOLKIND_FREE);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3RC = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;
        pVCpu->pgm.s.iShwUser      = 0;
        pVCpu->pgm.s.iShwUserTable = 0;
    }

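    /* Real-mode and protected-mode guests do not use paging, so there is no
     * guest CR3 to shadow here.  A dummy physical address (bit 63 set, which
     * no guest RAM can occupy) is used instead to identify the shadow root
     * page in the pool. */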
    /* Construct a fake address. */
    GCPhysCR3 = RT_BIT_64(63);
    pVCpu->pgm.s.iShwUser      = SHW_POOL_ROOT_IDX;
    pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
    int rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable, &pVCpu->pgm.s.pShwPageCR3R3);
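    /* A pool flush throws away all shadow pages; VM_FF_PGM_SYNC_CR3 is already
     * set in that case (asserted below), so just report it to the caller. */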
    if (rc == VERR_PGM_POOL_FLUSHED)
    {
        Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
        Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
        return VINF_PGM_SYNC_CR3;
    }
    AssertRCReturn(rc, rc);

    /* Mark the page as locked; disallow flushing. */
    pgmPoolLockPage(pPool, pVCpu->pgm.s.pShwPageCR3R3);

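    /* The shadow root page is referenced from ring-3, ring-0 and the raw-mode
     * context, so derive the R0 and RC addresses from the ring-3 pointer. */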
    pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.pShwPageCR3R3);
    pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);

    /* Set the current hypervisor CR3. */
    CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu));

    /* Apply all hypervisor mappings to the new CR3. */
    return pgmMapActivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
#else
    return VINF_SUCCESS;
#endif
}


/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   offDelta    The relocation offset.
 */
PGM_BTH_DECL(int, Relocate)(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta)
{
    /* nothing special to do here - InitData does the job. */
    return VINF_SUCCESS;
}