VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 26224

Last change on this file since 26224 was 26152, checked in by vboxsync, 15 years ago

VMM: pdm.h and @copydoc cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.6 KB
Line 
1/* $Id: PATMSSM.cpp 26152 2010-02-02 16:00:35Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/cpum.h>
30#include <VBox/mm.h>
31#include <VBox/ssm.h>
32#include <VBox/param.h>
33#include <iprt/avl.h>
34#include "PATMInternal.h"
35#include "PATMPatch.h"
36#include "PATMA.h"
37#include <VBox/vm.h>
38#include <VBox/csam.h>
39
40#include <VBox/dbg.h>
41#include <VBox/err.h>
42#include <VBox/log.h>
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/string.h>
46#include <VBox/dis.h>
47#include <VBox/disopcode.h>
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
/** Converts the pointer member @a a in place into an offset relative to @a b
 *  (stores the difference back through a type-punned uintptr_t write). */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Reverses PATM_SUBTRACT_PTR: rebases the offset in @a a onto @a b in place. */
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
54
55/*******************************************************************************
56* Internal Functions *
57*******************************************************************************/
58static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
59
60/*******************************************************************************
61* Global Variables *
62*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Host-context (HC) pointers and all statistics counters are marked as
 * ignored (recalculated/reset on load); raw-mode context (RC/GC) pointers
 * are stored as 32-bit values.  The padding entries keep the layout
 * compatible between 32-bit and 64-bit hosts.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* The statistics below are not restored; they are ignored on load. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
159
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * All fields are saved; the three raw-mode context addresses are stored
 * as RC pointers, everything else as plain 32-bit members.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
184
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * Only the AVL keys of the two tree cores are saved (the tree linkage is
 * rebuilt on load); HC pointers and the cacheRec scratch area are ignored.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHREC, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pPrivInstrHC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHREC, patch.cacheRec.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.cacheRec.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
238
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Core.Key and pRelocPos are HC pointers squeezed into 32 bits: the key
 * is (ab)used to carry the fixup type and pRelocPos is converted to an
 * offset into patch memory before saving (see patmSaveFixupRecords).
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
256
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Only the key and the payload fields are saved; the AVL tree linkage
 * is ignored and rebuilt when the records are re-inserted on load.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
276
277
278#ifdef VBOX_STRICT
279/**
280 * Callback function for RTAvlPVDoWithAll
281 *
282 * Counts the number of patches in the tree
283 *
284 * @returns VBox status code.
285 * @param pNode Current node
286 * @param pcPatches Pointer to patch counter (uint32_t)
287 */
288static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
289{
290 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
291 return VINF_SUCCESS;
292}
293
294/**
295 * Callback function for RTAvlU32DoWithAll
296 *
297 * Counts the number of patches in the tree
298 *
299 * @returns VBox status code.
300 * @param pNode Current node
301 * @param pcPatches Pointer to patch counter (uint32_t)
302 */
303static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
304{
305 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
306 return VINF_SUCCESS;
307}
308#endif /* VBOX_STRICT */
309
310/**
311 * Callback function for RTAvloU32DoWithAll
312 *
313 * Counts the number of patches in the tree
314 *
315 * @returns VBox status code.
316 * @param pNode Current node
317 * @param pcPatches Pointer to patch counter
318 */
319static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
320{
321 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
322 return VINF_SUCCESS;
323}
324
325/**
326 * Callback function for RTAvlU32DoWithAll
327 *
328 * Saves all patch to guest lookup records.
329 *
330 * @returns VBox status code.
331 * @param pNode Current node
332 * @param pVM1 VM Handle
333 */
334static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
335{
336 PVM pVM = (PVM)pVM1;
337 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
338 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
339
340 /* Save the lookup record. */
341 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
342 AssertRCReturn(rc, rc);
343
344 return VINF_SUCCESS;
345}
346
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves all fixup (relocation) records of a patch.  The HC relocation
 * pointer is converted to an offset into patch memory, and for absolute
 * fixups the node key is overloaded with a fixup-type marker so the
 * target can be re-resolved on load.
 *
 * @returns VBox status code.
 * @param   pNode       Current node (a RELOCREC).
 * @param   pVM1        VM Handle.
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    /* Work on a local copy so the live record in the tree stays intact. */
    RELOCREC rec = *(PRELOCREC)pNode;
    /* Taken BEFORE the conversion below, so it still points at the fixup
       location inside HC patch memory. */
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    }

    /* Save the (converted) record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
392
393
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: the patch record
 * itself (from a sanitized local copy), followed by its fixup records and
 * its patch-to-guest lookup records.
 *
 * @returns VBox status code.
 * @param   pNode       Current node (a PATMPATCHREC).
 * @param   pVM1        VM Handle.
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM           pVM    = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHREC  patch  = *pPatch;  /* local copy; the live record is not modified */
    PSSMHANDLE    pSSM   = pVM->patm.s.savedstate.pSSM;
    int rc;

    /* Global function patches are saved/handled separately. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Zero the HC-only disassembly cache in the copy before writing it out. */
    memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Cross-check the recorded fixup count against the actual tree size. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    /* Same cross-check for the patch-to-guest lookup records. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}
447
/**
 * Execute state save operation.
 *
 * Writes, in order: the PATM structure (with HC pointers cleared), the raw
 * patch memory, the GC state, the PATM stack page, and finally every patch
 * record with its fixup and lookup records.  The load path must consume
 * the stream in exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;  /* local copy so HC pointers can be stripped below */
    int  rc;

    /* Stash the SSM handle where the tree-enumeration callbacks can reach it. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC  = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
511
512/**
513 * Execute state load operation.
514 *
515 * @returns VBox status code.
516 * @param pVM VM Handle.
517 * @param pSSM SSM operation handle.
518 * @param uVersion Data layout version.
519 * @param uPass The data pass.
520 */
521DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
522{
523 PATM patmInfo;
524 int rc;
525
526 if ( uVersion != PATM_SSM_VERSION
527 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
528 && uVersion != PATM_SSM_VERSION_VER16
529#ifdef PATM_WITH_NEW_SSM
530 && uVersion != PATM_SSM_VERSION_GETPUTMEM)
531#else
532 )
533#endif
534 {
535 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
536 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
537 }
538 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
539
540 pVM->patm.s.savedstate.pSSM = pSSM;
541
542 /*
543 * Restore PATM structure
544 */
545#ifdef PATM_WITH_NEW_SSM
546 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
547 {
548#endif
549#if 0
550 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
551#else
552 RT_ZERO(patmInfo);
553 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
554#endif
555 AssertRCReturn(rc, rc);
556
557#ifdef PATM_WITH_NEW_SSM
558 }
559 else
560 {
561 memset(&patmInfo, 0, sizeof(patmInfo));
562
563 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
564 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
565 AssertRCReturn(rc, rc);
566
567 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
568 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
569 AssertRCReturn(rc, rc);
570
571 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
572 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
573 AssertRCReturn(rc, rc);
574
575 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
576 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
577 AssertRCReturn(rc, rc);
578
579 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
580 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
581 AssertRCReturn(rc, rc);
582
583 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
584 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
585 AssertRCReturn(rc, rc);
586
587 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
588 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
589 AssertRCReturn(rc, rc);
590
591 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
592 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
593 AssertRCReturn(rc, rc);
594
595 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
596 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
597 AssertRCReturn(rc, rc);
598
599 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
600 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
601 AssertRCReturn(rc, rc);
602
603 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
604 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
605 AssertRCReturn(rc, rc);
606
607 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
608 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
609 AssertRCReturn(rc, rc);
610
611 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
612 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
613 AssertRCReturn(rc, rc);
614
615 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
616 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
617 AssertRCReturn(rc, rc);
618
619 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
620 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
621 AssertRCReturn(rc, rc);
622
623 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
624 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
625 AssertRCReturn(rc, rc);
626
627 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
628 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
629 AssertRCReturn(rc, rc);
630
631 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
632 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
633 AssertRCReturn(rc, rc);
634
635 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
636 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
637 AssertRCReturn(rc, rc);
638
639 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
640 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
641 AssertRCReturn(rc, rc);
642
643 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
644 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
645 AssertRCReturn(rc, rc);
646
647 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
648 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
649 AssertRCReturn(rc, rc);
650
651 }
652#endif
653
654 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
655 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
656 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
657 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
658 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
659 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
660 {
661 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
662 return VERR_SSM_INVALID_STATE;
663 }
664
665 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
666 {
667 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
668 return VERR_SSM_INVALID_STATE;
669 }
670 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
671 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
672 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
673 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
674
675 /* Lowest and highest patched instruction */
676 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
677 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
678
679 /* Sysenter handlers */
680 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
681 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
682 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
683
684 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
685
686 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
687 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
688 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
689 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
690
691
692 /** @note patch statistics are not restored. */
693
694 /*
695 * Restore patch memory contents
696 */
697 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
698 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
699 AssertRCReturn(rc, rc);
700
701 /*
702 * Restore GC state memory
703 */
704#ifdef PATM_WITH_NEW_SSM
705 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
706 {
707#endif
708#if 0
709 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
710#else
711 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
712 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
713#endif
714 AssertRCReturn(rc, rc);
715#ifdef PATM_WITH_NEW_SSM
716 }
717 else
718 {
719 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
720 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
721 AssertRCReturn(rc, rc);
722
723 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
724 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
725 AssertRCReturn(rc, rc);
726
727 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
728 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
729 AssertRCReturn(rc, rc);
730
731 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
732 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
733 AssertRCReturn(rc, rc);
734
735 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
736 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
737 AssertRCReturn(rc, rc);
738
739 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
740 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
741 AssertRCReturn(rc, rc);
742
743 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
744 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
745 AssertRCReturn(rc, rc);
746
747 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
748 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
749 AssertRCReturn(rc, rc);
750
751 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
752 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
753 AssertRCReturn(rc, rc);
754
755 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
756 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
757 AssertRCReturn(rc, rc);
758
759 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
760 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
761 AssertRCReturn(rc, rc);
762
763 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
764 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
765 AssertRCReturn(rc, rc);
766
767 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
768 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
769 AssertRCReturn(rc, rc);
770
771 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
772 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
773 AssertRCReturn(rc, rc);
774
775 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
776 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
777 AssertRCReturn(rc, rc);
778
779 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
780 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
781 AssertRCReturn(rc, rc);
782
783 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
784 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
785 AssertRCReturn(rc, rc);
786 }
787#endif
788
789 /*
790 * Restore PATM stack page
791 */
792 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
793 AssertRCReturn(rc, rc);
794
795 /*
796 * Load all patches
797 */
798 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
799 {
800 PATMPATCHREC patch, *pPatchRec;
801
802#if 0
803 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
804#else
805 RT_ZERO(patch);
806 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
807#endif
808 AssertRCReturn(rc, rc);
809
810 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
811
812 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
813 if (RT_FAILURE(rc))
814 {
815 AssertMsgFailed(("Out of memory!!!!\n"));
816 return VERR_NO_MEMORY;
817 }
818 /*
819 * Only restore the patch part of the tree record; not the internal data (except the key of course)
820 */
821 pPatchRec->patch = patch.patch;
822 pPatchRec->Core.Key = patch.Core.Key;
823 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
824
825 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
826 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
827 Assert(ret);
828 if (pPatchRec->patch.uState != PATCH_REFUSED)
829 {
830 if (pPatchRec->patch.pPatchBlockOffset)
831 {
832 /* We actually generated code for this patch. */
833 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
834 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
835 }
836 }
837 /* Set to zero as we don't need it anymore. */
838 pPatchRec->patch.pTempInfo = 0;
839
840 pPatchRec->patch.pPrivInstrHC = 0;
841 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
842 int rc2 = rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
843 /* Can fail due to page or page table not present. */
844
845 /*
846 * Restore fixup records and correct HC pointers in fixup records
847 */
848 pPatchRec->patch.FixupTree = 0;
849 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
850 for (int j=0;j<patch.patch.nrFixups;j++)
851 {
852 RELOCREC rec;
853 int32_t offset;
854 RTRCPTR *pFixup;
855
856#if 0
857 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
858#else
859 RT_ZERO(rec);
860 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
861#endif
862 AssertRCReturn(rc, rc);
863
864 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
865 offset = (int32_t)(int64_t)rec.pRelocPos;
866 /* Convert to HC pointer again. */
867 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
868 pFixup = (RTRCPTR *)rec.pRelocPos;
869
870 if (pPatchRec->patch.uState != PATCH_REFUSED)
871 {
872 if ( rec.uType == FIXUP_REL_JMPTOPATCH
873 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
874 {
875 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
876 unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
877
878 /** @todo This will fail & crash in patmCorrectFixup if the page isn't present
879 * when we restore. Happens with my XP image here
880 * (pPrivInstrGC=0x8069e051). */
881 AssertLogRelMsg(pPatchRec->patch.pPrivInstrHC, ("%RRv rc=%Rrc uState=%u\n", pPatchRec->patch.pPrivInstrGC, rc2, pPatchRec->patch.uState));
882 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset2;
883 pFixup = (RTRCPTR *)rec.pRelocPos;
884 }
885
886 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
887 }
888
889 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
890 AssertRCReturn(rc, rc);
891 }
892
893 /* And all patch to guest lookup records */
894 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
895
896 pPatchRec->patch.Patch2GuestAddrTree = 0;
897 pPatchRec->patch.Guest2PatchAddrTree = 0;
898 if (pPatchRec->patch.nrPatch2GuestRecs)
899 {
900 RECPATCHTOGUEST rec;
901 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
902
903 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
904 for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
905 {
906#if 0
907 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
908#else
909 RT_ZERO(rec);
910 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
911#endif
912
913 AssertRCReturn(rc, rc);
914
915 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
916 }
917 Assert(pPatchRec->patch.Patch2GuestAddrTree);
918 }
919
920 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
921 {
922 /* Insert the guest page lookup records (for detection self-modifying code) */
923 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
924 AssertRCReturn(rc, rc);
925 }
926
927#if 0 /* can fail def LOG_ENABLED */
928 if ( pPatchRec->patch.uState != PATCH_REFUSED
929 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
930 {
931 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
932 Log(("Patch code ----------------------------------------------------------\n"));
933 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
934 Log(("Patch code ends -----------------------------------------------------\n"));
935 MMR3HeapFree(pPatchRec->patch.pTempInfo);
936 pPatchRec->patch.pTempInfo = NULL;
937 }
938#endif
939
940 }
941
942 /*
943 * Correct absolute fixups in the global patch. (helper functions)
944 * Bit of a mess. Uses the new patch record, but restored patch functions.
945 */
946 PRELOCREC pRec = 0;
947 AVLPVKEY key = 0;
948
949 Log(("Correct fixups in global helper functions\n"));
950 while (true)
951 {
952 int32_t offset;
953 RTRCPTR *pFixup;
954
955 /* Get the record that's closest from above */
956 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
957 if (pRec == 0)
958 break;
959
960 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
961
962 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
963 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
964 pFixup = (RTRCPTR *)pRec->pRelocPos;
965
966 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
967 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
968 }
969
970#ifdef VBOX_WITH_STATISTICS
971 /*
972 * Restore relevant old statistics
973 */
974 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
975 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
976 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
977 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
978#endif
979
980 return VINF_SUCCESS;
981}
982
983/**
984 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
985 *
986 * @returns VBox status code.
987 * @param pVM VM Handle.
988 * @param ulSSMVersion SSM version
989 * @param patmInfo Saved PATM structure
990 * @param pPatch Patch record
991 * @param pRec Relocation record
992 * @param offset Offset of referenced data/code
993 * @param pFixup Fixup address
994 */
995static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
996{
997 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
998
999 switch (pRec->uType)
1000 {
1001 case FIXUP_ABSOLUTE:
1002 {
1003 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
1004 break;
1005
1006 if ( *pFixup >= patmInfo.pGCStateGC
1007 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
1008 {
1009 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
1010 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
1011 }
1012 else
1013 if ( *pFixup >= patmInfo.pCPUMCtxGC
1014 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
1015 {
1016 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
1017
1018 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
1019 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
1020 {
1021 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
1022
1023 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
1024 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
1025 * function is not available in older gcc versions, at least not in gcc-3.3 */
1026 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
1027 {
1028 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
1029 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
1030 }
1031 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
1032 {
1033 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
1034 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
1035 }
1036 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
1037 {
1038 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
1039 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
1040 }
1041 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
1042 {
1043 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
1044 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
1045 }
1046 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
1047 {
1048 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
1049 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
1050 }
1051 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
1052 {
1053 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
1054 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
1055 }
1056 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
1057 {
1058 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
1059 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
1060 }
1061 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
1062 {
1063 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
1064 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
1065 }
1066 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
1067 {
1068 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
1069 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
1070 }
1071 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
1072 {
1073 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
1074 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
1075 }
1076 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
1077 {
1078 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
1079 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
1080 }
1081 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
1082 {
1083 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
1084 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
1085 }
1086 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
1087 {
1088 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
1089 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1090 }
1091 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
1092 {
1093 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
1094 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1095 }
1096 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
1097 {
1098 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
1099 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1100 }
1101 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
1102 {
1103 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
1104 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1105 }
1106 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
1107 {
1108 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
1109 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1110 }
1111 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
1112 {
1113 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
1114 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1115 }
1116 else
1117 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
1118 }
1119 else
1120 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
1121 }
1122 else
1123 if ( *pFixup >= patmInfo.pStatsGC
1124 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
1125 {
1126 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
1127 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
1128 }
1129 else
1130 if ( *pFixup >= patmInfo.pGCStackGC
1131 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
1132 {
1133 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
1134 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
1135 }
1136 else
1137 if ( *pFixup >= patmInfo.pPatchMemGC
1138 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
1139 {
1140 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
1141 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
1142 }
1143 else
1144 /* Boldly ASSUMES:
1145 * 1. That pCPUMCtxGC is in the VM structure and that its location is
1146 * at the first page of the same 4 MB chunk.
1147 * 2. That the forced actions were in the first 32 bytes of the VM
1148 * structure.
1149 * 3. That the CPUM leafs are less than 8KB into the structure. */
1150 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1151 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
1152 {
1153 LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
1154 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1155 }
1156 else
1157 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1158 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
1159 {
1160 static int cCpuidFixup = 0;
1161#ifdef LOG_ENABLED
1162 RTRCPTR oldFixup = *pFixup;
1163#endif
1164 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
1165 switch(cCpuidFixup & 3)
1166 {
1167 case 0:
1168 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1169 break;
1170 case 1:
1171 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1172 break;
1173 case 2:
1174 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1175 break;
1176 case 3:
1177 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1178 break;
1179 }
1180 LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, oldFixup, *pFixup));
1181 cCpuidFixup++;
1182 }
1183 else
1184 if (ulSSMVersion >= PATM_SSM_VERSION)
1185 {
1186#ifdef LOG_ENABLED
1187 RTRCPTR oldFixup = *pFixup;
1188#endif
1189 /* Core.Key abused to store the type of fixup */
1190 switch ((uintptr_t)pRec->Core.Key)
1191 {
1192 case PATM_FIXUP_CPU_FF_ACTION:
1193 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1194 LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
1195 break;
1196 case PATM_FIXUP_CPUID_DEFAULT:
1197 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1198 LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
1199 break;
1200 case PATM_FIXUP_CPUID_STANDARD:
1201 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1202 LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
1203 break;
1204 case PATM_FIXUP_CPUID_EXTENDED:
1205 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1206 LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
1207 break;
1208 case PATM_FIXUP_CPUID_CENTAUR:
1209 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1210 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
1211 break;
1212 default:
1213 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
1214 break;
1215 }
1216 }
1217
1218#ifdef RT_OS_WINDOWS
1219 AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
1220#endif
1221 break;
1222 }
1223
1224 case FIXUP_REL_JMPTOPATCH:
1225 {
1226 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
1227
1228 if ( pPatch->uState == PATCH_ENABLED
1229 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
1230 {
1231 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
1232 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
1233 RTRCPTR pJumpOffGC;
1234 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
1235 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
1236
1237 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1238
1239 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
1240#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
1241 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
1242 {
1243 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
1244
1245 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
1246 oldJump[0] = pPatch->aPrivInstr[0];
1247 oldJump[1] = pPatch->aPrivInstr[1];
1248 *(RTRCUINTPTR *)&oldJump[2] = displOld;
1249 }
1250 else
1251#endif
1252 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
1253 {
1254 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
1255 oldJump[0] = 0xE9;
1256 *(RTRCUINTPTR *)&oldJump[1] = displOld;
1257 }
1258 else
1259 {
1260 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
1261 break;
1262 }
1263 Assert(pPatch->cbPatchJump <= sizeof(temp));
1264
1265 /*
1266 * Read old patch jump and compare it to the one we previously installed
1267 */
1268 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
1269 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1270
1271 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1272 {
1273 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
1274
1275 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
1276 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
1277 }
1278 else
1279 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
1280 {
1281 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
1282 /*
1283 * Disable patch; this is not a good solution
1284 */
1285 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
1286 pPatch->uState = PATCH_DISABLED;
1287 }
1288 else
1289 if (RT_SUCCESS(rc))
1290 {
1291 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
1292 AssertRC(rc);
1293 }
1294 else
1295 {
1296 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
1297 }
1298 }
1299 else
1300 {
1301 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
1302 }
1303
1304 pRec->pDest = pTarget;
1305 break;
1306 }
1307
1308 case FIXUP_REL_JMPTOGUEST:
1309 {
1310 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1311 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1312
1313 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1314 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1315 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1316 pRec->pSource = pSource;
1317 break;
1318
1319 }
1320}
1321}
1322
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette