VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@45530

Last change on this file since 45530 was 45485, checked in by vboxsync, 12 years ago
  • *: Where possible, drop the #ifdef VBOX_WITH_RAW_RING1 when EMIsRawRing1Enabled is used.
  • SELM: Don't shadow TSS.esp1/ss1 unless ring-1 compression is enabled (also fixed a log statement there).
  • SELM: selmGuestToShadowDesc should not push ring-1 selectors into ring-2 unless EMIsRawRing1Enabled() holds true.
  • REM: Don't set CPU_INTERRUPT_EXTERNAL_EXIT in helper_ltr() for now.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 259.9 KB
1/* $Id: PATM.cpp 45485 2013-04-11 14:46:04Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <VBox/vmm/csam.h>
38#include <iprt/avl.h>
39#include "PATMInternal.h"
40#include "PATMPatch.h"
41#include <VBox/vmm/vm.h>
42#include <VBox/vmm/uvm.h>
43#include <VBox/dbg.h>
44#include <VBox/err.h>
45#include <VBox/log.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <VBox/dis.h>
49#include <VBox/disopcode.h>
50#include "internal/pgm.h"
51
52#include <iprt/string.h>
53#include "PATMA.h"
54
55//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
56//#define PATM_DISABLE_ALL
57
58/**
59 * Refresh trampoline patch state.
60 */
61typedef struct PATMREFRESHPATCH
62{
63 /** Pointer to the VM structure. */
64 PVM pVM;
65 /** The trampoline patch record. */
66 PPATCHINFO pPatchTrampoline;
67 /** The new patch we want to jump to. */
68 PPATCHINFO pPatchRec;
69} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
70
71
72#define PATMREAD_RAWCODE 1 /* read code as-is */
73#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
74#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
75
76/*
77 * Private structure used during disassembly
78 */
79typedef struct
80{
81 PVM pVM;
82 PPATCHINFO pPatchInfo;
83 R3PTRTYPE(uint8_t *) pbInstrHC;
84 RTRCPTR pInstrGC;
85 uint32_t fReadFlags;
86} PATMDISASM, *PPATMDISASM;
87
88
89/*******************************************************************************
90* Internal Functions *
91*******************************************************************************/
92
93static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
94static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
95static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96
97#ifdef LOG_ENABLED // keep gcc quiet
98static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
99#endif
100#ifdef VBOX_WITH_STATISTICS
101static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
102static void patmResetStat(PVM pVM, void *pvSample);
103static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
104#endif
105
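/* The following macros translate between the ring-3 (HC) and raw-mode (GC) views of an
 * address inside the patch memory block by preserving its byte offset. They assume the
 * address passed in actually lies within the patch memory range. */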
106#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
107#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
108
109static int patmReinit(PVM pVM);
110static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
111static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
112static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
113
114#ifdef VBOX_WITH_DEBUGGER
115static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
116static FNDBGCCMD patmr3CmdOn;
117static FNDBGCCMD patmr3CmdOff;
118
119/** Command descriptors. */
120static const DBGCCMD g_aCmds[] =
121{
122 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
123 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
124 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
125};
126#endif
127
128/* Don't want to break saved states, so put it here as a global variable. */
129static unsigned int cIDTHandlersDisabled = 0;
130
131/**
132 * Initializes the PATM.
133 *
134 * @returns VBox status code.
135 * @param pVM Pointer to the VM.
136 */
137VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
138{
139 int rc;
140
141 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
142
143 /* These values can't change as they are hardcoded in patch code (old saved states!) */
144 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
145 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
146 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
147 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
148
149 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
150 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
151
152 /* Allocate patch memory and GC patch state memory. */
153 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
154 /* Add another page in case the generated code is much larger than expected. */
155 /** @todo bad safety precaution */
156 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
157 if (RT_FAILURE(rc))
158 {
159 Log(("MMHyperAlloc failed with %Rrc\n", rc));
160 return rc;
161 }
162 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
163
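/* The single hyper heap block allocated above is carved up in place: patch code, a
 * slack page, the PATM stack, the GC state page and the statistics area follow each
 * other, as the pointer arithmetic below shows. */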
164 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
165 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
166 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
167
168 /*
169 * Hypervisor memory for GC status data (read/write)
170 *
171 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
172 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
173 *
174 */
175 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
176 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
177 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
178
179 /* Hypervisor memory for patch statistics */
180 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
181 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
182
183 /* Memory for patch lookup trees. */
184 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
185 AssertRCReturn(rc, rc);
186 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
187
188#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
189 /* Check CFGM option. */
190 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
191 if (RT_FAILURE(rc))
192# ifdef PATM_DISABLE_ALL
193 pVM->fPATMEnabled = false;
194# else
195 pVM->fPATMEnabled = true;
196# endif
197#endif
198
199 rc = patmReinit(pVM);
200 AssertRC(rc);
201 if (RT_FAILURE(rc))
202 return rc;
203
204 /*
205 * Register save and load state notifiers.
206 */
207 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
208 NULL, NULL, NULL,
209 NULL, patmR3Save, NULL,
210 NULL, patmR3Load, NULL);
211 AssertRCReturn(rc, rc);
212
213#ifdef VBOX_WITH_DEBUGGER
214 /*
215 * Debugger commands.
216 */
217 static bool s_fRegisteredCmds = false;
218 if (!s_fRegisteredCmds)
219 {
220 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
221 if (RT_SUCCESS(rc2))
222 s_fRegisteredCmds = true;
223 }
224#endif
225
226#ifdef VBOX_WITH_STATISTICS
227 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
228 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
229 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
230 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
231 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
232 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
233 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
234 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
235
236 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
237 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
238
239 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
240 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
241 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
242
243 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
244 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
245 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
246 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
247 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
248
249 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
250 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
251
252 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
253 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
254
255 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
256 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
257 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
258
259 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
260 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
261 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
262
263 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
264 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
265
266 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
267 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
268 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
269 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
270
271 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
272 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
273
274 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
275 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
276
277 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
278 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
279 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
280
281 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
282 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
283 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
284 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
285
286 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
287 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
288 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
289 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
290 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
291
292 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
293#endif /* VBOX_WITH_STATISTICS */
294
295 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
296 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
297 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
298 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
299 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
300 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
301 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
302 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
303
304 return rc;
305}
306
307/**
308 * Finalizes HMA page attributes.
309 *
310 * @returns VBox status code.
311 * @param pVM Pointer to the VM.
312 */
313VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
314{
315 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
316 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
317 if (RT_FAILURE(rc))
318 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
319
320 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
321 if (RT_FAILURE(rc))
322 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
323
324 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
325 if (RT_FAILURE(rc))
326 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
327
328 return rc;
329}
330
331/**
332 * (Re)initializes PATM
333 *
334 * @param pVM The VM.
335 */
336static int patmReinit(PVM pVM)
337{
338 int rc;
339
340 /*
341 * Assert alignment and sizes.
342 */
343 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
344 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
345
346 /*
347 * Setup any fixed pointers and offsets.
348 */
349 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
350
351#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere; it was driving me crazy on AMD64. */
352#ifndef PATM_DISABLE_ALL
353 pVM->fPATMEnabled = true;
354#endif
355#endif
356
357 Assert(pVM->patm.s.pGCStateHC);
358 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
359 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
360
361 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
362 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
363
364 Assert(pVM->patm.s.pGCStackHC);
365 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
366 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
367 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
368 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
369
370 Assert(pVM->patm.s.pStatsHC);
371 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
372 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
373
374 Assert(pVM->patm.s.pPatchMemHC);
375 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
376 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
377 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
378
379 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
380 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
381
382 Assert(pVM->patm.s.PatchLookupTreeHC);
383 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
384
385 /*
386 * (Re)Initialize PATM structure
387 */
388 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
389 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
390 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
391 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
392 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
393 pVM->patm.s.pvFaultMonitor = 0;
394 pVM->patm.s.deltaReloc = 0;
395
396 /* Lowest and highest patched instruction */
397 pVM->patm.s.pPatchedInstrGCLowest = ~0;
398 pVM->patm.s.pPatchedInstrGCHighest = 0;
399
400 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
401 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
402 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
403
404 pVM->patm.s.pfnSysEnterPatchGC = 0;
405 pVM->patm.s.pfnSysEnterGC = 0;
406
407 pVM->patm.s.fOutOfMemory = false;
408
409 pVM->patm.s.pfnHelperCallGC = 0;
410
411 /* Generate all global functions to be used by future patches. */
412 /* We generate a fake patch in order to use the existing code for relocation. */
413 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
414 if (RT_FAILURE(rc))
415 {
416 Log(("Out of memory!!!!\n"));
417 return VERR_NO_MEMORY;
418 }
419 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
420 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
421 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
422
423 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
424 AssertRC(rc);
425
426 /* Update free pointer in patch memory. */
427 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
428 /* Round to next 8 byte boundary. */
429 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
430 return rc;
431}
432
433
434/**
435 * Applies relocations to data and code managed by this
436 * component. This function will be called at init and
437 * whenever the VMM needs to relocate itself inside the GC.
438 *
439 * The PATM will update the addresses used by the switcher.
440 *
441 * @param pVM The VM.
442 */
443VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
444{
445 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
446 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
447
448 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
449 if (delta)
450 {
451 PCPUMCTX pCtx;
452
453 /* Update CPUMCTX guest context pointer. */
454 pVM->patm.s.pCPUMCtxGC += delta;
455
456 pVM->patm.s.deltaReloc = delta;
457
458 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
459
460 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
461
462 /* If we are running patch code right now, then also adjust EIP. */
463 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
464 pCtx->eip += delta;
465
466 pVM->patm.s.pGCStateGC = GCPtrNew;
467 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
468
469 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
470
471 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
472
473 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
474
475 if (pVM->patm.s.pfnSysEnterPatchGC)
476 pVM->patm.s.pfnSysEnterPatchGC += delta;
477
478 /* Deal with the global patch functions. */
479 pVM->patm.s.pfnHelperCallGC += delta;
480 pVM->patm.s.pfnHelperRetGC += delta;
481 pVM->patm.s.pfnHelperIretGC += delta;
482 pVM->patm.s.pfnHelperJumpGC += delta;
483
484 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
485 }
486}
487
488
489/**
490 * Terminates the PATM.
491 *
492 * Termination means cleaning up and freeing all resources;
493 * the VM itself is at this point powered off or suspended.
494 *
495 * @returns VBox status code.
496 * @param pVM Pointer to the VM.
497 */
498VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
499{
500 /* Memory was all allocated from the two MM heaps and requires no freeing. */
501 NOREF(pVM);
502 return VINF_SUCCESS;
503}
504
505
506/**
507 * PATM reset callback.
508 *
509 * @returns VBox status code.
510 * @param pVM The VM which is reset.
511 */
512VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
513{
514 Log(("PATMR3Reset\n"));
515
516 /* Free all patches. */
517 while (true)
518 {
519 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
520 if (pPatchRec)
521 patmR3RemovePatch(pVM, pPatchRec, true);
522 else
523 break;
524 }
525 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
526 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
527 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
528 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
529
530 int rc = patmReinit(pVM);
531 if (RT_SUCCESS(rc))
532 rc = PATMR3InitFinalize(pVM); /* paranoia */
533
534 return rc;
535}
536
537/**
538 * @callback_method_impl{FNDISREADBYTES}
539 */
540static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
541{
542 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
543
544/** @todo change this to read more! */
545 /*
546 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
547 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
548 */
549 /** @todo could change in the future! */
550 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
551 {
552 size_t cbRead = cbMaxRead;
553 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
554 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
555 if (RT_SUCCESS(rc))
556 {
557 if (cbRead >= cbMinRead)
558 {
559 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
560 return VINF_SUCCESS;
561 }
562
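/* Partial read of the original bytes: adjust the counters and fall through to the
 * normal read path below for the remainder. */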
563 cbMinRead -= (uint8_t)cbRead;
564 cbMaxRead -= (uint8_t)cbRead;
565 offInstr += (uint8_t)cbRead;
566 uSrcAddr += cbRead;
567 }
568
569#ifdef VBOX_STRICT
570 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
571 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
572 {
573 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
574 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
575 }
576#endif
577 }
578
579 int rc = VINF_SUCCESS;
580 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
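 /* No cached HC mapping, or the read would cross out of the page backing it (and the
  * address is not in patch memory): fall back to reading through the guest page tables,
  * fetching only the minimum number of bytes the disassembler asked for. */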
581 if ( !pDisInfo->pbInstrHC
582 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
583 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
584 {
585 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
586 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
587 offInstr += cbMinRead;
588 }
589 else
590 {
591 /*
592 * pbInstrHC is the base address; adjust according to the GC pointer.
593 *
594 * Try to read the maximum number of bytes here. Since the disassembler only
595 * ever uses these bytes for the current instruction, it doesn't matter
596 * much if we accidentally read the start of the next instruction even
597 * if it happens to be a patch jump or int3.
598 */
599 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
600 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
601
602 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
603 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
604 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
605 if (cbToRead > cbMaxRead)
606 cbToRead = cbMaxRead;
607
608 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
609 offInstr += (uint8_t)cbToRead;
610 }
611
612 pDis->cbCachedInstr = offInstr;
613 return rc;
614}
615
616
617DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
618 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
619{
620 PATMDISASM disinfo;
621 disinfo.pVM = pVM;
622 disinfo.pPatchInfo = pPatch;
623 disinfo.pbInstrHC = pbInstrHC;
624 disinfo.pInstrGC = InstrGCPtr32;
625 disinfo.fReadFlags = fReadFlags;
626 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
627 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
628 patmReadBytes, &disinfo,
629 pCpu, pcbInstr, pszOutput, cbOutput));
630}
631
632
633DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
634 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
635{
636 PATMDISASM disinfo;
637 disinfo.pVM = pVM;
638 disinfo.pPatchInfo = pPatch;
639 disinfo.pbInstrHC = pbInstrHC;
640 disinfo.pInstrGC = InstrGCPtr32;
641 disinfo.fReadFlags = fReadFlags;
642 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
643 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
644 patmReadBytes, &disinfo,
645 pCpu, pcbInstr));
646}
647
648
649DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
650 uint32_t fReadFlags,
651 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
652{
653 PATMDISASM disinfo;
654 disinfo.pVM = pVM;
655 disinfo.pPatchInfo = pPatch;
656 disinfo.pbInstrHC = pbInstrHC;
657 disinfo.pInstrGC = InstrGCPtr32;
658 disinfo.fReadFlags = fReadFlags;
659 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
660 pCpu, pcbInstr));
661}
662
663#ifdef LOG_ENABLED
664# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
665 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
666# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
667 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
668
669# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
670 do { \
671 if (LogIsEnabled()) \
672 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
673 } while (0)
674
675static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
676 const char *pszComment1, const char *pszComment2)
677{
678 DISCPUSTATE DisState;
679 char szOutput[128];
680 szOutput[0] = '\0';
681 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
682 &DisState, NULL, szOutput, sizeof(szOutput));
683 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
684}
685
686#else
687# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
688# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
689# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
690#endif
691
692
693/**
694 * Callback function for RTAvloU32DoWithAll
695 *
696 * Updates all fixups in the patches
697 *
698 * @returns VBox status code.
699 * @param pNode Current node
700 * @param pParam Pointer to the VM.
701 */
702static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
703{
704 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
705 PVM pVM = (PVM)pParam;
706 RTRCINTPTR delta;
707 int rc;
708
709 /* Nothing to do if the patch is not active. */
710 if (pPatch->patch.uState == PATCH_REFUSED)
711 return 0;
712
713 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
714 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
715
716 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
717 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
718
719 /*
720 * Apply fixups
721 */
722 PRELOCREC pRec = 0;
723 AVLPVKEY key = 0;
724
725 while (true)
726 {
727 /* Get the record that's closest from above */
728 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
729 if (pRec == 0)
730 break;
731
732 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
733
734 switch (pRec->uType)
735 {
736 case FIXUP_ABSOLUTE:
737 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
738 if ( !pRec->pSource
739 || PATMIsPatchGCAddr(pVM, pRec->pSource))
740 {
741 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
742 }
743 else
744 {
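 /* The fixup lives in guest code we patched: verify that the instruction still
  * contains the address we wrote earlier before updating it to the relocated value;
  * if the page is not present, register a virtual handler so it can be checked later. */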
745 uint8_t curInstr[15];
746 uint8_t oldInstr[15];
747 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
748
749 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
750
751 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
752 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
753
754 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
755 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
756
757 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
758
759 if ( rc == VERR_PAGE_NOT_PRESENT
760 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
761 {
762 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
763
764 Log(("PATM: Patch page not present -> check later!\n"));
765 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
766 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
767 }
768 else
769 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
770 {
771 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
772 /*
773 * Disable patch; this is not a good solution
774 */
775 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
776 pPatch->patch.uState = PATCH_DISABLED;
777 }
778 else
779 if (RT_SUCCESS(rc))
780 {
781 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
782 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
783 AssertRC(rc);
784 }
785 }
786 break;
787
788 case FIXUP_REL_JMPTOPATCH:
789 {
790 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
791
792 if ( pPatch->patch.uState == PATCH_ENABLED
793 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
794 {
795 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
796 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
797 RTRCPTR pJumpOffGC;
798 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
799 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
800
801#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
802 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
803#else
804 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
805#endif
806
807 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
808#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
809 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
810 {
811 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
812
813 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
814 oldJump[0] = pPatch->patch.aPrivInstr[0];
815 oldJump[1] = pPatch->patch.aPrivInstr[1];
816 *(RTRCUINTPTR *)&oldJump[2] = displOld;
817 }
818 else
819#endif
820 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
821 {
822 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
823 oldJump[0] = 0xE9;
824 *(RTRCUINTPTR *)&oldJump[1] = displOld;
825 }
826 else
827 {
828 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
829 continue; //this should never happen!!
830 }
831 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
832
833 /*
834 * Read old patch jump and compare it to the one we previously installed
835 */
836 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
837 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
838
839 if ( rc == VERR_PAGE_NOT_PRESENT
840 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
841 {
842 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
843
844 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
845 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
846 }
847 else
848 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
849 {
850 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
851 /*
852 * Disable patch; this is not a good solution
853 */
854 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
855 pPatch->patch.uState = PATCH_DISABLED;
856 }
857 else
858 if (RT_SUCCESS(rc))
859 {
860 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
861 AssertRC(rc);
862 }
863 else
864 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
865 }
866 else
867 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
868
869 pRec->pDest = pTarget;
870 break;
871 }
872
873 case FIXUP_REL_JMPTOGUEST:
874 {
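 /* Relative jump from patch code back to guest code: the guest target itself did not
  * move, so only the displacement stored in patch memory needs to be recomputed
  * against the relocated source address. */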
875 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
876 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
877
878 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
879 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
880 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
881 pRec->pSource = pSource;
882 break;
883 }
884
885 default:
886 AssertMsg(0, ("Invalid fixup type!!\n"));
887 return VERR_INVALID_PARAMETER;
888 }
889 }
890
891 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
892 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
893 return 0;
894}
895
896/**
897 * \#PF Handler callback for virtual access handler ranges.
898 *
899 * It is important to realize that a physical page in a range can have aliases, and
900 * for ALL and WRITE handlers accesses through these aliases will also trigger.
901 *
902 * @returns VINF_SUCCESS if the handler has carried out the operation.
903 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
904 * @param pVM Pointer to the VM.
905 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
906 * @param pvPtr The HC mapping of that address.
907 * @param pvBuf What the guest is reading/writing.
908 * @param cbBuf How much it's reading/writing.
909 * @param enmAccessType The access type.
910 * @param pvUser User argument.
911 */
912DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
913 PGMACCESSTYPE enmAccessType, void *pvUser)
914{
915 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
916 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
917
918 /** @todo could be the wrong virtual address (alias) */
919 pVM->patm.s.pvFaultMonitor = GCPtr;
920 PATMR3HandleMonitoredPage(pVM);
921 return VINF_PGM_HANDLER_DO_DEFAULT;
922}
923
924#ifdef VBOX_WITH_DEBUGGER
925
926/**
927 * Callback function for RTAvloU32DoWithAll
928 *
929 * Enables the patch that's being enumerated
930 *
931 * @returns 0 (continue enumeration).
932 * @param pNode Current node
933 * @param pVM Pointer to the VM.
934 */
935static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
936{
937 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
938
939 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
940 return 0;
941}
942
943
944/**
945 * Callback function for RTAvloU32DoWithAll
946 *
947 * Disables the patch that's being enumerated
948 *
949 * @returns 0 (continue enumeration).
950 * @param pNode Current node
951 * @param pVM Pointer to the VM.
952 */
953static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
954{
955 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
956
957 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
958 return 0;
959}
960
961#endif /* VBOX_WITH_DEBUGGER */
962#ifdef UNUSED_FUNCTIONS
963
964/**
965 * Returns the host context pointer and size of the patch memory block
966 *
967 * @returns Host context pointer.
968 * @param pVM Pointer to the VM.
969 * @param pcb Size of the patch memory block
970 * @internal
971 */
972VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
973{
974 if (pcb)
975 *pcb = pVM->patm.s.cbPatchMem;
976
977 return pVM->patm.s.pPatchMemHC;
978}
979
980
981/**
982 * Returns the guest context pointer and size of the patch memory block
983 *
984 * @returns Guest context pointer.
985 * @param pVM Pointer to the VM.
986 * @param pcb Size of the patch memory block
987 */
988VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
989{
990 if (pcb)
991 *pcb = pVM->patm.s.cbPatchMem;
992
993 return pVM->patm.s.pPatchMemGC;
994}
995
996#endif /* UNUSED_FUNCTIONS */
997
998/**
999 * Returns the host context pointer of the GC context structure
1000 *
1001 * @returns VBox status code.
1002 * @param pVM Pointer to the VM.
1003 */
1004VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1005{
1006 return pVM->patm.s.pGCStateHC;
1007}
1008
1009
1010#ifdef UNUSED_FUNCTION
1011/**
1012 * Checks whether the HC address is part of our patch region
1013 *
1014 * @returns true/false.
1015 * @param pVM Pointer to the VM.
1016 * @param pAddrHC Host context ring-3 address to check.
1017 */
1018VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1019{
1020 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1021 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1022}
1023#endif
1024
1025
1026/**
1027 * Allows or disallow patching of privileged instructions executed by the guest OS
1028 *
1029 * @returns VBox status code.
1030 * @param pUVM The user mode VM handle.
1031 * @param fAllowPatching Allow/disallow patching
1032 */
1033VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1034{
1035 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1036 PVM pVM = pUVM->pVM;
1037 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1038
1039 pVM->fPATMEnabled = fAllowPatching;
1040 return VINF_SUCCESS;
1041}
1042
1043
1044/**
1045 * Checks if the patch manager is enabled or not.
1046 *
1047 * @returns true if enabled, false if not (or if invalid handle).
1048 * @param pUVM The user mode VM handle.
1049 */
1050VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1051{
1052 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1053 PVM pVM = pUVM->pVM;
1054 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1055 return PATMIsEnabled(pVM);
1056}
1057
1058
1059/**
1060 * Convert a GC patch block pointer to a HC patch pointer
1061 *
1062 * @returns HC pointer or NULL if it's not a GC patch pointer
1063 * @param pVM Pointer to the VM.
1064 * @param pAddrGC GC pointer
1065 */
1066VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1067{
1068 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1069 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1070 return NULL;
1071}
1072
1073
1074/**
1075 * Convert guest context address to host context pointer
1076 *
1077 * @returns VBox status code.
1078 * @param pVM Pointer to the VM.
1079 * @param pCacheRec Address conversion cache record
1080 * @param pGCPtr Guest context pointer
1081 *
1082 * @returns Host context pointer or NULL in case of an error
1083 *
1084 */
1085R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1086{
1087 int rc;
1088 R3PTRTYPE(uint8_t *) pHCPtr;
1089 uint32_t offset;
1090
1091 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1092 {
1093 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1094 Assert(pPatch);
1095 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1096 }
1097
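 /* Reuse the cached page mapping if the requested address is on the same guest page
  * as the one we translated last time. */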
1098 offset = pGCPtr & PAGE_OFFSET_MASK;
1099 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1100 return pCacheRec->pPageLocStartHC + offset;
1101
1102 /* Release previous lock if any. */
1103 if (pCacheRec->Lock.pvMap)
1104 {
1105 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1106 pCacheRec->Lock.pvMap = NULL;
1107 }
1108
1109 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1110 if (rc != VINF_SUCCESS)
1111 {
1112 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1113 return NULL;
1114 }
1115 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1116 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1117 return pHCPtr;
1118}
1119
1120
1121/** Calculates and fills in all branch targets
1122 *
1123 * @returns VBox status code.
1124 * @param pVM Pointer to the VM.
1125 * @param pPatch Current patch block pointer
1126 *
1127 */
1128static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1129{
1130 int32_t displ;
1131
1132 PJUMPREC pRec = 0;
1133 unsigned nrJumpRecs = 0;
1134
1135 /*
1136 * Set all branch targets inside the patch block.
1137 * We remove all jump records as they are no longer needed afterwards.
1138 */
1139 while (true)
1140 {
1141 RCPTRTYPE(uint8_t *) pInstrGC;
1142 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1143
1144 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1145 if (pRec == 0)
1146 break;
1147
1148 nrJumpRecs++;
1149
1150 /* HC in patch block to GC in patch block. */
1151 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1152
1153 if (pRec->opcode == OP_CALL)
1154 {
1155 /* Special case: call to a function replacement patch from this patch block.
1156 */
1157 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1158 if (!pFunctionRec)
1159 {
1160 int rc;
1161
1162 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1163 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1164 else
1165 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1166
1167 if (RT_FAILURE(rc))
1168 {
1169 uint8_t *pPatchHC;
1170 RTRCPTR pPatchGC;
1171 RTRCPTR pOrgInstrGC;
1172
1173 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1174 Assert(pOrgInstrGC);
1175
1176 /* Failure for some reason -> mark exit point with int 3. */
1177 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1178
1179 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1180 Assert(pPatchGC);
1181
1182 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1183
1184 /* Set a breakpoint at the very beginning of the recompiled instruction */
1185 *pPatchHC = 0xCC;
1186
1187 continue;
1188 }
1189 }
1190 else
1191 {
1192 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1193 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1194 }
1195
1196 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1197 }
1198 else
1199 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1200
1201 if (pBranchTargetGC == 0)
1202 {
1203 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1204 return VERR_PATCHING_REFUSED;
1205 }
1206 /* Our jumps *always* have a dword displacement (to make things easier). */
1207 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
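 /* Standard x86 rel32 semantics: the displacement is relative to the byte immediately
  * following the 32-bit displacement field. */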
1208 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1209 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1210 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1211 }
1212 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1213 Assert(pPatch->JumpTree == 0);
1214 return VINF_SUCCESS;
1215}
1216
1217/** Add an illegal instruction record
1218 *
1219 * @param pVM Pointer to the VM.
1220 * @param pPatch Patch structure ptr
1221 * @param pInstrGC Guest context pointer to privileged instruction
1222 *
1223 */
1224static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1225{
1226 PAVLPVNODECORE pRec;
1227
1228 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1229 Assert(pRec);
1230 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1231
1232 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1233 Assert(ret); NOREF(ret);
1234 pPatch->pTempInfo->nrIllegalInstr++;
1235}
1236
1237static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1238{
1239 PAVLPVNODECORE pRec;
1240
1241 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1242 if (pRec)
1243 return true;
1244 else
1245 return false;
1246}
1247
1248/**
1249 * Add a patch to guest lookup record
1250 *
1251 * @param pVM Pointer to the VM.
1252 * @param pPatch Patch structure ptr
1253 * @param pPatchInstrHC Guest context pointer to patch block
1254 * @param pInstrGC Guest context pointer to privileged instruction
1255 * @param enmType Lookup type
1256 * @param fDirty Dirty flag
1257 *
1258 */
1259 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1260void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1261{
1262 bool ret;
1263 PRECPATCHTOGUEST pPatchToGuestRec;
1264 PRECGUESTTOPATCH pGuestToPatchRec;
1265 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1266
1267 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1268 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1269
1270 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1271 {
1272 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1273 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1274 return; /* already there */
1275
1276 Assert(!pPatchToGuestRec);
1277 }
1278#ifdef VBOX_STRICT
1279 else
1280 {
1281 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1282 Assert(!pPatchToGuestRec);
1283 }
1284#endif
1285
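 /* Both lookup records are allocated as a single block; for PATM_LOOKUP_BOTHDIR the
  * guest-to-patch record is placed right after the patch-to-guest record (see the
  * pointer arithmetic further down). */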
1286 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1287 Assert(pPatchToGuestRec);
1288 pPatchToGuestRec->Core.Key = PatchOffset;
1289 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1290 pPatchToGuestRec->enmType = enmType;
1291 pPatchToGuestRec->fDirty = fDirty;
1292
1293 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1294 Assert(ret);
1295
1296 /* GC to patch address */
1297 if (enmType == PATM_LOOKUP_BOTHDIR)
1298 {
1299 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1300 if (!pGuestToPatchRec)
1301 {
1302 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1303 pGuestToPatchRec->Core.Key = pInstrGC;
1304 pGuestToPatchRec->PatchOffset = PatchOffset;
1305
1306 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1307 Assert(ret);
1308 }
1309 }
1310
1311 pPatch->nrPatch2GuestRecs++;
1312}
1313
1314
1315/**
1316 * Removes a patch to guest lookup record
1317 *
1318 * @param pVM Pointer to the VM.
1319 * @param pPatch Patch structure ptr
1320 * @param pPatchInstrGC Guest context pointer to patch block
1321 */
1322void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1323{
1324 PAVLU32NODECORE pNode;
1325 PAVLU32NODECORE pNode2;
1326 PRECPATCHTOGUEST pPatchToGuestRec;
1327 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1328
1329 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1330 Assert(pPatchToGuestRec);
1331 if (pPatchToGuestRec)
1332 {
1333 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1334 {
1335 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1336
1337 Assert(pGuestToPatchRec->Core.Key);
1338 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1339 Assert(pNode2);
1340 }
1341 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1342 Assert(pNode);
1343
1344 MMR3HeapFree(pPatchToGuestRec);
1345 pPatch->nrPatch2GuestRecs--;
1346 }
1347}
1348
1349
1350/**
1351 * RTAvlPVDestroy callback.
1352 */
1353static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1354{
1355 MMR3HeapFree(pNode);
1356 return 0;
1357}
1358
1359/**
1360 * Empty the specified tree (PV tree, MMR3 heap)
1361 *
1362 * @param pVM Pointer to the VM.
1363 * @param ppTree Tree to empty
1364 */
1365static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1366{
1367 NOREF(pVM);
1368 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1369}
1370
1371
1372/**
1373 * RTAvlU32Destroy callback.
1374 */
1375static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1376{
1377 MMR3HeapFree(pNode);
1378 return 0;
1379}
1380
1381/**
1382 * Empty the specified tree (U32 tree, MMR3 heap)
1383 *
1384 * @param pVM Pointer to the VM.
1385 * @param ppTree Tree to empty
1386 */
1387static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1388{
1389 NOREF(pVM);
1390 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1391}
1392
1393
1394/**
1395 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1396 *
1397 * @returns VBox status code.
1398 * @param pVM Pointer to the VM.
1399 * @param pCpu CPU disassembly state
1400 * @param pInstrGC Guest context pointer to privileged instruction
1401 * @param pCurInstrGC Guest context pointer to the current instruction
1402 * @param pCacheRec Cache record ptr
1403 *
1404 */
1405static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1406{
1407 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1408 bool fIllegalInstr = false;
1409
1410 /*
1411 * Preliminary heuristics:
1412 * - no call instructions without a fixed displacement between cli and sti/popf
1413 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1414 * - no nested pushf/cli
1415 * - sti/popf should be the (eventual) target of all branches
1416 * - no near or far returns; no int xx, no into
1417 *
1418 * Note: Later on we can impose less strict guidelines if the need arises
1419 */
1420
1421 /* Bail out if the patch gets too big. */
1422 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1423 {
1424 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1425 fIllegalInstr = true;
1426 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1427 }
1428 else
1429 {
1430 /* No unconditional jumps or calls without fixed displacements. */
1431 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1432 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1433 )
1434 {
1435 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1436 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1437 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1438 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1439 )
1440 {
1441 fIllegalInstr = true;
1442 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1443 }
1444 }
1445
1446 /* An unconditional (short) jump right after a cli is a potential problem: our 5-byte patch jump would also overwrite the code following the jump (possibly the start of another function). */
1447 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1448 {
1449 if ( pCurInstrGC > pPatch->pPrivInstrGC
1450 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1451 {
1452 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1453 /* We turn this one into a int 3 callable patch. */
1454 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1455 }
1456 }
1457 else
1458 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1459 if (pPatch->opcode == OP_PUSHF)
1460 {
1461 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1462 {
1463 fIllegalInstr = true;
1464 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1465 }
1466 }
1467
1468 /* no far returns */
1469 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1470 {
1471 pPatch->pTempInfo->nrRetInstr++;
1472 fIllegalInstr = true;
1473 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1474 }
1475 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1476 || pCpu->pCurInstr->uOpcode == OP_INT
1477 || pCpu->pCurInstr->uOpcode == OP_INTO)
1478 {
1479 /* No int xx or into either. */
1480 fIllegalInstr = true;
1481 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1482 }
1483 }
1484
1485 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1486
1487 /* Illegal instruction -> end of analysis phase for this code block */
1488 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1489 return VINF_SUCCESS;
1490
1491 /* Check for exit points. */
1492 switch (pCpu->pCurInstr->uOpcode)
1493 {
1494 case OP_SYSEXIT:
1495 return VINF_SUCCESS; /* duplicate it; it will fault or be emulated in GC. */
1496
1497 case OP_SYSENTER:
1498 case OP_ILLUD2:
1499 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1500 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1501 return VINF_SUCCESS;
1502
1503 case OP_STI:
1504 case OP_POPF:
1505 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1506 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1507 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1508 {
1509 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1510 return VERR_PATCHING_REFUSED;
1511 }
1512 if (pPatch->opcode == OP_PUSHF)
1513 {
1514 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1515 {
1516 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1517 return VINF_SUCCESS;
1518
1519 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1520 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1521 pPatch->flags |= PATMFL_CHECK_SIZE;
1522 }
1523 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1524 }
1525 /* else: fall through. */
1526 case OP_RETN: /* exit point for function replacement */
1527 return VINF_SUCCESS;
1528
1529 case OP_IRET:
1530 return VINF_SUCCESS; /* exitpoint */
1531
1532 case OP_CPUID:
1533 case OP_CALL:
1534 case OP_JMP:
1535 break;
1536
1537#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1538 case OP_STR:
1539 break;
1540#endif
1541
1542 default:
1543 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1544 {
1545 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1546 return VINF_SUCCESS; /* exit point */
1547 }
1548 break;
1549 }
1550
1551 /* For a single instruction patch: stop once we've copied enough instructions *and* the current instruction is not a relative jump. */
1552 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1553 {
1554 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1555 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1556 return VINF_SUCCESS;
1557 }
1558
1559 return VWRN_CONTINUE_ANALYSIS;
1560}
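/*
 * Illustrative sketch (example only, never compiled): several checks above reduce to one
 * interval test -- does a candidate instruction start inside the SIZEOF_NEARJUMP32 (5)
 * bytes that the patch jump will overwrite at the privileged instruction? The helper
 * name below is hypothetical.
 */
#if 0
static bool exampleIsInsidePatchJump(RTRCPTR pPrivInstrGC, RTRCPTR pCandidateGC)
{
    /* The byte at pPrivInstrGC is the first byte of the jump; everything up to (but not
       including) pPrivInstrGC + SIZEOF_NEARJUMP32 gets overwritten. */
    return pCandidateGC > pPrivInstrGC
        && pCandidateGC < pPrivInstrGC + SIZEOF_NEARJUMP32;
}
#endif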
1561
1562/**
1563 * Analyses the instructions inside a function for compliance
1564 *
1565 * @returns VBox status code.
1566 * @param pVM Pointer to the VM.
1567 * @param pCpu CPU disassembly state
1568 * @param pInstrGC Guest context pointer to privileged instruction
1569 * @param pCurInstrGC Guest context pointer to the current instruction
1570 * @param pCacheRec Cache record ptr
1571 *
1572 */
1573static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1574{
1575 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1576 bool fIllegalInstr = false;
1577 NOREF(pInstrGC);
1578
1579 // Preliminary heuristics:
1580 // - no call instructions
1581 // - ret ends a block
1582
1583 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1584
1585 // bail out if the patch gets too big
1586 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1587 {
1588 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1589 fIllegalInstr = true;
1590 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1591 }
1592 else
1593 {
1594 // no unconditional jumps or calls without fixed displacements
1595 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1596 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1597 )
1598 {
1599 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1600 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1601 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1602 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1603 )
1604 {
1605 fIllegalInstr = true;
1606 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1607 }
1608 }
1609 else /* no far returns */
1610 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1611 {
1612 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1613 fIllegalInstr = true;
1614 }
1615 else /* no int xx or into either */
1616 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1617 {
1618 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1619 fIllegalInstr = true;
1620 }
1621
1622 #if 0
1623 /// @todo we can handle certain in/out and privileged instructions in the guest context
1624 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1625 {
1626 Log(("Illegal instructions for function patch!!\n"));
1627 return VERR_PATCHING_REFUSED;
1628 }
1629 #endif
1630 }
1631
1632 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1633
1634 /* Illegal instruction -> end of analysis phase for this code block */
1635 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1636 {
1637 return VINF_SUCCESS;
1638 }
1639
1640 // Check for exit points
1641 switch (pCpu->pCurInstr->uOpcode)
1642 {
1643 case OP_ILLUD2:
1644 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1645 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1646 return VINF_SUCCESS;
1647
1648 case OP_IRET:
1649 case OP_SYSEXIT: /* will fault or be emulated in GC */
1650 case OP_RETN:
1651 return VINF_SUCCESS;
1652
1653#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1654 case OP_STR:
1655 break;
1656#endif
1657
1658 case OP_POPF:
1659 case OP_STI:
1660 return VWRN_CONTINUE_ANALYSIS;
1661 default:
1662 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1663 {
1664 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1665 return VINF_SUCCESS; /* exit point */
1666 }
1667 return VWRN_CONTINUE_ANALYSIS;
1668 }
1669
1670 return VWRN_CONTINUE_ANALYSIS;
1671}
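/*
 * Illustrative sketch (example only, never compiled): both analysis callbacks use the
 * operand size to tell near from far transfers. A 32-bit far call/jmp operand is a 16:32
 * pointer, i.e. 6 bytes (2-byte selector + 4-byte offset), while the near forms carry at
 * most a 4-byte displacement. The helper name below is hypothetical.
 */
#if 0
static bool exampleIsFarBranchOperand(uint32_t cbParam1)
{
    return cbParam1 == 6;   /* 16:32 far pointer: selector (2) + offset (4). */
}
#endif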
1672
1673/**
1674 * Recompiles the instructions in a code block
1675 *
1676 * @returns VBox status code.
1677 * @param pVM Pointer to the VM.
1678 * @param pCpu CPU disassembly state
1679 * @param pInstrGC Guest context pointer to privileged instruction
1680 * @param pCurInstrGC Guest context pointer to the current instruction
1681 * @param pCacheRec Cache record ptr
1682 *
1683 */
1684static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1685{
1686 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1687 int rc = VINF_SUCCESS;
1688 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1689
1690 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1691
1692 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1693 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1694 {
1695 /*
1696 * Been there, done that; so insert a jump (we don't want to duplicate code)
1697 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1698 */
1699 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1700 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1701 }
1702
1703 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1704 {
1705 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1706 }
1707 else
1708 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1709
1710 if (RT_FAILURE(rc))
1711 return rc;
1712
1713 /* Note: Never do a direct return unless a failure is encountered! */
1714
1715 /* Clear recompilation of next instruction flag; we are doing that right here. */
1716 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1717 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1718
1719 /* Add lookup record for patch to guest address translation */
1720 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1721
1722 /* Update lowest and highest instruction address for this patch */
1723 if (pCurInstrGC < pPatch->pInstrGCLowest)
1724 pPatch->pInstrGCLowest = pCurInstrGC;
1725 else
1726 if (pCurInstrGC > pPatch->pInstrGCHighest)
1727 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1728
1729 /* Illegal instruction -> end of recompile phase for this code block. */
1730 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1731 {
1732 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1733 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1734 goto end;
1735 }
1736
1737 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1738 * Indirect calls are handled below.
1739 */
1740 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1741 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1742 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1743 {
1744 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1745 if (pTargetGC == 0)
1746 {
1747 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1748 return VERR_PATCHING_REFUSED;
1749 }
1750
1751 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1752 {
1753 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1754 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1755 if (RT_FAILURE(rc))
1756 goto end;
1757 }
1758 else
1759 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1760
1761 if (RT_SUCCESS(rc))
1762 rc = VWRN_CONTINUE_RECOMPILE;
1763
1764 goto end;
1765 }
1766
1767 switch (pCpu->pCurInstr->uOpcode)
1768 {
1769 case OP_CLI:
1770 {
1771 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1772 * until we've found the proper exit point(s).
1773 */
1774 if ( pCurInstrGC != pInstrGC
1775 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1776 )
1777 {
1778 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1779 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1780 }
1781 /* Set by irq inhibition; no longer valid now. */
1782 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1783
1784 rc = patmPatchGenCli(pVM, pPatch);
1785 if (RT_SUCCESS(rc))
1786 rc = VWRN_CONTINUE_RECOMPILE;
1787 break;
1788 }
1789
1790 case OP_MOV:
1791 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1792 {
1793 /* mov ss, src? */
1794 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1795 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1796 {
1797 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1798 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1799 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1800 }
1801#if 0 /* necessary for Haiku */
1802 else
1803 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1804 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1805 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1806 {
1807 /* mov GPR, ss */
1808 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1809 if (RT_SUCCESS(rc))
1810 rc = VWRN_CONTINUE_RECOMPILE;
1811 break;
1812 }
1813#endif
1814 }
1815 goto duplicate_instr;
1816
1817 case OP_POP:
1818 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1819 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1820 {
1821 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1822
1823 Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1824 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1825 }
1826 goto duplicate_instr;
1827
1828 case OP_STI:
1829 {
1830 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1831
1832 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1833 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1834 {
1835 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1836 fInhibitIRQInstr = true;
1837 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1838 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1839 }
1840 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1841
1842 if (RT_SUCCESS(rc))
1843 {
1844 DISCPUSTATE cpu = *pCpu;
1845 unsigned cbInstr;
1846 int disret;
1847 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1848
1849 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1850
1851 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1852 { /* Force pNextInstrHC out of scope after using it */
1853 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1854 if (pNextInstrHC == NULL)
1855 {
1856 AssertFailed();
1857 return VERR_PATCHING_REFUSED;
1858 }
1859
1860 // Disassemble the next instruction
1861 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1862 }
1863 if (disret == false)
1864 {
1865 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1866 return VERR_PATCHING_REFUSED;
1867 }
1868 pReturnInstrGC = pNextInstrGC + cbInstr;
1869
1870 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1871 || pReturnInstrGC <= pInstrGC
1872 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1873 )
1874 {
1875 /* Not an exit point for function duplication patches */
1876 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1877 && RT_SUCCESS(rc))
1878 {
1879 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1880 rc = VWRN_CONTINUE_RECOMPILE;
1881 }
1882 else
1883 rc = VINF_SUCCESS; //exit point
1884 }
1885 else {
1886 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1887 rc = VERR_PATCHING_REFUSED; //not allowed!!
1888 }
1889 }
1890 break;
1891 }
1892
1893 case OP_POPF:
1894 {
1895 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1896
1897 /* Not an exit point for IDT handler or function replacement patches */
1898 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1899 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1900 fGenerateJmpBack = false;
1901
1902 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1903 if (RT_SUCCESS(rc))
1904 {
1905 if (fGenerateJmpBack == false)
1906 {
1907 /* Not an exit point for IDT handler or function replacement patches */
1908 rc = VWRN_CONTINUE_RECOMPILE;
1909 }
1910 else
1911 {
1912 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1913 rc = VINF_SUCCESS; /* exit point! */
1914 }
1915 }
1916 break;
1917 }
1918
1919 case OP_PUSHF:
1920 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1921 if (RT_SUCCESS(rc))
1922 rc = VWRN_CONTINUE_RECOMPILE;
1923 break;
1924
1925 case OP_PUSH:
1926 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1927 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1928 {
1929 rc = patmPatchGenPushCS(pVM, pPatch);
1930 if (RT_SUCCESS(rc))
1931 rc = VWRN_CONTINUE_RECOMPILE;
1932 break;
1933 }
1934 goto duplicate_instr;
1935
1936 case OP_IRET:
1937 Log(("IRET at %RRv\n", pCurInstrGC));
1938 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1939 if (RT_SUCCESS(rc))
1940 {
1941 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1942 rc = VINF_SUCCESS; /* exit point by definition */
1943 }
1944 break;
1945
1946 case OP_ILLUD2:
1947 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1948 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1949 if (RT_SUCCESS(rc))
1950 rc = VINF_SUCCESS; /* exit point by definition */
1951 Log(("Illegal opcode (0xf 0xb)\n"));
1952 break;
1953
1954 case OP_CPUID:
1955 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1956 if (RT_SUCCESS(rc))
1957 rc = VWRN_CONTINUE_RECOMPILE;
1958 break;
1959
1960 case OP_STR:
1961#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1962 /* Now safe because our shadow TR entry is identical to the guest's. */
1963 goto duplicate_instr;
1964#endif
1965 case OP_SLDT:
1966 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1967 if (RT_SUCCESS(rc))
1968 rc = VWRN_CONTINUE_RECOMPILE;
1969 break;
1970
1971 case OP_SGDT:
1972 case OP_SIDT:
1973 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1974 if (RT_SUCCESS(rc))
1975 rc = VWRN_CONTINUE_RECOMPILE;
1976 break;
1977
1978 case OP_RETN:
1979 /* retn is an exit point for function patches */
1980 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1981 if (RT_SUCCESS(rc))
1982 rc = VINF_SUCCESS; /* exit point by definition */
1983 break;
1984
1985 case OP_SYSEXIT:
1986 /* Duplicate it, so it can be emulated in GC (or fault). */
1987 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1988 if (RT_SUCCESS(rc))
1989 rc = VINF_SUCCESS; /* exit point by definition */
1990 break;
1991
1992 case OP_CALL:
1993 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1994 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1995 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1996 */
1997 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
1998 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
1999 {
2000 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2001 if (RT_SUCCESS(rc))
2002 {
2003 rc = VWRN_CONTINUE_RECOMPILE;
2004 }
2005 break;
2006 }
2007 goto gen_illegal_instr;
2008
2009 case OP_JMP:
2010 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2011 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2012 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2013 */
2014 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2015 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2016 {
2017 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2018 if (RT_SUCCESS(rc))
2019 rc = VINF_SUCCESS; /* end of branch */
2020 break;
2021 }
2022 goto gen_illegal_instr;
2023
2024 case OP_INT3:
2025 case OP_INT:
2026 case OP_INTO:
2027 goto gen_illegal_instr;
2028
2029 case OP_MOV_DR:
2030 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether or not to interpret it. */
2031 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2032 {
2033 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2034 if (RT_SUCCESS(rc))
2035 rc = VWRN_CONTINUE_RECOMPILE;
2036 break;
2037 }
2038 goto duplicate_instr;
2039
2040 case OP_MOV_CR:
2041 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether or not to interpret it. */
2042 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2043 {
2044 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2045 if (RT_SUCCESS(rc))
2046 rc = VWRN_CONTINUE_RECOMPILE;
2047 break;
2048 }
2049 goto duplicate_instr;
2050
2051 default:
2052 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2053 {
2054gen_illegal_instr:
2055 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2056 if (RT_SUCCESS(rc))
2057 rc = VINF_SUCCESS; /* exit point by definition */
2058 }
2059 else
2060 {
2061duplicate_instr:
2062 Log(("patmPatchGenDuplicate\n"));
2063 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2064 if (RT_SUCCESS(rc))
2065 rc = VWRN_CONTINUE_RECOMPILE;
2066 }
2067 break;
2068 }
2069
2070end:
2071
2072 if ( !fInhibitIRQInstr
2073 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2074 {
2075 int rc2;
2076 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2077
2078 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2079 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2080 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2081 {
2082 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2083
2084 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2085 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2086 rc = VINF_SUCCESS; /* end of the line */
2087 }
2088 else
2089 {
2090 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2091 }
2092 if (RT_FAILURE(rc2))
2093 rc = rc2;
2094 }
2095
2096 if (RT_SUCCESS(rc))
2097 {
2098 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2099 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2100 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2101 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2102 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2103 )
2104 {
2105 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2106
2107 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2108 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2109
2110 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2111 AssertRC(rc);
2112 }
2113 }
2114 return rc;
2115}
2116
2117
2118#ifdef LOG_ENABLED
2119
2120 /**
2121 * Adds a disasm jump record (temporary; prevents duplicate analysis of jump targets).
2122 *
2123 * @param pVM Pointer to the VM.
2124 * @param pPatch Patch structure ptr
2125 * @param pInstrGC Guest context pointer to the jump target
2126 */
2127static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2128{
2129 PAVLPVNODECORE pRec;
2130
2131 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2132 Assert(pRec);
2133 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2134
2135 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2136 Assert(ret);
2137}
2138
2139/**
2140 * Checks if jump target has been analysed before.
2141 *
2142 * @returns true if the jump target has been analysed before, false otherwise.
2143 * @param pPatch Patch struct
2144 * @param pInstrGC Jump target
2145 *
2146 */
2147static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2148{
2149 PAVLPVNODECORE pRec;
2150
2151 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2152 if (pRec)
2153 return true;
2154 return false;
2155}
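/*
 * Illustrative sketch (example only, never compiled): patmPatchAddDisasmJump and
 * patmIsKnownDisasmJump together form a simple "visited set" over jump targets, keyed on
 * the guest address, so each target is only descended into once. The helper name below is
 * hypothetical; it condenses the pattern used by patmr3DisasmCode().
 */
#if 0
static void exampleAnalyseJumpTargetOnce(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC)
{
    if (!patmIsKnownDisasmJump(pPatch, pTargetGC))
    {
        patmPatchAddDisasmJump(pVM, pPatch, pTargetGC); /* remember it ... */
        /* ... then recurse into pTargetGC; the tree is emptied again by
           patmr3DisasmCodeStream() once the whole patch block has been dumped. */
    }
}
#endif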
2156
2157/**
2158 * For proper disassembly of the final patch block
2159 *
2160 * @returns VBox status code.
2161 * @param pVM Pointer to the VM.
2162 * @param pCpu CPU disassembly state
2163 * @param pInstrGC Guest context pointer to privileged instruction
2164 * @param pCurInstrGC Guest context pointer to the current instruction
2165 * @param pCacheRec Cache record ptr
2166 *
2167 */
2168int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2169{
2170 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2171 NOREF(pInstrGC);
2172
2173 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2174 {
2175 /* Could be an int3 inserted in a call patch. Check to be sure */
2176 DISCPUSTATE cpu;
2177 RTRCPTR pOrgJumpGC;
2178
2179 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2180
2181 { /* Force pOrgJumpHC out of scope after using it */
2182 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2183
2184 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2185 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2186 return VINF_SUCCESS;
2187 }
2188 return VWRN_CONTINUE_ANALYSIS;
2189 }
2190
2191 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2192 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2193 {
2194 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2195 return VWRN_CONTINUE_ANALYSIS;
2196 }
2197
2198 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2199 || pCpu->pCurInstr->uOpcode == OP_INT
2200 || pCpu->pCurInstr->uOpcode == OP_IRET
2201 || pCpu->pCurInstr->uOpcode == OP_RETN
2202 || pCpu->pCurInstr->uOpcode == OP_RETF
2203 )
2204 {
2205 return VINF_SUCCESS;
2206 }
2207
2208 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2209 return VINF_SUCCESS;
2210
2211 return VWRN_CONTINUE_ANALYSIS;
2212}
2213
2214
2215/**
2216 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2217 *
2218 * @returns VBox status code.
2219 * @param pVM Pointer to the VM.
2220 * @param pInstrGC Guest context pointer to the initial privileged instruction
2221 * @param pCurInstrGC Guest context pointer to the current instruction
2222 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2223 * @param pCacheRec Cache record ptr
2224 *
2225 */
2226int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2227{
2228 DISCPUSTATE cpu;
2229 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2230 int rc = VWRN_CONTINUE_ANALYSIS;
2231 uint32_t cbInstr, delta;
2232 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2233 bool disret;
2234 char szOutput[256];
2235
2236 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2237
2238 /* We need this to determine branch targets (and for disassembling). */
2239 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2240
2241 while (rc == VWRN_CONTINUE_ANALYSIS)
2242 {
2243 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2244 if (pCurInstrHC == NULL)
2245 {
2246 rc = VERR_PATCHING_REFUSED;
2247 goto end;
2248 }
2249
2250 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2251 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2252 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2253 {
2254 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2255
2256 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2257 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2258 else
2259 Log(("DIS %s", szOutput));
2260
2261 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2262 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2263 {
2264 rc = VINF_SUCCESS;
2265 goto end;
2266 }
2267 }
2268 else
2269 Log(("DIS: %s", szOutput));
2270
2271 if (disret == false)
2272 {
2273 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2274 rc = VINF_SUCCESS;
2275 goto end;
2276 }
2277
2278 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2279 if (rc != VWRN_CONTINUE_ANALYSIS) {
2280 break; //done!
2281 }
2282
2283 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2284 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2285 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2286 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2287 )
2288 {
2289 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2290 RTRCPTR pOrgTargetGC;
2291
2292 if (pTargetGC == 0)
2293 {
2294 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2295 rc = VERR_PATCHING_REFUSED;
2296 break;
2297 }
2298
2299 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2300 {
2301 //jump back to guest code
2302 rc = VINF_SUCCESS;
2303 goto end;
2304 }
2305 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2306
2307 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2308 {
2309 rc = VINF_SUCCESS;
2310 goto end;
2311 }
2312
2313 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2314 {
2315 /* New jump, let's check it. */
2316 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2317
2318 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2319 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2320 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2321
2322 if (rc != VINF_SUCCESS) {
2323 break; //done!
2324 }
2325 }
2326 if (cpu.pCurInstr->uOpcode == OP_JMP)
2327 {
2328 /* Unconditional jump; return to caller. */
2329 rc = VINF_SUCCESS;
2330 goto end;
2331 }
2332
2333 rc = VWRN_CONTINUE_ANALYSIS;
2334 }
2335 pCurInstrGC += cbInstr;
2336 }
2337end:
2338 return rc;
2339}
2340
2341/**
2342 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2343 *
2344 * @returns VBox status code.
2345 * @param pVM Pointer to the VM.
2346 * @param pInstrGC Guest context pointer to the initial privileged instruction
2347 * @param pCurInstrGC Guest context pointer to the current instruction
2348 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2349 * @param pCacheRec Cache record ptr
2350 *
2351 */
2352int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2353{
2354 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2355
2356 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2357 /* Free all disasm jump records. */
2358 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2359 return rc;
2360}
2361
2362#endif /* LOG_ENABLED */
2363
2364/**
2365 * Detects whether the specified address falls within a 5 byte jump generated for an active patch.
2366 * If so, this patch is permanently disabled.
2367 *
2368 * @param pVM Pointer to the VM.
2369 * @param pInstrGC Guest context pointer to instruction
2370 * @param pConflictGC Guest context pointer to check
2371 *
2372 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2373 *
2374 */
2375VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2376{
2377 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2378 if (pTargetPatch)
2379 {
2380 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2381 }
2382 return VERR_PATCH_NO_CONFLICT;
2383}
2384
2385/**
2386 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2387 *
2388 * @returns VBox status code.
2389 * @param pVM Pointer to the VM.
2390 * @param pInstrGC Guest context pointer to privileged instruction
2391 * @param pCurInstrGC Guest context pointer to the current instruction
2392 * @param pfnPATMR3Recompile Callback for recompiling the disassembled instruction
2393 * @param pCacheRec Cache record ptr
2394 *
2395 */
2396static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2397{
2398 DISCPUSTATE cpu;
2399 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2400 int rc = VWRN_CONTINUE_ANALYSIS;
2401 uint32_t cbInstr;
2402 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2403 bool disret;
2404#ifdef LOG_ENABLED
2405 char szOutput[256];
2406#endif
2407
2408 while (rc == VWRN_CONTINUE_RECOMPILE)
2409 {
2410 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2411 if (pCurInstrHC == NULL)
2412 {
2413 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2414 goto end;
2415 }
2416#ifdef LOG_ENABLED
2417 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2418 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2419 Log(("Recompile: %s", szOutput));
2420#else
2421 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2422#endif
2423 if (disret == false)
2424 {
2425 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2426
2427 /* Add lookup record for patch to guest address translation */
2428 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2429 patmPatchGenIllegalInstr(pVM, pPatch);
2430 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2431 goto end;
2432 }
2433
2434 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2435 if (rc != VWRN_CONTINUE_RECOMPILE)
2436 {
2437 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2438 if ( rc == VINF_SUCCESS
2439 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2440 {
2441 DISCPUSTATE cpunext;
2442 uint32_t opsizenext;
2443 uint8_t *pNextInstrHC;
2444 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2445
2446 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2447
2448 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2449 * Recompile the next instruction as well
2450 */
2451 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2452 if (pNextInstrHC == NULL)
2453 {
2454 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2455 goto end;
2456 }
2457 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2458 if (disret == false)
2459 {
2460 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2461 goto end;
2462 }
2463 switch(cpunext.pCurInstr->uOpcode)
2464 {
2465 case OP_IRET: /* inhibit cleared in generated code */
2466 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2467 case OP_HLT:
2468 break; /* recompile these */
2469
2470 default:
2471 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2472 {
2473 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2474
2475 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2476 AssertRC(rc);
2477 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2478 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2479 }
2480 break;
2481 }
2482
2483 /* Note: after a cli we must continue to a proper exit point */
2484 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2485 {
2486 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2487 if (RT_SUCCESS(rc))
2488 {
2489 rc = VINF_SUCCESS;
2490 goto end;
2491 }
2492 break;
2493 }
2494 else
2495 rc = VWRN_CONTINUE_RECOMPILE;
2496 }
2497 else
2498 break; /* done! */
2499 }
2500
2501 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2502
2503
2504 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2505 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2506 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2507 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2508 )
2509 {
2510 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2511 if (addr == 0)
2512 {
2513 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2514 rc = VERR_PATCHING_REFUSED;
2515 break;
2516 }
2517
2518 Log(("Jump encountered target %RRv\n", addr));
2519
2520 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2521 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2522 {
2523 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2524 /* First we need to finish this linear code stream until the next exit point. */
2525 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2526 if (RT_FAILURE(rc))
2527 {
2528 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2529 break; //fatal error
2530 }
2531 }
2532
2533 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2534 {
2535 /* New code; let's recompile it. */
2536 Log(("patmRecompileCodeStream continue with jump\n"));
2537
2538 /*
2539 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2540 * that patch so we can continue our analysis.
2541 *
2542 * We rely on CSAM to detect and resolve conflicts.
2543 */
2544 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2545 if(pTargetPatch)
2546 {
2547 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2548 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2549 }
2550
2551 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2552 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2553 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2554
2555 if(pTargetPatch)
2556 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2557
2558 if (RT_FAILURE(rc))
2559 {
2560 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2561 break; //done!
2562 }
2563 }
2564 /* Always return to caller here; we're done! */
2565 rc = VINF_SUCCESS;
2566 goto end;
2567 }
2568 else
2569 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2570 {
2571 rc = VINF_SUCCESS;
2572 goto end;
2573 }
2574 pCurInstrGC += cbInstr;
2575 }
2576end:
2577 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2578 return rc;
2579}
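/*
 * Illustrative sketch (example only, never compiled): the recompiler follows relative
 * branches via PATMResolveBranch(). Conceptually, a near relative branch on x86 encodes a
 * signed displacement measured from the end of the instruction, so the target works out
 * as below. The helper name is hypothetical; the real resolver also copes with far
 * branches and 16-bit operand size.
 */
#if 0
static RTRCPTR exampleResolveNearRelBranch(RTRCPTR pInstrGC, uint32_t cbInstr, int32_t offDisp)
{
    /* target = address of the next instruction + signed displacement. */
    return pInstrGC + cbInstr + offDisp;
}
#endif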
2580
2581
2582/**
2583 * Generate the jump from guest to patch code
2584 *
2585 * @returns VBox status code.
2586 * @param pVM Pointer to the VM.
2587 * @param pPatch Patch record
2588 * @param pCacheRec Guest translation lookup cache record
2589 */
2590static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2591{
2592 uint8_t temp[8];
2593 uint8_t *pPB;
2594 int rc;
2595
2596 Assert(pPatch->cbPatchJump <= sizeof(temp));
2597 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2598
2599 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2600 Assert(pPB);
2601
2602#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2603 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2604 {
2605 Assert(pPatch->pPatchJumpDestGC);
2606
2607 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2608 {
2609 // jmp [PatchCode]
2610 if (fAddFixup)
2611 {
2612 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2613 {
2614 Log(("Relocation failed for the jump in the guest code!!\n"));
2615 return VERR_PATCHING_REFUSED;
2616 }
2617 }
2618
2619 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2620 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //rel32 displacement to the jump target
2621 }
2622 else
2623 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2624 {
2625 // jmp [PatchCode]
2626 if (fAddFixup)
2627 {
2628 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2629 {
2630 Log(("Relocation failed for the jump in the guest code!!\n"));
2631 return VERR_PATCHING_REFUSED;
2632 }
2633 }
2634
2635 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2636 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2637 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //rel32 displacement to the jump target
2638 }
2639 else
2640 {
2641 Assert(0);
2642 return VERR_PATCHING_REFUSED;
2643 }
2644 }
2645 else
2646#endif
2647 {
2648 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2649
2650 // jmp [PatchCode]
2651 if (fAddFixup)
2652 {
2653 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2654 {
2655 Log(("Relocation failed for the jump in the guest code!!\n"));
2656 return VERR_PATCHING_REFUSED;
2657 }
2658 }
2659 temp[0] = 0xE9; //jmp
2660 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //rel32 displacement to the patch code
2661 }
2662 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2663 AssertRC(rc);
2664
2665 if (rc == VINF_SUCCESS)
2666 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2667
2668 return rc;
2669}
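/*
 * Illustrative sketch (example only, never compiled): the 5-byte patch jump written above
 * is a plain x86 "jmp rel32" (opcode 0xE9) whose displacement is relative to the first
 * byte after the jump. The helper name below is hypothetical.
 */
#if 0
static void exampleEncodeJmpRel32(uint8_t pbDst[SIZEOF_NEARJUMP32], uint32_t uSrc, uint32_t uTarget)
{
    pbDst[0] = 0xE9;                                            /* jmp rel32 */
    uint32_t offRel = uTarget - (uSrc + SIZEOF_NEARJUMP32);     /* relative to the next instruction. */
    memcpy(&pbDst[1], &offRel, sizeof(offRel));
}
#endif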
2670
2671/**
2672 * Remove the jump from guest to patch code
2673 *
2674 * @returns VBox status code.
2675 * @param pVM Pointer to the VM.
2676 * @param pPatch Patch record
2677 */
2678static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2679{
2680#ifdef DEBUG
2681 DISCPUSTATE cpu;
2682 char szOutput[256];
2683 uint32_t cbInstr, i = 0;
2684 bool disret;
2685
2686 while (i < pPatch->cbPrivInstr)
2687 {
2688 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2689 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2690 if (disret == false)
2691 break;
2692
2693 Log(("Org patch jump: %s", szOutput));
2694 Assert(cbInstr);
2695 i += cbInstr;
2696 }
2697#endif
2698
2699 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2700 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2701#ifdef DEBUG
2702 if (rc == VINF_SUCCESS)
2703 {
2704 i = 0;
2705 while (i < pPatch->cbPrivInstr)
2706 {
2707 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2708 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2709 if (disret == false)
2710 break;
2711
2712 Log(("Org instr: %s", szOutput));
2713 Assert(cbInstr);
2714 i += cbInstr;
2715 }
2716 }
2717#endif
2718 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2719 return rc;
2720}
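/*
 * Illustrative sketch (example only, never compiled): removal is the mirror image of
 * installation -- the bytes clobbered by the 5/6 byte patch jump were saved into
 * pPatch->aPrivInstr when the jump was written and are simply written back here. The
 * helper name below is hypothetical; the two calls are the same ones used elsewhere in
 * this file.
 */
#if 0
static int exampleSaveAndRestorePatchSite(PVM pVM, PPATCHINFO pPatch, uint8_t abSaved[8])
{
    /* Install time: remember what is about to be overwritten ... */
    int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), abSaved, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
    /* ... removal time: put it back. */
    if (RT_SUCCESS(rc))
        rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, abSaved, pPatch->cbPatchJump);
    return rc;
}
#endif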
2721
2722/**
2723 * Generate the call from guest to patch code
2724 *
2725 * @returns VBox status code.
2726 * @param pVM Pointer to the VM.
2727 * @param pPatch Patch record
2728 * @param pTargetGC Guest context address of the call/jump target
2729 * @param pCacheRec Guest translation cache record
2730 */
2731static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2732{
2733 uint8_t temp[8];
2734 uint8_t *pPB;
2735 int rc;
2736
2737 Assert(pPatch->cbPatchJump <= sizeof(temp));
2738
2739 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2740 Assert(pPB);
2741
2742 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2743
2744 // jmp [PatchCode]
2745 if (fAddFixup)
2746 {
2747 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2748 {
2749 Log(("Relocation failed for the jump in the guest code!!\n"));
2750 return VERR_PATCHING_REFUSED;
2751 }
2752 }
2753
2754 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2755 temp[0] = pPatch->aPrivInstr[0];
2756 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //rel32 displacement to the target
2757
2758 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2759 AssertRC(rc);
2760
2761 return rc;
2762}
2763
2764
2765/**
2766 * Patch cli/sti pushf/popf instruction block at specified location
2767 *
2768 * @returns VBox status code.
2769 * @param pVM Pointer to the VM.
2770 * @param pInstrGC Guest context pointer to the privileged instruction
2771 * @param pInstrHC Host context pointer to the privileged instruction
2772 * @param uOpcode Instruction opcode
2773 * @param uOpSize Size of starting instruction
2774 * @param pPatchRec Patch record
2775 *
2776 * @note returns failure if patching is not allowed or possible
2777 *
2778 */
2779static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2780 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2781{
2782 PPATCHINFO pPatch = &pPatchRec->patch;
2783 int rc = VERR_PATCHING_REFUSED;
2784 uint32_t orgOffsetPatchMem = ~0;
2785 RTRCPTR pInstrStart;
2786 bool fInserted;
2787 NOREF(pInstrHC); NOREF(uOpSize);
2788
2789 /* Save original offset (in case of failures later on) */
2790 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2791 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2792
2793 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2794 switch (uOpcode)
2795 {
2796 case OP_MOV:
2797 break;
2798
2799 case OP_CLI:
2800 case OP_PUSHF:
2801 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2802 /* Note: special precautions are taken when disabling and enabling such patches. */
2803 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2804 break;
2805
2806 default:
2807 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2808 {
2809 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2810 return VERR_INVALID_PARAMETER;
2811 }
2812 }
2813
2814 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2815 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2816
2817 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2818 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2819 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2820 )
2821 {
2822 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2823 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2824 rc = VERR_PATCHING_REFUSED;
2825 goto failure;
2826 }
2827
2828 pPatch->nrPatch2GuestRecs = 0;
2829 pInstrStart = pInstrGC;
2830
2831#ifdef PATM_ENABLE_CALL
2832 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2833#endif
2834
2835 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2836 pPatch->uCurPatchOffset = 0;
2837
2838 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2839 {
2840 Assert(pPatch->flags & PATMFL_INTHANDLER);
2841
2842 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2843 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2844 if (RT_FAILURE(rc))
2845 goto failure;
2846 }
2847
2848 /***************************************************************************************************************************/
2849 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2850 /***************************************************************************************************************************/
2851#ifdef VBOX_WITH_STATISTICS
2852 if (!(pPatch->flags & PATMFL_SYSENTER))
2853 {
2854 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2855 if (RT_FAILURE(rc))
2856 goto failure;
2857 }
2858#endif
2859
2860 PATMP2GLOOKUPREC cacheRec;
2861 RT_ZERO(cacheRec);
2862 cacheRec.pPatch = pPatch;
2863
2864 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2865 /* Free leftover lock if any. */
2866 if (cacheRec.Lock.pvMap)
2867 {
2868 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2869 cacheRec.Lock.pvMap = NULL;
2870 }
2871 if (rc != VINF_SUCCESS)
2872 {
2873 Log(("patmR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2874 goto failure;
2875 }
2876
2877 /* Calculated during analysis. */
2878 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2879 {
2880 /* Most likely cause: we encountered an illegal instruction very early on. */
2881 /** @todo could turn it into an int3 callable patch. */
2882 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2883 rc = VERR_PATCHING_REFUSED;
2884 goto failure;
2885 }
2886
2887 /* size of patch block */
2888 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2889
2890
2891 /* Update free pointer in patch memory. */
2892 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2893 /* Round to next 8 byte boundary. */
2894 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2895
2896 /*
2897 * Insert into patch to guest lookup tree
2898 */
2899 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2900 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2901 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2902 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2903 if (!fInserted)
2904 {
2905 rc = VERR_PATCHING_REFUSED;
2906 goto failure;
2907 }
2908
2909 /* Note that patmr3SetBranchTargets can install additional patches!! */
2910 rc = patmr3SetBranchTargets(pVM, pPatch);
2911 if (rc != VINF_SUCCESS)
2912 {
2913 Log(("patmR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2914 goto failure;
2915 }
2916
2917#ifdef LOG_ENABLED
2918 Log(("Patch code ----------------------------------------------------------\n"));
2919 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2920 /* Free leftover lock if any. */
2921 if (cacheRec.Lock.pvMap)
2922 {
2923 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2924 cacheRec.Lock.pvMap = NULL;
2925 }
2926 Log(("Patch code ends -----------------------------------------------------\n"));
2927#endif
2928
2929 /* make a copy of the guest code bytes that will be overwritten */
2930 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2931
2932 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2933 AssertRC(rc);
2934
2935 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2936 {
2937 /*uint8_t bASMInt3 = 0xCC; - unused */
2938
2939 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2940 /* Replace first opcode byte with 'int 3'. */
2941 rc = patmActivateInt3Patch(pVM, pPatch);
2942 if (RT_FAILURE(rc))
2943 goto failure;
2944
2945 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2946 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2947
2948 pPatch->flags &= ~PATMFL_INSTR_HINT;
2949 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2950 }
2951 else
2952 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2953 {
2954 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2955 /* now insert a jump in the guest code */
2956 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2957 AssertRC(rc);
2958 if (RT_FAILURE(rc))
2959 goto failure;
2960
2961 }
2962
2963 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2964
2965 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2966 pPatch->pTempInfo->nrIllegalInstr = 0;
2967
2968 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2969
2970 pPatch->uState = PATCH_ENABLED;
2971 return VINF_SUCCESS;
2972
2973failure:
2974 if (pPatchRec->CoreOffset.Key)
2975 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2976
2977 patmEmptyTree(pVM, &pPatch->FixupTree);
2978 pPatch->nrFixups = 0;
2979
2980 patmEmptyTree(pVM, &pPatch->JumpTree);
2981 pPatch->nrJumpRecs = 0;
2982
2983 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2984 pPatch->pTempInfo->nrIllegalInstr = 0;
2985
2986 /* Turn this cli patch into a dummy. */
2987 pPatch->uState = PATCH_REFUSED;
2988 pPatch->pPatchBlockOffset = 0;
2989
2990 // Give back the patch memory we no longer need
2991 Assert(orgOffsetPatchMem != (uint32_t)~0);
2992 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2993
2994 return rc;
2995}
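/*
 * Illustrative sketch (example only, never compiled): patmR3PatchBlock() refuses to
 * install a patch jump whose 5 bytes would straddle a guest page boundary (see the
 * PATMFL_MUST_INSTALL_PATCHJMP check above). A generic form of that test, using plain
 * arithmetic instead of the PAGE_ADDRESS() macro; the helper name is hypothetical and
 * 4 KiB pages are assumed.
 */
#if 0
static bool exampleWriteCrossesPage(uint32_t uGCPtr, uint32_t cbWrite)
{
    uint32_t const cbPage = 4096;
    /* The first and last byte of the write must share the same page frame. */
    return (uGCPtr & ~(cbPage - 1)) != ((uGCPtr + cbWrite - 1) & ~(cbPage - 1));
}
#endif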
2996
2997/**
2998 * Patch IDT handler
2999 *
3000 * @returns VBox status code.
3001 * @param pVM Pointer to the VM.
3002 * @param pInstrGC Guest context pointer to the privileged instruction
3003 * @param uOpSize Size of starting instruction
3004 * @param pPatchRec Patch record
3005 * @param pCacheRec Cache record ptr
3006 *
3007 * @note returns failure if patching is not allowed or possible
3008 *
3009 */
3010static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3011{
3012 PPATCHINFO pPatch = &pPatchRec->patch;
3013 bool disret;
3014 DISCPUSTATE cpuPush, cpuJmp;
3015 uint32_t cbInstr;
3016 RTRCPTR pCurInstrGC = pInstrGC;
3017 uint8_t *pCurInstrHC, *pInstrHC;
3018 uint32_t orgOffsetPatchMem = ~0;
3019
3020 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3021 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3022
3023 /*
3024 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3025 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3026 * condition here and only patch the common entrypoint once.
3027 */
3028 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3029 Assert(disret);
3030 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3031 {
3032 RTRCPTR pJmpInstrGC;
3033 int rc;
3034 pCurInstrGC += cbInstr;
3035
3036 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3037 if ( disret
3038 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3039 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3040 )
3041 {
3042 bool fInserted;
3043 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3044 if (pJmpPatch == 0)
3045 {
3046 /* Patch it first! */
3047 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3048 if (rc != VINF_SUCCESS)
3049 goto failure;
3050 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3051 Assert(pJmpPatch);
3052 }
3053 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3054 goto failure;
3055
3056 /* save original offset (in case of failures later on) */
3057 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3058
3059 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3060 pPatch->uCurPatchOffset = 0;
3061 pPatch->nrPatch2GuestRecs = 0;
3062
3063#ifdef VBOX_WITH_STATISTICS
3064 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3065 if (RT_FAILURE(rc))
3066 goto failure;
3067#endif
3068
3069 /* Install fake cli patch (to clear the virtual IF) */
3070 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3071 if (RT_FAILURE(rc))
3072 goto failure;
3073
3074 /* Add lookup record for patch to guest address translation (for the push) */
3075 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3076
3077 /* Duplicate push. */
3078 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3079 if (RT_FAILURE(rc))
3080 goto failure;
3081
3082 /* Generate jump to common entrypoint. */
3083 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3084 if (RT_FAILURE(rc))
3085 goto failure;
3086
3087 /* size of patch block */
3088 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3089
3090 /* Update free pointer in patch memory. */
3091 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3092 /* Round to next 8 byte boundary */
3093 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3094
3095 /* There's no jump from guest to patch code. */
3096 pPatch->cbPatchJump = 0;
3097
3098
3099#ifdef LOG_ENABLED
3100 Log(("Patch code ----------------------------------------------------------\n"));
3101 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3102 Log(("Patch code ends -----------------------------------------------------\n"));
3103#endif
3104 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3105
3106 /*
3107 * Insert into patch to guest lookup tree
3108 */
3109 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3110 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3111 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3112 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3113
3114 pPatch->uState = PATCH_ENABLED;
3115
3116 return VINF_SUCCESS;
3117 }
3118 }
3119failure:
3120 /* Give back the patch memory we no longer need */
3121 if (orgOffsetPatchMem != (uint32_t)~0)
3122 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3123
3124 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3125}
3126
3127/**
3128 * Install a trampoline to call a guest trap handler directly
3129 *
3130 * @returns VBox status code.
3131 * @param pVM Pointer to the VM.
3132 * @param pInstrGC Guest context pointer to the privileged instruction
3133 * @param pPatchRec Patch record
3134 * @param pCacheRec Cache record ptr
3135 *
3136 */
3137static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3138{
3139 PPATCHINFO pPatch = &pPatchRec->patch;
3140 int rc = VERR_PATCHING_REFUSED;
3141 uint32_t orgOffsetPatchMem = ~0;
3142 bool fInserted;
3143
3144 // save original offset (in case of failures later on)
3145 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3146
3147 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3148 pPatch->uCurPatchOffset = 0;
3149 pPatch->nrPatch2GuestRecs = 0;
3150
3151#ifdef VBOX_WITH_STATISTICS
3152 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3153 if (RT_FAILURE(rc))
3154 goto failure;
3155#endif
3156
3157 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3158 if (RT_FAILURE(rc))
3159 goto failure;
3160
3161 /* size of patch block */
3162 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3163
3164 /* Update free pointer in patch memory. */
3165 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3166 /* Round to next 8 byte boundary */
3167 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3168
3169 /* There's no jump from guest to patch code. */
3170 pPatch->cbPatchJump = 0;
3171
3172#ifdef LOG_ENABLED
3173 Log(("Patch code ----------------------------------------------------------\n"));
3174 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3175 Log(("Patch code ends -----------------------------------------------------\n"));
3176#endif
3177 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3178 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3179
3180 /*
3181 * Insert into patch to guest lookup tree
3182 */
3183 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3184 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3185 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3186 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3187
3188 pPatch->uState = PATCH_ENABLED;
3189 return VINF_SUCCESS;
3190
3191failure:
3192 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3193
3194 /* Turn this patch into a dummy. */
3195 pPatch->uState = PATCH_REFUSED;
3196 pPatch->pPatchBlockOffset = 0;
3197
3198 /* Give back the patch memory we no longer need */
3199 Assert(orgOffsetPatchMem != (uint32_t)~0);
3200 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3201
3202 return rc;
3203}
3204
3205
3206#ifdef LOG_ENABLED
3207/**
3208 * Check if the instruction is patched as a common idt handler
3209 *
3210 * @returns true or false
3211 * @param pVM Pointer to the VM.
3212 * @param pInstrGC Guest context pointer to the instruction
3213 *
3214 */
3215static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3216{
3217 PPATMPATCHREC pRec;
3218
3219 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3220 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3221 return true;
3222 return false;
3223}
3224 #endif // LOG_ENABLED
3225
3226
3227/**
3228 * Duplicates a complete function
3229 *
3230 * @returns VBox status code.
3231 * @param pVM Pointer to the VM.
3232 * @param pInstrGC Guest context pointer to the privileged instruction
3233 * @param pPatchRec Patch record
3234 * @param pCacheRec Cache record ptr
3235 *
3236 */
3237static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3238{
3239 PPATCHINFO pPatch = &pPatchRec->patch;
3240 int rc = VERR_PATCHING_REFUSED;
3241 uint32_t orgOffsetPatchMem = ~0;
3242 bool fInserted;
3243
3244 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3245 /* Save original offset (in case of failures later on). */
3246 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3247
3248 /* We will not go on indefinitely with call instruction handling. */
3249 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3250 {
3251 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3252 return VERR_PATCHING_REFUSED;
3253 }
3254
3255 pVM->patm.s.ulCallDepth++;
3256
3257#ifdef PATM_ENABLE_CALL
3258 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3259#endif
3260
3261 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3262
3263 pPatch->nrPatch2GuestRecs = 0;
3264 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3265 pPatch->uCurPatchOffset = 0;
3266
3267 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3268 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3269 if (RT_FAILURE(rc))
3270 goto failure;
3271
3272#ifdef VBOX_WITH_STATISTICS
3273 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3274 if (RT_FAILURE(rc))
3275 goto failure;
3276#endif
3277
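 /* Recompile the guest function into patch memory, one instruction at a time via patmRecompileCallback. */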
3278 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3279 if (rc != VINF_SUCCESS)
3280 {
3281 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3282 goto failure;
3283 }
3284
3285 //size of patch block
3286 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3287
3288 //update free pointer in patch memory
3289 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3290 /* Round to next 8 byte boundary. */
3291 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3292
3293 pPatch->uState = PATCH_ENABLED;
3294
3295 /*
3296 * Insert into patch to guest lookup tree
3297 */
3298 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3299 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3300 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3301 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3302 if (!fInserted)
3303 {
3304 rc = VERR_PATCHING_REFUSED;
3305 goto failure;
3306 }
3307
3308 /* Note that patmr3SetBranchTargets can install additional patches!! */
3309 rc = patmr3SetBranchTargets(pVM, pPatch);
3310 if (rc != VINF_SUCCESS)
3311 {
3312 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3313 goto failure;
3314 }
3315
3316#ifdef LOG_ENABLED
3317 Log(("Patch code ----------------------------------------------------------\n"));
3318 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3319 Log(("Patch code ends -----------------------------------------------------\n"));
3320#endif
3321
3322 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3323
3324 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3325 pPatch->pTempInfo->nrIllegalInstr = 0;
3326
3327 pVM->patm.s.ulCallDepth--;
3328 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3329 return VINF_SUCCESS;
3330
3331failure:
3332 if (pPatchRec->CoreOffset.Key)
3333 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3334
3335 patmEmptyTree(pVM, &pPatch->FixupTree);
3336 pPatch->nrFixups = 0;
3337
3338 patmEmptyTree(pVM, &pPatch->JumpTree);
3339 pPatch->nrJumpRecs = 0;
3340
3341 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3342 pPatch->pTempInfo->nrIllegalInstr = 0;
3343
3344 /* Turn this patch into a dummy. */
3345 pPatch->uState = PATCH_REFUSED;
3346 pPatch->pPatchBlockOffset = 0;
3347
3348 // Give back the patch memory we no longer need
3349 Assert(orgOffsetPatchMem != (uint32_t)~0);
3350 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3351
3352 pVM->patm.s.ulCallDepth--;
3353 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3354 return rc;
3355}
3356
3357/**
3358 * Creates trampoline code to jump inside an existing patch
3359 *
3360 * @returns VBox status code.
3361 * @param pVM Pointer to the VM.
3362 * @param pInstrGC Guest context pointer to the privileged instruction
3363 * @param pPatchRec Patch record
3364 *
3365 */
3366static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3367{
3368 PPATCHINFO pPatch = &pPatchRec->patch;
3369 RTRCPTR pPage, pPatchTargetGC = 0;
3370 uint32_t orgOffsetPatchMem = ~0;
3371 int rc = VERR_PATCHING_REFUSED;
3372 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3373 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3374 bool fInserted = false;
3375
3376 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3377 /* Save original offset (in case of failures later on). */
3378 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3379
3380 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3381 /** @todo we already checked this before */
3382 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3383
3384 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3385 if (pPatchPage)
3386 {
3387 uint32_t i;
3388
3389 for (i=0;i<pPatchPage->cCount;i++)
3390 {
3391 if (pPatchPage->papPatch[i])
3392 {
3393 pPatchToJmp = pPatchPage->papPatch[i];
3394
3395 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3396 && pPatchToJmp->uState == PATCH_ENABLED)
3397 {
3398 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3399 if (pPatchTargetGC)
3400 {
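 /* Look up the patch-to-guest record for this offset so the target instruction can be flagged as an external jump target. */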
3401 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3402 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3403 Assert(pPatchToGuestRec);
3404
3405 pPatchToGuestRec->fJumpTarget = true;
3406 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3407 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3408 break;
3409 }
3410 }
3411 }
3412 }
3413 }
3414 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3415
3416 /*
3417 * Only record the trampoline patch if this is the first patch to the target
3418 * or we recorded other patches already.
3419 * The goal is to refuse refreshing function duplicates if the guest
3420 * modifies code after a saved state was loaded because it is not possible
3421 * to save the relation between trampoline and target without changing the
3422 * saved state version.
3423 */
3424 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3425 || pPatchToJmp->pTrampolinePatchesHead)
3426 {
3427 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3428 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3429 if (!pTrampRec)
3430 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3431
3432 pTrampRec->pPatchTrampoline = pPatchRec;
3433 }
3434
3435 pPatch->nrPatch2GuestRecs = 0;
3436 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3437 pPatch->uCurPatchOffset = 0;
3438
3439 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3440 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3441 if (RT_FAILURE(rc))
3442 goto failure;
3443
3444#ifdef VBOX_WITH_STATISTICS
3445 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3446 if (RT_FAILURE(rc))
3447 goto failure;
3448#endif
3449
3450 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3451 if (RT_FAILURE(rc))
3452 goto failure;
3453
3454 /*
3455 * Insert into patch to guest lookup tree
3456 */
3457 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3458 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3459 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3460 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3461 if (!fInserted)
3462 {
3463 rc = VERR_PATCHING_REFUSED;
3464 goto failure;
3465 }
3466
3467 /* size of patch block */
3468 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3469
3470 /* Update free pointer in patch memory. */
3471 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3472 /* Round to next 8 byte boundary */
3473 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3474
3475 /* There's no jump from guest to patch code. */
3476 pPatch->cbPatchJump = 0;
3477
3478 /* Enable the patch. */
3479 pPatch->uState = PATCH_ENABLED;
3480 /* We allow this patch to be called as a function. */
3481 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3482
3483 if (pTrampRec)
3484 {
3485 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3486 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3487 }
3488 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3489 return VINF_SUCCESS;
3490
3491failure:
3492 if (pPatchRec->CoreOffset.Key)
3493 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3494
3495 patmEmptyTree(pVM, &pPatch->FixupTree);
3496 pPatch->nrFixups = 0;
3497
3498 patmEmptyTree(pVM, &pPatch->JumpTree);
3499 pPatch->nrJumpRecs = 0;
3500
3501 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3502 pPatch->pTempInfo->nrIllegalInstr = 0;
3503
3504 /* Turn this patch into a dummy. */
3505 pPatch->uState = PATCH_REFUSED;
3506 pPatch->pPatchBlockOffset = 0;
3507
3508 // Give back the patch memory we no longer need
3509 Assert(orgOffsetPatchMem != (uint32_t)~0);
3510 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3511
3512 if (pTrampRec)
3513 MMR3HeapFree(pTrampRec);
3514
3515 return rc;
3516}
3517
3518
3519/**
3520 * Patch branch target function for call/jump at specified location.
3521 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3522 *
3523 * @returns VBox status code.
3524 * @param pVM Pointer to the VM.
3525 * @param pCtx Pointer to the guest CPU context.
3526 *
3527 */
3528VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3529{
3530 RTRCPTR pBranchTarget, pPage;
3531 int rc;
3532 RTRCPTR pPatchTargetGC = 0;
3533
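 /* The patch code passes the branch target in EDX; EDI holds the patch address of the branch lookup cache (asserted below). */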
3534 pBranchTarget = pCtx->edx;
3535 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3536
3537 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3538 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3539
3540 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3541 if (pPatchPage)
3542 {
3543 uint32_t i;
3544
3545 for (i=0;i<pPatchPage->cCount;i++)
3546 {
3547 if (pPatchPage->papPatch[i])
3548 {
3549 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3550
3551 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3552 && pPatch->uState == PATCH_ENABLED)
3553 {
3554 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3555 if (pPatchTargetGC)
3556 {
3557 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3558 break;
3559 }
3560 }
3561 }
3562 }
3563 }
3564
3565 if (pPatchTargetGC)
3566 {
3567 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3568 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3569 }
3570 else
3571 {
3572 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3573 }
3574
3575 if (rc == VINF_SUCCESS)
3576 {
3577 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3578 Assert(pPatchTargetGC);
3579 }
3580
3581 if (pPatchTargetGC)
3582 {
3583 pCtx->eax = pPatchTargetGC;
3584 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3585 }
3586 else
3587 {
3588 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3589 pCtx->eax = 0;
3590 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3591 }
3592 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3593 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3594 AssertRC(rc);
3595
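 /* Skip the special illegal instruction that raised this request so the guest resumes right after it. */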
3596 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3597 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3598 return VINF_SUCCESS;
3599}
3600
3601/**
3602 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3603 *
3604 * @returns VBox status code.
3605 * @param pVM Pointer to the VM.
3606 * @param pCpu Disassembly CPU structure ptr
3607 * @param pInstrGC Guest context pointer to the privileged instruction
3608 * @param pCacheRec Cache record ptr
3609 *
3610 */
3611static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3612{
3613 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3614 int rc = VERR_PATCHING_REFUSED;
3615 DISCPUSTATE cpu;
3616 RTRCPTR pTargetGC;
3617 PPATMPATCHREC pPatchFunction;
3618 uint32_t cbInstr;
3619 bool disret;
3620
3621 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3622 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3623
3624 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3625 {
3626 rc = VERR_PATCHING_REFUSED;
3627 goto failure;
3628 }
3629
3630 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3631 if (pTargetGC == 0)
3632 {
3633 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3634 rc = VERR_PATCHING_REFUSED;
3635 goto failure;
3636 }
3637
3638 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3639 if (pPatchFunction == NULL)
3640 {
3641 for(;;)
3642 {
3643 /* It could be an indirect call (call -> jmp dest).
3644 * Note that it's dangerous to assume the jump will never change...
3645 */
3646 uint8_t *pTmpInstrHC;
3647
3648 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3649 Assert(pTmpInstrHC);
3650 if (pTmpInstrHC == 0)
3651 break;
3652
3653 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3654 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3655 break;
3656
3657 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3658 if (pTargetGC == 0)
3659 {
3660 break;
3661 }
3662
3663 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3664 break;
3665 }
3666 if (pPatchFunction == 0)
3667 {
3668 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3669 rc = VERR_PATCHING_REFUSED;
3670 goto failure;
3671 }
3672 }
3673
3674 // make a copy of the guest code bytes that will be overwritten
3675 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3676
3677 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3678 AssertRC(rc);
3679
3680 /* Now replace the original call in the guest code */
3681 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3682 AssertRC(rc);
3683 if (RT_FAILURE(rc))
3684 goto failure;
3685
3686 /* Lowest and highest address for write monitoring. */
3687 pPatch->pInstrGCLowest = pInstrGC;
3688 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3689 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3690
3691 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3692
3693 pPatch->uState = PATCH_ENABLED;
3694 return VINF_SUCCESS;
3695
3696failure:
3697 /* Turn this patch into a dummy. */
3698 pPatch->uState = PATCH_REFUSED;
3699
3700 return rc;
3701}
3702
3703/**
3704 * Replace the address in an MMIO instruction with the cached version.
3705 *
3706 * @returns VBox status code.
3707 * @param pVM Pointer to the VM.
3708 * @param pInstrGC Guest context pointer to the privileged instruction
3709 * @param pCpu Disassembly CPU structure ptr
3710 * @param pCacheRec Cache record ptr
3711 *
3712 * @note returns failure if patching is not allowed or not possible
3713 *
3714 */
3715static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3716{
3717 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3718 uint8_t *pPB;
3719 int rc = VERR_PATCHING_REFUSED;
3720
3721 Assert(pVM->patm.s.mmio.pCachedData);
3722 if (!pVM->patm.s.mmio.pCachedData)
3723 goto failure;
3724
3725 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3726 goto failure;
3727
3728 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3729 if (pPB == 0)
3730 goto failure;
3731
3732 /* Add relocation record for cached data access. */
3733 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3734 {
3735 Log(("Relocation failed for cached mmio address!!\n"));
3736 return VERR_PATCHING_REFUSED;
3737 }
3738 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3739
3740 /* Save original instruction. */
3741 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3742 AssertRC(rc);
3743
3744 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3745
3746 /* Replace address with that of the cached item. */
3747 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3748 AssertRC(rc);
3749 if (RT_FAILURE(rc))
3750 {
3751 goto failure;
3752 }
3753
3754 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3755 pVM->patm.s.mmio.pCachedData = 0;
3756 pVM->patm.s.mmio.GCPhys = 0;
3757 pPatch->uState = PATCH_ENABLED;
3758 return VINF_SUCCESS;
3759
3760failure:
3761 /* Turn this patch into a dummy. */
3762 pPatch->uState = PATCH_REFUSED;
3763
3764 return rc;
3765}
3766
3767
3768/**
3769 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3770 *
3771 * @returns VBox status code.
3772 * @param pVM Pointer to the VM.
3773 * @param pInstrGC Guest context pointer to the privileged instruction
3774 * @param pPatch Patch record
3775 *
3776 * @note returns failure if patching is not allowed or not possible
3777 *
3778 */
3779static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3780{
3781 DISCPUSTATE cpu;
3782 uint32_t cbInstr;
3783 bool disret;
3784 uint8_t *pInstrHC;
3785
3786 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3787
3788 /* Convert GC to HC address. */
3789 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3790 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3791
3792 /* Disassemble mmio instruction. */
3793 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3794 &cpu, &cbInstr);
3795 if (disret == false)
3796 {
3797 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3798 return VERR_PATCHING_REFUSED;
3799 }
3800
3801 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3802 if (cbInstr > MAX_INSTR_SIZE)
3803 return VERR_PATCHING_REFUSED;
3804 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3805 return VERR_PATCHING_REFUSED;
3806
3807 /* Add relocation record for cached data access. */
3808 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3809 {
3810 Log(("Relocation failed for cached mmio address!!\n"));
3811 return VERR_PATCHING_REFUSED;
3812 }
3813 /* Replace address with that of the cached item. */
3814 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3815
3816 /* Lowest and highest address for write monitoring. */
3817 pPatch->pInstrGCLowest = pInstrGC;
3818 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3819
3820 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3821 pVM->patm.s.mmio.pCachedData = 0;
3822 pVM->patm.s.mmio.GCPhys = 0;
3823 return VINF_SUCCESS;
3824}
3825
3826/**
3827 * Activates an int3 patch
3828 *
3829 * @returns VBox status code.
3830 * @param pVM Pointer to the VM.
3831 * @param pPatch Patch record
3832 */
3833static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3834{
3835 uint8_t bASMInt3 = 0xCC;
3836 int rc;
3837
3838 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3839 Assert(pPatch->uState != PATCH_ENABLED);
3840
3841 /* Replace first opcode byte with 'int 3'. */
3842 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3843 AssertRC(rc);
3844
3845 pPatch->cbPatchJump = sizeof(bASMInt3);
3846
3847 return rc;
3848}
3849
3850/**
3851 * Deactivates an int3 patch
3852 *
3853 * @returns VBox status code.
3854 * @param pVM Pointer to the VM.
3855 * @param pPatch Patch record
3856 */
3857static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3858{
3859 uint8_t ASMInt3 = 0xCC;
3860 int rc;
3861
3862 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3863 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3864
3865 /* Restore first opcode byte. */
3866 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3867 AssertRC(rc);
3868 return rc;
3869}
3870
3871/**
3872 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3873 * in the raw-mode context.
3874 *
3875 * @returns VBox status code.
3876 * @param pVM Pointer to the VM.
3877 * @param pInstrGC Guest context pointer to the privileged instruction
3878 * @param pInstrHC Host context point to privileged instruction
3879 * @param pCpu Disassembly CPU structure ptr
3880 * @param pPatch Patch record
3881 *
3882 * @note returns failure if patching is not allowed or not possible
3883 *
3884 */
3885int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3886{
3887 uint8_t bASMInt3 = 0xCC;
3888 int rc;
3889
3890 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3891 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3892
3893 /* Save the original instruction. */
3894 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3895 AssertRC(rc);
3896 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3897
3898 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3899
3900 /* Replace first opcode byte with 'int 3'. */
3901 rc = patmActivateInt3Patch(pVM, pPatch);
3902 if (RT_FAILURE(rc))
3903 goto failure;
3904
3905 /* Lowest and highest address for write monitoring. */
3906 pPatch->pInstrGCLowest = pInstrGC;
3907 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3908
3909 pPatch->uState = PATCH_ENABLED;
3910 return VINF_SUCCESS;
3911
3912failure:
3913 /* Turn this patch into a dummy. */
3914 return VERR_PATCHING_REFUSED;
3915}
3916
3917#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3918/**
3919 * Patch a jump instruction at specified location
3920 *
3921 * @returns VBox status code.
3922 * @param pVM Pointer to the VM.
3923 * @param pInstrGC Guest context pointer to the privileged instruction
3924 * @param pInstrHC Host context point to privileged instruction
3925 * @param pCpu Disassembly CPU structure ptr
3926 * @param pPatchRec Patch record
3927 *
3928 * @note returns failure if patching is not allowed or not possible
3929 *
3930 */
3931int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3932{
3933 PPATCHINFO pPatch = &pPatchRec->patch;
3934 int rc = VERR_PATCHING_REFUSED;
3935
3936 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3937 pPatch->uCurPatchOffset = 0;
3938 pPatch->cbPatchBlockSize = 0;
3939 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3940
3941 /*
3942 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3943 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3944 */
3945 switch (pCpu->pCurInstr->uOpcode)
3946 {
3947 case OP_JO:
3948 case OP_JNO:
3949 case OP_JC:
3950 case OP_JNC:
3951 case OP_JE:
3952 case OP_JNE:
3953 case OP_JBE:
3954 case OP_JNBE:
3955 case OP_JS:
3956 case OP_JNS:
3957 case OP_JP:
3958 case OP_JNP:
3959 case OP_JL:
3960 case OP_JNL:
3961 case OP_JLE:
3962 case OP_JNLE:
3963 case OP_JMP:
3964 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3965 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
3966 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
3967 goto failure;
3968
3969 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
3970 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
3971 goto failure;
3972
3973 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
3974 {
3975 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3976 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3977 rc = VERR_PATCHING_REFUSED;
3978 goto failure;
3979 }
3980
3981 break;
3982
3983 default:
3984 goto failure;
3985 }
3986
3987 // make a copy of the guest code bytes that will be overwritten
3988 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
3989 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
3990 pPatch->cbPatchJump = pCpu->cbInstr;
3991
3992 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3993 AssertRC(rc);
3994
3995 /* Now insert a jump in the guest code. */
3996 /*
3997 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3998 * references the target instruction in the conflict patch.
3999 */
4000 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4001
4002 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4003 pPatch->pPatchJumpDestGC = pJmpDest;
4004
4005 PATMP2GLOOKUPREC cacheRec;
4006 RT_ZERO(cacheRec);
4007 cacheRec.pPatch = pPatch;
4008
4009 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4010 /* Free leftover lock if any. */
4011 if (cacheRec.Lock.pvMap)
4012 {
4013 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4014 cacheRec.Lock.pvMap = NULL;
4015 }
4016 AssertRC(rc);
4017 if (RT_FAILURE(rc))
4018 goto failure;
4019
4020 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4021
4022 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4023 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4024
4025 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4026
4027 /* Lowest and highest address for write monitoring. */
4028 pPatch->pInstrGCLowest = pInstrGC;
4029 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4030
4031 pPatch->uState = PATCH_ENABLED;
4032 return VINF_SUCCESS;
4033
4034failure:
4035 /* Turn this patch into a dummy. */
4036 pPatch->uState = PATCH_REFUSED;
4037
4038 return rc;
4039}
4040#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4041
4042
4043/**
4044 * Gives hint to PATM about supervisor guest instructions
4045 *
4046 * @returns VBox status code.
4047 * @param pVM Pointer to the VM.
4048 * @param pInstrGC Guest context pointer to the privileged instruction
4049 * @param flags Patch flags
4050 */
4051VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4052{
4053 Assert(pInstrGC);
4054 Assert(flags == PATMFL_CODE32);
4055
4056 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4057 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4058}
4059
4060/**
4061 * Patch privileged instruction at specified location
4062 *
4063 * @returns VBox status code.
4064 * @param pVM Pointer to the VM.
4065 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
4066 * @param flags Patch flags
4067 *
4068 * @note returns failure if patching is not allowed or not possible
4069 */
4070VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4071{
4072 DISCPUSTATE cpu;
4073 R3PTRTYPE(uint8_t *) pInstrHC;
4074 uint32_t cbInstr;
4075 PPATMPATCHREC pPatchRec;
4076 PCPUMCTX pCtx = 0;
4077 bool disret;
4078 int rc;
4079 PVMCPU pVCpu = VMMGetCpu0(pVM);
4080 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4081
4082 if ( !pVM
4083 || pInstrGC == 0
4084 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4085 {
4086 AssertFailed();
4087 return VERR_INVALID_PARAMETER;
4088 }
4089
4090 if (PATMIsEnabled(pVM) == false)
4091 return VERR_PATCHING_REFUSED;
4092
4093 /* Test for patch conflict only with patches that actually change guest code. */
4094 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4095 {
4096 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4097 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4098 if (pConflictPatch != 0)
4099 return VERR_PATCHING_REFUSED;
4100 }
4101
4102 if (!(flags & PATMFL_CODE32))
4103 {
4104 /** @todo Only 32 bits code right now */
4105 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4106 return VERR_NOT_IMPLEMENTED;
4107 }
4108
4109 /* We ran out of patch memory; don't bother anymore. */
4110 if (pVM->patm.s.fOutOfMemory == true)
4111 return VERR_PATCHING_REFUSED;
4112
4113#if 0 /* DONT COMMIT ENABLED! */
4114 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
4115 if ( 0
4116 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4117 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4118 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4119 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4120 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4121 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4122 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4123 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4124 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4125 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4126 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4127 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4128 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4129 || pInstrGC == 0x80014447 /* KfLowerIrql */
4130 || 0)
4131 {
4132 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4133 return VERR_PATCHING_REFUSED;
4134 }
4135#endif
4136
4137 /* Make sure the code selector is wide open; otherwise refuse. */
4138 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4139 if (CPUMGetGuestCPL(pVCpu) == 0)
4140 {
4141 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4142 if (pInstrGCFlat != pInstrGC)
4143 {
4144 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4145 return VERR_PATCHING_REFUSED;
4146 }
4147 }
4148
4149 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4150 if (!(flags & PATMFL_GUEST_SPECIFIC))
4151 {
4152 /* New code. Make sure CSAM has a go at it first. */
4153 CSAMR3CheckCode(pVM, pInstrGC);
4154 }
4155
4156 /* Note: obsolete */
4157 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4158 && (flags & PATMFL_MMIO_ACCESS))
4159 {
4160 RTRCUINTPTR offset;
4161 void *pvPatchCoreOffset;
4162
4163 /* Find the patch record. */
4164 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4165 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4166 if (pvPatchCoreOffset == NULL)
4167 {
4168 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4169 return VERR_PATCH_NOT_FOUND; //fatal error
4170 }
4171 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4172
4173 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4174 }
4175
4176 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4177
4178 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4179 if (pPatchRec)
4180 {
4181 Assert(!(flags & PATMFL_TRAMPOLINE));
4182
4183 /* Hints about existing patches are ignored. */
4184 if (flags & PATMFL_INSTR_HINT)
4185 return VERR_PATCHING_REFUSED;
4186
4187 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4188 {
4189 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4190 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4191 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4192 }
4193
4194 if (pPatchRec->patch.uState == PATCH_DISABLED)
4195 {
4196 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4197 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4198 {
4199 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4200 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4201 }
4202 else
4203 Log(("Enabling patch %RRv again\n", pInstrGC));
4204
4205 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4206 rc = PATMR3EnablePatch(pVM, pInstrGC);
4207 if (RT_SUCCESS(rc))
4208 return VWRN_PATCH_ENABLED;
4209
4210 return rc;
4211 }
4212 if ( pPatchRec->patch.uState == PATCH_ENABLED
4213 || pPatchRec->patch.uState == PATCH_DIRTY)
4214 {
4215 /*
4216 * The patch might have been overwritten.
4217 */
4218 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4219 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4220 {
4221 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4222 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4223 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4224 {
4225 if (flags & PATMFL_IDTHANDLER)
4226 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4227
4228 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4229 }
4230 }
4231 rc = PATMR3RemovePatch(pVM, pInstrGC);
4232 if (RT_FAILURE(rc))
4233 return VERR_PATCHING_REFUSED;
4234 }
4235 else
4236 {
4237 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4238 /* already tried it once! */
4239 return VERR_PATCHING_REFUSED;
4240 }
4241 }
4242
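 /* Resolve the guest physical address so ROM and MMIO pages can be refused below. */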
4243 RTGCPHYS GCPhys;
4244 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4245 if (rc != VINF_SUCCESS)
4246 {
4247 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4248 return rc;
4249 }
4250 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4251 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4252 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4253 {
4254 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4255 return VERR_PATCHING_REFUSED;
4256 }
4257
4258 /* Initialize cache record for guest address translations. */
4259 bool fInserted;
4260 PATMP2GLOOKUPREC cacheRec;
4261 RT_ZERO(cacheRec);
4262
4263 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4264 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4265
4266 /* Allocate patch record. */
4267 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4268 if (RT_FAILURE(rc))
4269 {
4270 Log(("Out of memory!!!!\n"));
4271 return VERR_NO_MEMORY;
4272 }
4273 pPatchRec->Core.Key = pInstrGC;
4274 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4275 /* Insert patch record into the lookup tree. */
4276 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4277 Assert(fInserted);
4278
4279 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4280 pPatchRec->patch.flags = flags;
4281 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4282 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4283
4284 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4285 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4286
4287 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4288 {
4289 /*
4290 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4291 */
4292 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4293 if (pPatchNear)
4294 {
4295 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4296 {
4297 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4298
4299 pPatchRec->patch.uState = PATCH_UNUSABLE;
4300 /*
4301 * Leave the new patch active as it's marked unusable, to prevent us from checking it over and over again
4302 */
4303 return VERR_PATCHING_REFUSED;
4304 }
4305 }
4306 }
4307
4308 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4309 if (pPatchRec->patch.pTempInfo == 0)
4310 {
4311 Log(("Out of memory!!!!\n"));
4312 return VERR_NO_MEMORY;
4313 }
4314
4315 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4316 if (disret == false)
4317 {
4318 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4319 return VERR_PATCHING_REFUSED;
4320 }
4321
4322 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4323 if (cbInstr > MAX_INSTR_SIZE)
4324 return VERR_PATCHING_REFUSED;
4325
4326 pPatchRec->patch.cbPrivInstr = cbInstr;
4327 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4328
4329 /* Restricted hinting for now. */
4330 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4331
4332 /* Initialize cache record patch pointer. */
4333 cacheRec.pPatch = &pPatchRec->patch;
4334
4335 /* Allocate statistics slot */
4336 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4337 {
4338 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4339 }
4340 else
4341 {
4342 Log(("WARNING: Patch index wrap around!!\n"));
4343 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4344 }
4345
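 /* Dispatch to the installer that matches the requested patch type. */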
4346 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4347 {
4348 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4349 }
4350 else
4351 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4352 {
4353 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4354 }
4355 else
4356 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4357 {
4358 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4359 }
4360 else
4361 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4362 {
4363 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4364 }
4365 else
4366 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4367 {
4368 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4369 }
4370 else
4371 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4372 {
4373 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4374 }
4375 else
4376 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4377 {
4378 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4379 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4380
4381 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4382#ifdef VBOX_WITH_STATISTICS
4383 if ( rc == VINF_SUCCESS
4384 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4385 {
4386 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4387 }
4388#endif
4389 }
4390 else
4391 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4392 {
4393 switch (cpu.pCurInstr->uOpcode)
4394 {
4395 case OP_SYSENTER:
4396 case OP_PUSH:
4397 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4398 if (rc == VINF_SUCCESS)
4399 {
4400 if (rc == VINF_SUCCESS)
4401 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4402 return rc;
4403 }
4404 break;
4405
4406 default:
4407 rc = VERR_NOT_IMPLEMENTED;
4408 break;
4409 }
4410 }
4411 else
4412 {
4413 switch (cpu.pCurInstr->uOpcode)
4414 {
4415 case OP_SYSENTER:
4416 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4417 if (rc == VINF_SUCCESS)
4418 {
4419 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4420 return VINF_SUCCESS;
4421 }
4422 break;
4423
4424#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4425 case OP_JO:
4426 case OP_JNO:
4427 case OP_JC:
4428 case OP_JNC:
4429 case OP_JE:
4430 case OP_JNE:
4431 case OP_JBE:
4432 case OP_JNBE:
4433 case OP_JS:
4434 case OP_JNS:
4435 case OP_JP:
4436 case OP_JNP:
4437 case OP_JL:
4438 case OP_JNL:
4439 case OP_JLE:
4440 case OP_JNLE:
4441 case OP_JECXZ:
4442 case OP_LOOP:
4443 case OP_LOOPNE:
4444 case OP_LOOPE:
4445 case OP_JMP:
4446 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4447 {
4448 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4449 break;
4450 }
4451 return VERR_NOT_IMPLEMENTED;
4452#endif
4453
4454 case OP_PUSHF:
4455 case OP_CLI:
4456 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4457 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4458 break;
4459
4460#ifndef VBOX_WITH_SAFE_STR
4461 case OP_STR:
4462#endif
4463 case OP_SGDT:
4464 case OP_SLDT:
4465 case OP_SIDT:
4466 case OP_CPUID:
4467 case OP_LSL:
4468 case OP_LAR:
4469 case OP_SMSW:
4470 case OP_VERW:
4471 case OP_VERR:
4472 case OP_IRET:
4473#ifdef VBOX_WITH_RAW_RING1
4474 case OP_MOV:
4475#endif
4476 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4477 break;
4478
4479 default:
4480 return VERR_NOT_IMPLEMENTED;
4481 }
4482 }
4483
4484 if (rc != VINF_SUCCESS)
4485 {
4486 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4487 {
4488 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4489 pPatchRec->patch.nrPatch2GuestRecs = 0;
4490 }
4491 pVM->patm.s.uCurrentPatchIdx--;
4492 }
4493 else
4494 {
4495 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4496 AssertRCReturn(rc, rc);
4497
4498 /* Keep track upper and lower boundaries of patched instructions */
4499 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4500 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4501 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4502 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4503
4504 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4505 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4506
4507 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4508 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4509
4510 rc = VINF_SUCCESS;
4511
4512 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4513 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4514 {
4515 rc = PATMR3DisablePatch(pVM, pInstrGC);
4516 AssertRCReturn(rc, rc);
4517 }
4518
4519#ifdef VBOX_WITH_STATISTICS
4520 /* Register statistics counter */
4521 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4522 {
4523 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4524 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4525#ifndef DEBUG_sandervl
4526 /* Full breakdown for the GUI. */
4527 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4528 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4529 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4530 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4531 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4532 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4533 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4534 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4535 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4536 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4537 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4538 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4539 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4540 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4541 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4542 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4543#endif
4544 }
4545#endif
4546 }
4547 /* Free leftover lock if any. */
4548 if (cacheRec.Lock.pvMap)
4549 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4550 return rc;
4551}
4552
4553/**
4554 * Query instruction size
4555 *
4556 * @returns VBox status code.
4557 * @param pVM Pointer to the VM.
4558 * @param pPatch Patch record
4559 * @param pInstrGC Instruction address
4560 */
4561static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4562{
4563 uint8_t *pInstrHC;
4564 PGMPAGEMAPLOCK Lock;
4565
4566 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4567 if (rc == VINF_SUCCESS)
4568 {
4569 DISCPUSTATE cpu;
4570 bool disret;
4571 uint32_t cbInstr;
4572
4573 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4574 PGMPhysReleasePageMappingLock(pVM, &Lock);
4575 if (disret)
4576 return cbInstr;
4577 }
4578 return 0;
4579}
4580
4581/**
4582 * Add patch to page record
4583 *
4584 * @returns VBox status code.
4585 * @param pVM Pointer to the VM.
4586 * @param pPage Page address
4587 * @param pPatch Patch record
4588 */
4589int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4590{
4591 PPATMPATCHPAGE pPatchPage;
4592 int rc;
4593
4594 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4595
4596 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4597 if (pPatchPage)
4598 {
4599 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4600 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4601 {
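 /* The per-page patch array is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT entries and copy over the old ones. */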
4602 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4603 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4604
4605 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4606 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4607 (void **)&pPatchPage->papPatch);
4608 if (RT_FAILURE(rc))
4609 {
4610 Log(("Out of memory!!!!\n"));
4611 return VERR_NO_MEMORY;
4612 }
4613 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4614 MMHyperFree(pVM, papPatchOld);
4615 }
4616 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4617 pPatchPage->cCount++;
4618 }
4619 else
4620 {
4621 bool fInserted;
4622
4623 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4624 if (RT_FAILURE(rc))
4625 {
4626 Log(("Out of memory!!!!\n"));
4627 return VERR_NO_MEMORY;
4628 }
4629 pPatchPage->Core.Key = pPage;
4630 pPatchPage->cCount = 1;
4631 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4632
4633 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4634 (void **)&pPatchPage->papPatch);
4635 if (RT_FAILURE(rc))
4636 {
4637 Log(("Out of memory!!!!\n"));
4638 MMHyperFree(pVM, pPatchPage);
4639 return VERR_NO_MEMORY;
4640 }
4641 pPatchPage->papPatch[0] = pPatch;
4642
4643 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4644 Assert(fInserted);
4645 pVM->patm.s.cPageRecords++;
4646
4647 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4648 }
4649 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4650
4651 /* Get the closest guest instruction (from below) */
4652 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4653 Assert(pGuestToPatchRec);
4654 if (pGuestToPatchRec)
4655 {
4656 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4657 if ( pPatchPage->pLowestAddrGC == 0
4658 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4659 {
4660 RTRCUINTPTR offset;
4661
4662 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4663
4664 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4665 /* If we're too close to the page boundary, then make sure an
4666 instruction from the previous page doesn't cross the
4667 boundary itself. */
4668 if (offset && offset < MAX_INSTR_SIZE)
4669 {
4670 /* Get the closest guest instruction (from above) */
4671 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4672
4673 if (pGuestToPatchRec)
4674 {
4675 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4676 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4677 {
4678 pPatchPage->pLowestAddrGC = pPage;
4679 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4680 }
4681 }
4682 }
4683 }
4684 }
4685
4686 /* Get the closest guest instruction (from above) */
4687 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4688 Assert(pGuestToPatchRec);
4689 if (pGuestToPatchRec)
4690 {
4691 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4692 if ( pPatchPage->pHighestAddrGC == 0
4693 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4694 {
4695 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4696 /* Increase by instruction size. */
4697 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4698//// Assert(size);
4699 pPatchPage->pHighestAddrGC += size;
4700 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4701 }
4702 }
4703
4704 return VINF_SUCCESS;
4705}
4706
4707/**
4708 * Remove patch from page record
4709 *
4710 * @returns VBox status code.
4711 * @param pVM Pointer to the VM.
4712 * @param pPage Page address
4713 * @param pPatch Patch record
4714 */
4715int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4716{
4717 PPATMPATCHPAGE pPatchPage;
4718 int rc;
4719
4720 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4721 Assert(pPatchPage);
4722
4723 if (!pPatchPage)
4724 return VERR_INVALID_PARAMETER;
4725
4726 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4727
4728 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4729 if (pPatchPage->cCount > 1)
4730 {
4731 uint32_t i;
4732
4733 /* Used by multiple patches */
4734 for (i = 0; i < pPatchPage->cCount; i++)
4735 {
4736 if (pPatchPage->papPatch[i] == pPatch)
4737 {
4738 /* close the gap between the remaining pointers. */
4739 uint32_t cNew = --pPatchPage->cCount;
4740 if (i < cNew)
4741 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4742 pPatchPage->papPatch[cNew] = NULL;
4743 return VINF_SUCCESS;
4744 }
4745 }
4746 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4747 }
4748 else
4749 {
4750 PPATMPATCHPAGE pPatchNode;
4751
4752 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4753
4754 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4755 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4756 Assert(pPatchNode && pPatchNode == pPatchPage);
4757
4758 Assert(pPatchPage->papPatch);
4759 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4760 AssertRC(rc);
4761 rc = MMHyperFree(pVM, pPatchPage);
4762 AssertRC(rc);
4763 pVM->patm.s.cPageRecords--;
4764 }
4765 return VINF_SUCCESS;
4766}
4767
4768/**
4769 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4770 *
4771 * @returns VBox status code.
4772 * @param pVM Pointer to the VM.
4773 * @param pPatch Patch record
4774 */
4775int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4776{
4777 int rc;
4778 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4779
4780 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4781 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4782 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4783
4784 /** @todo optimize better (large gaps between current and next used page) */
4785 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4786 {
4787 /* Get the closest guest instruction (from above) */
4788 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4789 if ( pGuestToPatchRec
4790 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4791 )
4792 {
4793 /* Code in page really patched -> add record */
4794 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4795 AssertRC(rc);
4796 }
4797 }
4798 pPatch->flags |= PATMFL_CODE_MONITORED;
4799 return VINF_SUCCESS;
4800}
4801
4802/**
4803 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4804 *
4805 * @returns VBox status code.
4806 * @param pVM Pointer to the VM.
4807 * @param pPatch Patch record
4808 */
4809static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4810{
4811 int rc;
4812 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4813
4814    /* Remove the page records of the pages that contain patched instructions (used for detecting self-modifying code). */
4815 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4816 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4817
4818 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4819 {
4820 /* Get the closest guest instruction (from above) */
4821 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4822 if ( pGuestToPatchRec
4823 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4824 )
4825 {
4826 /* Code in page really patched -> remove record */
4827 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4828 AssertRC(rc);
4829 }
4830 }
4831 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4832 return VINF_SUCCESS;
4833}
4834
4835/**
4836 * Notifies PATM about a (potential) write to code that has been patched.
4837 *
4838 * @returns VBox status code.
4839 * @param pVM Pointer to the VM.
4840 * @param GCPtr GC pointer to write address
4841 * @param cbWrite Number of bytes to write
4842 *
4843 */
4844VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4845{
4846 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4847
4848 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4849
4850 Assert(VM_IS_EMT(pVM));
4851
4852 /* Quick boundary check */
4853 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4854 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4855 )
4856 return VINF_SUCCESS;
4857
4858 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4859
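    /* Determine the first and last guest page touched by the write. */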
4860 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4861 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4862
4863 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4864 {
4865loop_start:
4866 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4867 if (pPatchPage)
4868 {
4869 uint32_t i;
4870 bool fValidPatchWrite = false;
4871
4872 /* Quick check to see if the write is in the patched part of the page */
4873 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4874 || pPatchPage->pHighestAddrGC < GCPtr)
4875 {
4876 break;
4877 }
4878
4879 for (i=0;i<pPatchPage->cCount;i++)
4880 {
4881 if (pPatchPage->papPatch[i])
4882 {
4883 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4884 RTRCPTR pPatchInstrGC;
4885 //unused: bool fForceBreak = false;
4886
4887 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4888 /** @todo inefficient and includes redundant checks for multiple pages. */
4889 for (uint32_t j=0; j<cbWrite; j++)
4890 {
4891 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4892
4893 if ( pPatch->cbPatchJump
4894 && pGuestPtrGC >= pPatch->pPrivInstrGC
4895 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4896 {
4897 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4898 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4899 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4900 if (rc == VINF_SUCCESS)
4901 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4902 goto loop_start;
4903
4904 continue;
4905 }
4906
4907 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4908 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4909 if (!pPatchInstrGC)
4910 {
4911 RTRCPTR pClosestInstrGC;
4912 uint32_t size;
4913
4914 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4915 if (pPatchInstrGC)
4916 {
4917 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4918 Assert(pClosestInstrGC <= pGuestPtrGC);
4919 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4920 /* Check if this is not a write into a gap between two patches */
4921 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4922 pPatchInstrGC = 0;
4923 }
4924 }
4925 if (pPatchInstrGC)
4926 {
4927 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4928
4929 fValidPatchWrite = true;
4930
4931 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4932 Assert(pPatchToGuestRec);
4933 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4934 {
4935 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4936
4937 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4938 {
4939 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4940
4941 patmR3MarkDirtyPatch(pVM, pPatch);
4942
4943 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4944 goto loop_start;
4945 }
4946 else
4947 {
4948 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4949 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4950
4951 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4952 pPatchToGuestRec->fDirty = true;
4953
4954 *pInstrHC = 0xCC;
4955
4956 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4957 }
4958 }
4959 /* else already marked dirty */
4960 }
4961 }
4962 }
4963 } /* for each patch */
4964
4965 if (fValidPatchWrite == false)
4966 {
4967                /* Write to a part of the page that either:
4968                 * - doesn't contain any code (shared code/data); rather unlikely, or
4969                 * - belongs to an old code page that's no longer in active use.
4970                 */
4971invalid_write_loop_start:
4972 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4973
4974 if (pPatchPage)
4975 {
4976 for (i=0;i<pPatchPage->cCount;i++)
4977 {
4978 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4979
4980 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4981 {
4982 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4983 if (pPatch->flags & PATMFL_IDTHANDLER)
4984 {
4985 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4986
4987 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4988 int rc = patmRemovePatchPages(pVM, pPatch);
4989 AssertRC(rc);
4990 }
4991 else
4992 {
4993 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4994 patmR3MarkDirtyPatch(pVM, pPatch);
4995 }
4996 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4997 goto invalid_write_loop_start;
4998 }
4999 } /* for */
5000 }
5001 }
5002 }
5003 }
5004 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5005 return VINF_SUCCESS;
5006
5007}
5008
5009/**
5010 * Disable all patches in a flushed page.
5011 *
5012 * @returns VBox status code.
5013 * @param pVM Pointer to the VM.
5014 * @param addr GC address of the page to flush
5015 *
5016 * @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed.
5017 */
5018VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5019{
5020 addr &= PAGE_BASE_GC_MASK;
5021
5022 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5023 if (pPatchPage)
5024 {
5025 int i;
5026
5027        /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5028 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5029 {
5030 if (pPatchPage->papPatch[i])
5031 {
5032 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5033
5034 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5035 patmR3MarkDirtyPatch(pVM, pPatch);
5036 }
5037 }
5038 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5039 }
5040 return VINF_SUCCESS;
5041}
5042
5043/**
5044 * Checks if the instruction at the specified address has already been patched.
5045 *
5046 * @returns boolean, patched or not
5047 * @param pVM Pointer to the VM.
5048 * @param pInstrGC Guest context pointer to instruction
5049 */
5050VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5051{
5052 PPATMPATCHREC pPatchRec;
5053 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5054 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5055 return true;
5056 return false;
5057}
5058
5059/**
5060 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
5061 *
5062 * @returns VBox status code.
5063 * @param pVM Pointer to the VM.
5064 * @param pInstrGC GC address of instr
5065 * @param pByte opcode byte pointer (OUT)
5066 *
5067 */
5068VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5069{
5070 PPATMPATCHREC pPatchRec;
5071
5072 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5073
5074 /* Shortcut. */
5075 if ( !PATMIsEnabled(pVM)
5076 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5077 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5078 {
5079 return VERR_PATCH_NOT_FOUND;
5080 }
5081
5082 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5083 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5084 if ( pPatchRec
5085 && pPatchRec->patch.uState == PATCH_ENABLED
5086 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5087 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5088 {
5089 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5090 *pByte = pPatchRec->patch.aPrivInstr[offset];
5091
5092 if (pPatchRec->patch.cbPatchJump == 1)
5093 {
5094 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5095 }
5096 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5097 return VINF_SUCCESS;
5098 }
5099 return VERR_PATCH_NOT_FOUND;
5100}
5101
5102/**
5103 * Read instruction bytes of the original code that was overwritten by the
5104 * 5-byte patch jump.
5105 *
5106 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5107 * @param pVM Pointer to the VM.
5108 * @param GCPtrInstr GC address of instr
5109 * @param pbDst The output buffer.
5110 * @param cbToRead The maximum number of bytes to read.
5111 * @param pcbRead Where to return the actual number of bytes read.
5112 */
5113VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5114{
5115 /* Shortcut. */
5116 if ( !PATMIsEnabled(pVM)
5117 || GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5118 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5119 return VERR_PATCH_NOT_FOUND;
5120
5121 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5122
5123 /*
5124 * If the patch is enabled and the pointer lies within 5 bytes of this
5125 * priv instr ptr, then we've got a hit!
5126 */
5127 RTGCPTR32 off;
5128 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5129 GCPtrInstr, false /*fAbove*/);
5130 if ( pPatchRec
5131 && pPatchRec->patch.uState == PATCH_ENABLED
5132 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5133 {
5134 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5135 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5136 if (cbToRead > cbMax)
5137 cbToRead = cbMax;
5138 switch (cbToRead)
5139 {
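            /* Note: the cases below deliberately fall through, copying the last 1-5 bytes without a memcpy call. */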
5140 case 5: pbDst[4] = pbSrc[4];
5141 case 4: pbDst[3] = pbSrc[3];
5142 case 3: pbDst[2] = pbSrc[2];
5143 case 2: pbDst[1] = pbSrc[1];
5144 case 1: pbDst[0] = pbSrc[0];
5145 break;
5146 default:
5147 memcpy(pbDst, pbSrc, cbToRead);
5148 }
5149 *pcbRead = cbToRead;
5150
5151 if (pPatchRec->patch.cbPatchJump == 1)
5152 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5153 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5154 return VINF_SUCCESS;
5155 }
5156
5157 return VERR_PATCH_NOT_FOUND;
5158}
5159
5160/**
5161 * Disable patch for privileged instruction at specified location
5162 *
5163 * @returns VBox status code.
5164 * @param pVM Pointer to the VM.
5165 * @param pInstrGC Guest context pointer to the privileged instruction
5166 *
5167 * @note returns failure if patching is not allowed or possible
5168 *
5169 */
5170VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5171{
5172 PPATMPATCHREC pPatchRec;
5173 PPATCHINFO pPatch;
5174
5175 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5176 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5177 if (pPatchRec)
5178 {
5179 int rc = VINF_SUCCESS;
5180
5181 pPatch = &pPatchRec->patch;
5182
5183 /* Already disabled? */
5184 if (pPatch->uState == PATCH_DISABLED)
5185 return VINF_SUCCESS;
5186
5187 /* Clear the IDT entries for the patch we're disabling. */
5188 /* Note: very important as we clear IF in the patch itself */
5189 /** @todo this needs to be changed */
5190 if (pPatch->flags & PATMFL_IDTHANDLER)
5191 {
5192 uint32_t iGate;
5193
5194 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5195 if (iGate != (uint32_t)~0)
5196 {
5197 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5198 if (++cIDTHandlersDisabled < 256)
5199 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5200 }
5201 }
5202
5203        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, or function/trampoline/IDT patches). */
5204 if ( pPatch->pPatchBlockOffset
5205 && pPatch->uState == PATCH_ENABLED)
5206 {
5207 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5208 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5209 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5210 }
5211
5212 /* IDT or function patches haven't changed any guest code. */
5213 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5214 {
5215 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5216 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5217
5218 if (pPatch->uState != PATCH_REFUSED)
5219 {
5220 uint8_t temp[16];
5221
5222 Assert(pPatch->cbPatchJump < sizeof(temp));
5223
5224 /* Let's first check if the guest code is still the same. */
5225 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5226 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5227 if (rc == VINF_SUCCESS)
5228 {
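                    /* displ is the rel32 operand we originally wrote: the distance from the byte after the 5-byte jump to the patch code.
                       E.g. (hypothetical addresses) a jump at guest 0x1000 to patch code at 0x2000 has rel32 = 0x2000 - (0x1000 + 5) = 0xFFB. */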
5229 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5230
5231 if ( temp[0] != 0xE9 /* jmp opcode */
5232 || *(RTRCINTPTR *)(&temp[1]) != displ
5233 )
5234 {
5235                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5236 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5237 /* Remove it completely */
5238 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5239 rc = PATMR3RemovePatch(pVM, pInstrGC);
5240 AssertRC(rc);
5241 return VWRN_PATCH_REMOVED;
5242 }
5243 patmRemoveJumpToPatch(pVM, pPatch);
5244 }
5245 else
5246 {
5247 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5248 pPatch->uState = PATCH_DISABLE_PENDING;
5249 }
5250 }
5251 else
5252 {
5253 AssertMsgFailed(("Patch was refused!\n"));
5254 return VERR_PATCH_ALREADY_DISABLED;
5255 }
5256 }
5257 else
5258 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5259 {
5260 uint8_t temp[16];
5261
5262 Assert(pPatch->cbPatchJump < sizeof(temp));
5263
5264 /* Let's first check if the guest code is still the same. */
5265 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5266 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5267 if (rc == VINF_SUCCESS)
5268 {
5269 if (temp[0] != 0xCC)
5270 {
5271                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5272 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5273 /* Remove it completely */
5274 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5275 rc = PATMR3RemovePatch(pVM, pInstrGC);
5276 AssertRC(rc);
5277 return VWRN_PATCH_REMOVED;
5278 }
5279 patmDeactivateInt3Patch(pVM, pPatch);
5280 }
5281 }
5282
5283 if (rc == VINF_SUCCESS)
5284 {
5285 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5286 if (pPatch->uState == PATCH_DISABLE_PENDING)
5287 {
5288 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5289 pPatch->uState = PATCH_UNUSABLE;
5290 }
5291 else
5292 if (pPatch->uState != PATCH_DIRTY)
5293 {
5294 pPatch->uOldState = pPatch->uState;
5295 pPatch->uState = PATCH_DISABLED;
5296 }
5297 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5298 }
5299
5300 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5301 return VINF_SUCCESS;
5302 }
5303 Log(("Patch not found!\n"));
5304 return VERR_PATCH_NOT_FOUND;
5305}
5306
5307/**
5308 * Permanently disable patch for privileged instruction at specified location
5309 *
5310 * @returns VBox status code.
5311 * @param pVM Pointer to the VM.
5312 * @param pInstrGC Guest context instruction pointer
5313 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5314 * @param pConflictPatch Conflicting patch
5315 *
5316 */
5317static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5318{
5319 NOREF(pConflictAddr);
5320#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5321 PATCHINFO patch;
5322 DISCPUSTATE cpu;
5323 R3PTRTYPE(uint8_t *) pInstrHC;
5324 uint32_t cbInstr;
5325 bool disret;
5326 int rc;
5327
5328 RT_ZERO(patch);
5329 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5330 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5331 /*
5332 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5333 * with one that jumps right into the conflict patch.
5334 * Otherwise we must disable the conflicting patch to avoid serious problems.
5335 */
5336 if ( disret == true
5337 && (pConflictPatch->flags & PATMFL_CODE32)
5338 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5339 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5340 {
5341 /* Hint patches must be enabled first. */
5342 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5343 {
5344 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5345 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5346 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5347 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5348 /* Enabling might fail if the patched code has changed in the meantime. */
5349 if (rc != VINF_SUCCESS)
5350 return rc;
5351 }
5352
5353 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5354 if (RT_SUCCESS(rc))
5355 {
5356 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5357 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5358 return VINF_SUCCESS;
5359 }
5360 }
5361#endif
5362
5363 if (pConflictPatch->opcode == OP_CLI)
5364 {
5365 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5366 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5367 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5368 if (rc == VWRN_PATCH_REMOVED)
5369 return VINF_SUCCESS;
5370 if (RT_SUCCESS(rc))
5371 {
5372 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5373 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5374 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5375 if (rc == VERR_PATCH_NOT_FOUND)
5376 return VINF_SUCCESS; /* removed already */
5377
5378 AssertRC(rc);
5379 if (RT_SUCCESS(rc))
5380 {
5381 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5382 return VINF_SUCCESS;
5383 }
5384 }
5385 /* else turned into unusable patch (see below) */
5386 }
5387 else
5388 {
5389 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5390 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5391 if (rc == VWRN_PATCH_REMOVED)
5392 return VINF_SUCCESS;
5393 }
5394
5395 /* No need to monitor the code anymore. */
5396 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5397 {
5398 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5399 AssertRC(rc);
5400 }
5401 pConflictPatch->uState = PATCH_UNUSABLE;
5402 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5403 return VERR_PATCH_DISABLED;
5404}
5405
5406/**
5407 * Enable patch for privileged instruction at specified location
5408 *
5409 * @returns VBox status code.
5410 * @param pVM Pointer to the VM.
5411 * @param pInstrGC Guest context pointer to the privileged instruction
5412 *
5413 * @note returns failure if patching is not allowed or possible
5414 *
5415 */
5416VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5417{
5418 PPATMPATCHREC pPatchRec;
5419 PPATCHINFO pPatch;
5420
5421 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5422 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5423 if (pPatchRec)
5424 {
5425 int rc = VINF_SUCCESS;
5426
5427 pPatch = &pPatchRec->patch;
5428
5429 if (pPatch->uState == PATCH_DISABLED)
5430 {
5431 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5432 {
5433 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5434 uint8_t temp[16];
5435
5436 Assert(pPatch->cbPatchJump < sizeof(temp));
5437
5438 /* Let's first check if the guest code is still the same. */
5439 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5440 AssertRC(rc2);
5441 if (rc2 == VINF_SUCCESS)
5442 {
5443 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5444 {
5445                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5446 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5447 /* Remove it completely */
5448 rc = PATMR3RemovePatch(pVM, pInstrGC);
5449 AssertRC(rc);
5450 return VERR_PATCH_NOT_FOUND;
5451 }
5452
5453 PATMP2GLOOKUPREC cacheRec;
5454 RT_ZERO(cacheRec);
5455 cacheRec.pPatch = pPatch;
5456
5457 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5458 /* Free leftover lock if any. */
5459 if (cacheRec.Lock.pvMap)
5460 {
5461 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5462 cacheRec.Lock.pvMap = NULL;
5463 }
5464 AssertRC(rc2);
5465 if (RT_FAILURE(rc2))
5466 return rc2;
5467
5468#ifdef DEBUG
5469 {
5470 DISCPUSTATE cpu;
5471 char szOutput[256];
5472 uint32_t cbInstr;
5473 uint32_t i = 0;
5474 bool disret;
5475 while(i < pPatch->cbPatchJump)
5476 {
5477 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5478 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5479 Log(("Renewed patch instr: %s", szOutput));
5480 i += cbInstr;
5481 }
5482 }
5483#endif
5484 }
5485 }
5486 else
5487 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5488 {
5489 uint8_t temp[16];
5490
5491 Assert(pPatch->cbPatchJump < sizeof(temp));
5492
5493 /* Let's first check if the guest code is still the same. */
5494 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5495 AssertRC(rc2);
5496
5497 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5498 {
5499                Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5500 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5501 rc = PATMR3RemovePatch(pVM, pInstrGC);
5502 AssertRC(rc);
5503 return VERR_PATCH_NOT_FOUND;
5504 }
5505
5506 rc2 = patmActivateInt3Patch(pVM, pPatch);
5507 if (RT_FAILURE(rc2))
5508 return rc2;
5509 }
5510
5511 pPatch->uState = pPatch->uOldState; //restore state
5512
5513 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5514 if (pPatch->pPatchBlockOffset)
5515 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5516
5517 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5518 }
5519 else
5520 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5521
5522 return rc;
5523 }
5524 return VERR_PATCH_NOT_FOUND;
5525}
5526
5527/**
5528 * Remove patch for privileged instruction at specified location
5529 *
5530 * @returns VBox status code.
5531 * @param pVM Pointer to the VM.
5532 * @param pPatchRec Patch record
5533 * @param fForceRemove Force removal even if the patch is referenced by other code (e.g. duplicated functions); normally such patches are refused.
5534 */
5535int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5536{
5537 PPATCHINFO pPatch;
5538
5539 pPatch = &pPatchRec->patch;
5540
5541 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5542 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5543 {
5544 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5545 return VERR_ACCESS_DENIED;
5546 }
5547 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5548
5549 /* Note: NEVER EVER REUSE PATCH MEMORY */
5550 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5551
5552 if (pPatchRec->patch.pPatchBlockOffset)
5553 {
5554 PAVLOU32NODECORE pNode;
5555
5556 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5557 Assert(pNode);
5558 }
5559
5560 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5561 {
5562 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5563 AssertRC(rc);
5564 }
5565
5566#ifdef VBOX_WITH_STATISTICS
5567 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5568 {
5569 STAMR3Deregister(pVM, &pPatchRec->patch);
5570#ifndef DEBUG_sandervl
5571 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5572 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5573 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5574 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5575 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5576 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5577 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5578 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5579 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5580 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5581 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5582 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5583 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5584 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5585#endif
5586 }
5587#endif
5588
5589 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5590 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5591 pPatch->nrPatch2GuestRecs = 0;
5592 Assert(pPatch->Patch2GuestAddrTree == 0);
5593
5594 patmEmptyTree(pVM, &pPatch->FixupTree);
5595 pPatch->nrFixups = 0;
5596 Assert(pPatch->FixupTree == 0);
5597
5598 if (pPatchRec->patch.pTempInfo)
5599 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5600
5601 /* Note: might fail, because it has already been removed (e.g. during reset). */
5602 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5603
5604 /* Free the patch record */
5605 MMHyperFree(pVM, pPatchRec);
5606 return VINF_SUCCESS;
5607}
5608
5609/**
5610 * RTAvlU32DoWithAll() worker.
5611 * Checks whether the current trampoline instruction is the jump to the target patch
5612 * and updates the displacement to jump to the new target.
5613 *
5614 * @returns VBox status code.
5615 * @retval VERR_ALREADY_EXISTS if the jump was found.
5616 * @param pNode The current patch to guest record to check.
5617 * @param pvUser The refresh state.
5618 */
5619static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5620{
5621 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5622 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5623 PVM pVM = pRefreshPatchState->pVM;
5624
5625 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5626
5627 /*
5628 * Check if the patch instruction starts with a jump.
5629 * ASSUMES that there is no other patch to guest record that starts
5630 * with a jump.
5631 */
5632 if (*pPatchInstr == 0xE9)
5633 {
5634 /* Jump found, update the displacement. */
5635 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5636 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
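        /* The rel32 operand of the E9 jump is relative to the first byte after the 5-byte jump instruction. */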
5637 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5638
5639        LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5640 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5641
5642 *(uint32_t *)&pPatchInstr[1] = displ;
5643 return VERR_ALREADY_EXISTS; /** @todo better return code */
5644 }
5645
5646 return VINF_SUCCESS;
5647}
5648
5649/**
5650 * Attempt to refresh the patch by recompiling its entire code block
5651 *
5652 * @returns VBox status code.
5653 * @param pVM Pointer to the VM.
5654 * @param pPatchRec Patch record
5655 */
5656int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5657{
5658 PPATCHINFO pPatch;
5659 int rc;
5660 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5661 PTRAMPREC pTrampolinePatchesHead = NULL;
5662
5663 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5664
5665 pPatch = &pPatchRec->patch;
5666 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5667 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5668 {
5669 if (!pPatch->pTrampolinePatchesHead)
5670 {
5671 /*
5672 * It is sometimes possible that there are trampoline patches to this patch
5673 * but they are not recorded (after a saved state load for example).
5674 * Refuse to refresh those patches.
5675             * In theory this can hurt performance if the guest modifies the patched code
5676             * and executes it often. However, most of the time states are saved after the
5677             * guest code was modified and the code is not updated again afterwards, so this
5678             * shouldn't be a big problem.
5679 */
5680 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5681 return VERR_PATCHING_REFUSED;
5682 }
5683 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5684 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5685 }
5686
5687    /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5688
5689 rc = PATMR3DisablePatch(pVM, pInstrGC);
5690 AssertRC(rc);
5691
5692    /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5693 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5694#ifdef VBOX_WITH_STATISTICS
5695 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5696 {
5697 STAMR3Deregister(pVM, &pPatchRec->patch);
5698#ifndef DEBUG_sandervl
5699 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5700 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5701 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5702 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5703 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5704 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5705 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5706 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5707 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5708 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5709 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5710 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5711 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5712 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5713#endif
5714 }
5715#endif
5716
5717    /* Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5718
5719 /* Attempt to install a new patch. */
5720 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5721 if (RT_SUCCESS(rc))
5722 {
5723 RTRCPTR pPatchTargetGC;
5724 PPATMPATCHREC pNewPatchRec;
5725
5726 /* Determine target address in new patch */
5727 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5728 Assert(pPatchTargetGC);
5729 if (!pPatchTargetGC)
5730 {
5731 rc = VERR_PATCHING_REFUSED;
5732 goto failure;
5733 }
5734
5735 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5736 pPatch->uCurPatchOffset = 0;
5737
5738 /* insert jump to new patch in old patch block */
5739 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5740 if (RT_FAILURE(rc))
5741 goto failure;
5742
5743 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5744 Assert(pNewPatchRec); /* can't fail */
5745
5746 /* Remove old patch (only do that when everything is finished) */
5747 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5748 AssertRC(rc2);
5749
5750 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5751 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5752 Assert(fInserted); NOREF(fInserted);
5753
5754        Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5755 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5756
5757 /* Used by another patch, so don't remove it! */
5758 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5759
5760 if (pTrampolinePatchesHead)
5761 {
5762 /* Update all trampoline patches to jump to the new patch. */
5763 PTRAMPREC pTrampRec = NULL;
5764 PATMREFRESHPATCH RefreshPatch;
5765
5766 RefreshPatch.pVM = pVM;
5767 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5768
5769 pTrampRec = pTrampolinePatchesHead;
5770
5771 while (pTrampRec)
5772 {
5773 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5774
5775 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5776 /*
5777 * We have to find the right patch2guest record because there might be others
5778 * for statistics.
5779 */
5780 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5781 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5782 Assert(rc == VERR_ALREADY_EXISTS);
5783 rc = VINF_SUCCESS;
5784 pTrampRec = pTrampRec->pNext;
5785 }
5786 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5787 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5788 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5789 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5790 }
5791 }
5792
5793failure:
5794 if (RT_FAILURE(rc))
5795 {
5796        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5797
5798 /* Remove the new inactive patch */
5799 rc = PATMR3RemovePatch(pVM, pInstrGC);
5800 AssertRC(rc);
5801
5802 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5803 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5804 Assert(fInserted); NOREF(fInserted);
5805
5806 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5807 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5808 AssertRC(rc2);
5809
5810 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5811 }
5812 return rc;
5813}
5814
5815/**
5816 * Find patch for privileged instruction at specified location
5817 *
5818 * @returns Patch structure pointer if found; else NULL
5819 * @param pVM Pointer to the VM.
5820 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5821 * @param fIncludeHints Include hinted patches or not
5822 *
5823 */
5824PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5825{
5826 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5827 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5828 if (pPatchRec)
5829 {
5830 if ( pPatchRec->patch.uState == PATCH_ENABLED
5831 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5832 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5833 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5834 {
5835 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5836 return &pPatchRec->patch;
5837 }
5838 else
5839 if ( fIncludeHints
5840 && pPatchRec->patch.uState == PATCH_DISABLED
5841 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5842 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5843 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5844 {
5845 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5846 return &pPatchRec->patch;
5847 }
5848 }
5849 return NULL;
5850}
5851
5852/**
5853 * Checks whether the GC address is inside a generated patch jump
5854 *
5855 * @returns true -> yes, false -> no
5856 * @param pVM Pointer to the VM.
5857 * @param pAddr Guest context address.
5858 * @param pPatchAddr Guest context patch address (if true).
5859 */
5860VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5861{
5862 RTRCPTR addr;
5863 PPATCHINFO pPatch;
5864
5865 if (PATMIsEnabled(pVM) == false)
5866 return false;
5867
5868 if (pPatchAddr == NULL)
5869 pPatchAddr = &addr;
5870
5871 *pPatchAddr = 0;
5872
5873 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5874 if (pPatch)
5875 *pPatchAddr = pPatch->pPrivInstrGC;
5876
5877 return *pPatchAddr == 0 ? false : true;
5878}
5879
5880/**
5881 * Remove patch for privileged instruction at specified location
5882 *
5883 * @returns VBox status code.
5884 * @param pVM Pointer to the VM.
5885 * @param pInstrGC Guest context pointer to the privileged instruction
5886 *
5887 * @note returns failure if patching is not allowed or possible
5888 *
5889 */
5890VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5891{
5892 PPATMPATCHREC pPatchRec;
5893
5894 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5895 if (pPatchRec)
5896 {
5897 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5898 if (rc == VWRN_PATCH_REMOVED)
5899 return VINF_SUCCESS;
5900
5901 return patmR3RemovePatch(pVM, pPatchRec, false);
5902 }
5903 AssertFailed();
5904 return VERR_PATCH_NOT_FOUND;
5905}
5906
5907/**
5908 * Mark patch as dirty
5909 *
5910 * @returns VBox status code.
5911 * @param pVM Pointer to the VM.
5912 * @param pPatch Patch record
5913 *
5914 * @note returns failure if patching is not allowed or possible
5915 *
5916 */
5917static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5918{
5919 if (pPatch->pPatchBlockOffset)
5920 {
5921 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5922 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5923 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5924 }
5925
5926 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5927 /* Put back the replaced instruction. */
5928 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5929 if (rc == VWRN_PATCH_REMOVED)
5930 return VINF_SUCCESS;
5931
5932 /* Note: we don't restore patch pages for patches that are not enabled! */
5933 /* Note: be careful when changing this behaviour!! */
5934
5935 /* The patch pages are no longer marked for self-modifying code detection */
5936 if (pPatch->flags & PATMFL_CODE_MONITORED)
5937 {
5938 rc = patmRemovePatchPages(pVM, pPatch);
5939 AssertRCReturn(rc, rc);
5940 }
5941 pPatch->uState = PATCH_DIRTY;
5942
5943 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5944 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5945
5946 return VINF_SUCCESS;
5947}
5948
5949/**
5950 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5951 *
5952 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5953 * @param pVM Pointer to the VM.
5954 * @param pPatch Patch block structure pointer
5955 * @param pPatchGC GC address in patch block
5956 */
5957RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5958{
5959 Assert(pPatch->Patch2GuestAddrTree);
5960 /* Get the closest record from below. */
5961 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5962 if (pPatchToGuestRec)
5963 return pPatchToGuestRec->pOrgInstrGC;
5964
5965 return 0;
5966}
5967
5968/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5969 *
5970 * @returns corresponding GC pointer in patch block
5971 * @param pVM Pointer to the VM.
5972 * @param pPatch Current patch block pointer
5973 * @param pInstrGC Guest context pointer to privileged instruction
5974 *
5975 */
5976RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5977{
5978 if (pPatch->Guest2PatchAddrTree)
5979 {
5980 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5981 if (pGuestToPatchRec)
5982 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5983 }
5984
5985 return 0;
5986}
5987
5988/**
5989 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5990 *
5991 * @returns corresponding GC pointer in patch block
5992 * @param pVM Pointer to the VM.
5993 * @param pInstrGC Guest context pointer to privileged instruction
5994 */
5995static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5996{
5997 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5998 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5999 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6000 return NIL_RTRCPTR;
6001}
6002
6003/**
6004 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6005 * identical match)
6006 *
6007 * @returns corresponding GC pointer in patch block
6008 * @param pVM Pointer to the VM.
6009 * @param pPatch Current patch block pointer
6010 * @param pInstrGC Guest context pointer to privileged instruction
6011 *
6012 */
6013RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6014{
6015 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6016 if (pGuestToPatchRec)
6017 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6018 return NIL_RTRCPTR;
6019}
6020
6021/**
6022 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6023 *
6024 * @returns original GC instruction pointer or 0 if not found
6025 * @param pVM Pointer to the VM.
6026 * @param pPatchGC GC address in patch block
6027 * @param pEnmState State of the translated address (out)
6028 *
6029 */
6030VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6031{
6032 PPATMPATCHREC pPatchRec;
6033 void *pvPatchCoreOffset;
6034 RTRCPTR pPrivInstrGC;
6035
6036 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6037 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6038 if (pvPatchCoreOffset == 0)
6039 {
6040 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6041 return 0;
6042 }
6043 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6044 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6045 if (pEnmState)
6046 {
6047 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6048 || pPatchRec->patch.uState == PATCH_DIRTY
6049 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6050 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6051 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6052
6053 if ( !pPrivInstrGC
6054 || pPatchRec->patch.uState == PATCH_UNUSABLE
6055 || pPatchRec->patch.uState == PATCH_REFUSED)
6056 {
6057 pPrivInstrGC = 0;
6058 *pEnmState = PATMTRANS_FAILED;
6059 }
6060 else
6061 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6062 {
6063 *pEnmState = PATMTRANS_INHIBITIRQ;
6064 }
6065 else
6066 if ( pPatchRec->patch.uState == PATCH_ENABLED
6067 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6068 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6069 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6070 {
6071 *pEnmState = PATMTRANS_OVERWRITTEN;
6072 }
6073 else
6074 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6075 {
6076 *pEnmState = PATMTRANS_OVERWRITTEN;
6077 }
6078 else
6079 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6080 {
6081 *pEnmState = PATMTRANS_PATCHSTART;
6082 }
6083 else
6084 *pEnmState = PATMTRANS_SAFE;
6085 }
6086 return pPrivInstrGC;
6087}
6088
6089/**
6090 * Returns the GC pointer of the patch for the specified GC address
6091 *
6092 * @returns The GC pointer of the patch code, or NIL_RTRCPTR if no enabled or dirty patch exists for the address.
6093 * @param pVM Pointer to the VM.
6094 * @param pAddrGC Guest context address
6095 */
6096VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6097{
6098 PPATMPATCHREC pPatchRec;
6099
6100 /* Find the patch record. */
6101 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6102 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6103 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6104 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6105 return NIL_RTRCPTR;
6106}
6107
6108/**
6109 * Attempt to recover dirty instructions
6110 *
6111 * @returns VBox status code.
6112 * @param pVM Pointer to the VM.
6113 * @param pCtx Pointer to the guest CPU context.
6114 * @param pPatch Patch record.
6115 * @param pPatchToGuestRec Patch to guest address record.
6116 * @param pEip GC pointer of trapping instruction.
6117 */
6118static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6119{
6120 DISCPUSTATE CpuOld, CpuNew;
6121 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6122 int rc;
6123 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6124 uint32_t cbDirty;
6125 PRECPATCHTOGUEST pRec;
6126 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6127 PVMCPU pVCpu = VMMGetCpu0(pVM);
6128 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6129
6130 pRec = pPatchToGuestRec;
6131 pCurInstrGC = pOrgInstrGC;
6132 pCurPatchInstrGC = pEip;
6133 cbDirty = 0;
6134 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6135
6136 /* Find all adjacent dirty instructions */
6137 while (true)
6138 {
6139 if (pRec->fJumpTarget)
6140 {
6141 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6142 pRec->fDirty = false;
6143 return VERR_PATCHING_REFUSED;
6144 }
6145
6146 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6147 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6148 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6149
6150 /* Only harmless instructions are acceptable. */
6151 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6152 if ( RT_FAILURE(rc)
6153 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6154 {
6155 if (RT_SUCCESS(rc))
6156 cbDirty += CpuOld.cbInstr;
6157 else
6158 if (!cbDirty)
6159 cbDirty = 1;
6160 break;
6161 }
6162
6163#ifdef DEBUG
6164 char szBuf[256];
6165 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6166 szBuf, sizeof(szBuf), NULL);
6167 Log(("DIRTY: %s\n", szBuf));
6168#endif
6169 /* Mark as clean; if we fail we'll let it always fault. */
6170 pRec->fDirty = false;
6171
6172 /* Remove old lookup record. */
6173 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6174 pPatchToGuestRec = NULL;
6175
6176 pCurPatchInstrGC += CpuOld.cbInstr;
6177 cbDirty += CpuOld.cbInstr;
6178
6179 /* Let's see if there's another dirty instruction right after. */
6180 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6181 if (!pRec || !pRec->fDirty)
6182 break; /* no more dirty instructions */
6183
6184 /* In case of complex instructions the next guest instruction could be quite far off. */
6185 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6186 }
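    /* cbDirty now covers the contiguous dirty block starting at pEip; the check below
     * distinguishes a clean scan (the last disassembled instruction was harmless) from
     * a disassembly failure or an unsafe instruction, which refuses the recovery. */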
6187
6188 if ( RT_SUCCESS(rc)
6189 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6190 )
6191 {
6192 uint32_t cbLeft;
6193
6194 pCurPatchInstrHC = pPatchInstrHC;
6195 pCurPatchInstrGC = pEip;
6196 cbLeft = cbDirty;
6197
6198 while (cbLeft && RT_SUCCESS(rc))
6199 {
6200 bool fValidInstr;
6201
6202 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6203
6204 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6205 if ( !fValidInstr
6206 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6207 )
6208 {
6209 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6210
6211 if ( pTargetGC >= pOrgInstrGC
6212 && pTargetGC <= pOrgInstrGC + cbDirty
6213 )
6214 {
6215 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6216 fValidInstr = true;
6217 }
6218 }
6219
6220 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6221 if ( rc == VINF_SUCCESS
6222 && CpuNew.cbInstr <= cbLeft /* must still fit */
6223 && fValidInstr
6224 )
6225 {
6226#ifdef DEBUG
6227 char szBuf[256];
6228 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6229 szBuf, sizeof(szBuf), NULL);
6230 Log(("NEW: %s\n", szBuf));
6231#endif
6232
6233 /* Copy the new instruction. */
6234 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6235 AssertRC(rc);
6236
6237 /* Add a new lookup record for the duplicated instruction. */
6238 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6239 }
6240 else
6241 {
6242#ifdef DEBUG
6243 char szBuf[256];
6244 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6245 szBuf, sizeof(szBuf), NULL);
6246 Log(("NEW: %s (FAILED)\n", szBuf));
6247#endif
6248 /* Restore the old lookup record for the duplicated instruction. */
6249 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6250
6251 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6252 rc = VERR_PATCHING_REFUSED;
6253 break;
6254 }
6255 pCurInstrGC += CpuNew.cbInstr;
6256 pCurPatchInstrHC += CpuNew.cbInstr;
6257 pCurPatchInstrGC += CpuNew.cbInstr;
6258 cbLeft -= CpuNew.cbInstr;
6259
6260 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6261 if (!cbLeft)
6262 {
6263 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6264 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6265 {
6266 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6267 if (pRec)
6268 {
6269 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6270 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6271
6272 Assert(!pRec->fDirty);
6273
6274 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6275 if (cbFiller >= SIZEOF_NEARJUMP32)
6276 {
6277 pPatchFillHC[0] = 0xE9;
6278 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6279#ifdef DEBUG
6280 char szBuf[256];
6281 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6282 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6283 Log(("FILL: %s\n", szBuf));
6284#endif
6285 }
6286 else
6287 {
6288 for (unsigned i = 0; i < cbFiller; i++)
6289 {
6290 pPatchFillHC[i] = 0x90; /* NOP */
6291#ifdef DEBUG
6292 char szBuf[256];
6293 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6294 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6295 Log(("FILL: %s\n", szBuf));
6296#endif
6297 }
6298 }
6299 }
6300 }
6301 }
6302 }
6303 }
6304 else
6305 rc = VERR_PATCHING_REFUSED;
6306
6307 if (RT_SUCCESS(rc))
6308 {
6309 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6310 }
6311 else
6312 {
6313 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6314 Assert(cbDirty);
6315
6316 /* Mark the whole instruction stream with breakpoints. */
6317 if (cbDirty)
6318 memset(pPatchInstrHC, 0xCC, cbDirty);
6319
6320 if ( pVM->patm.s.fOutOfMemory == false
6321 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6322 {
6323 rc = patmR3RefreshPatch(pVM, pPatch);
6324 if (RT_FAILURE(rc))
6325 {
6326 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6327 }
6328 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6329 rc = VERR_PATCHING_REFUSED;
6330 }
6331 }
6332 return rc;
6333}
6334
6335/**
6336 * Handle trap inside patch code
6337 *
6338 * @returns VBox status code.
6339 * @param pVM Pointer to the VM.
6340 * @param pCtx Pointer to the guest CPU context.
6341 * @param pEip GC pointer of trapping instruction.
6342 * @param ppNewEip GC pointer to new instruction.
6343 */
6344VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6345{
6346 PPATMPATCHREC pPatch = 0;
6347 void *pvPatchCoreOffset;
6348 RTRCUINTPTR offset;
6349 RTRCPTR pNewEip;
6350 int rc;
6351 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6352 PVMCPU pVCpu = VMMGetCpu0(pVM);
6353
6354 Assert(pVM->cCpus == 1);
6355
6356 pNewEip = 0;
6357 *ppNewEip = 0;
6358
6359 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6360
6361 /* Find the patch record. */
6362 /* Note: there might not be a patch to guest translation record (global function) */
6363 offset = pEip - pVM->patm.s.pPatchMemGC;
6364 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6365 if (pvPatchCoreOffset)
6366 {
6367 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6368
6369 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6370
6371 if (pPatch->patch.uState == PATCH_DIRTY)
6372 {
6373 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6374 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6375 {
6376 /* Function duplication patches set fPIF to 1 on entry */
6377 pVM->patm.s.pGCStateHC->fPIF = 1;
6378 }
6379 }
6380 else
6381 if (pPatch->patch.uState == PATCH_DISABLED)
6382 {
6383 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6384 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6385 {
6386 /* Function duplication patches set fPIF to 1 on entry */
6387 pVM->patm.s.pGCStateHC->fPIF = 1;
6388 }
6389 }
6390 else
6391 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6392 {
6393 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6394
6395 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6396 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6397 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6398 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6399 }
6400
6401 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6402 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6403
6404 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6405 pPatch->patch.cTraps++;
6406 PATM_STAT_FAULT_INC(&pPatch->patch);
6407 }
6408 else
6409 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6410
6411 /* Check if we were interrupted in PATM generated instruction code. */
6412 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6413 {
6414 DISCPUSTATE Cpu;
6415 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6416 AssertRC(rc);
6417
6418 if ( rc == VINF_SUCCESS
6419 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6420 || Cpu.pCurInstr->uOpcode == OP_PUSH
6421 || Cpu.pCurInstr->uOpcode == OP_CALL)
6422 )
6423 {
6424 uint64_t fFlags;
6425
6426 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6427
6428 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6429 {
6430 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6431 if ( rc == VINF_SUCCESS
6432 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6433 {
6434 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6435
6436 /* Reset the PATM stack. */
6437 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6438
6439 pVM->patm.s.pGCStateHC->fPIF = 1;
6440
6441 Log(("Faulting push -> go back to the original instruction\n"));
6442
6443 /* continue at the original instruction */
6444 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6445 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6446 return VINF_SUCCESS;
6447 }
6448 }
6449
6450 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6451 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6452 AssertMsgRC(rc, ("PGMShwMakePageWritable -> rc=%Rrc\n", rc));
6453 if (rc == VINF_SUCCESS)
6454 {
6455 /* The guest page *must* be present. */
6456 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6457 if ( rc == VINF_SUCCESS
6458 && (fFlags & X86_PTE_P))
6459 {
6460 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6461 return VINF_PATCH_CONTINUE;
6462 }
6463 }
6464 }
6465 else
6466 if (pPatch->patch.pPrivInstrGC == pNewEip)
6467 {
6468 /* Invalidated patch or first instruction overwritten.
6469 * We can ignore the fPIF state in this case.
6470 */
6471 /* Reset the PATM stack. */
6472 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6473
6474 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6475
6476 pVM->patm.s.pGCStateHC->fPIF = 1;
6477
6478 /* continue at the original instruction */
6479 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6480 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6481 return VINF_SUCCESS;
6482 }
6483
6484 char szBuf[256];
6485 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6486
6487 /* Very bad. We crashed in emitted code. Probably stack? */
6488 if (pPatch)
6489 {
6490 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6491 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6492 }
6493 else
6494 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6495 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6496 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6497 }
6498
6499 /* From here on, we must have a valid patch to guest translation. */
6500 if (pvPatchCoreOffset == 0)
6501 {
6502 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6503 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6504 return VERR_PATCH_NOT_FOUND;
6505 }
6506
6507 /* Take care of dirty/changed instructions. */
6508 if (pPatchToGuestRec->fDirty)
6509 {
6510 Assert(pPatchToGuestRec->Core.Key == offset);
6511 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6512
6513 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6514 if (RT_SUCCESS(rc))
6515 {
6516 /* Retry the current instruction. */
6517 pNewEip = pEip;
6518 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6519 }
6520 else
6521 {
6522 /* Reset the PATM stack. */
6523 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6524
6525 rc = VINF_SUCCESS; /* Continue at original instruction. */
6526 }
6527
6528 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6529 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6530 return rc;
6531 }
6532
6533#ifdef VBOX_STRICT
6534 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6535 {
6536 DISCPUSTATE cpu;
6537 bool disret;
6538 uint32_t cbInstr;
6539 PATMP2GLOOKUPREC cacheRec;
6540 RT_ZERO(cacheRec);
6541 cacheRec.pPatch = &pPatch->patch;
6542
6543 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6544 &cpu, &cbInstr);
6545 if (cacheRec.Lock.pvMap)
6546 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6547
6548 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6549 {
6550 RTRCPTR retaddr;
6551 PCPUMCTX pCtx2;
6552
6553 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6554
6555 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6556 AssertRC(rc);
6557
6558 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6559 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6560 }
6561 }
6562#endif
6563
6564 /* Return original address, correct by subtracting the CS base address. */
6565 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6566
6567 /* Reset the PATM stack. */
6568 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6569
6570 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6571 {
6572 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6573 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6574#ifdef VBOX_STRICT
6575 DISCPUSTATE cpu;
6576 bool disret;
6577 uint32_t cbInstr;
6578 PATMP2GLOOKUPREC cacheRec;
6579 RT_ZERO(cacheRec);
6580 cacheRec.pPatch = &pPatch->patch;
6581
6582 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6583 &cpu, &cbInstr);
6584 if (cacheRec.Lock.pvMap)
6585 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6586
6587 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6588 {
6589 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6590 &cpu, &cbInstr);
6591 if (cacheRec.Lock.pvMap)
6592 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6593
6594 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6595 }
6596#endif
6597 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6598 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6599 }
6600
6601 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6602 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6603 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6604 {
6605 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6606 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6607 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6608 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6609 return VERR_PATCH_DISABLED;
6610 }
6611
6612#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6613 /** @todo Compare to the number of successful runs; add an aging algorithm and determine the best time to disable the patch. */
6614 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6615 {
6616 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6617 //we are only wasting time, back out the patch
6618 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6619 pTrapRec->pNextPatchInstr = 0;
6620 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6621 return VERR_PATCH_DISABLED;
6622 }
6623#endif
6624
6625 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6626 return VINF_SUCCESS;
6627}
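
/*
 * Illustrative call pattern (a hypothetical sketch, not the actual EM caller; the
 * names rc and uNewEip are made up for the example):
 *
 *     RTGCPTR uNewEip;
 *     int rc = PATMR3HandleTrap(pVM, pCtx, pEip, &uNewEip);
 *     if (rc == VINF_SUCCESS)
 *         pCtx->eip = uNewEip;    // continue at the original guest instruction
 *
 * VINF_PATCH_CONTINUE means "re-run the current patch instruction", while
 * VERR_PATCH_DISABLED and VERR_PATCH_NOT_FOUND are treated like an ordinary
 * guest trap by the caller.
 */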
6628
6629
6630/**
6631 * Handle page-fault in monitored page
6632 *
6633 * @returns VBox status code.
6634 * @param pVM Pointer to the VM.
6635 */
6636VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6637{
6638 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6639
6640 addr &= PAGE_BASE_GC_MASK;
6641
6642 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6643 AssertRC(rc); NOREF(rc);
6644
6645 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6646 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6647 {
6648 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6649 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6650 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6651 if (rc == VWRN_PATCH_REMOVED)
6652 return VINF_SUCCESS;
6653
6654 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6655
6656 if (addr == pPatchRec->patch.pPrivInstrGC)
6657 addr++;
6658 }
6659
6660 for(;;)
6661 {
6662 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6663
6664 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6665 break;
6666
6667 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6668 {
6669 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6670 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6671 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6672 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6673 }
6674 addr = pPatchRec->patch.pPrivInstrGC + 1;
6675 }
6676
6677 pVM->patm.s.pvFaultMonitor = 0;
6678 return VINF_SUCCESS;
6679}
6680
6681
6682#ifdef VBOX_WITH_STATISTICS
6683
6684static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6685{
6686 if (pPatch->flags & PATMFL_SYSENTER)
6687 {
6688 return "SYSENT";
6689 }
6690 else
6691 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6692 {
6693 static char szTrap[16];
6694 uint32_t iGate;
6695
6696 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6697 if (iGate < 256)
6698 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6699 else
6700 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6701 return szTrap;
6702 }
6703 else
6704 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6705 return "DUPFUNC";
6706 else
6707 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6708 return "FUNCCALL";
6709 else
6710 if (pPatch->flags & PATMFL_TRAMPOLINE)
6711 return "TRAMP";
6712 else
6713 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6714}
6715
6716static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6717{
6718 NOREF(pVM);
6719 switch(pPatch->uState)
6720 {
6721 case PATCH_ENABLED:
6722 return "ENA";
6723 case PATCH_DISABLED:
6724 return "DIS";
6725 case PATCH_DIRTY:
6726 return "DIR";
6727 case PATCH_UNUSABLE:
6728 return "UNU";
6729 case PATCH_REFUSED:
6730 return "REF";
6731 case PATCH_DISABLE_PENDING:
6732 return "DIP";
6733 default:
6734 AssertFailed();
6735 return " ";
6736 }
6737}
6738
6739/**
6740 * Resets the sample.
6741 * @param pVM Pointer to the VM.
6742 * @param pvSample The sample registered using STAMR3RegisterCallback.
6743 */
6744static void patmResetStat(PVM pVM, void *pvSample)
6745{
6746 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6747 Assert(pPatch);
6748
6749 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6750 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6751}
6752
6753/**
6754 * Prints the sample into the buffer.
6755 *
6756 * @param pVM Pointer to the VM.
6757 * @param pvSample The sample registered using STAMR3RegisterCallback.
6758 * @param pszBuf The buffer to print into.
6759 * @param cchBuf The size of the buffer.
6760 */
6761static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6762{
6763 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6764 Assert(pPatch);
6765
6766 Assert(pPatch->uState != PATCH_REFUSED);
6767 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6768
6769 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6770 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6771 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6772}
6773
6774/**
6775 * Returns the GC address of the corresponding patch statistics counter
6776 *
6777 * @returns Stat address
6778 * @param pVM Pointer to the VM.
6779 * @param pPatch Patch structure
6780 */
6781RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6782{
6783 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
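    /* Illustrative example: with uPatchIdx == 3 this resolves to
     * pStatsGC + 3 * sizeof(STAMRATIOU32) + RT_OFFSETOF(STAMRATIOU32, u32A), i.e. the
     * u32A member of the fourth STAMRATIOU32 entry in the GC statistics array. */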
6784 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6785}
6786
6787#endif /* VBOX_WITH_STATISTICS */
6788#ifdef VBOX_WITH_DEBUGGER
6789
6790/**
6791 * The '.patmoff' command.
6792 *
6793 * @returns VBox status code.
6794 * @param pCmd Pointer to the command descriptor (as registered).
6795 * @param pCmdHlp Pointer to command helper functions.
6796 * @param pUVM The user mode VM handle.
6797 * @param paArgs Pointer to (readonly) array of arguments.
6798 * @param cArgs Number of arguments in the array.
6799 */
6800static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6801{
6802 /*
6803 * Validate input.
6804 */
6805 NOREF(cArgs); NOREF(paArgs);
6806 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6807 PVM pVM = pUVM->pVM;
6808 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6809
6810 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6811 PATMR3AllowPatching(pVM->pUVM, false);
6812 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6813}
6814
6815/**
6816 * The '.patmon' command.
6817 *
6818 * @returns VBox status code.
6819 * @param pCmd Pointer to the command descriptor (as registered).
6820 * @param pCmdHlp Pointer to command helper functions.
6821 * @param pUVM The user mode VM handle.
6822 * @param paArgs Pointer to (readonly) array of arguments.
6823 * @param cArgs Number of arguments in the array.
6824 */
6825static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6826{
6827 /*
6828 * Validate input.
6829 */
6830 NOREF(cArgs); NOREF(paArgs);
6831 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6832 PVM pVM = pUVM->pVM;
6833 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6834
6835 PATMR3AllowPatching(pVM->pUVM, true);
6836 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6837 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6838}
6839
6840#endif /* VBOX_WITH_DEBUGGER */