VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 39333

Last change on this file since 39333 was 39078, checked in by vboxsync, 13 years ago

VMM: -Wunused-parameter

1/* $Id: PATM.cpp 39078 2011-10-21 14:18:22Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/**
58 * Refresh trampoline patch state.
59 */
60typedef struct PATMREFRESHPATCH
61{
62 /** Pointer to the VM structure. */
63 PVM pVM;
64 /** The trampoline patch record. */
65 PPATCHINFO pPatchTrampoline;
66 /** The new patch we want to jump to. */
67 PPATCHINFO pPatchRec;
68} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71/*******************************************************************************
72* Internal Functions *
73*******************************************************************************/
74
75static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
76static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
77static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
78
79#ifdef LOG_ENABLED // keep gcc quiet
80static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
81#endif
82#ifdef VBOX_WITH_STATISTICS
83static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
84static void patmResetStat(PVM pVM, void *pvSample);
85static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
86#endif
87
88#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
89#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
90
91static int patmReinit(PVM pVM);
92static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
93
94#ifdef VBOX_WITH_DEBUGGER
95static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
96static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
97static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
98
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
103 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
104 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
105};
106#endif
107
108/* Don't want to break saved states, so put it here as a global variable. */
109static unsigned int cIDTHandlersDisabled = 0;
110
111/**
112 * Initializes the PATM.
113 *
114 * @returns VBox status code.
115 * @param pVM The VM to operate on.
116 */
117VMMR3DECL(int) PATMR3Init(PVM pVM)
118{
119 int rc;
120
121 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
122
123 /* These values can't change as they are hardcoded in patch code (old saved states!) */
124 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
125 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
126 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
127 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
128
129 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
130 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
131
132 /* Allocate patch memory and GC patch state memory. */
133 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
134 /* Add another page in case the generated code is much larger than expected. */
135 /** @todo bad safety precaution */
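/* Layout of the single allocation below, matching the pointer arithmetic that
 * follows: patch memory plus one spare page, the PATM stack (PATM_STACK_TOTAL_SIZE),
 * one page of GC state and finally the statistics area (PATM_STAT_MEMSIZE). */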
136 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
137 if (RT_FAILURE(rc))
138 {
139 Log(("MMHyperAlloc failed with %Rrc\n", rc));
140 return rc;
141 }
142 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
143
144 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
145 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
146 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
147
148 /*
149 * Hypervisor memory for GC status data (read/write)
150 *
151 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
152 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
153 *
154 */
155 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
156 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
157 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
158
159 /* Hypervisor memory for patch statistics */
160 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
161 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
162
163 /* Memory for patch lookup trees. */
164 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
165 AssertRCReturn(rc, rc);
166 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
167
168#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
169 /* Check CFGM option. */
170 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
171 if (RT_FAILURE(rc))
172# ifdef PATM_DISABLE_ALL
173 pVM->fPATMEnabled = false;
174# else
175 pVM->fPATMEnabled = true;
176# endif
177#endif
178
179 rc = patmReinit(pVM);
180 AssertRC(rc);
181 if (RT_FAILURE(rc))
182 return rc;
183
184 /*
185 * Register save and load state notifiers.
186 */
187 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
188 NULL, NULL, NULL,
189 NULL, patmR3Save, NULL,
190 NULL, patmR3Load, NULL);
191 AssertRCReturn(rc, rc);
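/* Note: the data size hint above covers the PATM instance data plus the patch
 * memory, spare page and stack blocks. */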
192
193#ifdef VBOX_WITH_DEBUGGER
194 /*
195 * Debugger commands.
196 */
197 static bool s_fRegisteredCmds = false;
198 if (!s_fRegisteredCmds)
199 {
200 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
201 if (RT_SUCCESS(rc2))
202 s_fRegisteredCmds = true;
203 }
204#endif
205
206#ifdef VBOX_WITH_STATISTICS
207 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
208 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
210 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
211 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
212 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
213 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
214 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
215
216 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
217 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
218
219 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
220 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
221 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
222
223 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
224 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
225 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
226 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
227 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
228
229 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
230 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
231
232 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
233 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
234
235 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
236 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
237 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
238
239 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
240 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
241 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
242
243 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
244 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
245
246 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
247 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
248 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
249 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
252 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
253
254 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
255 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
256
257 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
258 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
259 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
262 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
263 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
264 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
265
266 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
267 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
268 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
269 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
270 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
271
272 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
273#endif /* VBOX_WITH_STATISTICS */
274
275 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
276 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
277 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
278 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
279 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
280 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
281 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
282 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
283
284 return rc;
285}
286
287/**
288 * Finalizes HMA page attributes.
289 *
290 * @returns VBox status code.
291 * @param pVM The VM handle.
292 */
293VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
294{
295 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
296 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
297 if (RT_FAILURE(rc))
298 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
299
300 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
301 if (RT_FAILURE(rc))
302 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
303
304 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
305 if (RT_FAILURE(rc))
306 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
307
308 return rc;
309}
310
311/**
312 * (Re)initializes PATM
313 *
314 * @param pVM The VM.
315 */
316static int patmReinit(PVM pVM)
317{
318 int rc;
319
320 /*
321 * Assert alignment and sizes.
322 */
323 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
324 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
325
326 /*
327 * Setup any fixed pointers and offsets.
328 */
329 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
330
331#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
332#ifndef PATM_DISABLE_ALL
333 pVM->fPATMEnabled = true;
334#endif
335#endif
336
337 Assert(pVM->patm.s.pGCStateHC);
338 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
339 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
340
341 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
342 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
343
344 Assert(pVM->patm.s.pGCStackHC);
345 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
346 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
347 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
348 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
349
350 Assert(pVM->patm.s.pStatsHC);
351 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
352 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
353
354 Assert(pVM->patm.s.pPatchMemHC);
355 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
356 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
357 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
358
359 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
360 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
361
362 Assert(pVM->patm.s.PatchLookupTreeHC);
363 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
364
365 /*
366 * (Re)Initialize PATM structure
367 */
368 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
369 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
370 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
371 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
372 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
373 pVM->patm.s.pvFaultMonitor = 0;
374 pVM->patm.s.deltaReloc = 0;
375
376 /* Lowest and highest patched instruction */
377 pVM->patm.s.pPatchedInstrGCLowest = ~0;
378 pVM->patm.s.pPatchedInstrGCHighest = 0;
379
380 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
381 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
382 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
383
384 pVM->patm.s.pfnSysEnterPatchGC = 0;
385 pVM->patm.s.pfnSysEnterGC = 0;
386
387 pVM->patm.s.fOutOfMemory = false;
388
389 pVM->patm.s.pfnHelperCallGC = 0;
390
391 /* Generate all global functions to be used by future patches. */
392 /* We generate a fake patch in order to use the existing code for relocation. */
393 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
394 if (RT_FAILURE(rc))
395 {
396 Log(("Out of memory!!!!\n"));
397 return VERR_NO_MEMORY;
398 }
399 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
400 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
401 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
402
403 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
404 AssertRC(rc);
405
406 /* Update free pointer in patch memory. */
407 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
408 /* Round to next 8 byte boundary. */
409 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
410 return rc;
411}
412
413
414/**
415 * Applies relocations to data and code managed by this
416 * component. This function will be called at init and
417 * whenever the VMM needs to relocate itself inside the GC.
418 *
419 * The PATM will update the addresses used by the switcher.
420 *
421 * @param pVM The VM.
422 */
423VMMR3DECL(void) PATMR3Relocate(PVM pVM)
424{
425 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
426 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
427
428 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
429 if (delta)
430 {
431 PCPUMCTX pCtx;
432
433 /* Update CPUMCTX guest context pointer. */
434 pVM->patm.s.pCPUMCtxGC += delta;
435
436 pVM->patm.s.deltaReloc = delta;
437
438 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
439
440 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
441
442 /* If we are running patch code right now, then also adjust EIP. */
443 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
444 pCtx->eip += delta;
445
446 pVM->patm.s.pGCStateGC = GCPtrNew;
447 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
448
449 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
450
451 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
452
453 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
454
455 if (pVM->patm.s.pfnSysEnterPatchGC)
456 pVM->patm.s.pfnSysEnterPatchGC += delta;
457
458 /* Deal with the global patch functions. */
459 pVM->patm.s.pfnHelperCallGC += delta;
460 pVM->patm.s.pfnHelperRetGC += delta;
461 pVM->patm.s.pfnHelperIretGC += delta;
462 pVM->patm.s.pfnHelperJumpGC += delta;
463
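/* The global helper functions live in a separate patch record that is not part
 * of the patch lookup tree, so relocate its fixups explicitly. */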
464 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
465 }
466}
467
468
469/**
470 * Terminates the PATM.
471 *
472 * Termination means cleaning up and freeing all resources;
473 * the VM itself is at this point powered off or suspended.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM to operate on.
477 */
478VMMR3DECL(int) PATMR3Term(PVM pVM)
479{
480 /* Memory was all allocated from the two MM heaps and requires no freeing. */
481 NOREF(pVM);
482 return VINF_SUCCESS;
483}
484
485
486/**
487 * PATM reset callback.
488 *
489 * @returns VBox status code.
490 * @param pVM The VM which is reset.
491 */
492VMMR3DECL(int) PATMR3Reset(PVM pVM)
493{
494 Log(("PATMR3Reset\n"));
495
496 /* Free all patches. */
497 while (true)
498 {
499 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
500 if (pPatchRec)
501 {
502 PATMRemovePatch(pVM, pPatchRec, true);
503 }
504 else
505 break;
506 }
507 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
508 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
509 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
510 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
511
512 int rc = patmReinit(pVM);
513 if (RT_SUCCESS(rc))
514 rc = PATMR3InitFinalize(pVM); /* paranoia */
515
516 return rc;
517}
518
519/**
520 * Read callback for disassembly function; supports reading bytes that cross a page boundary
521 *
522 * @returns VBox status code.
523 * @param pSrc GC source pointer
524 * @param pDest HC destination pointer
525 * @param size Number of bytes to read
526 * @param pvUserdata Callback specific user data (pCpu)
527 *
528 */
529int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
530{
531 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
532 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
533 int orgsize = size;
534
535 Assert(size);
536 if (size == 0)
537 return VERR_INVALID_PARAMETER;
538
539 /*
540 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
541 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
542 */
543 /** @todo could change in the future! */
544 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
545 {
546 for (int i=0;i<orgsize;i++)
547 {
548 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
549 if (RT_SUCCESS(rc))
550 {
551 pSrc++;
552 pDest++;
553 size--;
554 }
555 else break;
556 }
557 if (size == 0)
558 return VINF_SUCCESS;
559#ifdef VBOX_STRICT
560 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
561 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
562 {
563 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
564 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
565 }
566#endif
567 }
568
569 if ( !pDisInfo->pInstrHC
570 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1)
571 && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc)))
572 {
573 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, pSrc));
574 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
575 }
576 else
577 {
578 Assert(pDisInfo->pInstrHC);
579
580 uint8_t *pInstrHC = pDisInfo->pInstrHC;
581
582 Assert(pInstrHC);
583
584 /* pInstrHC is the base address; adjust according to the GC pointer. */
585 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
586
587 memcpy(pDest, (void *)pInstrHC, size);
588 }
589
590 return VINF_SUCCESS;
591}
592
593/**
594 * Callback function for RTAvloU32DoWithAll
595 *
596 * Updates all fixups in the patches
597 *
598 * @returns VBox status code.
599 * @param pNode Current node
600 * @param pParam The VM to operate on.
601 */
602static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
603{
604 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
605 PVM pVM = (PVM)pParam;
606 RTRCINTPTR delta;
607#ifdef LOG_ENABLED
608 DISCPUSTATE cpu;
609 char szOutput[256];
610 uint32_t opsize;
611 bool disret;
612#endif
613 int rc;
614
615 /* Nothing to do if the patch is not active. */
616 if (pPatch->patch.uState == PATCH_REFUSED)
617 return 0;
618
619#ifdef LOG_ENABLED
620 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
621 {
622 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
623 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
624 Log(("Org patch jump: %s", szOutput));
625 }
626#endif
627
628 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
629 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
630
631 /*
632 * Apply fixups
633 */
634 PRELOCREC pRec = 0;
635 AVLPVKEY key = 0;
636
637 while (true)
638 {
639 /* Get the record that's closest from above */
640 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
641 if (pRec == 0)
642 break;
643
644 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
645
646 switch (pRec->uType)
647 {
648 case FIXUP_ABSOLUTE:
649 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
650 if ( !pRec->pSource
651 || PATMIsPatchGCAddr(pVM, pRec->pSource))
652 {
653 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
654 }
655 else
656 {
657 uint8_t curInstr[15];
658 uint8_t oldInstr[15];
659 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
660
661 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
662
663 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
664 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
665
666 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
667 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
668
669 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
670
671 if ( rc == VERR_PAGE_NOT_PRESENT
672 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
673 {
674 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
675
676 Log(("PATM: Patch page not present -> check later!\n"));
677 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
678 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
679 }
680 else
681 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
682 {
683 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
684 /*
685 * Disable patch; this is not a good solution
686 */
687 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
688 pPatch->patch.uState = PATCH_DISABLED;
689 }
690 else
691 if (RT_SUCCESS(rc))
692 {
693 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
694 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
695 AssertRC(rc);
696 }
697 }
698 break;
699
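/* FIXUP_REL_JMPTOPATCH: a jump in guest code that targets patch code. The patch
 * target moved by the relocation delta, so the 32-bit displacement previously
 * written into the guest jump must be recomputed and written back (after
 * verifying the jump is still the one we installed). */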
700 case FIXUP_REL_JMPTOPATCH:
701 {
702 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
703
704 if ( pPatch->patch.uState == PATCH_ENABLED
705 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
706 {
707 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
708 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
709 RTRCPTR pJumpOffGC;
710 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
711 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
712
713#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
714 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
715#else
716 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
717#endif
718
719 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
720#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
721 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
722 {
723 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
724
725 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
726 oldJump[0] = pPatch->patch.aPrivInstr[0];
727 oldJump[1] = pPatch->patch.aPrivInstr[1];
728 *(RTRCUINTPTR *)&oldJump[2] = displOld;
729 }
730 else
731#endif
732 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
733 {
734 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
735 oldJump[0] = 0xE9;
736 *(RTRCUINTPTR *)&oldJump[1] = displOld;
737 }
738 else
739 {
740 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
741 continue; //this should never happen!!
742 }
743 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
744
745 /*
746 * Read old patch jump and compare it to the one we previously installed
747 */
748 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
749 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
750
751 if ( rc == VERR_PAGE_NOT_PRESENT
752 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
753 {
754 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
755
756 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
757 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
758 }
759 else
760 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
761 {
762 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
763 /*
764 * Disable patch; this is not a good solution
765 */
766 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
767 pPatch->patch.uState = PATCH_DISABLED;
768 }
769 else
770 if (RT_SUCCESS(rc))
771 {
772 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
773 AssertRC(rc);
774 }
775 else
776 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
777 }
778 else
779 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
780
781 pRec->pDest = pTarget;
782 break;
783 }
784
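/* FIXUP_REL_JMPTOGUEST: a jump in patch code that targets guest code. The guest
 * target is unchanged while the patch (source) address moved, so only the
 * displacement stored in patch memory needs adjusting. */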
785 case FIXUP_REL_JMPTOGUEST:
786 {
787 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
788 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
789
790 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
791 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
792 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
793 pRec->pSource = pSource;
794 break;
795 }
796
797 default:
798 AssertMsg(0, ("Invalid fixup type!!\n"));
799 return VERR_INVALID_PARAMETER;
800 }
801 }
802
803#ifdef LOG_ENABLED
804 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
805 {
806 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
807 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
808 Log(("Rel patch jump: %s", szOutput));
809 }
810#endif
811 return 0;
812}
813
814/**
815 * \#PF Handler callback for virtual access handler ranges.
816 *
817 * Important to realize that a physical page in a range can have aliases, and
818 * for ALL and WRITE handlers these will also trigger.
819 *
820 * @returns VINF_SUCCESS if the handler has carried out the operation.
821 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
822 * @param pVM VM Handle.
823 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
824 * @param pvPtr The HC mapping of that address.
825 * @param pvBuf What the guest is reading/writing.
826 * @param cbBuf How much it's reading/writing.
827 * @param enmAccessType The access type.
828 * @param pvUser User argument.
829 */
830DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
831 PGMACCESSTYPE enmAccessType, void *pvUser)
832{
833 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
834 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
835
836 /** @todo could be the wrong virtual address (alias) */
837 pVM->patm.s.pvFaultMonitor = GCPtr;
838 PATMR3HandleMonitoredPage(pVM);
839 return VINF_PGM_HANDLER_DO_DEFAULT;
840}
841
842
843#ifdef VBOX_WITH_DEBUGGER
844/**
845 * Callback function for RTAvloU32DoWithAll
846 *
847 * Enables the patch that's being enumerated
848 *
849 * @returns 0 (continue enumeration).
850 * @param pNode Current node
851 * @param pVM The VM to operate on.
852 */
853static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
854{
855 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
856
857 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
858 return 0;
859}
860#endif /* VBOX_WITH_DEBUGGER */
861
862
863#ifdef VBOX_WITH_DEBUGGER
864/**
865 * Callback function for RTAvloU32DoWithAll
866 *
867 * Disables the patch that's being enumerated
868 *
869 * @returns 0 (continue enumeration).
870 * @param pNode Current node
871 * @param pVM The VM to operate on.
872 */
873static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
874{
875 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
876
877 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
878 return 0;
879}
880#endif
881
882/**
883 * Returns the host context pointer and size of the patch memory block
884 *
885 * @returns Host context pointer to the patch memory block.
886 * @param pVM The VM to operate on.
887 * @param pcb Size of the patch memory block
888 */
889VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
890{
891 if (pcb)
892 *pcb = pVM->patm.s.cbPatchMem;
893
894 return pVM->patm.s.pPatchMemHC;
895}
896
897
898/**
899 * Returns the guest context pointer and size of the patch memory block
900 *
901 * @returns Guest context (RC) pointer to the patch memory block.
902 * @param pVM The VM to operate on.
903 * @param pcb Size of the patch memory block
904 */
905VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
906{
907 if (pcb)
908 *pcb = pVM->patm.s.cbPatchMem;
909
910 return pVM->patm.s.pPatchMemGC;
911}
912
913
914/**
915 * Returns the host context pointer of the GC context structure
916 *
917 * @returns Host context pointer to the GC state structure.
918 * @param pVM The VM to operate on.
919 */
920VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
921{
922 return pVM->patm.s.pGCStateHC;
923}
924
925
926/**
927 * Checks whether the HC address is part of our patch region
928 *
929 * @returns true if the address lies within the patch memory block, false otherwise.
930 * @param pVM The VM to operate on.
931 * @param pAddrHC Host context address
932 */
933VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
934{
935 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
936}
937
938
939/**
940 * Allows or disallows patching of privileged instructions executed by the guest OS
941 *
942 * @returns VBox status code.
943 * @param pVM The VM to operate on.
944 * @param fAllowPatching Allow/disallow patching
945 */
946VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
947{
948 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
949 return VINF_SUCCESS;
950}
951
952/**
953 * Convert a GC patch block pointer to a HC patch pointer
954 *
955 * @returns HC pointer or NULL if it's not a GC patch pointer
956 * @param pVM The VM to operate on.
957 * @param pAddrGC GC pointer
958 */
959VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
960{
961 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
962 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
963 else
964 return NULL;
965}
966
967/**
968 * Query PATM state (enabled/disabled)
969 *
970 * @returns 0 - disabled, 1 - enabled
971 * @param pVM The VM to operate on.
972 */
973VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
974{
975 return pVM->fPATMEnabled;
976}
977
978
979/**
980 * Convert guest context address to host context pointer
981 *
982 * @returns Host context pointer or NULL in case of an error.
983 * @param pVM The VM to operate on.
984 * @param pCacheRec Address conversion cache record
985 * @param pGCPtr Guest context pointer
986 *
989 */
990R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
991{
992 int rc;
993 R3PTRTYPE(uint8_t *) pHCPtr;
994 uint32_t offset;
995
996 if (PATMIsPatchGCAddr(pVM, pGCPtr))
997 {
998 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
999 Assert(pPatch);
1000 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1001 }
1002
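/* Fast path: reuse the one-page translation cache when the address lies on the
 * same guest page as the previous lookup. */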
1003 offset = pGCPtr & PAGE_OFFSET_MASK;
1004 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1005 return pCacheRec->pPageLocStartHC + offset;
1006
1007 /* Release previous lock if any. */
1008 if (pCacheRec->Lock.pvMap)
1009 {
1010 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1011 pCacheRec->Lock.pvMap = NULL;
1012 }
1013
1014 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1015 if (rc != VINF_SUCCESS)
1016 {
1017 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1018 return NULL;
1019 }
1020 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1021 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1022 return pHCPtr;
1023}
1024
1025
1026/* Calculates and fills in all branch targets
1027 *
1028 * @returns VBox status code.
1029 * @param pVM The VM to operate on.
1030 * @param pPatch Current patch block pointer
1031 *
1032 */
1033static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1034{
1035 int32_t displ;
1036
1037 PJUMPREC pRec = 0;
1038 unsigned nrJumpRecs = 0;
1039
1040 /*
1041 * Set all branch targets inside the patch block.
1042 * We remove all jump records as they are no longer needed afterwards.
1043 */
1044 while (true)
1045 {
1046 RCPTRTYPE(uint8_t *) pInstrGC;
1047 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1048
1049 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1050 if (pRec == 0)
1051 break;
1052
1053 nrJumpRecs++;
1054
1055 /* HC in patch block to GC in patch block. */
1056 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1057
1058 if (pRec->opcode == OP_CALL)
1059 {
1060 /* Special case: call function replacement patch from this patch block.
1061 */
1062 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1063 if (!pFunctionRec)
1064 {
1065 int rc;
1066
1067 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1068 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1069 else
1070 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1071
1072 if (RT_FAILURE(rc))
1073 {
1074 uint8_t *pPatchHC;
1075 RTRCPTR pPatchGC;
1076 RTRCPTR pOrgInstrGC;
1077
1078 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1079 Assert(pOrgInstrGC);
1080
1081 /* Failure for some reason -> mark exit point with int 3. */
1082 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1083
1084 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1085 Assert(pPatchGC);
1086
1087 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1088
1089 /* Set a breakpoint at the very beginning of the recompiled instruction */
1090 *pPatchHC = 0xCC;
1091
1092 continue;
1093 }
1094 }
1095 else
1096 {
1097 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1098 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1099 }
1100
1101 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1102 }
1103 else
1104 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1105
1106 if (pBranchTargetGC == 0)
1107 {
1108 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1109 return VERR_PATCHING_REFUSED;
1110 }
1111 /* Our jumps *always* have a dword displacement (to make things easier). */
1112 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
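/* The displacement is relative to the first byte after the 32-bit displacement
 * field, as for a regular x86 near jump/call. */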
1113 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1114 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1115 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1116 }
1117 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1118 Assert(pPatch->JumpTree == 0);
1119 return VINF_SUCCESS;
1120}
1121
1122/* Add an illegal instruction record
1123 *
1124 * @param pVM The VM to operate on.
1125 * @param pPatch Patch structure ptr
1126 * @param pInstrGC Guest context pointer to privileged instruction
1127 *
1128 */
1129static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1130{
1131 PAVLPVNODECORE pRec;
1132
1133 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1134 Assert(pRec);
1135 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1136
1137 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1138 Assert(ret); NOREF(ret);
1139 pPatch->pTempInfo->nrIllegalInstr++;
1140}
1141
1142static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1143{
1144 PAVLPVNODECORE pRec;
1145
1146 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1147 if (pRec)
1148 return true;
1149 else
1150 return false;
1151}
1152
1153/**
1154 * Add a patch to guest lookup record
1155 *
1156 * @param pVM The VM to operate on.
1157 * @param pPatch Patch structure ptr
1158 * @param pPatchInstrHC Guest context pointer to patch block
1159 * @param pInstrGC Guest context pointer to privileged instruction
1160 * @param enmType Lookup type
1161 * @param fDirty Dirty flag
1162 *
1163 */
1164 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1165void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1166{
1167 bool ret;
1168 PRECPATCHTOGUEST pPatchToGuestRec;
1169 PRECGUESTTOPATCH pGuestToPatchRec;
1170 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1171
1172 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1173 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1174
1175 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1176 {
1177 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1178 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1179 return; /* already there */
1180
1181 Assert(!pPatchToGuestRec);
1182 }
1183#ifdef VBOX_STRICT
1184 else
1185 {
1186 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1187 Assert(!pPatchToGuestRec);
1188 }
1189#endif
1190
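/* A single allocation holds both lookup records; the guest-to-patch record is
 * placed directly behind the patch-to-guest record (see pPatchToGuestRec + 1
 * below). */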
1191 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1192 Assert(pPatchToGuestRec);
1193 pPatchToGuestRec->Core.Key = PatchOffset;
1194 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1195 pPatchToGuestRec->enmType = enmType;
1196 pPatchToGuestRec->fDirty = fDirty;
1197
1198 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1199 Assert(ret);
1200
1201 /* GC to patch address */
1202 if (enmType == PATM_LOOKUP_BOTHDIR)
1203 {
1204 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1205 if (!pGuestToPatchRec)
1206 {
1207 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1208 pGuestToPatchRec->Core.Key = pInstrGC;
1209 pGuestToPatchRec->PatchOffset = PatchOffset;
1210
1211 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1212 Assert(ret);
1213 }
1214 }
1215
1216 pPatch->nrPatch2GuestRecs++;
1217}
1218
1219
1220/**
1221 * Removes a patch to guest lookup record
1222 *
1223 * @param pVM The VM to operate on.
1224 * @param pPatch Patch structure ptr
1225 * @param pPatchInstrGC Guest context pointer to patch block
1226 */
1227void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1228{
1229 PAVLU32NODECORE pNode;
1230 PAVLU32NODECORE pNode2;
1231 PRECPATCHTOGUEST pPatchToGuestRec;
1232 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1233
1234 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1235 Assert(pPatchToGuestRec);
1236 if (pPatchToGuestRec)
1237 {
1238 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1239 {
1240 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1241
1242 Assert(pGuestToPatchRec->Core.Key);
1243 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1244 Assert(pNode2);
1245 }
1246 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1247 Assert(pNode);
1248
1249 MMR3HeapFree(pPatchToGuestRec);
1250 pPatch->nrPatch2GuestRecs--;
1251 }
1252}
1253
1254
1255/**
1256 * RTAvlPVDestroy callback.
1257 */
1258static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1259{
1260 MMR3HeapFree(pNode);
1261 return 0;
1262}
1263
1264/**
1265 * Empty the specified tree (PV tree, MMR3 heap)
1266 *
1267 * @param pVM The VM to operate on.
1268 * @param ppTree Tree to empty
1269 */
1270void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1271{
1272 NOREF(pVM);
1273 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1274}
1275
1276
1277/**
1278 * RTAvlU32Destroy callback.
1279 */
1280static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1281{
1282 MMR3HeapFree(pNode);
1283 return 0;
1284}
1285
1286/**
1287 * Empty the specified tree (U32 tree, MMR3 heap)
1288 *
1289 * @param pVM The VM to operate on.
1290 * @param ppTree Tree to empty
1291 */
1292void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1293{
1294 NOREF(pVM);
1295 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1296}
1297
1298
1299/**
1300 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1301 *
1302 * @returns VBox status code.
1303 * @param pVM The VM to operate on.
1304 * @param pCpu CPU disassembly state
1305 * @param pInstrGC Guest context pointer to privileged instruction
1306 * @param pCurInstrGC Guest context pointer to the current instruction
1307 * @param pCacheRec Cache record ptr
1308 *
1309 */
1310static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1311{
1312 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1313 bool fIllegalInstr = false;
1314
1315 /*
1316 * Preliminary heuristics:
1317 * - no call instructions without a fixed displacement between cli and sti/popf
1318 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1319 * - no nested pushf/cli
1320 * - sti/popf should be the (eventual) target of all branches
1321 * - no near or far returns; no int xx, no into
1322 *
1323 * Note: Later on we can impose less strict guidelines if the need arises
1324 */
1325
1326 /* Bail out if the patch gets too big. */
1327 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1328 {
1329 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1330 fIllegalInstr = true;
1331 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1332 }
1333 else
1334 {
1335 /* No unconditional jumps or calls without fixed displacements. */
1336 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1337 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1338 )
1339 {
1340 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1341 if ( pCpu->param1.size == 6 /* far call/jmp */
1342 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1343 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1344 )
1345 {
1346 fIllegalInstr = true;
1347 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1348 }
1349 }
1350
1351 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1352 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1353 {
1354 if ( pCurInstrGC > pPatch->pPrivInstrGC
1355 && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1356 {
1357 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1358 /* We turn this one into a int 3 callable patch. */
1359 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1360 }
1361 }
1362 else
1363 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1364 if (pPatch->opcode == OP_PUSHF)
1365 {
1366 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1367 {
1368 fIllegalInstr = true;
1369 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1370 }
1371 }
1372
1373 /* no far returns */
1374 if (pCpu->pCurInstr->opcode == OP_RETF)
1375 {
1376 pPatch->pTempInfo->nrRetInstr++;
1377 fIllegalInstr = true;
1378 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1379 }
1380 else if ( pCpu->pCurInstr->opcode == OP_INT3
1381 || pCpu->pCurInstr->opcode == OP_INT
1382 || pCpu->pCurInstr->opcode == OP_INTO)
1383 {
1384 /* No int xx or into either. */
1385 fIllegalInstr = true;
1386 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1387 }
1388 }
1389
1390 pPatch->cbPatchBlockSize += pCpu->opsize;
1391
1392 /* Illegal instruction -> end of analysis phase for this code block */
1393 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1394 return VINF_SUCCESS;
1395
1396 /* Check for exit points. */
1397 switch (pCpu->pCurInstr->opcode)
1398 {
1399 case OP_SYSEXIT:
1400 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1401
1402 case OP_SYSENTER:
1403 case OP_ILLUD2:
1404 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1405 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1406 return VINF_SUCCESS;
1407
1408 case OP_STI:
1409 case OP_POPF:
1410 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1411 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1412 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1413 {
1414 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1415 return VERR_PATCHING_REFUSED;
1416 }
1417 if (pPatch->opcode == OP_PUSHF)
1418 {
1419 if (pCpu->pCurInstr->opcode == OP_POPF)
1420 {
1421 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1422 return VINF_SUCCESS;
1423
1424 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1425 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1426 pPatch->flags |= PATMFL_CHECK_SIZE;
1427 }
1428 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1429 }
1430 /* else: fall through. */
1431 case OP_RETN: /* exit point for function replacement */
1432 return VINF_SUCCESS;
1433
1434 case OP_IRET:
1435 return VINF_SUCCESS; /* exitpoint */
1436
1437 case OP_CPUID:
1438 case OP_CALL:
1439 case OP_JMP:
1440 break;
1441
1442 default:
1443 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1444 {
1445 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1446 return VINF_SUCCESS; /* exit point */
1447 }
1448 break;
1449 }
1450
1451 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1452 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1453 {
1454 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1455 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1456 return VINF_SUCCESS;
1457 }
1458
1459 return VWRN_CONTINUE_ANALYSIS;
1460}
1461
1462/**
1463 * Analyses the instructions inside a function for compliance
1464 *
1465 * @returns VBox status code.
1466 * @param pVM The VM to operate on.
1467 * @param pCpu CPU disassembly state
1468 * @param pInstrGC Guest context pointer to privileged instruction
1469 * @param pCurInstrGC Guest context pointer to the current instruction
1470 * @param pCacheRec Cache record ptr
1471 *
1472 */
1473static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1474{
1475 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1476 bool fIllegalInstr = false;
1477 NOREF(pInstrGC);
1478
1479 //Preliminary heuristics:
1480 //- no call instructions
1481 //- ret ends a block
1482
1483 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1484
1485 // bail out if the patch gets too big
1486 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1487 {
1488 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1489 fIllegalInstr = true;
1490 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1491 }
1492 else
1493 {
1494 // no unconditional jumps or calls without fixed displacements
1495 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1496 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1497 )
1498 {
1499 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1500 if ( pCpu->param1.size == 6 /* far call/jmp */
1501 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1502 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1503 )
1504 {
1505 fIllegalInstr = true;
1506 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1507 }
1508 }
1509 else /* no far returns */
1510 if (pCpu->pCurInstr->opcode == OP_RETF)
1511 {
1512 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1513 fIllegalInstr = true;
1514 }
1515 else /* no int xx or into either */
1516 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1517 {
1518 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1519 fIllegalInstr = true;
1520 }
1521
1522 #if 0
1523 ///@todo we can handle certain in/out and privileged instructions in the guest context
1524 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1525 {
1526 Log(("Illegal instructions for function patch!!\n"));
1527 return VERR_PATCHING_REFUSED;
1528 }
1529 #endif
1530 }
1531
1532 pPatch->cbPatchBlockSize += pCpu->opsize;
1533
1534 /* Illegal instruction -> end of analysis phase for this code block */
1535 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1536 {
1537 return VINF_SUCCESS;
1538 }
1539
1540 // Check for exit points
1541 switch (pCpu->pCurInstr->opcode)
1542 {
1543 case OP_ILLUD2:
1544             // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1545 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1546 return VINF_SUCCESS;
1547
1548 case OP_IRET:
1549         case OP_SYSEXIT: /* will fault or be emulated in GC */
1550 case OP_RETN:
1551 return VINF_SUCCESS;
1552
1553 case OP_POPF:
1554 case OP_STI:
1555 return VWRN_CONTINUE_ANALYSIS;
1556 default:
1557 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1558 {
1559 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1560 return VINF_SUCCESS; /* exit point */
1561 }
1562 return VWRN_CONTINUE_ANALYSIS;
1563 }
1564
1565 return VWRN_CONTINUE_ANALYSIS;
1566}
1567
1568/**
1569 * Recompiles the instructions in a code block
1570 *
1571 * @returns VBox status code.
1572 * @param pVM The VM to operate on.
1573 * @param pCpu CPU disassembly state
1574 * @param pInstrGC Guest context pointer to privileged instruction
1575 * @param pCurInstrGC Guest context pointer to the current instruction
1576 * @param pCacheRec Cache record ptr
1577 *
1578 */
1579static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1580{
1581 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1582 int rc = VINF_SUCCESS;
1583 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1584
1585 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1586
1587 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1588 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1589 {
1590 /*
1591          * Been there, done that; insert a jump instead (we don't want to duplicate code).
1592          * There is no need to record this instruction, as it's glue code that never crashes (it had better not!).
1593 */
1594 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1595 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1596 }
1597
1598 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1599 {
1600 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1601 }
1602 else
1603 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1604
1605 if (RT_FAILURE(rc))
1606 return rc;
1607
1608 /* Note: Never do a direct return unless a failure is encountered! */
1609
1610 /* Clear recompilation of next instruction flag; we are doing that right here. */
1611 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1612 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1613
1614 /* Add lookup record for patch to guest address translation */
1615 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1616
1617 /* Update lowest and highest instruction address for this patch */
1618 if (pCurInstrGC < pPatch->pInstrGCLowest)
1619 pPatch->pInstrGCLowest = pCurInstrGC;
1620 else
1621 if (pCurInstrGC > pPatch->pInstrGCHighest)
1622 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1623
1624 /* Illegal instruction -> end of recompile phase for this code block. */
1625 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1626 {
1627 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1628 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1629 goto end;
1630 }
1631
1632 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1633 * Indirect calls are handled below.
1634 */
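    /* "Simple relative" means the target is encoded as an immediate displacement in the
     * instruction itself, e.g. (illustrative encodings):
     *     jnz  short $+0x10       ; 75 0E           - rel8
     *     call $+0x1234           ; E8 2F 12 00 00  - rel32
     * Anything like 'jmp eax' or 'call [ebx*4+table]' carries no immediate target and is
     * handled by the OP_CALL/OP_JMP cases further down (or turned into an illegal
     * instruction marker). */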
1635 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1636 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1637 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1638 {
1639 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1640 if (pTargetGC == 0)
1641 {
1642 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1643 return VERR_PATCHING_REFUSED;
1644 }
1645
1646 if (pCpu->pCurInstr->opcode == OP_CALL)
1647 {
1648 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1649 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1650 if (RT_FAILURE(rc))
1651 goto end;
1652 }
1653 else
1654 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1655
1656 if (RT_SUCCESS(rc))
1657 rc = VWRN_CONTINUE_RECOMPILE;
1658
1659 goto end;
1660 }
1661
1662 switch (pCpu->pCurInstr->opcode)
1663 {
1664 case OP_CLI:
1665 {
1666 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1667 * until we've found the proper exit point(s).
1668 */
1669 if ( pCurInstrGC != pInstrGC
1670 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1671 )
1672 {
1673 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1674 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1675 }
1676 /* Set by irq inhibition; no longer valid now. */
1677 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1678
1679 rc = patmPatchGenCli(pVM, pPatch);
1680 if (RT_SUCCESS(rc))
1681 rc = VWRN_CONTINUE_RECOMPILE;
1682 break;
1683 }
1684
1685 case OP_MOV:
1686 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1687 {
1688 /* mov ss, src? */
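                /* Presumably the same one-instruction interrupt shadow as for 'pop ss' below:
                 * a write to SS delays interrupt delivery until after the next instruction
                 * (the classic 'mov ss, ax' / 'mov esp, ebx' pair), so that next instruction
                 * has to end up inside the patch as well. */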
1689 if ( (pCpu->param1.flags & USE_REG_SEG)
1690 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1691 {
1692 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1693 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1694 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1695 }
1696#if 0 /* necessary for Haiku */
1697 else
1698 if ( (pCpu->param2.flags & USE_REG_SEG)
1699 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1700 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1701 {
1702 /* mov GPR, ss */
1703 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1704 if (RT_SUCCESS(rc))
1705 rc = VWRN_CONTINUE_RECOMPILE;
1706 break;
1707 }
1708#endif
1709 }
1710 goto duplicate_instr;
1711
1712 case OP_POP:
1713 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1714 {
1715 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1716
1717             Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1718 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1719 }
1720 goto duplicate_instr;
1721
1722 case OP_STI:
1723 {
1724 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1725
1726 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
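            /* Typical guest pattern this covers:
             *     sti
             *     ret                     ; or iretd/sysexit
             * The instruction in the interrupt shadow still executes before any interrupt can
             * be delivered, which is why the next instruction is disassembled and checked below. */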
1727 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1728 {
1729 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1730 fInhibitIRQInstr = true;
1731 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1732 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1733 }
1734 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1735
1736 if (RT_SUCCESS(rc))
1737 {
1738 DISCPUSTATE cpu = *pCpu;
1739 unsigned opsize;
1740 int disret;
1741 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1742
1743 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1744
1745 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1746 { /* Force pNextInstrHC out of scope after using it */
1747 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1748 if (pNextInstrHC == NULL)
1749 {
1750 AssertFailed();
1751 return VERR_PATCHING_REFUSED;
1752 }
1753
1754 // Disassemble the next instruction
1755 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1756 }
1757 if (disret == false)
1758 {
1759 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1760 return VERR_PATCHING_REFUSED;
1761 }
1762 pReturnInstrGC = pNextInstrGC + opsize;
1763
1764 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1765 || pReturnInstrGC <= pInstrGC
1766 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1767 )
1768 {
1769 /* Not an exit point for function duplication patches */
1770 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1771 && RT_SUCCESS(rc))
1772 {
1773 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1774 rc = VWRN_CONTINUE_RECOMPILE;
1775 }
1776 else
1777 rc = VINF_SUCCESS; //exit point
1778 }
1779 else {
1780 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1781 rc = VERR_PATCHING_REFUSED; //not allowed!!
1782 }
1783 }
1784 break;
1785 }
1786
1787 case OP_POPF:
1788 {
1789 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1790
1791 /* Not an exit point for IDT handler or function replacement patches */
1792 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1793 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1794 fGenerateJmpBack = false;
1795
1796 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1797 if (RT_SUCCESS(rc))
1798 {
1799 if (fGenerateJmpBack == false)
1800 {
1801 /* Not an exit point for IDT handler or function replacement patches */
1802 rc = VWRN_CONTINUE_RECOMPILE;
1803 }
1804 else
1805 {
1806 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1807 rc = VINF_SUCCESS; /* exit point! */
1808 }
1809 }
1810 break;
1811 }
1812
1813 case OP_PUSHF:
1814 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1815 if (RT_SUCCESS(rc))
1816 rc = VWRN_CONTINUE_RECOMPILE;
1817 break;
1818
1819 case OP_PUSH:
1820 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1821 {
1822 rc = patmPatchGenPushCS(pVM, pPatch);
1823 if (RT_SUCCESS(rc))
1824 rc = VWRN_CONTINUE_RECOMPILE;
1825 break;
1826 }
1827 goto duplicate_instr;
1828
1829 case OP_IRET:
1830 Log(("IRET at %RRv\n", pCurInstrGC));
1831 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1832 if (RT_SUCCESS(rc))
1833 {
1834 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1835 rc = VINF_SUCCESS; /* exit point by definition */
1836 }
1837 break;
1838
1839 case OP_ILLUD2:
1840             /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1841 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1842 if (RT_SUCCESS(rc))
1843 rc = VINF_SUCCESS; /* exit point by definition */
1844 Log(("Illegal opcode (0xf 0xb)\n"));
1845 break;
1846
1847 case OP_CPUID:
1848 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1849 if (RT_SUCCESS(rc))
1850 rc = VWRN_CONTINUE_RECOMPILE;
1851 break;
1852
1853 case OP_STR:
1854 case OP_SLDT:
1855 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1856 if (RT_SUCCESS(rc))
1857 rc = VWRN_CONTINUE_RECOMPILE;
1858 break;
1859
1860 case OP_SGDT:
1861 case OP_SIDT:
1862 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1863 if (RT_SUCCESS(rc))
1864 rc = VWRN_CONTINUE_RECOMPILE;
1865 break;
1866
1867 case OP_RETN:
1868 /* retn is an exit point for function patches */
1869 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1870 if (RT_SUCCESS(rc))
1871 rc = VINF_SUCCESS; /* exit point by definition */
1872 break;
1873
1874 case OP_SYSEXIT:
1875 /* Duplicate it, so it can be emulated in GC (or fault). */
1876 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1877 if (RT_SUCCESS(rc))
1878 rc = VINF_SUCCESS; /* exit point by definition */
1879 break;
1880
1881 case OP_CALL:
1882 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1883 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1884 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1885 */
1886 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
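            /* The real target of an indirect call is only known at run time, so a dummy
             * address is passed below; the final 'true' argument presumably marks the
             * generated call as indirect so the destination is resolved when the patch runs. */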
1887 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1888 {
1889 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1890 if (RT_SUCCESS(rc))
1891 {
1892 rc = VWRN_CONTINUE_RECOMPILE;
1893 }
1894 break;
1895 }
1896 goto gen_illegal_instr;
1897
1898 case OP_JMP:
1899 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1900 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1901 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1902 */
1903 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1904 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1905 {
1906 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1907 if (RT_SUCCESS(rc))
1908 rc = VINF_SUCCESS; /* end of branch */
1909 break;
1910 }
1911 goto gen_illegal_instr;
1912
1913 case OP_INT3:
1914 case OP_INT:
1915 case OP_INTO:
1916 goto gen_illegal_instr;
1917
1918 case OP_MOV_DR:
1919 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1920 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1921 {
1922 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1923 if (RT_SUCCESS(rc))
1924 rc = VWRN_CONTINUE_RECOMPILE;
1925 break;
1926 }
1927 goto duplicate_instr;
1928
1929 case OP_MOV_CR:
1930 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1931 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1932 {
1933 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1934 if (RT_SUCCESS(rc))
1935 rc = VWRN_CONTINUE_RECOMPILE;
1936 break;
1937 }
1938 goto duplicate_instr;
1939
1940 default:
1941 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1942 {
1943gen_illegal_instr:
1944 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1945 if (RT_SUCCESS(rc))
1946 rc = VINF_SUCCESS; /* exit point by definition */
1947 }
1948 else
1949 {
1950duplicate_instr:
1951 Log(("patmPatchGenDuplicate\n"));
1952 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1953 if (RT_SUCCESS(rc))
1954 rc = VWRN_CONTINUE_RECOMPILE;
1955 }
1956 break;
1957 }
1958
1959end:
1960
1961 if ( !fInhibitIRQInstr
1962 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1963 {
1964 int rc2;
1965 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1966
1967 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1968 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1969 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1970 {
1971 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1972
1973 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1974 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1975 rc = VINF_SUCCESS; /* end of the line */
1976 }
1977 else
1978 {
1979 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1980 }
1981 if (RT_FAILURE(rc2))
1982 rc = rc2;
1983 }
1984
1985 if (RT_SUCCESS(rc))
1986 {
1987 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1988 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1989 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1990 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1991 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1992 )
1993 {
1994 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1995
1996 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1997 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1998
1999 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2000 AssertRC(rc);
2001 }
2002 }
2003 return rc;
2004}
2005
2006
2007#ifdef LOG_ENABLED
2008
2009 /** Adds a disasm jump record (temporary, to prevent duplicate analysis).
2010 *
2011 * @param pVM The VM to operate on.
2012 * @param pPatch Patch structure ptr
2013  * @param pInstrGC Guest context pointer to the jump target
2014 *
2015 */
2016static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2017{
2018 PAVLPVNODECORE pRec;
2019
2020 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2021 Assert(pRec);
2022 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2023
2024 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2025 Assert(ret);
2026}
2027
2028/**
2029 * Checks if jump target has been analysed before.
2030 *
2031  * @returns true if the jump target has been analysed before, false otherwise.
2032 * @param pPatch Patch struct
2033 * @param pInstrGC Jump target
2034 *
2035 */
2036static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2037{
2038 PAVLPVNODECORE pRec;
2039
2040 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2041 if (pRec)
2042 return true;
2043 return false;
2044}
2045
2046/**
2047 * For proper disassembly of the final patch block
2048 *
2049 * @returns VBox status code.
2050 * @param pVM The VM to operate on.
2051 * @param pCpu CPU disassembly state
2052 * @param pInstrGC Guest context pointer to privileged instruction
2053 * @param pCurInstrGC Guest context pointer to the current instruction
2054 * @param pCacheRec Cache record ptr
2055 *
2056 */
2057int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2058{
2059 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2060 NOREF(pInstrGC);
2061
2062 if (pCpu->pCurInstr->opcode == OP_INT3)
2063 {
2064 /* Could be an int3 inserted in a call patch. Check to be sure */
2065 DISCPUSTATE cpu;
2066 RTRCPTR pOrgJumpGC;
2067 uint32_t dummy;
2068
2069 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2070 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2071
2072 { /* Force pOrgJumpHC out of scope after using it */
2073 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2074
2075 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2076 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2077 return VINF_SUCCESS;
2078 }
2079 return VWRN_CONTINUE_ANALYSIS;
2080 }
2081
2082 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2083 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2084 {
2085 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2086 return VWRN_CONTINUE_ANALYSIS;
2087 }
2088
2089 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2090 || pCpu->pCurInstr->opcode == OP_INT
2091 || pCpu->pCurInstr->opcode == OP_IRET
2092 || pCpu->pCurInstr->opcode == OP_RETN
2093 || pCpu->pCurInstr->opcode == OP_RETF
2094 )
2095 {
2096 return VINF_SUCCESS;
2097 }
2098
2099 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2100 return VINF_SUCCESS;
2101
2102 return VWRN_CONTINUE_ANALYSIS;
2103}
2104
2105
2106/**
2107 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2108 *
2109 * @returns VBox status code.
2110 * @param pVM The VM to operate on.
2111 * @param pInstrGC Guest context pointer to the initial privileged instruction
2112 * @param pCurInstrGC Guest context pointer to the current instruction
2113 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2114 * @param pCacheRec Cache record ptr
2115 *
2116 */
2117int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2118{
2119 DISCPUSTATE cpu;
2120 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2121 int rc = VWRN_CONTINUE_ANALYSIS;
2122 uint32_t opsize, delta;
2123 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2124 bool disret;
2125 char szOutput[256];
2126
2127 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2128
2129 /* We need this to determine branch targets (and for disassembling). */
2130 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2131
2132 while(rc == VWRN_CONTINUE_ANALYSIS)
2133 {
2134 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2135
2136 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2137 if (pCurInstrHC == NULL)
2138 {
2139 rc = VERR_PATCHING_REFUSED;
2140 goto end;
2141 }
2142
2143 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2144 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2145 {
2146 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2147
2148 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2149 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2150 else
2151 Log(("DIS %s", szOutput));
2152
2153 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2154 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2155 {
2156 rc = VINF_SUCCESS;
2157 goto end;
2158 }
2159 }
2160 else
2161 Log(("DIS: %s", szOutput));
2162
2163 if (disret == false)
2164 {
2165 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2166 rc = VINF_SUCCESS;
2167 goto end;
2168 }
2169
2170 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2171 if (rc != VWRN_CONTINUE_ANALYSIS) {
2172 break; //done!
2173 }
2174
2175 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2176 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2177 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2178 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2179 )
2180 {
2181 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2182 RTRCPTR pOrgTargetGC;
2183
2184 if (pTargetGC == 0)
2185 {
2186 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2187 rc = VERR_PATCHING_REFUSED;
2188 break;
2189 }
2190
2191 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2192 {
2193 //jump back to guest code
2194 rc = VINF_SUCCESS;
2195 goto end;
2196 }
2197 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2198
2199 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2200 {
2201 rc = VINF_SUCCESS;
2202 goto end;
2203 }
2204
2205 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2206 {
2207 /* New jump, let's check it. */
2208 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2209
2210 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2211 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2212 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2213
2214 if (rc != VINF_SUCCESS) {
2215 break; //done!
2216 }
2217 }
2218 if (cpu.pCurInstr->opcode == OP_JMP)
2219 {
2220 /* Unconditional jump; return to caller. */
2221 rc = VINF_SUCCESS;
2222 goto end;
2223 }
2224
2225 rc = VWRN_CONTINUE_ANALYSIS;
2226 }
2227 pCurInstrGC += opsize;
2228 }
2229end:
2230 return rc;
2231}
2232
2233/**
2234  * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable, then frees the temporary disasm jump records.
2235 *
2236 * @returns VBox status code.
2237 * @param pVM The VM to operate on.
2238 * @param pInstrGC Guest context pointer to the initial privileged instruction
2239 * @param pCurInstrGC Guest context pointer to the current instruction
2240 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2241 * @param pCacheRec Cache record ptr
2242 *
2243 */
2244int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2245{
2246 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2247
2248 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2249 /* Free all disasm jump records. */
2250 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2251 return rc;
2252}
2253
2254#endif /* LOG_ENABLED */
2255
2256/**
2257  * Detects whether the specified address falls within the 5-byte jump generated for an active patch.
2258  * If so, the conflicting patch is permanently disabled.
2259 *
2260 * @param pVM The VM to operate on.
2261 * @param pInstrGC Guest context pointer to instruction
2262 * @param pConflictGC Guest context pointer to check
2263 *
2264 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2265 *
2266 */
2267VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2268{
2269 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2270 if (pTargetPatch)
2271 {
2272 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2273 }
2274 return VERR_PATCH_NO_CONFLICT;
2275}
2276
2277/**
2278 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2279 *
2280 * @returns VBox status code.
2281 * @param pVM The VM to operate on.
2282 * @param pInstrGC Guest context pointer to privileged instruction
2283 * @param pCurInstrGC Guest context pointer to the current instruction
2284 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2285 * @param pCacheRec Cache record ptr
2286 *
2287 */
2288static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2289{
2290 DISCPUSTATE cpu;
2291 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2292 int rc = VWRN_CONTINUE_ANALYSIS;
2293 uint32_t opsize;
2294 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2295 bool disret;
2296#ifdef LOG_ENABLED
2297 char szOutput[256];
2298#endif
2299
2300 while (rc == VWRN_CONTINUE_RECOMPILE)
2301 {
2302 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2303
2304 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2305 if (pCurInstrHC == NULL)
2306 {
2307 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2308 goto end;
2309 }
2310#ifdef LOG_ENABLED
2311 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2312 Log(("Recompile: %s", szOutput));
2313#else
2314 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2315#endif
2316 if (disret == false)
2317 {
2318 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2319
2320 /* Add lookup record for patch to guest address translation */
2321 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2322 patmPatchGenIllegalInstr(pVM, pPatch);
2323 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2324 goto end;
2325 }
2326
2327 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2328 if (rc != VWRN_CONTINUE_RECOMPILE)
2329 {
2330 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2331 if ( rc == VINF_SUCCESS
2332 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2333 {
2334 DISCPUSTATE cpunext;
2335 uint32_t opsizenext;
2336 uint8_t *pNextInstrHC;
2337 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2338
2339 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2340
2341 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2342 * Recompile the next instruction as well
2343 */
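                /* e.g. for a guest sequence like
                 *     sti
                 *     sysexit
                 * the sysexit sits in the interrupt shadow and is pulled into the patch here
                 * (see the switch and recompile call below); a following cli instead simply
                 * keeps the normal recompilation loop going. */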
2344 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2345 if (pNextInstrHC == NULL)
2346 {
2347 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2348 goto end;
2349 }
2350 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2351 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2352 if (disret == false)
2353 {
2354 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2355 goto end;
2356 }
2357 switch(cpunext.pCurInstr->opcode)
2358 {
2359 case OP_IRET: /* inhibit cleared in generated code */
2360 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2361 case OP_HLT:
2362 break; /* recompile these */
2363
2364 default:
2365 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2366 {
2367 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2368
2369 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2370 AssertRC(rc);
2371 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2372 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2373 }
2374 break;
2375 }
2376
2377 /* Note: after a cli we must continue to a proper exit point */
2378 if (cpunext.pCurInstr->opcode != OP_CLI)
2379 {
2380 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2381 if (RT_SUCCESS(rc))
2382 {
2383 rc = VINF_SUCCESS;
2384 goto end;
2385 }
2386 break;
2387 }
2388 else
2389 rc = VWRN_CONTINUE_RECOMPILE;
2390 }
2391 else
2392 break; /* done! */
2393 }
2394
2395 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2396
2397
2398 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2399 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2400 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2401 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2402 )
2403 {
2404 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2405 if (addr == 0)
2406 {
2407 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2408 rc = VERR_PATCHING_REFUSED;
2409 break;
2410 }
2411
2412 Log(("Jump encountered target %RRv\n", addr));
2413
2414 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2415 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2416 {
2417 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2418 /* First we need to finish this linear code stream until the next exit point. */
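                /* e.g. for 'jnz label' both paths are needed: the not-taken path starting at
                 * pCurInstrGC + opsize is recompiled first (the recursive call right below),
                 * and 'label' itself is handled afterwards if it wasn't recompiled already. */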
2419 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2420 if (RT_FAILURE(rc))
2421 {
2422 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2423 break; //fatal error
2424 }
2425 }
2426
2427 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2428 {
2429 /* New code; let's recompile it. */
2430 Log(("patmRecompileCodeStream continue with jump\n"));
2431
2432 /*
2433 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2434 * this patch so we can continue our analysis
2435 *
2436 * We rely on CSAM to detect and resolve conflicts
2437 */
2438 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2439 if(pTargetPatch)
2440 {
2441 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2442 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2443 }
2444
2445 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2446 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2447 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2448
2449 if(pTargetPatch)
2450 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2451
2452 if (RT_FAILURE(rc))
2453 {
2454 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2455 break; //done!
2456 }
2457 }
2458 /* Always return to caller here; we're done! */
2459 rc = VINF_SUCCESS;
2460 goto end;
2461 }
2462 else
2463 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2464 {
2465 rc = VINF_SUCCESS;
2466 goto end;
2467 }
2468 pCurInstrGC += opsize;
2469 }
2470end:
2471 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2472 return rc;
2473}
2474
2475
2476/**
2477 * Generate the jump from guest to patch code
2478 *
2479 * @returns VBox status code.
2480 * @param pVM The VM to operate on.
2481 * @param pPatch Patch record
2482 * @param pCacheRec Guest translation lookup cache record
2483 */
2484static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2485{
2486 uint8_t temp[8];
2487 uint8_t *pPB;
2488 int rc;
2489
2490 Assert(pPatch->cbPatchJump <= sizeof(temp));
2491 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2492
2493 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2494 Assert(pPB);
2495
2496#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2497 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2498 {
2499 Assert(pPatch->pPatchJumpDestGC);
2500
2501 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2502 {
2503 // jmp [PatchCode]
2504 if (fAddFixup)
2505 {
2506 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2507 {
2508 Log(("Relocation failed for the jump in the guest code!!\n"));
2509 return VERR_PATCHING_REFUSED;
2510 }
2511 }
2512
2513 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2514 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2515 }
2516 else
2517 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2518 {
2519 // jmp [PatchCode]
2520 if (fAddFixup)
2521 {
2522 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2523 {
2524 Log(("Relocation failed for the jump in the guest code!!\n"));
2525 return VERR_PATCHING_REFUSED;
2526 }
2527 }
2528
2529 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2530 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2531 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2532 }
2533 else
2534 {
2535 Assert(0);
2536 return VERR_PATCHING_REFUSED;
2537 }
2538 }
2539 else
2540#endif
2541 {
2542 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2543
2544 // jmp [PatchCode]
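        /* The bytes assembled below form a plain near jump into patch memory, in essence:
         *     temp[0]    = 0xE9                                     ; jmp rel32
         *     temp[1..4] = PATCHCODE_PTR_GC - (pPrivInstrGC + 5)    ; signed displacement
         * i.e. the displacement is relative to the first byte after the 5-byte jump. */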
2545 if (fAddFixup)
2546 {
2547 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2548 {
2549 Log(("Relocation failed for the jump in the guest code!!\n"));
2550 return VERR_PATCHING_REFUSED;
2551 }
2552 }
2553 temp[0] = 0xE9; //jmp
2554 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2555 }
2556 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2557 AssertRC(rc);
2558
2559 if (rc == VINF_SUCCESS)
2560 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2561
2562 return rc;
2563}
2564
2565/**
2566 * Remove the jump from guest to patch code
2567 *
2568 * @returns VBox status code.
2569 * @param pVM The VM to operate on.
2570 * @param pPatch Patch record
2571 */
2572static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2573{
2574#ifdef DEBUG
2575 DISCPUSTATE cpu;
2576 char szOutput[256];
2577 uint32_t opsize, i = 0;
2578 bool disret;
2579
2580 while (i < pPatch->cbPrivInstr)
2581 {
2582 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2583 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2584 if (disret == false)
2585 break;
2586
2587 Log(("Org patch jump: %s", szOutput));
2588 Assert(opsize);
2589 i += opsize;
2590 }
2591#endif
2592
2593 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2594 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2595#ifdef DEBUG
2596 if (rc == VINF_SUCCESS)
2597 {
2598 i = 0;
2599 while(i < pPatch->cbPrivInstr)
2600 {
2601 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2602 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2603 if (disret == false)
2604 break;
2605
2606 Log(("Org instr: %s", szOutput));
2607 Assert(opsize);
2608 i += opsize;
2609 }
2610 }
2611#endif
2612 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2613 return rc;
2614}
2615
2616/**
2617 * Generate the call from guest to patch code
2618 *
2619 * @returns VBox status code.
2620 * @param pVM The VM to operate on.
2621 * @param pPatch Patch record
2622  * @param pTargetGC Guest context address of the patch code to call or jump to
2623 * @param pCacheRec Guest translation cache record
2624 */
2625static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2626{
2627 uint8_t temp[8];
2628 uint8_t *pPB;
2629 int rc;
2630
2631 Assert(pPatch->cbPatchJump <= sizeof(temp));
2632
2633 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2634 Assert(pPB);
2635
2636 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2637
2638 // jmp [PatchCode]
2639 if (fAddFixup)
2640 {
2641 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2642 {
2643 Log(("Relocation failed for the jump in the guest code!!\n"));
2644 return VERR_PATCHING_REFUSED;
2645 }
2646 }
2647
2648 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2649 temp[0] = pPatch->aPrivInstr[0];
2650 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2651
2652 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2653 AssertRC(rc);
2654
2655 return rc;
2656}
2657
2658
2659/**
2660 * Patch cli/sti pushf/popf instruction block at specified location
2661 *
2662 * @returns VBox status code.
2663 * @param pVM The VM to operate on.
2664  * @param pInstrGC Guest context pointer to the privileged instruction
2665  * @param pInstrHC Host context pointer to the privileged instruction
2666 * @param uOpcode Instruction opcode
2667 * @param uOpSize Size of starting instruction
2668 * @param pPatchRec Patch record
2669 *
2670  * @note Returns failure if patching is not allowed or not possible.
2671 *
2672 */
2673VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2674 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2675{
2676 PPATCHINFO pPatch = &pPatchRec->patch;
2677 int rc = VERR_PATCHING_REFUSED;
2678 DISCPUSTATE cpu;
2679 uint32_t orgOffsetPatchMem = ~0;
2680 RTRCPTR pInstrStart;
2681 bool fInserted;
2682#ifdef LOG_ENABLED
2683 uint32_t opsize;
2684 char szOutput[256];
2685 bool disret;
2686#endif
2687 NOREF(pInstrHC); NOREF(uOpSize);
2688
2689 /* Save original offset (in case of failures later on) */
2690 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2691 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2692
2693 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2694 switch (uOpcode)
2695 {
2696 case OP_MOV:
2697 break;
2698
2699 case OP_CLI:
2700 case OP_PUSHF:
2701 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2702 /* Note: special precautions are taken when disabling and enabling such patches. */
2703 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2704 break;
2705
2706 default:
2707 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2708 {
2709 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2710 return VERR_INVALID_PARAMETER;
2711 }
2712 }
2713
2714 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2715 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2716
2717 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2718 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2719 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2720 )
2721 {
2722 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2723 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2724 rc = VERR_PATCHING_REFUSED;
2725 goto failure;
2726 }
2727
2728 pPatch->nrPatch2GuestRecs = 0;
2729 pInstrStart = pInstrGC;
2730
2731#ifdef PATM_ENABLE_CALL
2732 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2733#endif
2734
2735 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2736 pPatch->uCurPatchOffset = 0;
2737
2738 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2739
2740 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2741 {
2742 Assert(pPatch->flags & PATMFL_INTHANDLER);
2743
2744 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2745 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2746 if (RT_FAILURE(rc))
2747 goto failure;
2748 }
2749
2750 /***************************************************************************************************************************/
2751     /* Note: We can't insert *any* code before a sysenter handler; some Linux guests have an invalid stack at this point!!!!! */
2752 /***************************************************************************************************************************/
2753#ifdef VBOX_WITH_STATISTICS
2754 if (!(pPatch->flags & PATMFL_SYSENTER))
2755 {
2756 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2757 if (RT_FAILURE(rc))
2758 goto failure;
2759 }
2760#endif
2761
2762 PATMP2GLOOKUPREC cacheRec;
2763 RT_ZERO(cacheRec);
2764 cacheRec.pPatch = pPatch;
2765
2766 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2767 /* Free leftover lock if any. */
2768 if (cacheRec.Lock.pvMap)
2769 {
2770 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2771 cacheRec.Lock.pvMap = NULL;
2772 }
2773 if (rc != VINF_SUCCESS)
2774 {
2775         Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2776 goto failure;
2777 }
2778
2779 /* Calculated during analysis. */
2780 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2781 {
2782 /* Most likely cause: we encountered an illegal instruction very early on. */
2783 /** @todo could turn it into an int3 callable patch. */
2784 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2785 rc = VERR_PATCHING_REFUSED;
2786 goto failure;
2787 }
2788
2789 /* size of patch block */
2790 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2791
2792
2793 /* Update free pointer in patch memory. */
2794 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2795 /* Round to next 8 byte boundary. */
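    /* RT_ALIGN_32(x, 8) rounds x up to the next multiple of 8 (e.g. 0x123 -> 0x128), so the
     * next patch block will start on an 8-byte boundary. */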
2796 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2797
2798 /*
2799 * Insert into patch to guest lookup tree
2800 */
2801 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2802 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2803 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2804 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2805 if (!fInserted)
2806 {
2807 rc = VERR_PATCHING_REFUSED;
2808 goto failure;
2809 }
2810
2811 /* Note that patmr3SetBranchTargets can install additional patches!! */
2812 rc = patmr3SetBranchTargets(pVM, pPatch);
2813 if (rc != VINF_SUCCESS)
2814 {
2815         Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2816 goto failure;
2817 }
2818
2819#ifdef LOG_ENABLED
2820 Log(("Patch code ----------------------------------------------------------\n"));
2821 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2822 /* Free leftover lock if any. */
2823 if (cacheRec.Lock.pvMap)
2824 {
2825 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2826 cacheRec.Lock.pvMap = NULL;
2827 }
2828 Log(("Patch code ends -----------------------------------------------------\n"));
2829#endif
2830
2831 /* make a copy of the guest code bytes that will be overwritten */
2832 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2833
2834 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2835 AssertRC(rc);
2836
2837 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2838 {
2839 /*uint8_t ASMInt3 = 0xCC; - unused */
2840
2841 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2842 /* Replace first opcode byte with 'int 3'. */
2843 rc = patmActivateInt3Patch(pVM, pPatch);
2844 if (RT_FAILURE(rc))
2845 goto failure;
2846
2847 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2848 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2849
2850 pPatch->flags &= ~PATMFL_INSTR_HINT;
2851 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2852 }
2853 else
2854 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2855 {
2856 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2857 /* now insert a jump in the guest code */
2858 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2859 AssertRC(rc);
2860 if (RT_FAILURE(rc))
2861 goto failure;
2862
2863 }
2864
2865#ifdef LOG_ENABLED
2866 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2867 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
2868 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2869#endif
2870
2871 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2872 pPatch->pTempInfo->nrIllegalInstr = 0;
2873
2874 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2875
2876 pPatch->uState = PATCH_ENABLED;
2877 return VINF_SUCCESS;
2878
2879failure:
2880 if (pPatchRec->CoreOffset.Key)
2881 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2882
2883 patmEmptyTree(pVM, &pPatch->FixupTree);
2884 pPatch->nrFixups = 0;
2885
2886 patmEmptyTree(pVM, &pPatch->JumpTree);
2887 pPatch->nrJumpRecs = 0;
2888
2889 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2890 pPatch->pTempInfo->nrIllegalInstr = 0;
2891
2892 /* Turn this cli patch into a dummy. */
2893 pPatch->uState = PATCH_REFUSED;
2894 pPatch->pPatchBlockOffset = 0;
2895
2896 // Give back the patch memory we no longer need
2897 Assert(orgOffsetPatchMem != (uint32_t)~0);
2898 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2899
2900 return rc;
2901}
2902
2903/**
2904 * Patch IDT handler
2905 *
2906 * @returns VBox status code.
2907 * @param pVM The VM to operate on.
2908  * @param pInstrGC Guest context pointer to the privileged instruction
2909 * @param uOpSize Size of starting instruction
2910 * @param pPatchRec Patch record
2911 * @param pCacheRec Cache record ptr
2912 *
2913  * @note Returns failure if patching is not allowed or not possible.
2914 *
2915 */
2916static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2917{
2918 PPATCHINFO pPatch = &pPatchRec->patch;
2919 bool disret;
2920 DISCPUSTATE cpuPush, cpuJmp;
2921 uint32_t opsize;
2922 RTRCPTR pCurInstrGC = pInstrGC;
2923 uint8_t *pCurInstrHC, *pInstrHC;
2924 uint32_t orgOffsetPatchMem = ~0;
2925
2926 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2927 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2928
2929 /*
2930 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2931 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2932  * condition here and only patch the common entrypoint once.
2933 */
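    /* A typical guest stub this matches looks roughly like:
     *     push  <vector-specific value>
     *     jmp   <common handler>
     * so only the shared handler needs a full patch; each stub then just gets its push
     * duplicated plus a jump into that existing patch code (see the generation below). */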
2934 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2935 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2936 Assert(disret);
2937 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2938 {
2939 RTRCPTR pJmpInstrGC;
2940 int rc;
2941 pCurInstrGC += opsize;
2942
2943 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2944 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2945 if ( disret
2946 && cpuJmp.pCurInstr->opcode == OP_JMP
2947 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2948 )
2949 {
2950 bool fInserted;
2951 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2952 if (pJmpPatch == 0)
2953 {
2954 /* Patch it first! */
2955 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2956 if (rc != VINF_SUCCESS)
2957 goto failure;
2958 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2959 Assert(pJmpPatch);
2960 }
2961 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2962 goto failure;
2963
2964 /* save original offset (in case of failures later on) */
2965 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2966
2967 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2968 pPatch->uCurPatchOffset = 0;
2969 pPatch->nrPatch2GuestRecs = 0;
2970
2971#ifdef VBOX_WITH_STATISTICS
2972 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2973 if (RT_FAILURE(rc))
2974 goto failure;
2975#endif
2976
2977 /* Install fake cli patch (to clear the virtual IF) */
2978 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2979 if (RT_FAILURE(rc))
2980 goto failure;
2981
2982 /* Add lookup record for patch to guest address translation (for the push) */
2983 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2984
2985 /* Duplicate push. */
2986 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2987 if (RT_FAILURE(rc))
2988 goto failure;
2989
2990 /* Generate jump to common entrypoint. */
2991 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2992 if (RT_FAILURE(rc))
2993 goto failure;
2994
2995 /* size of patch block */
2996 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2997
2998 /* Update free pointer in patch memory. */
2999 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3000 /* Round to next 8 byte boundary */
3001 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3002
3003 /* There's no jump from guest to patch code. */
3004 pPatch->cbPatchJump = 0;
3005
3006
3007#ifdef LOG_ENABLED
3008 Log(("Patch code ----------------------------------------------------------\n"));
3009 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3010 Log(("Patch code ends -----------------------------------------------------\n"));
3011#endif
3012 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3013
3014 /*
3015 * Insert into patch to guest lookup tree
3016 */
3017 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3018 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3019 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3020 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3021
3022 pPatch->uState = PATCH_ENABLED;
3023
3024 return VINF_SUCCESS;
3025 }
3026 }
3027failure:
3028 /* Give back the patch memory we no longer need */
3029 if (orgOffsetPatchMem != (uint32_t)~0)
3030 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3031
3032 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3033}
3034
3035/**
3036 * Install a trampoline to call a guest trap handler directly
3037 *
3038 * @returns VBox status code.
3039 * @param pVM The VM to operate on.
3040  * @param pInstrGC Guest context pointer to the privileged instruction
3041 * @param pPatchRec Patch record
3042 * @param pCacheRec Cache record ptr
3043 *
3044 */
3045static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3046{
3047 PPATCHINFO pPatch = &pPatchRec->patch;
3048 int rc = VERR_PATCHING_REFUSED;
3049 uint32_t orgOffsetPatchMem = ~0;
3050 bool fInserted;
3051#ifdef LOG_ENABLED
3052 bool disret;
3053 DISCPUSTATE cpu;
3054 uint32_t opsize;
3055 char szOutput[256];
3056#endif
3057
3058 // save original offset (in case of failures later on)
3059 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3060
3061 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3062 pPatch->uCurPatchOffset = 0;
3063 pPatch->nrPatch2GuestRecs = 0;
3064
3065#ifdef VBOX_WITH_STATISTICS
3066 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3067 if (RT_FAILURE(rc))
3068 goto failure;
3069#endif
3070
3071 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3072 if (RT_FAILURE(rc))
3073 goto failure;
3074
3075 /* size of patch block */
3076 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3077
3078 /* Update free pointer in patch memory. */
3079 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3080 /* Round to next 8 byte boundary */
3081 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3082
3083 /* There's no jump from guest to patch code. */
3084 pPatch->cbPatchJump = 0;
3085
3086#ifdef LOG_ENABLED
3087 Log(("Patch code ----------------------------------------------------------\n"));
3088 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3089 Log(("Patch code ends -----------------------------------------------------\n"));
3090#endif
3091
3092#ifdef LOG_ENABLED
3093 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3094 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3095 Log(("TRAP handler patch: %s", szOutput));
3096#endif
3097 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3098
3099 /*
3100 * Insert into patch to guest lookup tree
3101 */
3102 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3103 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3104 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3105 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3106
3107 pPatch->uState = PATCH_ENABLED;
3108 return VINF_SUCCESS;
3109
3110failure:
3111 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3112
3113 /* Turn this patch into a dummy. */
3114 pPatch->uState = PATCH_REFUSED;
3115 pPatch->pPatchBlockOffset = 0;
3116
3117 /* Give back the patch memory we no longer need */
3118 Assert(orgOffsetPatchMem != (uint32_t)~0);
3119 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3120
3121 return rc;
3122}
3123
3124
3125#ifdef LOG_ENABLED
3126/**
3127 * Check if the instruction is patched as a common IDT handler
3128 *
3129 * @returns true or false
3130 * @param pVM The VM to operate on.
3131 * @param pInstrGC Guest context pointer to the instruction
3132 *
3133 */
3134static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3135{
3136 PPATMPATCHREC pRec;
3137
3138 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3139 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3140 return true;
3141 return false;
3142}
3143#endif /* LOG_ENABLED */
3144
3145
3146/**
3147 * Duplicates a complete function
3148 *
3149 * @returns VBox status code.
3150 * @param pVM The VM to operate on.
3151 * @param pInstrGC Guest context pointer to the privileged instruction
3152 * @param pPatchRec Patch record
3153 * @param pCacheRec Cache record ptr
3154 *
3155 */
3156static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3157{
3158 PPATCHINFO pPatch = &pPatchRec->patch;
3159 int rc = VERR_PATCHING_REFUSED;
3160 DISCPUSTATE cpu;
3161 uint32_t orgOffsetPatchMem = ~0;
3162 bool fInserted;
3163
3164 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3165 /* Save original offset (in case of failures later on). */
3166 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3167
3168 /* We will not go on indefinitely with call instruction handling. */
3169 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3170 {
3171 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3172 return VERR_PATCHING_REFUSED;
3173 }
3174
3175 pVM->patm.s.ulCallDepth++;
3176
3177#ifdef PATM_ENABLE_CALL
3178 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3179#endif
3180
3181 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3182
3183 pPatch->nrPatch2GuestRecs = 0;
3184 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3185 pPatch->uCurPatchOffset = 0;
3186
3187 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3188
3189 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3190 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3191 if (RT_FAILURE(rc))
3192 goto failure;
3193
3194#ifdef VBOX_WITH_STATISTICS
3195 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3196 if (RT_FAILURE(rc))
3197 goto failure;
3198#endif
3199
3200 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3201 if (rc != VINF_SUCCESS)
3202 {
3203 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3204 goto failure;
3205 }
3206
3207 /* Size of patch block. */
3208 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3209
3210 /* Update free pointer in patch memory. */
3211 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3212 /* Round to next 8 byte boundary. */
3213 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3214
3215 pPatch->uState = PATCH_ENABLED;
3216
3217 /*
3218 * Insert into patch to guest lookup tree
3219 */
3220 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3221 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3222 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3223 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3224 if (!fInserted)
3225 {
3226 rc = VERR_PATCHING_REFUSED;
3227 goto failure;
3228 }
3229
3230 /* Note that patmr3SetBranchTargets can install additional patches!! */
3231 rc = patmr3SetBranchTargets(pVM, pPatch);
3232 if (rc != VINF_SUCCESS)
3233 {
3234 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3235 goto failure;
3236 }
3237
3238#ifdef LOG_ENABLED
3239 Log(("Patch code ----------------------------------------------------------\n"));
3240 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3241 Log(("Patch code ends -----------------------------------------------------\n"));
3242#endif
3243
3244 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3245
3246 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3247 pPatch->pTempInfo->nrIllegalInstr = 0;
3248
3249 pVM->patm.s.ulCallDepth--;
3250 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3251 return VINF_SUCCESS;
3252
3253failure:
3254 if (pPatchRec->CoreOffset.Key)
3255 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3256
3257 patmEmptyTree(pVM, &pPatch->FixupTree);
3258 pPatch->nrFixups = 0;
3259
3260 patmEmptyTree(pVM, &pPatch->JumpTree);
3261 pPatch->nrJumpRecs = 0;
3262
3263 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3264 pPatch->pTempInfo->nrIllegalInstr = 0;
3265
3266 /* Turn this patch into a dummy. */
3267 pPatch->uState = PATCH_REFUSED;
3268 pPatch->pPatchBlockOffset = 0;
3269
3270 // Give back the patch memory we no longer need
3271 Assert(orgOffsetPatchMem != (uint32_t)~0);
3272 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3273
3274 pVM->patm.s.ulCallDepth--;
3275 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3276 return rc;
3277}
3278
3279/**
3280 * Creates trampoline code to jump inside an existing patch
3281 *
3282 * @returns VBox status code.
3283 * @param pVM The VM to operate on.
3284 * @param pInstrGC Guest context pointer to the privileged instruction
3285 * @param pPatchRec Patch record
3286 *
3287 */
3288static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3289{
3290 PPATCHINFO pPatch = &pPatchRec->patch;
3291 RTRCPTR pPage, pPatchTargetGC = 0;
3292 uint32_t orgOffsetPatchMem = ~0;
3293 int rc = VERR_PATCHING_REFUSED;
3294 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3295 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3296 bool fInserted = false;
3297
3298 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3299 /* Save original offset (in case of failures later on). */
3300 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3301
3302 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3303 /** @todo we already checked this before */
3304 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3305
3306 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3307 if (pPatchPage)
3308 {
3309 uint32_t i;
3310
3311 for (i=0;i<pPatchPage->cCount;i++)
3312 {
3313 if (pPatchPage->aPatch[i])
3314 {
3315 pPatchToJmp = pPatchPage->aPatch[i];
3316
3317 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3318 && pPatchToJmp->uState == PATCH_ENABLED)
3319 {
3320 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3321 if (pPatchTargetGC)
3322 {
3323 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3324 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3325 Assert(pPatchToGuestRec);
3326
3327 pPatchToGuestRec->fJumpTarget = true;
3328 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3329 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3330 break;
3331 }
3332 }
3333 }
3334 }
3335 }
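 /* The jump target must lie within an enabled function duplicate; otherwise we refuse to build the trampoline. */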
3336 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3337
3338 /*
3339 * Only record the trampoline patch if this is the first patch to the target
3340 * or if we have already recorded other trampoline patches for it.
3341 * The goal is to refuse refreshing function duplicates if the guest
3342 * modifies code after a saved state was loaded because it is not possible
3343 * to save the relation between trampoline and target without changing the
3344 * saved state version.
3345 */
3346 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3347 || pPatchToJmp->pTrampolinePatchesHead)
3348 {
3349 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3350 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3351 if (!pTrampRec)
3352 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3353
3354 pTrampRec->pPatchTrampoline = pPatchRec;
3355 }
3356
3357 pPatch->nrPatch2GuestRecs = 0;
3358 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3359 pPatch->uCurPatchOffset = 0;
3360
3361 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3362 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3363 if (RT_FAILURE(rc))
3364 goto failure;
3365
3366#ifdef VBOX_WITH_STATISTICS
3367 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3368 if (RT_FAILURE(rc))
3369 goto failure;
3370#endif
3371
3372 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3373 if (RT_FAILURE(rc))
3374 goto failure;
3375
3376 /*
3377 * Insert into patch to guest lookup tree
3378 */
3379 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3380 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3381 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3382 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3383 if (!fInserted)
3384 {
3385 rc = VERR_PATCHING_REFUSED;
3386 goto failure;
3387 }
3388
3389 /* size of patch block */
3390 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3391
3392 /* Update free pointer in patch memory. */
3393 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3394 /* Round to next 8 byte boundary */
3395 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3396
3397 /* There's no jump from guest to patch code. */
3398 pPatch->cbPatchJump = 0;
3399
3400 /* Enable the patch. */
3401 pPatch->uState = PATCH_ENABLED;
3402 /* We allow this patch to be called as a function. */
3403 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3404
3405 if (pTrampRec)
3406 {
3407 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3408 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3409 }
3410 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3411 return VINF_SUCCESS;
3412
3413failure:
3414 if (pPatchRec->CoreOffset.Key)
3415 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3416
3417 patmEmptyTree(pVM, &pPatch->FixupTree);
3418 pPatch->nrFixups = 0;
3419
3420 patmEmptyTree(pVM, &pPatch->JumpTree);
3421 pPatch->nrJumpRecs = 0;
3422
3423 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3424 pPatch->pTempInfo->nrIllegalInstr = 0;
3425
3426 /* Turn this patch into a dummy. */
3427 pPatch->uState = PATCH_REFUSED;
3428 pPatch->pPatchBlockOffset = 0;
3429
3430 // Give back the patch memory we no longer need
3431 Assert(orgOffsetPatchMem != (uint32_t)~0);
3432 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3433
3434 if (pTrampRec)
3435 MMR3HeapFree(pTrampRec);
3436
3437 return rc;
3438}
3439
3440
3441/**
3442 * Patch branch target function for call/jump at specified location.
3443 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3444 *
3445 * @returns VBox status code.
3446 * @param pVM The VM to operate on.
3447 * @param pCtx Guest context
3448 *
3449 */
3450VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3451{
3452 RTRCPTR pBranchTarget, pPage;
3453 int rc;
3454 RTRCPTR pPatchTargetGC = 0;
3455
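 /* The patch code passes the branch target in EDX; convert it to a flat address before looking it up. */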
3456 pBranchTarget = pCtx->edx;
3457 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3458
3459 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3460 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3461
3462 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3463 if (pPatchPage)
3464 {
3465 uint32_t i;
3466
3467 for (i=0;i<pPatchPage->cCount;i++)
3468 {
3469 if (pPatchPage->aPatch[i])
3470 {
3471 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3472
3473 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3474 && pPatch->uState == PATCH_ENABLED)
3475 {
3476 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3477 if (pPatchTargetGC)
3478 {
3479 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3480 break;
3481 }
3482 }
3483 }
3484 }
3485 }
3486
3487 if (pPatchTargetGC)
3488 {
3489 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3490 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3491 }
3492 else
3493 {
3494 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3495 }
3496
3497 if (rc == VINF_SUCCESS)
3498 {
3499 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3500 Assert(pPatchTargetGC);
3501 }
3502
3503 if (pPatchTargetGC)
3504 {
3505 pCtx->eax = pPatchTargetGC;
3506 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3507 }
3508 else
3509 {
3510 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3511 pCtx->eax = 0;
3512 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3513 }
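 /* EDI points into patch memory at the branch lookup cache; record the result (EAX) there so this branch target can be resolved from the cache next time. */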
3514 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3515 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3516 AssertRC(rc);
3517
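 /* Skip the illegal instruction that triggered this request. */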
3518 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3519 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3520 return VINF_SUCCESS;
3521}
3522
3523/**
3524 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3525 *
3526 * @returns VBox status code.
3527 * @param pVM The VM to operate on.
3528 * @param pCpu Disassembly CPU structure ptr
3529 * @param pInstrGC Guest context pointer to the privileged instruction
3530 * @param pCacheRec Cache record ptr
3531 *
3532 */
3533static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3534{
3535 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3536 int rc = VERR_PATCHING_REFUSED;
3537 DISCPUSTATE cpu;
3538 RTRCPTR pTargetGC;
3539 PPATMPATCHREC pPatchFunction;
3540 uint32_t opsize;
3541 bool disret;
3542#ifdef LOG_ENABLED
3543 char szOutput[256];
3544#endif
3545
3546 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3547 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3548
3549 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3550 {
3551 rc = VERR_PATCHING_REFUSED;
3552 goto failure;
3553 }
3554
3555 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3556 if (pTargetGC == 0)
3557 {
3558 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3559 rc = VERR_PATCHING_REFUSED;
3560 goto failure;
3561 }
3562
3563 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3564 if (pPatchFunction == NULL)
3565 {
3566 for(;;)
3567 {
3568 /* It could be an indirect call (call -> jmp dest).
3569 * Note that it's dangerous to assume the jump will never change...
3570 */
3571 uint8_t *pTmpInstrHC;
3572
3573 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3574 Assert(pTmpInstrHC);
3575 if (pTmpInstrHC == 0)
3576 break;
3577
3578 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3579 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3580 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3581 break;
3582
3583 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3584 if (pTargetGC == 0)
3585 {
3586 break;
3587 }
3588
3589 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3590 break;
3591 }
3592 if (pPatchFunction == 0)
3593 {
3594 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3595 rc = VERR_PATCHING_REFUSED;
3596 goto failure;
3597 }
3598 }
3599
3600 // make a copy of the guest code bytes that will be overwritten
3601 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3602
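 /* A near call/jump with a 32-bit displacement is 5 bytes (opcode + rel32); save those bytes so the original code can be restored if the patch is removed. */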
3603 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3604 AssertRC(rc);
3605
3606 /* Now replace the original call in the guest code */
3607 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3608 AssertRC(rc);
3609 if (RT_FAILURE(rc))
3610 goto failure;
3611
3612 /* Lowest and highest address for write monitoring. */
3613 pPatch->pInstrGCLowest = pInstrGC;
3614 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3615
3616#ifdef LOG_ENABLED
3617 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3618 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3619 Log(("Call patch: %s", szOutput));
3620#endif
3621
3622 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3623
3624 pPatch->uState = PATCH_ENABLED;
3625 return VINF_SUCCESS;
3626
3627failure:
3628 /* Turn this patch into a dummy. */
3629 pPatch->uState = PATCH_REFUSED;
3630
3631 return rc;
3632}
3633
3634/**
3635 * Replace the address in an MMIO instruction with the cached version.
3636 *
3637 * @returns VBox status code.
3638 * @param pVM The VM to operate on.
3639 * @param pInstrGC Guest context pointer to the privileged instruction
3640 * @param pCpu Disassembly CPU structure ptr
3641 * @param pCacheRec Cache record ptr
3642 *
3643 * @note returns failure if patching is not allowed or possible
3644 *
3645 */
3646static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3647{
3648 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3649 uint8_t *pPB;
3650 int rc = VERR_PATCHING_REFUSED;
3651#ifdef LOG_ENABLED
3652 DISCPUSTATE cpu;
3653 uint32_t opsize;
3654 bool disret;
3655 char szOutput[256];
3656#endif
3657
3658 Assert(pVM->patm.s.mmio.pCachedData);
3659 if (!pVM->patm.s.mmio.pCachedData)
3660 goto failure;
3661
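 /* We only handle instructions whose second operand is a 32-bit displacement (the MMIO address). */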
3662 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3663 goto failure;
3664
3665 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3666 if (pPB == 0)
3667 goto failure;
3668
3669 /* Add relocation record for cached data access. */
3670 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3671 {
3672 Log(("Relocation failed for cached mmio address!!\n"));
3673 return VERR_PATCHING_REFUSED;
3674 }
3675#ifdef LOG_ENABLED
3676 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3677 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3678 Log(("MMIO patch old instruction: %s", szOutput));
3679#endif
3680
3681 /* Save original instruction. */
3682 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3683 AssertRC(rc);
3684
3685 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3686
3687 /* Replace address with that of the cached item. */
3688 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3689 AssertRC(rc);
3690 if (RT_FAILURE(rc))
3691 {
3692 goto failure;
3693 }
3694
3695#ifdef LOG_ENABLED
3696 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3697 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3698 Log(("MMIO patch: %s", szOutput));
3699#endif
3700 pVM->patm.s.mmio.pCachedData = 0;
3701 pVM->patm.s.mmio.GCPhys = 0;
3702 pPatch->uState = PATCH_ENABLED;
3703 return VINF_SUCCESS;
3704
3705failure:
3706 /* Turn this patch into a dummy. */
3707 pPatch->uState = PATCH_REFUSED;
3708
3709 return rc;
3710}
3711
3712
3713/**
3714 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3715 *
3716 * @returns VBox status code.
3717 * @param pVM The VM to operate on.
3718 * @param pInstrGC Guest context pointer to the privileged instruction
3719 * @param pPatch Patch record
3720 *
3721 * @note returns failure if patching is not allowed or possible
3722 *
3723 */
3724static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3725{
3726 DISCPUSTATE cpu;
3727 uint32_t opsize;
3728 bool disret;
3729 uint8_t *pInstrHC;
3730#ifdef LOG_ENABLED
3731 char szOutput[256];
3732#endif
3733
3734 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3735
3736 /* Convert GC to HC address. */
3737 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3738 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3739
3740 /* Disassemble mmio instruction. */
3741 cpu.mode = pPatch->uOpMode;
3742 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3743 if (disret == false)
3744 {
3745 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3746 return VERR_PATCHING_REFUSED;
3747 }
3748
3749 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3750 if (opsize > MAX_INSTR_SIZE)
3751 return VERR_PATCHING_REFUSED;
3752 if (cpu.param2.flags != USE_DISPLACEMENT32)
3753 return VERR_PATCHING_REFUSED;
3754
3755 /* Add relocation record for cached data access. */
3756 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3757 {
3758 Log(("Relocation failed for cached mmio address!!\n"));
3759 return VERR_PATCHING_REFUSED;
3760 }
3761 /* Replace address with that of the cached item. */
3762 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3763
3764 /* Lowest and highest address for write monitoring. */
3765 pPatch->pInstrGCLowest = pInstrGC;
3766 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3767
3768#ifdef LOG_ENABLED
3769 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3770 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3771 Log(("MMIO patch: %s", szOutput));
3772#endif
3773
3774 pVM->patm.s.mmio.pCachedData = 0;
3775 pVM->patm.s.mmio.GCPhys = 0;
3776 return VINF_SUCCESS;
3777}
3778
3779/**
3780 * Activates an int3 patch
3781 *
3782 * @returns VBox status code.
3783 * @param pVM The VM to operate on.
3784 * @param pPatch Patch record
3785 */
3786static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3787{
3788 uint8_t ASMInt3 = 0xCC;
3789 int rc;
3790
3791 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3792 Assert(pPatch->uState != PATCH_ENABLED);
3793
3794 /* Replace first opcode byte with 'int 3'. */
3795 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3796 AssertRC(rc);
3797
3798 pPatch->cbPatchJump = sizeof(ASMInt3);
3799
3800 return rc;
3801}
3802
3803/**
3804 * Deactivates an int3 patch
3805 *
3806 * @returns VBox status code.
3807 * @param pVM The VM to operate on.
3808 * @param pPatch Patch record
3809 */
3810static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3811{
3812 uint8_t ASMInt3 = 0xCC;
3813 int rc;
3814
3815 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3816 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3817
3818 /* Restore first opcode byte. */
3819 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3820 AssertRC(rc);
3821 return rc;
3822}
3823
3824/**
3825 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
3826 *
3827 * @returns VBox status code.
3828 * @param pVM The VM to operate on.
3829 * @param pInstrGC Guest context pointer to the privileged instruction
3830 * @param pInstrHC Host context pointer to the privileged instruction
3831 * @param pCpu Disassembly CPU structure ptr
3832 * @param pPatch Patch record
3833 *
3834 * @note returns failure if patching is not allowed or possible
3835 *
3836 */
3837VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3838{
3839 uint8_t ASMInt3 = 0xCC;
3840 int rc;
3841
3842 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3843
3844#ifdef LOG_ENABLED
3845 DISCPUSTATE cpu;
3846 char szOutput[256];
3847 uint32_t opsize;
3848
3849 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3850 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3851 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3852#endif
3853
3854 /* Save the original instruction. */
3855 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3856 AssertRC(rc);
3857 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3858
3859 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3860
3861 /* Replace first opcode byte with 'int 3'. */
3862 rc = patmActivateInt3Patch(pVM, pPatch);
3863 if (RT_FAILURE(rc))
3864 goto failure;
3865
3866 /* Lowest and highest address for write monitoring. */
3867 pPatch->pInstrGCLowest = pInstrGC;
3868 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3869
3870 pPatch->uState = PATCH_ENABLED;
3871 return VINF_SUCCESS;
3872
3873failure:
3874 /* Turn this patch into a dummy. */
3875 return VERR_PATCHING_REFUSED;
3876}
3877
3878#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3879/**
3880 * Patch a jump instruction at specified location
3881 *
3882 * @returns VBox status code.
3883 * @param pVM The VM to operate on.
3884 * @param pInstrGC Guest context pointer to the privileged instruction
3885 * @param pInstrHC Host context pointer to the privileged instruction
3886 * @param pCpu Disassembly CPU structure ptr
3887 * @param pPatchRec Patch record
3888 *
3889 * @note returns failure if patching is not allowed or possible
3890 *
3891 */
3892int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3893{
3894 PPATCHINFO pPatch = &pPatchRec->patch;
3895 int rc = VERR_PATCHING_REFUSED;
3896#ifdef LOG_ENABLED
3897 bool disret;
3898 DISCPUSTATE cpu;
3899 uint32_t opsize;
3900 char szOutput[256];
3901#endif
3902
3903 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3904 pPatch->uCurPatchOffset = 0;
3905 pPatch->cbPatchBlockSize = 0;
3906 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3907
3908 /*
3909 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3910 * make sure this never happens (unless a trap is triggered, intentionally or not).
3911 */
3912 switch (pCpu->pCurInstr->opcode)
3913 {
3914 case OP_JO:
3915 case OP_JNO:
3916 case OP_JC:
3917 case OP_JNC:
3918 case OP_JE:
3919 case OP_JNE:
3920 case OP_JBE:
3921 case OP_JNBE:
3922 case OP_JS:
3923 case OP_JNS:
3924 case OP_JP:
3925 case OP_JNP:
3926 case OP_JL:
3927 case OP_JNL:
3928 case OP_JLE:
3929 case OP_JNLE:
3930 case OP_JMP:
3931 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3932 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3933 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3934 goto failure;
3935
3936 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3937 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3938 goto failure;
3939
3940 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3941 {
3942 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3943 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3944 rc = VERR_PATCHING_REFUSED;
3945 goto failure;
3946 }
3947
3948 break;
3949
3950 default:
3951 goto failure;
3952 }
3953
3954 // make a copy of the guest code bytes that will be overwritten
3955 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3956 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3957 pPatch->cbPatchJump = pCpu->opsize;
3958
3959 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3960 AssertRC(rc);
3961
3962 /* Now insert a jump in the guest code. */
3963 /*
3964 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3965 * references the target instruction in the conflict patch.
3966 */
3967 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3968
3969 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3970 pPatch->pPatchJumpDestGC = pJmpDest;
3971
3972 PATMP2GLOOKUPREC cacheRec;
3973 RT_ZERO(cacheRec);
3974 cacheRec.pPatch = pPatch;
3975
3976 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3977 /* Free leftover lock if any. */
3978 if (cacheRec.Lock.pvMap)
3979 {
3980 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3981 cacheRec.Lock.pvMap = NULL;
3982 }
3983 AssertRC(rc);
3984 if (RT_FAILURE(rc))
3985 goto failure;
3986
3987 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3988
3989#ifdef LOG_ENABLED
3990 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3991 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3992 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3993#endif
3994
3995 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3996
3997 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3998
3999 /* Lowest and highest address for write monitoring. */
4000 pPatch->pInstrGCLowest = pInstrGC;
4001 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4002
4003 pPatch->uState = PATCH_ENABLED;
4004 return VINF_SUCCESS;
4005
4006failure:
4007 /* Turn this patch into a dummy. */
4008 pPatch->uState = PATCH_REFUSED;
4009
4010 return rc;
4011}
4012#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4013
4014
4015/**
4016 * Gives hint to PATM about supervisor guest instructions
4017 *
4018 * @returns VBox status code.
4019 * @param pVM The VM to operate on.
4020 * @param pInstrGC Guest context pointer to the privileged instruction
4021 * @param flags Patch flags
4022 */
4023VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4024{
4025 Assert(pInstrGC);
4026 Assert(flags == PATMFL_CODE32);
4027
4028 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4029 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4030}
4031
4032/**
4033 * Patch privileged instruction at specified location
4034 *
4035 * @returns VBox status code.
4036 * @param pVM The VM to operate on.
4037 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
4038 * @param flags Patch flags
4039 *
4040 * @note returns failure if patching is not allowed or possible
4041 */
4042VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4043{
4044 DISCPUSTATE cpu;
4045 R3PTRTYPE(uint8_t *) pInstrHC;
4046 uint32_t opsize;
4047 PPATMPATCHREC pPatchRec;
4048 PCPUMCTX pCtx = 0;
4049 bool disret;
4050 int rc;
4051 PVMCPU pVCpu = VMMGetCpu0(pVM);
4052
4053 if ( !pVM
4054 || pInstrGC == 0
4055 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4056 {
4057 AssertFailed();
4058 return VERR_INVALID_PARAMETER;
4059 }
4060
4061 if (PATMIsEnabled(pVM) == false)
4062 return VERR_PATCHING_REFUSED;
4063
4064 /* Test for patch conflict only with patches that actually change guest code. */
4065 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4066 {
4067 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4068 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4069 if (pConflictPatch != 0)
4070 return VERR_PATCHING_REFUSED;
4071 }
4072
4073 if (!(flags & PATMFL_CODE32))
4074 {
4075 /** @todo Only 32 bits code right now */
4076 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4077 return VERR_NOT_IMPLEMENTED;
4078 }
4079
4080 /* We ran out of patch memory; don't bother anymore. */
4081 if (pVM->patm.s.fOutOfMemory == true)
4082 return VERR_PATCHING_REFUSED;
4083
4084 /* Make sure the code selector is wide open; otherwise refuse. */
4085 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4086 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4087 {
4088 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4089 if (pInstrGCFlat != pInstrGC)
4090 {
4091 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4092 return VERR_PATCHING_REFUSED;
4093 }
4094 }
4095
4096 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4097 if (!(flags & PATMFL_GUEST_SPECIFIC))
4098 {
4099 /* New code. Make sure CSAM has a go at it first. */
4100 CSAMR3CheckCode(pVM, pInstrGC);
4101 }
4102
4103 /* Note: obsolete */
4104 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4105 && (flags & PATMFL_MMIO_ACCESS))
4106 {
4107 RTRCUINTPTR offset;
4108 void *pvPatchCoreOffset;
4109
4110 /* Find the patch record. */
4111 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4112 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4113 if (pvPatchCoreOffset == NULL)
4114 {
4115 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4116 return VERR_PATCH_NOT_FOUND; //fatal error
4117 }
4118 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4119
4120 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4121 }
4122
4123 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4124
4125 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4126 if (pPatchRec)
4127 {
4128 Assert(!(flags & PATMFL_TRAMPOLINE));
4129
4130 /* Hints about existing patches are ignored. */
4131 if (flags & PATMFL_INSTR_HINT)
4132 return VERR_PATCHING_REFUSED;
4133
4134 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4135 {
4136 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4137 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4138 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4139 }
4140
4141 if (pPatchRec->patch.uState == PATCH_DISABLED)
4142 {
4143 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4144 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4145 {
4146 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4147 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4148 }
4149 else
4150 Log(("Enabling patch %RRv again\n", pInstrGC));
4151
4152 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4153 rc = PATMR3EnablePatch(pVM, pInstrGC);
4154 if (RT_SUCCESS(rc))
4155 return VWRN_PATCH_ENABLED;
4156
4157 return rc;
4158 }
4159 if ( pPatchRec->patch.uState == PATCH_ENABLED
4160 || pPatchRec->patch.uState == PATCH_DIRTY)
4161 {
4162 /*
4163 * The patch might have been overwritten.
4164 */
4165 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4166 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4167 {
4168 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4169 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4170 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4171 {
4172 if (flags & PATMFL_IDTHANDLER)
4173 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4174
4175 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4176 }
4177 }
4178 rc = PATMR3RemovePatch(pVM, pInstrGC);
4179 if (RT_FAILURE(rc))
4180 return VERR_PATCHING_REFUSED;
4181 }
4182 else
4183 {
4184 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4185 /* already tried it once! */
4186 return VERR_PATCHING_REFUSED;
4187 }
4188 }
4189
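 /* Query the physical address backing the instruction so the ROM/MMIO check below can be performed. */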
4190 RTGCPHYS GCPhys;
4191 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4192 if (rc != VINF_SUCCESS)
4193 {
4194 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4195 return rc;
4196 }
4197 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4198 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4199 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4200 {
4201 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4202 return VERR_PATCHING_REFUSED;
4203 }
4204
4205 /* Initialize cache record for guest address translations. */
4206 bool fInserted;
4207 PATMP2GLOOKUPREC cacheRec;
4208 RT_ZERO(cacheRec);
4209
4210 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4211 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4212
4213 /* Allocate patch record. */
4214 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4215 if (RT_FAILURE(rc))
4216 {
4217 Log(("Out of memory!!!!\n"));
4218 return VERR_NO_MEMORY;
4219 }
4220 pPatchRec->Core.Key = pInstrGC;
4221 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4222 /* Insert patch record into the lookup tree. */
4223 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4224 Assert(fInserted);
4225
4226 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4227 pPatchRec->patch.flags = flags;
4228 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4229 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4230
4231 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4232 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4233
4234 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4235 {
4236 /*
4237 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4238 */
4239 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4240 if (pPatchNear)
4241 {
4242 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4243 {
4244 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4245
4246 pPatchRec->patch.uState = PATCH_UNUSABLE;
4247 /*
4248 * Leave the new patch active as it's marked unusable, to prevent us from checking it over and over again
4249 */
4250 return VERR_PATCHING_REFUSED;
4251 }
4252 }
4253 }
4254
4255 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4256 if (pPatchRec->patch.pTempInfo == 0)
4257 {
4258 Log(("Out of memory!!!!\n"));
4259 return VERR_NO_MEMORY;
4260 }
4261
4262 cpu.mode = pPatchRec->patch.uOpMode;
4263 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4264 if (disret == false)
4265 {
4266 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4267 return VERR_PATCHING_REFUSED;
4268 }
4269
4270 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4271 if (opsize > MAX_INSTR_SIZE)
4272 return VERR_PATCHING_REFUSED;
4273
4274 pPatchRec->patch.cbPrivInstr = opsize;
4275 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4276
4277 /* Restricted hinting for now. */
4278 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4279
4280 /* Initialize cache record patch pointer. */
4281 cacheRec.pPatch = &pPatchRec->patch;
4282
4283 /* Allocate statistics slot */
4284 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4285 {
4286 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4287 }
4288 else
4289 {
4290 Log(("WARNING: Patch index wrap around!!\n"));
4291 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4292 }
4293
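 /* Dispatch on the requested patch type; each helper generates the patch and sets the final patch state. */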
4294 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4295 {
4296 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4297 }
4298 else
4299 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4300 {
4301 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4302 }
4303 else
4304 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4305 {
4306 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4307 }
4308 else
4309 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4310 {
4311 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4312 }
4313 else
4314 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4315 {
4316 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4317 }
4318 else
4319 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4320 {
4321 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4322 }
4323 else
4324 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4325 {
4326 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4327 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4328
4329 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4330#ifdef VBOX_WITH_STATISTICS
4331 if ( rc == VINF_SUCCESS
4332 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4333 {
4334 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4335 }
4336#endif
4337 }
4338 else
4339 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4340 {
4341 switch (cpu.pCurInstr->opcode)
4342 {
4343 case OP_SYSENTER:
4344 case OP_PUSH:
4345 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4346 if (rc == VINF_SUCCESS)
4347 {
4348 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4350 return rc;
4351 }
4352 break;
4353
4354 default:
4355 rc = VERR_NOT_IMPLEMENTED;
4356 break;
4357 }
4358 }
4359 else
4360 {
4361 switch (cpu.pCurInstr->opcode)
4362 {
4363 case OP_SYSENTER:
4364 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4365 if (rc == VINF_SUCCESS)
4366 {
4367 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4368 return VINF_SUCCESS;
4369 }
4370 break;
4371
4372#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4373 case OP_JO:
4374 case OP_JNO:
4375 case OP_JC:
4376 case OP_JNC:
4377 case OP_JE:
4378 case OP_JNE:
4379 case OP_JBE:
4380 case OP_JNBE:
4381 case OP_JS:
4382 case OP_JNS:
4383 case OP_JP:
4384 case OP_JNP:
4385 case OP_JL:
4386 case OP_JNL:
4387 case OP_JLE:
4388 case OP_JNLE:
4389 case OP_JECXZ:
4390 case OP_LOOP:
4391 case OP_LOOPNE:
4392 case OP_LOOPE:
4393 case OP_JMP:
4394 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4395 {
4396 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4397 break;
4398 }
4399 return VERR_NOT_IMPLEMENTED;
4400#endif
4401
4402 case OP_PUSHF:
4403 case OP_CLI:
4404 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4405 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4406 break;
4407
4408 case OP_STR:
4409 case OP_SGDT:
4410 case OP_SLDT:
4411 case OP_SIDT:
4412 case OP_CPUID:
4413 case OP_LSL:
4414 case OP_LAR:
4415 case OP_SMSW:
4416 case OP_VERW:
4417 case OP_VERR:
4418 case OP_IRET:
4419 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4420 break;
4421
4422 default:
4423 return VERR_NOT_IMPLEMENTED;
4424 }
4425 }
4426
4427 if (rc != VINF_SUCCESS)
4428 {
4429 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4430 {
4431 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4432 pPatchRec->patch.nrPatch2GuestRecs = 0;
4433 }
4434 pVM->patm.s.uCurrentPatchIdx--;
4435 }
4436 else
4437 {
4438 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4439 AssertRCReturn(rc, rc);
4440
4441 /* Keep track upper and lower boundaries of patched instructions */
4442 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4443 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4444 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4445 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4446
4447 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4448 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4449
4450 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4451 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4452
4453 rc = VINF_SUCCESS;
4454
4455 /* Patch hints are not enabled by default; they are only enabled when actually encountered. */
4456 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4457 {
4458 rc = PATMR3DisablePatch(pVM, pInstrGC);
4459 AssertRCReturn(rc, rc);
4460 }
4461
4462#ifdef VBOX_WITH_STATISTICS
4463 /* Register statistics counter */
4464 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4465 {
4466 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4467 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4468#ifndef DEBUG_sandervl
4469 /* Full breakdown for the GUI. */
4470 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4471 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4472 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4473 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4474 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4475 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4476 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4477 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4478 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4479 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4480 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4481 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4482 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4483 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4484 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4485 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4486#endif
4487 }
4488#endif
4489 }
4490 /* Free leftover lock if any. */
4491 if (cacheRec.Lock.pvMap)
4492 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4493 return rc;
4494}
4495
4496/**
4497 * Query instruction size
4498 *
4499 * @returns VBox status code.
4500 * @param pVM The VM to operate on.
4501 * @param pPatch Patch record
4502 * @param pInstrGC Instruction address
4503 */
4504static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4505{
4506 uint8_t *pInstrHC;
4507 PGMPAGEMAPLOCK Lock;
4508
4509 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4510 if (rc == VINF_SUCCESS)
4511 {
4512 DISCPUSTATE cpu;
4513 bool disret;
4514 uint32_t opsize;
4515
4516 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4517 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4518 PGMPhysReleasePageMappingLock(pVM, &Lock);
4519 if (disret)
4520 return opsize;
4521 }
4522 return 0;
4523}
4524
4525/**
4526 * Add patch to page record
4527 *
4528 * @returns VBox status code.
4529 * @param pVM The VM to operate on.
4530 * @param pPage Page address
4531 * @param pPatch Patch record
4532 */
4533int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4534{
4535 PPATMPATCHPAGE pPatchPage;
4536 int rc;
4537
4538 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4539
4540 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4541 if (pPatchPage)
4542 {
4543 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4544 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4545 {
4546 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4547 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4548
4549 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4550 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4551 if (RT_FAILURE(rc))
4552 {
4553 Log(("Out of memory!!!!\n"));
4554 return VERR_NO_MEMORY;
4555 }
4556 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4557 MMHyperFree(pVM, paPatchOld);
4558 }
4559 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4560 pPatchPage->cCount++;
4561 }
4562 else
4563 {
4564 bool fInserted;
4565
4566 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4567 if (RT_FAILURE(rc))
4568 {
4569 Log(("Out of memory!!!!\n"));
4570 return VERR_NO_MEMORY;
4571 }
4572 pPatchPage->Core.Key = pPage;
4573 pPatchPage->cCount = 1;
4574 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4575
4576 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4577 if (RT_FAILURE(rc))
4578 {
4579 Log(("Out of memory!!!!\n"));
4580 MMHyperFree(pVM, pPatchPage);
4581 return VERR_NO_MEMORY;
4582 }
4583 pPatchPage->aPatch[0] = pPatch;
4584
4585 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4586 Assert(fInserted);
4587 pVM->patm.s.cPageRecords++;
4588
4589 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4590 }
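 /* Let CSAM monitor the page so we are notified of guest writes (self-modifying code detection). */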
4591 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4592
4593 /* Get the closest guest instruction (from below) */
4594 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4595 Assert(pGuestToPatchRec);
4596 if (pGuestToPatchRec)
4597 {
4598 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4599 if ( pPatchPage->pLowestAddrGC == 0
4600 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4601 {
4602 RTRCUINTPTR offset;
4603
4604 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4605
4606 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4607 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4608 if (offset && offset < MAX_INSTR_SIZE)
4609 {
4610 /* Get the closest guest instruction (from above) */
4611 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4612
4613 if (pGuestToPatchRec)
4614 {
4615 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4616 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4617 {
4618 pPatchPage->pLowestAddrGC = pPage;
4619 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4620 }
4621 }
4622 }
4623 }
4624 }
4625
4626 /* Get the closest guest instruction (from above) */
4627 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4628 Assert(pGuestToPatchRec);
4629 if (pGuestToPatchRec)
4630 {
4631 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4632 if ( pPatchPage->pHighestAddrGC == 0
4633 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4634 {
4635 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4636 /* Increase by instruction size. */
4637 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4638//// Assert(size);
4639 pPatchPage->pHighestAddrGC += size;
4640 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4641 }
4642 }
4643
4644 return VINF_SUCCESS;
4645}
4646
4647/**
4648 * Remove patch from page record
4649 *
4650 * @returns VBox status code.
4651 * @param pVM The VM to operate on.
4652 * @param pPage Page address
4653 * @param pPatch Patch record
4654 */
4655int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4656{
4657 PPATMPATCHPAGE pPatchPage;
4658 int rc;
4659
4660 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4661 Assert(pPatchPage);
4662
4663 if (!pPatchPage)
4664 return VERR_INVALID_PARAMETER;
4665
4666 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4667
4668 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4669 if (pPatchPage->cCount > 1)
4670 {
4671 uint32_t i;
4672
4673 /* Used by multiple patches */
4674 for (i=0;i<pPatchPage->cCount;i++)
4675 {
4676 if (pPatchPage->aPatch[i] == pPatch)
4677 {
4678 pPatchPage->aPatch[i] = 0;
4679 break;
4680 }
4681 }
4682 /* close the gap between the remaining pointers. */
4683 if (i < pPatchPage->cCount - 1)
4684 {
4685 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4686 }
4687 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4688
4689 pPatchPage->cCount--;
4690 }
4691 else
4692 {
4693 PPATMPATCHPAGE pPatchNode;
4694
4695 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4696
4697 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4698 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4699 Assert(pPatchNode && pPatchNode == pPatchPage);
4700
4701 Assert(pPatchPage->aPatch);
4702 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4703 AssertRC(rc);
4704 rc = MMHyperFree(pVM, pPatchPage);
4705 AssertRC(rc);
4706 pVM->patm.s.cPageRecords--;
4707 }
4708 return VINF_SUCCESS;
4709}
4710
4711/**
4712 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4713 *
4714 * @returns VBox status code.
4715 * @param pVM The VM to operate on.
4716 * @param pPatch Patch record
4717 */
4718int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4719{
4720 int rc;
4721 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4722
4723 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4724 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4725 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4726
4727 /** @todo optimize better (large gaps between current and next used page) */
4728 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4729 {
4730 /* Get the closest guest instruction (from above) */
4731 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4732 if ( pGuestToPatchRec
4733 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4734 )
4735 {
4736 /* Code in page really patched -> add record */
4737 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4738 AssertRC(rc);
4739 }
4740 }
4741 pPatch->flags |= PATMFL_CODE_MONITORED;
4742 return VINF_SUCCESS;
4743}
4744
4745/**
4746 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4747 *
4748 * @returns VBox status code.
4749 * @param pVM The VM to operate on.
4750 * @param pPatch Patch record
4751 */
4752int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4753{
4754 int rc;
4755 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4756
4757 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4758 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4759 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4760
4761 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4762 {
4763 /* Get the closest guest instruction (from above) */
4764 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4765 if ( pGuestToPatchRec
4766 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4767 )
4768 {
4769 /* Code in page really patched -> remove record */
4770 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4771 AssertRC(rc);
4772 }
4773 }
4774 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4775 return VINF_SUCCESS;
4776}
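
/*
 * Illustrative sketch (not from the original source): callers pair
 * patmInsertPatchPages/patmRemovePatchPages and use the PATMFL_CODE_MONITORED
 * flag to decide whether monitoring still needs to be torn down, e.g.:
 *
 *     rc = patmInsertPatchPages(pVM, pPatch);
 *     AssertRC(rc);
 *     ...
 *     if (pPatch->flags & PATMFL_CODE_MONITORED)
 *     {
 *         rc = patmRemovePatchPages(pVM, pPatch);
 *         AssertRC(rc);
 *     }
 */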
4777
4778/**
4779 * Notifies PATM about a (potential) write to code that has been patched.
4780 *
4781 * @returns VBox status code.
4782 * @param pVM The VM to operate on.
4783 * @param GCPtr GC pointer to write address
4784 * @param cbWrite Number of bytes to write
4785 *
4786 */
4787VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4788{
4789 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4790
4791 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4792
4793 Assert(VM_IS_EMT(pVM));
4794
4795 /* Quick boundary check */
4796 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4797 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4798 )
4799 return VINF_SUCCESS;
4800
4801 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4802
4803 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4804 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4805
4806 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4807 {
4808loop_start:
4809 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4810 if (pPatchPage)
4811 {
4812 uint32_t i;
4813 bool fValidPatchWrite = false;
4814
4815 /* Quick check to see if the write is in the patched part of the page */
4816 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4817 || pPatchPage->pHighestAddrGC < GCPtr)
4818 {
4819 break;
4820 }
4821
4822 for (i=0;i<pPatchPage->cCount;i++)
4823 {
4824 if (pPatchPage->aPatch[i])
4825 {
4826 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4827 RTRCPTR pPatchInstrGC;
4828 //unused: bool fForceBreak = false;
4829
4830 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4831 /** @todo inefficient and includes redundant checks for multiple pages. */
4832 for (uint32_t j=0; j<cbWrite; j++)
4833 {
4834 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4835
4836 if ( pPatch->cbPatchJump
4837 && pGuestPtrGC >= pPatch->pPrivInstrGC
4838 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4839 {
4840 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4841 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4842 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4843 if (rc == VINF_SUCCESS)
4844 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4845 goto loop_start;
4846
4847 continue;
4848 }
4849
4850 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4851 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4852 if (!pPatchInstrGC)
4853 {
4854 RTRCPTR pClosestInstrGC;
4855 uint32_t size;
4856
4857 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4858 if (pPatchInstrGC)
4859 {
4860 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4861 Assert(pClosestInstrGC <= pGuestPtrGC);
4862 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4863 /* Check if this is not a write into a gap between two patches */
4864 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4865 pPatchInstrGC = 0;
4866 }
4867 }
4868 if (pPatchInstrGC)
4869 {
4870 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4871
4872 fValidPatchWrite = true;
4873
4874 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4875 Assert(pPatchToGuestRec);
4876 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4877 {
4878 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4879
4880 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4881 {
4882 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4883
4884 PATMR3MarkDirtyPatch(pVM, pPatch);
4885
4886 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4887 goto loop_start;
4888 }
4889 else
4890 {
4891 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4892 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4893
4894 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4895 pPatchToGuestRec->fDirty = true;
4896
4897 *pInstrHC = 0xCC;
4898
4899 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4900 }
4901 }
4902 /* else already marked dirty */
4903 }
4904 }
4905 }
4906 } /* for each patch */
4907
4908 if (fValidPatchWrite == false)
4909 {
4910 /* Write to a part of the page that either:
4911 * - doesn't contain any code (shared code/data); rather unlikely, or
4912 * - belongs to an old code page that's no longer in active use.
4913 */
4914invalid_write_loop_start:
4915 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4916
4917 if (pPatchPage)
4918 {
4919 for (i=0;i<pPatchPage->cCount;i++)
4920 {
4921 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4922
4923 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4924 {
4925 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4926 if (pPatch->flags & PATMFL_IDTHANDLER)
4927 {
4928 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4929
4930 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4931 int rc = patmRemovePatchPages(pVM, pPatch);
4932 AssertRC(rc);
4933 }
4934 else
4935 {
4936 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4937 PATMR3MarkDirtyPatch(pVM, pPatch);
4938 }
4939 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4940 goto invalid_write_loop_start;
4941 }
4942 } /* for */
4943 }
4944 }
4945 }
4946 }
4947 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4948 return VINF_SUCCESS;
4949
4950}
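
/*
 * Illustrative sketch (hypothetical caller; not from the original source):
 * a component that has just detected a guest write of cbWrite bytes at
 * GCPtrWrite (both names made up for this example) would forward it like so:
 *
 *     if (PATMIsEnabled(pVM))
 *     {
 *         int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *         AssertRC(rc);
 *     }
 */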
4951
4952/**
4953 * Disable all patches in a flushed page.
4954 *
4955 * @returns VBox status code
4956 * @param pVM The VM to operate on.
4957 * @param addr GC address of the page to flush
4958 *
4959 * @note Currently only called by CSAMR3FlushPage; an optimization to avoid having to double check if the physical address has changed.
4960 */
4961VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4962{
4963 addr &= PAGE_BASE_GC_MASK;
4964
4965 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4966 if (pPatchPage)
4967 {
4968 int i;
4969
4970 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4971 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4972 {
4973 if (pPatchPage->aPatch[i])
4974 {
4975 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4976
4977 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4978 PATMR3MarkDirtyPatch(pVM, pPatch);
4979 }
4980 }
4981 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4982 }
4983 return VINF_SUCCESS;
4984}
4985
4986/**
4987 * Checks if the instruction at the specified address has already been patched.
4988 *
4989 * @returns boolean, patched or not
4990 * @param pVM The VM to operate on.
4991 * @param pInstrGC Guest context pointer to instruction
4992 */
4993VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4994{
4995 PPATMPATCHREC pPatchRec;
4996 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4997 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4998 return true;
4999 return false;
5000}
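
/*
 * Illustrative sketch (not from the original source): a cheap check before
 * trying to install a new patch; pInstrGC is a hypothetical guest address.
 *
 *     if (!PATMR3HasBeenPatched(pVM, pInstrGC))
 *         rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32);
 */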
5001
5002/**
5003 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
5004 *
5005 * @returns VBox status code.
5006 * @param pVM The VM to operate on.
5007 * @param pInstrGC GC address of the instruction
5008 * @param pByte opcode byte pointer (OUT)
5009 *
5010 */
5011VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5012{
5013 PPATMPATCHREC pPatchRec;
5014
5015 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5016
5017 /* Shortcut. */
5018 if ( !PATMIsEnabled(pVM)
5019 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5020 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5021 {
5022 return VERR_PATCH_NOT_FOUND;
5023 }
5024
5025 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5026 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5027 if ( pPatchRec
5028 && pPatchRec->patch.uState == PATCH_ENABLED
5029 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5030 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5031 {
5032 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5033 *pByte = pPatchRec->patch.aPrivInstr[offset];
5034
5035 if (pPatchRec->patch.cbPatchJump == 1)
5036 {
5037 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5038 }
5039 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5040 return VINF_SUCCESS;
5041 }
5042 return VERR_PATCH_NOT_FOUND;
5043}
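
/*
 * Illustrative sketch (hypothetical caller; not from the original source):
 * code that reads a byte possibly covered by the 5 byte patch jump can ask
 * PATM for the original opcode first and fall back to guest memory otherwise:
 *
 *     uint8_t bOpcode;
 *     if (PATMR3QueryOpcode(pVM, pInstrGC, &bOpcode) != VINF_SUCCESS)
 *         ...read the byte from guest memory instead...
 */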
5044
5045/**
5046 * Disable patch for privileged instruction at specified location
5047 *
5048 * @returns VBox status code.
5049 * @param pVM The VM to operate on.
5050 * @param pInstrGC Guest context pointer to the privileged instruction
5051 *
5052 * @note Returns failure if patching is not allowed or not possible.
5053 *
5054 */
5055VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5056{
5057 PPATMPATCHREC pPatchRec;
5058 PPATCHINFO pPatch;
5059
5060 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5061 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5062 if (pPatchRec)
5063 {
5064 int rc = VINF_SUCCESS;
5065
5066 pPatch = &pPatchRec->patch;
5067
5068 /* Already disabled? */
5069 if (pPatch->uState == PATCH_DISABLED)
5070 return VINF_SUCCESS;
5071
5072 /* Clear the IDT entries for the patch we're disabling. */
5073 /* Note: very important as we clear IF in the patch itself */
5074 /** @todo this needs to be changed */
5075 if (pPatch->flags & PATMFL_IDTHANDLER)
5076 {
5077 uint32_t iGate;
5078
5079 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5080 if (iGate != (uint32_t)~0)
5081 {
5082 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5083 if (++cIDTHandlersDisabled < 256)
5084 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5085 }
5086 }
5087
5088 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, or function/trampoline/IDT patches). */
5089 if ( pPatch->pPatchBlockOffset
5090 && pPatch->uState == PATCH_ENABLED)
5091 {
5092 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5093 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5094 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5095 }
5096
5097 /* IDT or function patches haven't changed any guest code. */
5098 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5099 {
5100 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5101 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5102
5103 if (pPatch->uState != PATCH_REFUSED)
5104 {
5105 uint8_t temp[16];
5106
5107 Assert(pPatch->cbPatchJump < sizeof(temp));
5108
5109 /* Let's first check if the guest code is still the same. */
5110 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5111 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5112 if (rc == VINF_SUCCESS)
5113 {
5114 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5115
5116 if ( temp[0] != 0xE9 /* jmp opcode */
5117 || *(RTRCINTPTR *)(&temp[1]) != displ
5118 )
5119 {
5120 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5121 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5122 /* Remove it completely */
5123 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5124 rc = PATMR3RemovePatch(pVM, pInstrGC);
5125 AssertRC(rc);
5126 return VWRN_PATCH_REMOVED;
5127 }
5128 patmRemoveJumpToPatch(pVM, pPatch);
5129 }
5130 else
5131 {
5132 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5133 pPatch->uState = PATCH_DISABLE_PENDING;
5134 }
5135 }
5136 else
5137 {
5138 AssertMsgFailed(("Patch was refused!\n"));
5139 return VERR_PATCH_ALREADY_DISABLED;
5140 }
5141 }
5142 else
5143 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5144 {
5145 uint8_t temp[16];
5146
5147 Assert(pPatch->cbPatchJump < sizeof(temp));
5148
5149 /* Let's first check if the guest code is still the same. */
5150 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5151 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5152 if (rc == VINF_SUCCESS)
5153 {
5154 if (temp[0] != 0xCC)
5155 {
5156 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5157 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5158 /* Remove it completely */
5159 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5160 rc = PATMR3RemovePatch(pVM, pInstrGC);
5161 AssertRC(rc);
5162 return VWRN_PATCH_REMOVED;
5163 }
5164 patmDeactivateInt3Patch(pVM, pPatch);
5165 }
5166 }
5167
5168 if (rc == VINF_SUCCESS)
5169 {
5170 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5171 if (pPatch->uState == PATCH_DISABLE_PENDING)
5172 {
5173 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5174 pPatch->uState = PATCH_UNUSABLE;
5175 }
5176 else
5177 if (pPatch->uState != PATCH_DIRTY)
5178 {
5179 pPatch->uOldState = pPatch->uState;
5180 pPatch->uState = PATCH_DISABLED;
5181 }
5182 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5183 }
5184
5185 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5186 return VINF_SUCCESS;
5187 }
5188 Log(("Patch not found!\n"));
5189 return VERR_PATCH_NOT_FOUND;
5190}
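
/*
 * Illustrative sketch (not from the original source): callers in this file
 * always check for VWRN_PATCH_REMOVED, since disabling may end up removing
 * the patch entirely when the guest code changed underneath it:
 *
 *     int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
 *     if (rc == VWRN_PATCH_REMOVED)
 *         return VINF_SUCCESS;    // the patch is gone; nothing left to do
 */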
5191
5192/**
5193 * Permanently disable patch for privileged instruction at specified location
5194 *
5195 * @returns VBox status code.
5196 * @param pVM The VM to operate on.
5197 * @param pInstrGC Guest context instruction pointer
5198 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5199 * @param pConflictPatch Conflicting patch
5200 *
5201 */
5202static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5203{
5204 NOREF(pConflictAddr);
5205#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5206 PATCHINFO patch;
5207 DISCPUSTATE cpu;
5208 R3PTRTYPE(uint8_t *) pInstrHC;
5209 uint32_t opsize;
5210 bool disret;
5211 int rc;
5212
5213 RT_ZERO(patch);
5214 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5215 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5216 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5217 /*
5218 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5219 * with one that jumps right into the conflict patch.
5220 * Otherwise we must disable the conflicting patch to avoid serious problems.
5221 */
5222 if ( disret == true
5223 && (pConflictPatch->flags & PATMFL_CODE32)
5224 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5225 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5226 {
5227 /* Hint patches must be enabled first. */
5228 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5229 {
5230 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5231 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5232 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5233 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5234 /* Enabling might fail if the patched code has changed in the meantime. */
5235 if (rc != VINF_SUCCESS)
5236 return rc;
5237 }
5238
5239 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5240 if (RT_SUCCESS(rc))
5241 {
5242 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5243 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5244 return VINF_SUCCESS;
5245 }
5246 }
5247#endif
5248
5249 if (pConflictPatch->opcode == OP_CLI)
5250 {
5251 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5252 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5253 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5254 if (rc == VWRN_PATCH_REMOVED)
5255 return VINF_SUCCESS;
5256 if (RT_SUCCESS(rc))
5257 {
5258 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5259 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5260 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5261 if (rc == VERR_PATCH_NOT_FOUND)
5262 return VINF_SUCCESS; /* removed already */
5263
5264 AssertRC(rc);
5265 if (RT_SUCCESS(rc))
5266 {
5267 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5268 return VINF_SUCCESS;
5269 }
5270 }
5271 /* else turned into unusable patch (see below) */
5272 }
5273 else
5274 {
5275 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5276 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5277 if (rc == VWRN_PATCH_REMOVED)
5278 return VINF_SUCCESS;
5279 }
5280
5281 /* No need to monitor the code anymore. */
5282 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5283 {
5284 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5285 AssertRC(rc);
5286 }
5287 pConflictPatch->uState = PATCH_UNUSABLE;
5288 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5289 return VERR_PATCH_DISABLED;
5290}
5291
5292/**
5293 * Enable patch for privileged instruction at specified location
5294 *
5295 * @returns VBox status code.
5296 * @param pVM The VM to operate on.
5297 * @param pInstrGC Guest context pointer to the privileged instruction
5298 *
5299 * @note Returns failure if patching is not allowed or not possible.
5300 *
5301 */
5302VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5303{
5304 PPATMPATCHREC pPatchRec;
5305 PPATCHINFO pPatch;
5306
5307 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5308 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5309 if (pPatchRec)
5310 {
5311 int rc = VINF_SUCCESS;
5312
5313 pPatch = &pPatchRec->patch;
5314
5315 if (pPatch->uState == PATCH_DISABLED)
5316 {
5317 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5318 {
5319 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5320 uint8_t temp[16];
5321
5322 Assert(pPatch->cbPatchJump < sizeof(temp));
5323
5324 /* Let's first check if the guest code is still the same. */
5325 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5326 AssertRC(rc2);
5327 if (rc2 == VINF_SUCCESS)
5328 {
5329 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5330 {
5331 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5332 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5333 /* Remove it completely */
5334 rc = PATMR3RemovePatch(pVM, pInstrGC);
5335 AssertRC(rc);
5336 return VERR_PATCH_NOT_FOUND;
5337 }
5338
5339 PATMP2GLOOKUPREC cacheRec;
5340 RT_ZERO(cacheRec);
5341 cacheRec.pPatch = pPatch;
5342
5343 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5344 /* Free leftover lock if any. */
5345 if (cacheRec.Lock.pvMap)
5346 {
5347 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5348 cacheRec.Lock.pvMap = NULL;
5349 }
5350 AssertRC(rc2);
5351 if (RT_FAILURE(rc2))
5352 return rc2;
5353
5354#ifdef DEBUG
5355 {
5356 DISCPUSTATE cpu;
5357 char szOutput[256];
5358 uint32_t opsize, i;
5359 bool disret;
5360 i = 0;
5361 while(i < pPatch->cbPatchJump)
5362 {
5363 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5364 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
5365 Log(("Renewed patch instr: %s", szOutput));
5366 i += opsize;
5367 }
5368 }
5369#endif
5370 }
5371 }
5372 else
5373 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5374 {
5375 uint8_t temp[16];
5376
5377 Assert(pPatch->cbPatchJump < sizeof(temp));
5378
5379 /* Let's first check if the guest code is still the same. */
5380 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5381 AssertRC(rc2);
5382
5383 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5384 {
5385 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5386 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5387 rc = PATMR3RemovePatch(pVM, pInstrGC);
5388 AssertRC(rc);
5389 return VERR_PATCH_NOT_FOUND;
5390 }
5391
5392 rc2 = patmActivateInt3Patch(pVM, pPatch);
5393 if (RT_FAILURE(rc2))
5394 return rc2;
5395 }
5396
5397 pPatch->uState = pPatch->uOldState; //restore state
5398
5399 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5400 if (pPatch->pPatchBlockOffset)
5401 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5402
5403 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5404 }
5405 else
5406 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5407
5408 return rc;
5409 }
5410 return VERR_PATCH_NOT_FOUND;
5411}
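
/*
 * Illustrative sketch (not from the original source): a temporary
 * disable/re-enable round trip.  PATMR3EnablePatch restores the state saved
 * in pPatch->uOldState by PATMR3DisablePatch and re-installs the patch jump
 * or int3 as appropriate:
 *
 *     rc = PATMR3DisablePatch(pVM, pInstrGC);
 *     if (rc == VINF_SUCCESS)
 *         rc = PATMR3EnablePatch(pVM, pInstrGC);
 */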
5412
5413/**
5414 * Remove patch for privileged instruction at specified location
5415 *
5416 * @returns VBox status code.
5417 * @param pVM The VM to operate on.
5418 * @param pPatchRec Patch record
5419 * @param fForceRemove Force removal even of referenced patches (duplicated functions/referenced code); otherwise such removals are refused
5420 */
5421int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5422{
5423 PPATCHINFO pPatch;
5424
5425 pPatch = &pPatchRec->patch;
5426
5427 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5428 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5429 {
5430 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5431 return VERR_ACCESS_DENIED;
5432 }
5433 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5434
5435 /* Note: NEVER EVER REUSE PATCH MEMORY */
5436 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5437
5438 if (pPatchRec->patch.pPatchBlockOffset)
5439 {
5440 PAVLOU32NODECORE pNode;
5441
5442 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5443 Assert(pNode);
5444 }
5445
5446 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5447 {
5448 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5449 AssertRC(rc);
5450 }
5451
5452#ifdef VBOX_WITH_STATISTICS
5453 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5454 {
5455 STAMR3Deregister(pVM, &pPatchRec->patch);
5456#ifndef DEBUG_sandervl
5457 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5458 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5459 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5460 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5461 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5462 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5463 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5464 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5465 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5466 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5467 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5468 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5469 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5470 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5471#endif
5472 }
5473#endif
5474
5475 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5476 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5477 pPatch->nrPatch2GuestRecs = 0;
5478 Assert(pPatch->Patch2GuestAddrTree == 0);
5479
5480 patmEmptyTree(pVM, &pPatch->FixupTree);
5481 pPatch->nrFixups = 0;
5482 Assert(pPatch->FixupTree == 0);
5483
5484 if (pPatchRec->patch.pTempInfo)
5485 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5486
5487 /* Note: might fail, because it has already been removed (e.g. during reset). */
5488 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5489
5490 /* Free the patch record */
5491 MMHyperFree(pVM, pPatchRec);
5492 return VINF_SUCCESS;
5493}
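
/*
 * Illustrative sketch (not from the original source): normal removal goes
 * through PATMR3RemovePatch (disable first, then remove); fForceRemove is
 * only set when a freshly recompiled patch replaces the old one, as in
 * patmR3RefreshPatch below:
 *
 *     int rc2 = PATMRemovePatch(pVM, pPatchRec, true);   // force removal
 *     AssertRC(rc2);
 */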
5494
5495/**
5496 * RTAvlU32DoWithAll() worker.
5497 * Checks whether the current trampoline instruction is the jump to the target patch
5498 * and updates the displacement to jump to the new target.
5499 *
5500 * @returns VBox status code.
5501 * @retval VERR_ALREADY_EXISTS if the jump was found.
5502 * @param pNode The current patch to guest record to check.
5503 * @param pvUser The refresh state.
5504 */
5505static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5506{
5507 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5508 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5509 PVM pVM = pRefreshPatchState->pVM;
5510
5511 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5512
5513 /*
5514 * Check if the patch instruction starts with a jump.
5515 * ASSUMES that there is no other patch to guest record that starts
5516 * with a jump.
5517 */
5518 if (*pPatchInstr == 0xE9)
5519 {
5520 /* Jump found, update the displacement. */
5521 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5522 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5523 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5524
5525 LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5526 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5527
5528 *(uint32_t *)&pPatchInstr[1] = displ;
5529 return VERR_ALREADY_EXISTS; /** @todo better return code */
5530 }
5531
5532 return VINF_SUCCESS;
5533}
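
/*
 * Illustrative note (not from the original source): for an E9 rel32 jump the
 * displacement is relative to the end of the 5 byte instruction, i.e.
 *
 *     int32_t displ = pTargetGC - (pJmpInstrGC + SIZEOF_NEARJUMP32);
 *
 * which is the same formula used above and in PATMR3DisablePatch when
 * validating the installed patch jump (pTargetGC/pJmpInstrGC are made-up
 * names for this example).
 */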
5534
5535/**
5536 * Attempt to refresh the patch by recompiling its entire code block
5537 *
5538 * @returns VBox status code.
5539 * @param pVM The VM to operate on.
5540 * @param pPatchRec Patch record
5541 */
5542int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5543{
5544 PPATCHINFO pPatch;
5545 int rc;
5546 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5547 PTRAMPREC pTrampolinePatchesHead = NULL;
5548
5549 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5550
5551 pPatch = &pPatchRec->patch;
5552 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5553 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5554 {
5555 if (!pPatch->pTrampolinePatchesHead)
5556 {
5557 /*
5558 * It is sometimes possible that there are trampoline patches to this patch
5559 * but they are not recorded (after a saved state load for example).
5560 * Refuse to refresh those patches.
5561 * This can hurt performance in theory if the patched code is modified by the
5562 * guest and executed often. However, most of the time the state is saved after
5563 * the guest code was modified and is not updated again afterwards, so this
5564 * shouldn't be a big problem.
5565 */
5566 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5567 return VERR_PATCHING_REFUSED;
5568 }
5569 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5570 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5571 }
5572
5573 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5574
5575 rc = PATMR3DisablePatch(pVM, pInstrGC);
5576 AssertRC(rc);
5577
5578 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5579 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5580#ifdef VBOX_WITH_STATISTICS
5581 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5582 {
5583 STAMR3Deregister(pVM, &pPatchRec->patch);
5584#ifndef DEBUG_sandervl
5585 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5586 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5587 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5588 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5589 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5590 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5591 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5592 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5593 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5594 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5595 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5596 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5597 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5598 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5599#endif
5600 }
5601#endif
5602
5603 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5604
5605 /* Attempt to install a new patch. */
5606 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5607 if (RT_SUCCESS(rc))
5608 {
5609 RTRCPTR pPatchTargetGC;
5610 PPATMPATCHREC pNewPatchRec;
5611
5612 /* Determine target address in new patch */
5613 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5614 Assert(pPatchTargetGC);
5615 if (!pPatchTargetGC)
5616 {
5617 rc = VERR_PATCHING_REFUSED;
5618 goto failure;
5619 }
5620
5621 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5622 pPatch->uCurPatchOffset = 0;
5623
5624 /* insert jump to new patch in old patch block */
5625 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5626 if (RT_FAILURE(rc))
5627 goto failure;
5628
5629 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5630 Assert(pNewPatchRec); /* can't fail */
5631
5632 /* Remove old patch (only do that when everything is finished) */
5633 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5634 AssertRC(rc2);
5635
5636 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5637 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5638 Assert(fInserted); NOREF(fInserted);
5639
5640 LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5641 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5642
5643 /* Used by another patch, so don't remove it! */
5644 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5645
5646 if (pTrampolinePatchesHead)
5647 {
5648 /* Update all trampoline patches to jump to the new patch. */
5649 PTRAMPREC pTrampRec = NULL;
5650 PATMREFRESHPATCH RefreshPatch;
5651
5652 RefreshPatch.pVM = pVM;
5653 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5654
5655 pTrampRec = pTrampolinePatchesHead;
5656
5657 while (pTrampRec)
5658 {
5659 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5660
5661 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5662 /*
5663 * We have to find the right patch2guest record because there might be others
5664 * for statistics.
5665 */
5666 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5667 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5668 Assert(rc == VERR_ALREADY_EXISTS);
5669 rc = VINF_SUCCESS;
5670 pTrampRec = pTrampRec->pNext;
5671 }
5672 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5673 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5674 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5675 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5676 }
5677 }
5678
5679failure:
5680 if (RT_FAILURE(rc))
5681 {
5682 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5683
5684 /* Remove the new inactive patch */
5685 rc = PATMR3RemovePatch(pVM, pInstrGC);
5686 AssertRC(rc);
5687
5688 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5689 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5690 Assert(fInserted); NOREF(fInserted);
5691
5692 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5693 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5694 AssertRC(rc2);
5695
5696 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5697 }
5698 return rc;
5699}
5700
5701/**
5702 * Find patch for privileged instruction at specified location
5703 *
5704 * @returns Patch structure pointer if found; else NULL
5705 * @param pVM The VM to operate on.
5706 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5707 * @param fIncludeHints Include hinted patches or not
5708 *
5709 */
5710PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5711{
5712 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5713 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5714 if (pPatchRec)
5715 {
5716 if ( pPatchRec->patch.uState == PATCH_ENABLED
5717 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5718 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5719 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5720 {
5721 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5722 return &pPatchRec->patch;
5723 }
5724 else
5725 if ( fIncludeHints
5726 && pPatchRec->patch.uState == PATCH_DISABLED
5727 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5728 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5729 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5730 {
5731 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5732 return &pPatchRec->patch;
5733 }
5734 }
5735 return NULL;
5736}
5737
5738/**
5739 * Checks whether the GC address is inside a generated patch jump
5740 *
5741 * @returns true -> yes, false -> no
5742 * @param pVM The VM to operate on.
5743 * @param pAddr Guest context address
5744 * @param pPatchAddr Guest context patch address (if true)
5745 */
5746VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5747{
5748 RTRCPTR addr;
5749 PPATCHINFO pPatch;
5750
5751 if (PATMIsEnabled(pVM) == false)
5752 return false;
5753
5754 if (pPatchAddr == NULL)
5755 pPatchAddr = &addr;
5756
5757 *pPatchAddr = 0;
5758
5759 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5760 if (pPatch)
5761 *pPatchAddr = pPatch->pPrivInstrGC;
5762
5763 return *pPatchAddr == 0 ? false : true;
5764}
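
/*
 * Illustrative sketch (hypothetical caller; not from the original source):
 *
 *     RTGCPTR32 pPatchStart;
 *     if (PATMR3IsInsidePatchJump(pVM, pFaultAddrGC, &pPatchStart))
 *         Log(("Fault inside the patch jump of patch at %RRv\n", pPatchStart));
 */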
5765
5766/**
5767 * Remove patch for privileged instruction at specified location
5768 *
5769 * @returns VBox status code.
5770 * @param pVM The VM to operate on.
5771 * @param pInstrGC Guest context pointer to the privileged instruction
5772 *
5773 * @note Returns failure if patching is not allowed or not possible.
5774 *
5775 */
5776VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5777{
5778 PPATMPATCHREC pPatchRec;
5779
5780 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5781 if (pPatchRec)
5782 {
5783 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5784 if (rc == VWRN_PATCH_REMOVED)
5785 return VINF_SUCCESS;
5786
5787 return PATMRemovePatch(pVM, pPatchRec, false);
5788 }
5789 AssertFailed();
5790 return VERR_PATCH_NOT_FOUND;
5791}
5792
5793/**
5794 * Mark patch as dirty
5795 *
5796 * @returns VBox status code.
5797 * @param pVM The VM to operate on.
5798 * @param pPatch Patch record
5799 *
5800 * @note Returns failure if patching is not allowed or not possible.
5801 *
5802 */
5803VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5804{
5805 if (pPatch->pPatchBlockOffset)
5806 {
5807 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5808 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5809 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5810 }
5811
5812 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5813 /* Put back the replaced instruction. */
5814 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5815 if (rc == VWRN_PATCH_REMOVED)
5816 return VINF_SUCCESS;
5817
5818 /* Note: we don't restore patch pages for patches that are not enabled! */
5819 /* Note: be careful when changing this behaviour!! */
5820
5821 /* The patch pages are no longer marked for self-modifying code detection */
5822 if (pPatch->flags & PATMFL_CODE_MONITORED)
5823 {
5824 rc = patmRemovePatchPages(pVM, pPatch);
5825 AssertRCReturn(rc, rc);
5826 }
5827 pPatch->uState = PATCH_DIRTY;
5828
5829 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5830 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5831
5832 return VINF_SUCCESS;
5833}
5834
5835/**
5836 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5837 *
5838 * @returns VBox status code.
5839 * @param pVM The VM to operate on.
5840 * @param pPatch Patch block structure pointer
5841 * @param pPatchGC GC address in patch block
5842 */
5843RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5844{
5845 Assert(pPatch->Patch2GuestAddrTree);
5846 /* Get the closest record from below. */
5847 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5848 if (pPatchToGuestRec)
5849 return pPatchToGuestRec->pOrgInstrGC;
5850
5851 return 0;
5852}
5853
5854/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5855 *
5856 * @returns corresponding GC pointer in patch block
5857 * @param pVM The VM to operate on.
5858 * @param pPatch Current patch block pointer
5859 * @param pInstrGC Guest context pointer to privileged instruction
5860 *
5861 */
5862RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5863{
5864 if (pPatch->Guest2PatchAddrTree)
5865 {
5866 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5867 if (pGuestToPatchRec)
5868 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5869 }
5870
5871 return 0;
5872}
5873
5874/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5875 *
5876 * @returns corresponding GC pointer in patch block
5877 * @param pVM The VM to operate on.
5878 * @param pPatch Current patch block pointer
5879 * @param pInstrGC Guest context pointer to privileged instruction
5880 *
5881 */
5882RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5883{
5884 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5885 if (pGuestToPatchRec)
5886 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5887
5888 return 0;
5889}
5890
5891/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5892 *
5893 * @returns corresponding GC pointer in patch block
5894 * @param pVM The VM to operate on.
5895 * @param pInstrGC Guest context pointer to privileged instruction
5896 *
5897 */
5898VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5899{
5900 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5901 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5902 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5903 else
5904 return 0;
5905}
5906
5907/**
5908 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5909 *
5910 * @returns original GC instruction pointer or 0 if not found
5911 * @param pVM The VM to operate on.
5912 * @param pPatchGC GC address in patch block
5913 * @param pEnmState State of the translated address (out)
5914 *
5915 */
5916VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5917{
5918 PPATMPATCHREC pPatchRec;
5919 void *pvPatchCoreOffset;
5920 RTRCPTR pPrivInstrGC;
5921
5922 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5923 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5924 if (pvPatchCoreOffset == 0)
5925 {
5926 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5927 return 0;
5928 }
5929 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5930 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5931 if (pEnmState)
5932 {
5933 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5934 || pPatchRec->patch.uState == PATCH_DIRTY
5935 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5936 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5937 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5938
5939 if ( !pPrivInstrGC
5940 || pPatchRec->patch.uState == PATCH_UNUSABLE
5941 || pPatchRec->patch.uState == PATCH_REFUSED)
5942 {
5943 pPrivInstrGC = 0;
5944 *pEnmState = PATMTRANS_FAILED;
5945 }
5946 else
5947 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5948 {
5949 *pEnmState = PATMTRANS_INHIBITIRQ;
5950 }
5951 else
5952 if ( pPatchRec->patch.uState == PATCH_ENABLED
5953 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5954 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5955 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5956 {
5957 *pEnmState = PATMTRANS_OVERWRITTEN;
5958 }
5959 else
5960 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5961 {
5962 *pEnmState = PATMTRANS_OVERWRITTEN;
5963 }
5964 else
5965 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5966 {
5967 *pEnmState = PATMTRANS_PATCHSTART;
5968 }
5969 else
5970 *pEnmState = PATMTRANS_SAFE;
5971 }
5972 return pPrivInstrGC;
5973}
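
/*
 * Illustrative sketch (not from the original source): translating a guest
 * address into patch code and back again; pInstrGC is a hypothetical guest
 * address that has been patched.
 *
 *     RTRCPTR pPatchGC = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC);
 *     if (pPatchGC)
 *     {
 *         PATMTRANSSTATE enmState;
 *         RTRCPTR pOrgGC = PATMR3PatchToGCPtr(pVM, pPatchGC, &enmState);
 *         Log(("patch %RRv maps back to guest %RRv (state %d)\n", pPatchGC, pOrgGC, enmState));
 *     }
 */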
5974
5975/**
5976 * Returns the GC pointer of the patch for the specified GC address
5977 *
5978 * @returns GC pointer of the patch code, or 0 if no enabled or dirty patch exists for the address.
5979 * @param pVM The VM to operate on.
5980 * @param pAddrGC Guest context address
5981 */
5982VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5983{
5984 PPATMPATCHREC pPatchRec;
5985
5986 /* Find the patch record. */
5987 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5988 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5989 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5990 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5991 else
5992 return 0;
5993}
5994
5995/**
5996 * Attempt to recover dirty instructions
5997 *
5998 * @returns VBox status code.
5999 * @param pVM The VM to operate on.
6000 * @param pCtx CPU context
6001 * @param pPatch Patch record
6002 * @param pPatchToGuestRec Patch to guest address record
6003 * @param pEip GC pointer of trapping instruction
6004 */
6005static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6006{
6007 DISCPUSTATE CpuOld, CpuNew;
6008 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6009 int rc;
6010 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6011 uint32_t cbDirty;
6012 PRECPATCHTOGUEST pRec;
6013 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6014 PVMCPU pVCpu = VMMGetCpu0(pVM);
6015 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6016
6017 pRec = pPatchToGuestRec;
6018 pCurInstrGC = pOrgInstrGC;
6019 pCurPatchInstrGC = pEip;
6020 cbDirty = 0;
6021 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6022
6023 /* Find all adjacent dirty instructions */
6024 while (true)
6025 {
6026 if (pRec->fJumpTarget)
6027 {
6028 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6029 pRec->fDirty = false;
6030 return VERR_PATCHING_REFUSED;
6031 }
6032
6033 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6034 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6035 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6036
6037 /* Only harmless instructions are acceptable. */
6038 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6039 if ( RT_FAILURE(rc)
6040 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
6041 {
6042 if (RT_SUCCESS(rc))
6043 cbDirty += CpuOld.opsize;
6044 else
6045 if (!cbDirty)
6046 cbDirty = 1;
6047 break;
6048 }
6049
6050#ifdef DEBUG
6051 char szBuf[256];
6052 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6053 szBuf, sizeof(szBuf), NULL);
6054 Log(("DIRTY: %s\n", szBuf));
6055#endif
6056 /* Mark as clean; if we fail we'll let it always fault. */
6057 pRec->fDirty = false;
6058
6059 /* Remove old lookup record. */
6060 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6061 pPatchToGuestRec = NULL;
6062
6063 pCurPatchInstrGC += CpuOld.opsize;
6064 cbDirty += CpuOld.opsize;
6065
6066 /* Let's see if there's another dirty instruction right after. */
6067 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6068 if (!pRec || !pRec->fDirty)
6069 break; /* no more dirty instructions */
6070
6071 /* In case of complex instructions the next guest instruction could be quite far off. */
6072 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6073 }
6074
6075 if ( RT_SUCCESS(rc)
6076 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
6077 )
6078 {
6079 uint32_t cbLeft;
6080
6081 pCurPatchInstrHC = pPatchInstrHC;
6082 pCurPatchInstrGC = pEip;
6083 cbLeft = cbDirty;
6084
6085 while (cbLeft && RT_SUCCESS(rc))
6086 {
6087 bool fValidInstr;
6088
6089 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6090
6091 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
6092 if ( !fValidInstr
6093 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
6094 )
6095 {
6096 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6097
6098 if ( pTargetGC >= pOrgInstrGC
6099 && pTargetGC <= pOrgInstrGC + cbDirty
6100 )
6101 {
6102 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6103 fValidInstr = true;
6104 }
6105 }
6106
6107 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6108 if ( rc == VINF_SUCCESS
6109 && CpuNew.opsize <= cbLeft /* must still fit */
6110 && fValidInstr
6111 )
6112 {
6113#ifdef DEBUG
6114 char szBuf[256];
6115 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6116 szBuf, sizeof(szBuf), NULL);
6117 Log(("NEW: %s\n", szBuf));
6118#endif
6119
6120 /* Copy the new instruction. */
6121 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
6122 AssertRC(rc);
6123
6124 /* Add a new lookup record for the duplicated instruction. */
6125 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6126 }
6127 else
6128 {
6129#ifdef DEBUG
6130 char szBuf[256];
6131 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6132 szBuf, sizeof(szBuf), NULL);
6133 Log(("NEW: %s (FAILED)\n", szBuf));
6134#endif
6135 /* Restore the old lookup record for the duplicated instruction. */
6136 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6137
6138 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6139 rc = VERR_PATCHING_REFUSED;
6140 break;
6141 }
6142 pCurInstrGC += CpuNew.opsize;
6143 pCurPatchInstrHC += CpuNew.opsize;
6144 pCurPatchInstrGC += CpuNew.opsize;
6145 cbLeft -= CpuNew.opsize;
6146
6147 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6148 if (!cbLeft)
6149 {
6150 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6151 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6152 {
6153 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6154 if (pRec)
6155 {
6156 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6157 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6158
6159 Assert(!pRec->fDirty);
6160
6161 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6162 if (cbFiller >= SIZEOF_NEARJUMP32)
6163 {
6164 pPatchFillHC[0] = 0xE9;
6165 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6166#ifdef DEBUG
6167 char szBuf[256];
6168 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6169 szBuf, sizeof(szBuf), NULL);
6170 Log(("FILL: %s\n", szBuf));
6171#endif
6172 }
6173 else
6174 {
6175 for (unsigned i = 0; i < cbFiller; i++)
6176 {
6177 pPatchFillHC[i] = 0x90; /* NOP */
6178#ifdef DEBUG
6179 char szBuf[256];
6180 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6181 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6182 Log(("FILL: %s\n", szBuf));
6183#endif
6184 }
6185 }
6186 }
6187 }
6188 }
6189 }
6190 }
6191 else
6192 rc = VERR_PATCHING_REFUSED;
6193
6194 if (RT_SUCCESS(rc))
6195 {
6196 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6197 }
6198 else
6199 {
6200 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6201 Assert(cbDirty);
6202
6203 /* Mark the whole instruction stream with breakpoints. */
6204 if (cbDirty)
6205 memset(pPatchInstrHC, 0xCC, cbDirty);
6206
6207 if ( pVM->patm.s.fOutOfMemory == false
6208 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6209 {
6210 rc = patmR3RefreshPatch(pVM, pPatch);
6211 if (RT_FAILURE(rc))
6212 {
6213 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6214 }
6215 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6216 rc = VERR_PATCHING_REFUSED;
6217 }
6218 }
6219 return rc;
6220}
6221
6222/**
6223 * Handle trap inside patch code
6224 *
6225 * @returns VBox status code.
6226 * @param pVM The VM to operate on.
6227 * @param pCtx CPU context
6228 * @param pEip GC pointer of trapping instruction
6229 * @param ppNewEip GC pointer to new instruction
6230 */
6231VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6232{
6233 PPATMPATCHREC pPatch = 0;
6234 void *pvPatchCoreOffset;
6235 RTRCUINTPTR offset;
6236 RTRCPTR pNewEip;
6237 int rc;
6238 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6239 PVMCPU pVCpu = VMMGetCpu0(pVM);
6240
6241 Assert(pVM->cCpus == 1);
6242
6243 pNewEip = 0;
6244 *ppNewEip = 0;
6245
6246 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6247
6248 /* Find the patch record. */
6249 /* Note: there might not be a patch to guest translation record (global function) */
6250 offset = pEip - pVM->patm.s.pPatchMemGC;
6251 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6252 if (pvPatchCoreOffset)
6253 {
6254 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6255
6256 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6257
6258 if (pPatch->patch.uState == PATCH_DIRTY)
6259 {
6260 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6261 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6262 {
6263 /* Function duplication patches set fPIF to 1 on entry */
6264 pVM->patm.s.pGCStateHC->fPIF = 1;
6265 }
6266 }
6267 else
6268 if (pPatch->patch.uState == PATCH_DISABLED)
6269 {
6270 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6271 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6272 {
6273 /* Function duplication patches set fPIF to 1 on entry */
6274 pVM->patm.s.pGCStateHC->fPIF = 1;
6275 }
6276 }
6277 else
6278 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6279 {
6280 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6281
6282 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6283 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6284 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6285 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6286 }
6287
6288 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6289 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6290
6291 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6292 pPatch->patch.cTraps++;
6293 PATM_STAT_FAULT_INC(&pPatch->patch);
6294 }
6295 else
6296 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6297
6298 /* Check if we were interrupted in PATM generated instruction code. */
6299 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6300 {
6301 DISCPUSTATE Cpu;
6302 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6303 AssertRC(rc);
6304
6305 if ( rc == VINF_SUCCESS
6306 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6307 || Cpu.pCurInstr->opcode == OP_PUSH
6308 || Cpu.pCurInstr->opcode == OP_CALL)
6309 )
6310 {
6311 uint64_t fFlags;
6312
6313 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6314
6315 if (Cpu.pCurInstr->opcode == OP_PUSH)
6316 {
6317 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6318 if ( rc == VINF_SUCCESS
6319 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6320 {
6321 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6322
6323 /* Reset the PATM stack. */
6324 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6325
6326 pVM->patm.s.pGCStateHC->fPIF = 1;
6327
6328 Log(("Faulting push -> go back to the original instruction\n"));
6329
6330 /* continue at the original instruction */
6331 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6332 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6333 return VINF_SUCCESS;
6334 }
6335 }
6336
6337 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6338 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6339 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6340 if (rc == VINF_SUCCESS)
6341 {
6342 /* The guest page *must* be present. */
6343 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6344 if ( rc == VINF_SUCCESS
6345 && (fFlags & X86_PTE_P))
6346 {
6347 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6348 return VINF_PATCH_CONTINUE;
6349 }
6350 }
6351 }
6352 else
6353        if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6354 {
6355 /* Invalidated patch or first instruction overwritten.
6356 * We can ignore the fPIF state in this case.
6357 */
6358 /* Reset the PATM stack. */
6359 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6360
6361 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6362
6363 pVM->patm.s.pGCStateHC->fPIF = 1;
6364
6365 /* continue at the original instruction */
6366 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6367 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6368 return VINF_SUCCESS;
6369 }
6370
6371 char szBuf[256];
6372 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6373
6374 /* Very bad. We crashed in emitted code. Probably stack? */
6375 if (pPatch)
6376 {
6377 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6378 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6379 }
6380 else
6381 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6382 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6383 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6384 }
6385
6386 /* From here on, we must have a valid patch to guest translation. */
6387 if (pvPatchCoreOffset == 0)
6388 {
6389 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6390 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6391 return VERR_PATCH_NOT_FOUND;
6392 }
6393
6394 /* Take care of dirty/changed instructions. */
6395 if (pPatchToGuestRec->fDirty)
6396 {
6397 Assert(pPatchToGuestRec->Core.Key == offset);
6398 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6399
6400 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6401 if (RT_SUCCESS(rc))
6402 {
6403 /* Retry the current instruction. */
6404 pNewEip = pEip;
6405 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6406 }
6407 else
6408 {
6409 /* Reset the PATM stack. */
6410 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6411
6412 rc = VINF_SUCCESS; /* Continue at original instruction. */
6413 }
6414
6415 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6416 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6417 return rc;
6418 }
6419
6420#ifdef VBOX_STRICT
6421 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6422 {
6423 DISCPUSTATE cpu;
6424 bool disret;
6425 uint32_t opsize;
6426 PATMP2GLOOKUPREC cacheRec;
6427 RT_ZERO(cacheRec);
6428 cacheRec.pPatch = &pPatch->patch;
6429
6430 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6431 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6432 if (cacheRec.Lock.pvMap)
6433 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6434
6435 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6436 {
6437 RTRCPTR retaddr;
6438 PCPUMCTX pCtx2;
6439
6440 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6441
6442 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6443 AssertRC(rc);
6444
6445 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6446 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6447 }
6448 }
6449#endif
6450
6451 /* Return original address, correct by subtracting the CS base address. */
6452 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6453
6454 /* Reset the PATM stack. */
6455 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6456
6457 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6458 {
6459 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6460 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6461#ifdef VBOX_STRICT
6462 DISCPUSTATE cpu;
6463 bool disret;
6464 uint32_t opsize;
6465 PATMP2GLOOKUPREC cacheRec;
6466 RT_ZERO(cacheRec);
6467 cacheRec.pPatch = &pPatch->patch;
6468
6469 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6470 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6471 if (cacheRec.Lock.pvMap)
6472 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6473
6474 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6475 {
6476 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6477 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6478 if (cacheRec.Lock.pvMap)
6479 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6480
6481 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6482 }
6483#endif
6484 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6485 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6486 }
6487
6488 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6489#ifdef LOG_ENABLED
6490 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6491#endif
6492 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6493 {
6494 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6495        Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6496 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6497 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6498 return VERR_PATCH_DISABLED;
6499 }
6500
6501#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6502 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6503 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6504 {
6505 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6506 //we are only wasting time, back out the patch
6507 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6509 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6510 return VERR_PATCH_DISABLED;
6511 }
6512#endif
6513
6514 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6515 return VINF_SUCCESS;
6516}
6517
6518
6519/**
6520 * Handle a page fault in a monitored page.
6521 *
6522 * @returns VBox status code.
6523 * @param pVM The VM to operate on.
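 *
 * @remarks The faulting address is taken from pVM->patm.s.pvFaultMonitor. All
 *          enabled patches whose privileged instruction lives in that guest page
 *          are disabled and immediately re-enabled so the patch and its page
 *          monitoring are refreshed.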
6524 */
6525VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6526{
6527 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6528
6529 addr &= PAGE_BASE_GC_MASK;
6530
6531 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6532 AssertRC(rc); NOREF(rc);
6533
6534 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6535 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6536 {
6537 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6538 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6539 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6540 if (rc == VWRN_PATCH_REMOVED)
6541 return VINF_SUCCESS;
6542
6543 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6544
6545 if (addr == pPatchRec->patch.pPrivInstrGC)
6546 addr++;
6547 }
6548
6549 for(;;)
6550 {
6551 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6552
6553 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6554 break;
6555
6556 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6557 {
6558 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6559 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6560 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6561 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6562 }
6563 addr = pPatchRec->patch.pPrivInstrGC + 1;
6564 }
6565
6566 pVM->patm.s.pvFaultMonitor = 0;
6567 return VINF_SUCCESS;
6568}
6569
6570
6571#ifdef VBOX_WITH_STATISTICS
6572
6573static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6574{
6575 if (pPatch->flags & PATMFL_SYSENTER)
6576 {
6577 return "SYSENT";
6578 }
6579 else
6580 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6581 {
6582 static char szTrap[16];
6583 uint32_t iGate;
6584
6585 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6586 if (iGate < 256)
6587 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6588 else
6589 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6590 return szTrap;
6591 }
6592 else
6593 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6594 return "DUPFUNC";
6595 else
6596 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6597 return "FUNCCALL";
6598 else
6599 if (pPatch->flags & PATMFL_TRAMPOLINE)
6600 return "TRAMP";
6601 else
6602 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6603}
6604
6605static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6606{
6607 NOREF(pVM);
6608 switch(pPatch->uState)
6609 {
6610 case PATCH_ENABLED:
6611 return "ENA";
6612 case PATCH_DISABLED:
6613 return "DIS";
6614 case PATCH_DIRTY:
6615 return "DIR";
6616 case PATCH_UNUSABLE:
6617 return "UNU";
6618 case PATCH_REFUSED:
6619 return "REF";
6620 case PATCH_DISABLE_PENDING:
6621 return "DIP";
6622 default:
6623 AssertFailed();
6624 return " ";
6625 }
6626}
6627
6628/**
6629 * Resets the sample.
6630 * @param pVM The VM handle.
6631 * @param pvSample The sample registered using STAMR3RegisterCallback.
6632 */
6633static void patmResetStat(PVM pVM, void *pvSample)
6634{
6635 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6636 Assert(pPatch);
6637
6638 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6639 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6640}
6641
6642/**
6643 * Prints the sample into the buffer.
6644 *
6645 * @param pVM The VM handle.
6646 * @param pvSample The sample registered using STAMR3RegisterCallback.
6647 * @param pszBuf The buffer to print into.
6648 * @param cchBuf The size of the buffer.
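 *
 * @remarks Produces a line along the lines of (values illustrative)
 *          "size 004c ->ENA  DUPFUNC - 00000018 - 00000003": patch block size,
 *          patch state, patch type and the u32A/u32B counters of the sample.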
6649 */
6650static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6651{
6652 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6653 Assert(pPatch);
6654
6655 Assert(pPatch->uState != PATCH_REFUSED);
6656 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6657
6658 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6659 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6660 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6661}
6662
6663/**
6664 * Returns the GC address of the corresponding patch statistics counter
6665 *
6666 * @returns Stat address
6667 * @param pVM The VM to operate on.
6668 * @param pPatch Patch structure
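 *
 * @remarks The counters live in a flat array in guest context, so the address is
 *          pStatsGC + uPatchIdx * sizeof(STAMRATIOU32) plus the offset of u32A.
 *          E.g. (illustrative, assuming an 8 byte STAMRATIOU32 with u32A first)
 *          uPatchIdx = 3 gives pStatsGC + 24.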
6669 */
6670RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6671{
6672 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6673 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6674}
6675
6676#endif /* VBOX_WITH_STATISTICS */
6677
6678#ifdef VBOX_WITH_DEBUGGER
6679/**
6680 * The '.patmoff' command.
6681 *
6682 * @returns VBox status.
6683 * @param pCmd Pointer to the command descriptor (as registered).
6684 * @param pCmdHlp Pointer to command helper functions.
6685 * @param pVM Pointer to the current VM (if any).
6686 * @param paArgs Pointer to (readonly) array of arguments.
6687 * @param cArgs Number of arguments in the array.
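 *
 * @remarks Illustrative debugger console session (the prompt shown is a guess):
 * @code
 *      VBoxDbg> .patmoff
 *      Patching disabled
 * @endcode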
6688 */
6689static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6690{
6691 /*
6692 * Validate input.
6693 */
6694 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6695 if (!pVM)
6696        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6697
6698 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6699 PATMR3AllowPatching(pVM, false);
6700 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6701}
6702
6703/**
6704 * The '.patmon' command.
6705 *
6706 * @returns VBox status.
6707 * @param pCmd Pointer to the command descriptor (as registered).
6708 * @param pCmdHlp Pointer to command helper functions.
6709 * @param pVM Pointer to the current VM (if any).
6710 * @param paArgs Pointer to (readonly) array of arguments.
6711 * @param cArgs Number of arguments in the array.
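 *
 * @remarks Illustrative debugger console session (the prompt shown is a guess):
 * @code
 *      VBoxDbg> .patmon
 *      Patching enabled
 * @endcode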
6712 */
6713static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6714{
6715 /*
6716 * Validate input.
6717 */
6718 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6719 if (!pVM)
6720        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6721
6722 PATMR3AllowPatching(pVM, true);
6723 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6724 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6725}
6726#endif