VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 36054

Last change on this file since 36054 was 35696, checked in by vboxsync, 14 years ago

PCDBGCCMD & PFNDBGCCMD: Drop the return type & variable. Functions will be added separately from commands (superset of DBGCCMD).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 247.3 KB
Line 
1/* $Id: PATM.cpp 35696 2011-01-24 18:03:33Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/*******************************************************************************
58* Internal Functions *
59*******************************************************************************/
60
61static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
62static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
63static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
64
65#ifdef LOG_ENABLED // keep gcc quiet
66static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
67#endif
68#ifdef VBOX_WITH_STATISTICS
69static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
70static void patmResetStat(PVM pVM, void *pvSample);
71static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
72#endif
73
/** Translates a ring-3 (HC) pointer inside the patch memory block to the corresponding raw-mode context (GC) address. */
#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
/** Translates a raw-mode context (GC) address inside the patch memory block back to the ring-3 (HC) pointer. */
#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
76
77static int patmReinit(PVM pVM);
78static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
79
80#ifdef VBOX_WITH_DEBUGGER
81static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
82static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
83static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
84
/** Command descriptors for the debugger console ('patmon'/'patmoff'). */
static const DBGCCMD    g_aCmds[] =
{
    /* pszCmd,   cArgsMin, cArgsMax, paArgDescs, cArgDescs, fFlags, pfnHandler,   pszSyntax, pszDescription */
    { "patmon",  0,        0,        NULL,       0,         0,      patmr3CmdOn,  "",        "Enable patching."  },
    { "patmoff", 0,        0,        NULL,       0,         0,      patmr3CmdOff, "",        "Disable patching." },
};
92#endif
93
94/* Don't want to break saved states, so put it here as a global variable. */
95static unsigned int cIDTHandlersDisabled = 0;
96
/**
 * Initializes the PATM.
 *
 * Allocates the patch memory, the private GC stack, the shared GC state page
 * and the statistics area in one hypervisor allocation, performs the first
 * patmReinit(), registers the saved-state handlers and - when enabled - the
 * debugger commands and STAM counters.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) PATMR3Init(PVM pVM)
{
    int rc;

    Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));

    /* These values can't change as they are hardcoded in patch code (old saved states!) */
    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
    AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));

    AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
                     ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));

    /* Allocate patch memory and GC patch state memory. */
    pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
    /* Add another page in case the generated code is much larger than expected. */
    /** @todo bad safety precaution */
    /* Single allocation layout (see the carve-up below):
     *   patch memory | slack page | GC stack | GC state page | statistics. */
    rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
    if (RT_FAILURE(rc))
    {
        Log(("MMHyperAlloc failed with %Rrc\n", rc));
        return rc;
    }
    pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

    /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
    pVM->patm.s.pGCStackHC  = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
    pVM->patm.s.pGCStackGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

    /*
     * Hypervisor memory for GC status data (read/write)
     *
     * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
     * Note2: This doesn't really belong here, but we need access to it for relocation purposes
     *
     */
    Assert(sizeof(PATMGCSTATE) < PAGE_SIZE);    /* Note: hardcoded dependencies on this exist. */
    pVM->patm.s.pGCStateHC  = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
    pVM->patm.s.pGCStateGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);

    /* Hypervisor memory for patch statistics */
    pVM->patm.s.pStatsHC  = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
    pVM->patm.s.pStatsGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

    /* Memory for patch lookup trees. */
    rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
    AssertRCReturn(rc, rc);
    pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
    /* Check CFGM option. */
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
    if (RT_FAILURE(rc))
# ifdef PATM_DISABLE_ALL
        pVM->fPATMEnabled = false;
# else
        pVM->fPATMEnabled = true;
# endif
#endif

    rc = patmReinit(pVM);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register save and load state notifiers.
     */
    /* Saved-state size covers the PATM instance data plus patch memory, GC state page and stack. */
    rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE  + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
                               NULL, NULL, NULL,
                               NULL, patmR3Save, NULL,
                               NULL, patmR3Load, NULL);
    AssertRCReturn(rc, rc);

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    /* Register the commands only once per process, not per VM. */
    static bool s_fRegisteredCmds = false;
    if (!s_fRegisteredCmds)
    {
        int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc2))
            s_fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead,  STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead",      STAMUNIT_OCCURENCES,     "The number of opcode bytes read by the recompiler.");
    STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed",           STAMUNIT_OCCURENCES,     "The amount of hypervisor heap used for patches.");
    STAM_REG(pVM, &pVM->patm.s.StatDisabled,      STAMTYPE_COUNTER, "/PATM/Patch/Disabled",        STAMUNIT_OCCURENCES,     "Number of times patches were disabled.");
    STAM_REG(pVM, &pVM->patm.s.StatEnabled,       STAMTYPE_COUNTER, "/PATM/Patch/Enabled",         STAMUNIT_OCCURENCES,     "Number of times patches were enabled.");
    STAM_REG(pVM, &pVM->patm.s.StatDirty,         STAMTYPE_COUNTER, "/PATM/Patch/Dirty",           STAMUNIT_OCCURENCES,     "Number of times patches were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatUnusable,      STAMTYPE_COUNTER, "/PATM/Patch/Unusable",        STAMUNIT_OCCURENCES,     "Number of unusable patches (conflicts).");
    STAM_REG(pVM, &pVM->patm.s.StatInstalled,     STAMTYPE_COUNTER, "/PATM/Patch/Installed",       STAMUNIT_OCCURENCES,     "Number of installed patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInt3Callable,  STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable",    STAMUNIT_OCCURENCES,     "Number of cli patches turned into int3 patches.");

    STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun,  STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3",        STAMUNIT_OCCURENCES,     "Number of times an int3 block patch was executed.");
    STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");

    STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches,   STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function",  STAMUNIT_OCCURENCES,     "Number of installed function duplication patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline,        STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline",STAMUNIT_OCCURENCES,     "Number of installed trampoline patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledJump,              STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump",      STAMUNIT_OCCURENCES,     "Number of installed jump patches.");

    STAM_REG(pVM, &pVM->patm.s.StatOverwritten,   STAMTYPE_COUNTER, "/PATM/Patch/Overwritten",     STAMUNIT_OCCURENCES,     "Number of overwritten patches.");
    STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed",   STAMUNIT_OCCURENCES,     "Number of fixed conflicts.");
    STAM_REG(pVM, &pVM->patm.s.StatFlushed,       STAMTYPE_COUNTER, "/PATM/Patch/Flushed",         STAMUNIT_OCCURENCES,     "Number of flushes of pages with patch jumps.");
    STAM_REG(pVM, &pVM->patm.s.StatMonitored,     STAMTYPE_COUNTER, "/PATM/Patch/Monitored",       STAMUNIT_OCCURENCES,     "Number of patches in monitored patch pages.");
    STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES,     "Number of refused patches due to patch jump crossing page boundary.");

    STAM_REG(pVM, &pVM->patm.s.StatHandleTrap,    STAMTYPE_PROFILE, "/PATM/HandleTrap",           STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
    STAM_REG(pVM, &pVM->patm.s.StatPushTrap,      STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP",    STAMUNIT_OCCURENCES,     "Number of traps due to monitored stack pages.");

    STAM_REG(pVM, &pVM->patm.s.StatSwitchBack,    STAMTYPE_COUNTER, "/PATM/SwitchBack",           STAMUNIT_OCCURENCES,     "Switch back to original guest code when IF=1 & executing PATM instructions");
    STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail",       STAMUNIT_OCCURENCES,     "Failed switch back to original guest code when IF=1 & executing PATM instructions");

    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed,  STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed",   STAMUNIT_OCCURENCES,  "Nr of failed PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success",  STAMUNIT_OCCURENCES,  "Nr of successful PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES,  "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert,  STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert",   STAMUNIT_OCCURENCES,  "Nr of successful function address insertions");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace",  STAMUNIT_OCCURENCES,  "Nr of successful function address replacements");
    STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES,  "Maximum nr of lookup slots used in all call patches");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionFound,    STAMTYPE_COUNTER, "/PATM/Function/Found",     STAMUNIT_OCCURENCES,  "Nr of successful function patch lookups in GC");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound",  STAMUNIT_OCCURENCES,  "Nr of failed function patch lookups in GC");

    STAM_REG(pVM, &pVM->patm.s.StatPatchWrite,       STAMTYPE_PROFILE, "/PATM/Write/Handle",       STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect",       STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted,       STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success",  STAMUNIT_OCCURENCES,  "Nr of interpreted patch writes.");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed",   STAMUNIT_OCCURENCES,  "Nr of failed interpreted patch writes.");

    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success",    STAMUNIT_OCCURENCES, "Successful patch refreshes");
    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed,  STAMTYPE_COUNTER, "/PATM/Refresh/Failure",    STAMUNIT_OCCURENCES, "Failed patch refreshes");

    STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted",      STAMUNIT_OCCURENCES,  "Nr of inserted guest pages that were patched");
    STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved,  STAMTYPE_COUNTER, "/PATM/Page/Removed",       STAMUNIT_OCCURENCES,  "Nr of removed guest pages that were patched");

    STAM_REG(pVM, &pVM->patm.s.StatInstrDirty,        STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected",  STAMUNIT_OCCURENCES,  "Number of times instructions were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood,    STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES,  "Number of times instructions were marked dirty and corrected later on.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad,     STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed",    STAMUNIT_OCCURENCES,  "Number of times instructions were marked dirty and we were not able to correct them.");

    STAM_REG(pVM, &pVM->patm.s.StatSysEnter,          STAMTYPE_COUNTER, "/PATM/Emul/SysEnter",         STAMUNIT_OCCURENCES,  "Number of times sysenter was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatSysExit,           STAMTYPE_COUNTER, "/PATM/Emul/SysExit" ,         STAMUNIT_OCCURENCES,  "Number of times sysexit was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIret,          STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success",     STAMUNIT_OCCURENCES,  "Number of times iret was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed,    STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed",      STAMUNIT_OCCURENCES,  "Number of times iret was emulated.");

    STAM_REG(pVM, &pVM->patm.s.StatGenRet,            STAMTYPE_COUNTER, "/PATM/Gen/Ret" ,              STAMUNIT_OCCURENCES,  "Number of generated ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenRetReused,      STAMTYPE_COUNTER, "/PATM/Gen/RetReused" ,        STAMUNIT_OCCURENCES,  "Number of reused ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenCall,           STAMTYPE_COUNTER, "/PATM/Gen/Call",              STAMUNIT_OCCURENCES,  "Number of generated call instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenJump,           STAMTYPE_COUNTER, "/PATM/Gen/Jmp" ,              STAMUNIT_OCCURENCES,  "Number of generated indirect jump instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenPopf,           STAMTYPE_COUNTER, "/PATM/Gen/Popf" ,             STAMUNIT_OCCURENCES,  "Number of generated popf instructions.");

    STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ,   STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" ,          STAMUNIT_OCCURENCES,  "Number of traps that ask to check for pending irqs.");
#endif /* VBOX_WITH_STATISTICS */

    /* Dump the generated-code template sizes (from PATMA.h/PATMA.asm). */
    Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
    Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
    Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
    Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
    Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
    Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
    Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
    Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));

    return rc;
}
272
273/**
274 * Finalizes HMA page attributes.
275 *
276 * @returns VBox status code.
277 * @param pVM The VM handle.
278 */
279VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
280{
281 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
282 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
283 if (RT_FAILURE(rc))
284 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
285
286 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
287 if (RT_FAILURE(rc))
288 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
289
290 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
291 if (RT_FAILURE(rc))
292 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
293
294 return rc;
295}
296
/**
 * (Re)initializes PATM
 *
 * Clears the patch memory, the GC state/stack/statistics areas and the patch
 * lookup trees, then regenerates the global helper functions as a fake patch.
 * Assumes PATMR3Init() already performed the allocations.
 *
 * @returns VBox status code.
 * @param   pVM     The VM.
 */
static int patmReinit(PVM pVM)
{
    int rc;

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
    AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));

    /*
     * Setup any fixed pointers and offsets.
     */
    pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);

#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
#ifndef PATM_DISABLE_ALL
    pVM->fPATMEnabled = true;
#endif
#endif

    Assert(pVM->patm.s.pGCStateHC);
    memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));

    Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
    /* Only IF is set in the virtual EFLAGS initially. */
    pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;

    Assert(pVM->patm.s.pGCStackHC);
    memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
    pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;      /* private stack pointer starts at the top */
    pVM->patm.s.pGCStateHC->fPIF = 1;                   /* PATM Interrupt Flag */

    Assert(pVM->patm.s.pStatsHC);
    memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
    AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));

    Assert(pVM->patm.s.pPatchMemHC);
    Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
    memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
    AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));

    /* Needed for future patching of sldt/sgdt/sidt/str etc. */
    pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));

    Assert(pVM->patm.s.PatchLookupTreeHC);
    Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));

    /*
     * (Re)Initialize PATM structure
     */
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
    pVM->patm.s.offPatchMem          = 16;  /* don't start with zero here */
    pVM->patm.s.uCurrentPatchIdx     = 1;   /* Index zero is a dummy */
    pVM->patm.s.pvFaultMonitor       = 0;
    pVM->patm.s.deltaReloc           = 0;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest    = ~0;
    pVM->patm.s.pPatchedInstrGCHighest   = 0;

    pVM->patm.s.PatchLookupTreeHC->PatchTree            = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage      = 0;

    pVM->patm.s.pfnSysEnterPatchGC  = 0;
    pVM->patm.s.pfnSysEnterGC       = 0;

    pVM->patm.s.fOutOfMemory        = false;

    pVM->patm.s.pfnHelperCallGC     = 0;

    /* Generate all global functions to be used by future patches. */
    /* We generate a fake patch in order to use the existing code for relocation. */
    rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
    if (RT_FAILURE(rc))
    {
        Log(("Out of memory!!!!\n"));
        return VERR_NO_MEMORY;
    }
    pVM->patm.s.pGlobalPatchRec->patch.flags             = PATMFL_GLOBAL_FUNCTIONS;
    pVM->patm.s.pGlobalPatchRec->patch.uState            = PATCH_ENABLED;
    pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;

    rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
    AssertRC(rc);

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
    /* Round to next 8 byte boundary. */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
    return rc;
}
398
399
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * The PATM will update the addresses used by the switcher.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) PATMR3Relocate(PVM pVM)
{
    RTRCPTR     GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
    RTRCINTPTR  delta = GCPtrNew - pVM->patm.s.pGCStateGC;

    Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
    if (delta)
    {
        PCPUMCTX pCtx;

        /* Update CPUMCTX guest context pointer. */
        pVM->patm.s.pCPUMCtxGC   += delta;

        /* NOTE: must be set before the tree walk below; RelocatePatches reads it. */
        pVM->patm.s.deltaReloc = delta;

        /* Fix up every fixup record of every active patch. */
        RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);

        pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

        /* If we are running patch code right now, then also adjust EIP. */
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            pCtx->eip += delta;

        /* Recompute all GC pointers from their (stable) HC counterparts. */
        pVM->patm.s.pGCStateGC = GCPtrNew;
        pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

        pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

        pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

        pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

        if (pVM->patm.s.pfnSysEnterPatchGC)
            pVM->patm.s.pfnSysEnterPatchGC += delta;

        /* Deal with the global patch functions. */
        pVM->patm.s.pfnHelperCallGC += delta;
        pVM->patm.s.pfnHelperRetGC  += delta;
        pVM->patm.s.pfnHelperIretGC += delta;
        pVM->patm.s.pfnHelperJumpGC += delta;

        /* The global patch record is not in the lookup tree, so relocate it explicitly. */
        RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
    }
}
453
454
455/**
456 * Terminates the PATM.
457 *
458 * Termination means cleaning up and freeing all resources,
459 * the VM it self is at this point powered off or suspended.
460 *
461 * @returns VBox status code.
462 * @param pVM The VM to operate on.
463 */
464VMMR3DECL(int) PATMR3Term(PVM pVM)
465{
466 /* Memory was all allocated from the two MM heaps and requires no freeing. */
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * PATM reset callback.
473 *
474 * @returns VBox status code.
475 * @param pVM The VM which is reset.
476 */
477VMMR3DECL(int) PATMR3Reset(PVM pVM)
478{
479 Log(("PATMR3Reset\n"));
480
481 /* Free all patches. */
482 while (true)
483 {
484 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
485 if (pPatchRec)
486 {
487 PATMRemovePatch(pVM, pPatchRec, true);
488 }
489 else
490 break;
491 }
492 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
493 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
494 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
495 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
496
497 int rc = patmReinit(pVM);
498 if (RT_SUCCESS(rc))
499 rc = PATMR3InitFinalize(pVM); /* paranoia */
500
501 return rc;
502}
503
/**
 * Read callback for disassembly function; supports reading bytes that cross a page boundary
 *
 * @returns VBox status code.
 * @param   pSrc        GC source pointer
 * @param   pDest       HC destination pointer
 * @param   size        Number of bytes to read
 * @param   pvUserdata  Callback specific user data (pCpu)
 *
 */
int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
{
    DISCPUSTATE  *pCpu     = (DISCPUSTATE *)pvUserdata;
    PATMDISASM   *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
    int           orgsize  = size;    /* remember the requested count; 'size' is decremented below */

    Assert(size);
    if (size == 0)
        return VERR_INVALID_PARAMETER;

    /*
     * Trap/interrupt handler typically call common code on entry. Which might already have patches inserted.
     * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
     */
    /** @todo could change in the future! */
    if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
    {
        /* Fetch the original (pre-patch) opcode bytes one at a time; stop at the
         * first byte PATM has no record of and fall through to a normal read. */
        for (int i=0;i<orgsize;i++)
        {
            int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
            if (RT_SUCCESS(rc))
            {
                pSrc++;
                pDest++;
                size--;
            }
            else break;
        }
        if (size == 0)
            return VINF_SUCCESS;
#ifdef VBOX_STRICT
        if (    !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
            &&  !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
        {
            /* Sanity: the remaining range must not overlap an installed patch jump. */
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
        }
#endif
    }

    /* Use the slow page-table walk when we have no cached HC mapping, or when the
     * read crosses out of the cached page (and isn't inside patch memory). */
    if (    !pDisInfo->pInstrHC
        ||  (   PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1)
             && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc)))
    {
        Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, pSrc));
        return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
    }
    else
    {
        Assert(pDisInfo->pInstrHC);

        uint8_t *pInstrHC = pDisInfo->pInstrHC;

        Assert(pInstrHC);

        /* pInstrHC is the base address; adjust according to the GC pointer. */
        pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);

        memcpy(pDest, (void *)pInstrHC, size);
    }

    return VINF_SUCCESS;
}
577
578/**
579 * Callback function for RTAvloU32DoWithAll
580 *
581 * Updates all fixups in the patches
582 *
583 * @returns VBox status code.
584 * @param pNode Current node
585 * @param pParam The VM to operate on.
586 */
587static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
588{
589 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
590 PVM pVM = (PVM)pParam;
591 RTRCINTPTR delta;
592#ifdef LOG_ENABLED
593 DISCPUSTATE cpu;
594 char szOutput[256];
595 uint32_t opsize;
596 bool disret;
597#endif
598 int rc;
599
600 /* Nothing to do if the patch is not active. */
601 if (pPatch->patch.uState == PATCH_REFUSED)
602 return 0;
603
604#ifdef LOG_ENABLED
605 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
606 {
607 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
608 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
609 Log(("Org patch jump: %s", szOutput));
610 }
611#endif
612
613 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
614 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
615
616 /*
617 * Apply fixups
618 */
619 PRELOCREC pRec = 0;
620 AVLPVKEY key = 0;
621
622 while (true)
623 {
624 /* Get the record that's closest from above */
625 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
626 if (pRec == 0)
627 break;
628
629 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
630
631 switch (pRec->uType)
632 {
633 case FIXUP_ABSOLUTE:
634 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
635 if ( !pRec->pSource
636 || PATMIsPatchGCAddr(pVM, pRec->pSource))
637 {
638 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
639 }
640 else
641 {
642 uint8_t curInstr[15];
643 uint8_t oldInstr[15];
644 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
645
646 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
647
648 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
649 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
650
651 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
652 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
653
654 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
655
656 if ( rc == VERR_PAGE_NOT_PRESENT
657 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
658 {
659 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
660
661 Log(("PATM: Patch page not present -> check later!\n"));
662 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
663 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
664 }
665 else
666 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
667 {
668 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
669 /*
670 * Disable patch; this is not a good solution
671 */
672 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
673 pPatch->patch.uState = PATCH_DISABLED;
674 }
675 else
676 if (RT_SUCCESS(rc))
677 {
678 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
679 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
680 AssertRC(rc);
681 }
682 }
683 break;
684
685 case FIXUP_REL_JMPTOPATCH:
686 {
687 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
688
689 if ( pPatch->patch.uState == PATCH_ENABLED
690 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
691 {
692 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
693 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
694 RTRCPTR pJumpOffGC;
695 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
696 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
697
698#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
699 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
700#else
701 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
702#endif
703
704 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
705#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
706 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
707 {
708 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
709
710 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
711 oldJump[0] = pPatch->patch.aPrivInstr[0];
712 oldJump[1] = pPatch->patch.aPrivInstr[1];
713 *(RTRCUINTPTR *)&oldJump[2] = displOld;
714 }
715 else
716#endif
717 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
718 {
719 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
720 oldJump[0] = 0xE9;
721 *(RTRCUINTPTR *)&oldJump[1] = displOld;
722 }
723 else
724 {
725 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
726 continue; //this should never happen!!
727 }
728 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
729
730 /*
731 * Read old patch jump and compare it to the one we previously installed
732 */
733 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
734 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
735
736 if ( rc == VERR_PAGE_NOT_PRESENT
737 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
738 {
739 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
740
741 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
742 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
743 }
744 else
745 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
746 {
747 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
748 /*
749 * Disable patch; this is not a good solution
750 */
751 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
752 pPatch->patch.uState = PATCH_DISABLED;
753 }
754 else
755 if (RT_SUCCESS(rc))
756 {
757 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
758 AssertRC(rc);
759 }
760 else
761 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
762 }
763 else
764 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
765
766 pRec->pDest = pTarget;
767 break;
768 }
769
770 case FIXUP_REL_JMPTOGUEST:
771 {
772 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
773 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
774
775 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
776 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
777 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
778 pRec->pSource = pSource;
779 break;
780 }
781
782 default:
783 AssertMsg(0, ("Invalid fixup type!!\n"));
784 return VERR_INVALID_PARAMETER;
785 }
786 }
787
788#ifdef LOG_ENABLED
789 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
790 {
791 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
792 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
793 Log(("Rel patch jump: %s", szOutput));
794 }
795#endif
796 return 0;
797}
798
799/**
800 * \#PF Handler callback for virtual access handler ranges.
801 *
802 * Important to realize that a physical page in a range can have aliases, and
803 * for ALL and WRITE handlers these will also trigger.
804 *
805 * @returns VINF_SUCCESS if the handler have carried out the operation.
806 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
807 * @param pVM VM Handle.
808 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
809 * @param pvPtr The HC mapping of that address.
810 * @param pvBuf What the guest is reading/writing.
811 * @param cbBuf How much it's reading/writing.
812 * @param enmAccessType The access type.
813 * @param pvUser User argument.
814 */
815DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
816{
817 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
818 /** @todo could be the wrong virtual address (alias) */
819 pVM->patm.s.pvFaultMonitor = GCPtr;
820 PATMR3HandleMonitoredPage(pVM);
821 return VINF_PGM_HANDLER_DO_DEFAULT;
822}
823
824
825#ifdef VBOX_WITH_DEBUGGER
826/**
827 * Callback function for RTAvloU32DoWithAll
828 *
829 * Enables the patch that's being enumerated
830 *
831 * @returns 0 (continue enumeration).
832 * @param pNode Current node
833 * @param pVM The VM to operate on.
834 */
835static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
836{
837 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
838
839 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
840 return 0;
841}
842#endif /* VBOX_WITH_DEBUGGER */
843
844
845#ifdef VBOX_WITH_DEBUGGER
846/**
847 * Callback function for RTAvloU32DoWithAll
848 *
849 * Disables the patch that's being enumerated
850 *
851 * @returns 0 (continue enumeration).
852 * @param pNode Current node
853 * @param pVM The VM to operate on.
854 */
855static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
856{
857 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
858
859 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
860 return 0;
861}
862#endif
863
864/**
865 * Returns the host context pointer and size of the patch memory block
866 *
867 * @returns VBox status code.
868 * @param pVM The VM to operate on.
869 * @param pcb Size of the patch memory block
870 */
871VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
872{
873 if (pcb)
874 *pcb = pVM->patm.s.cbPatchMem;
875
876 return pVM->patm.s.pPatchMemHC;
877}
878
879
880/**
881 * Returns the guest context pointer and size of the patch memory block
882 *
883 * @returns VBox status code.
884 * @param pVM The VM to operate on.
885 * @param pcb Size of the patch memory block
886 */
887VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
888{
889 if (pcb)
890 *pcb = pVM->patm.s.cbPatchMem;
891
892 return pVM->patm.s.pPatchMemGC;
893}
894
895
896/**
897 * Returns the host context pointer of the GC context structure
898 *
899 * @returns VBox status code.
900 * @param pVM The VM to operate on.
901 */
902VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
903{
904 return pVM->patm.s.pGCStateHC;
905}
906
907
908/**
909 * Checks whether the HC address is part of our patch region
910 *
911 * @returns VBox status code.
912 * @param pVM The VM to operate on.
913 * @param pAddrGC Guest context address
914 */
915VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
916{
917 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
918}
919
920
921/**
922 * Allows or disallow patching of privileged instructions executed by the guest OS
923 *
924 * @returns VBox status code.
925 * @param pVM The VM to operate on.
926 * @param fAllowPatching Allow/disallow patching
927 */
928VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
929{
930 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
931 return VINF_SUCCESS;
932}
933
934/**
935 * Convert a GC patch block pointer to a HC patch pointer
936 *
937 * @returns HC pointer or NULL if it's not a GC patch pointer
938 * @param pVM The VM to operate on.
939 * @param pAddrGC GC pointer
940 */
941VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
942{
943 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
944 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
945 else
946 return NULL;
947}
948
949/**
950 * Query PATM state (enabled/disabled)
951 *
952 * @returns 0 - disabled, 1 - enabled
953 * @param pVM The VM to operate on.
954 */
955VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
956{
957 return pVM->fPATMEnabled;
958}
959
960
961/**
962 * Convert guest context address to host context pointer
963 *
964 * @returns VBox status code.
965 * @param pVM The VM to operate on.
966 * @param pCacheRec Address conversion cache record
967 * @param pGCPtr Guest context pointer
968 *
969 * @returns Host context pointer or NULL in case of an error
970 *
971 */
972R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
973{
974 int rc;
975 R3PTRTYPE(uint8_t *) pHCPtr;
976 uint32_t offset;
977
978 if (PATMIsPatchGCAddr(pVM, pGCPtr))
979 {
980 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
981 Assert(pPatch);
982 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
983 }
984
985 offset = pGCPtr & PAGE_OFFSET_MASK;
986 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
987 return pCacheRec->pPageLocStartHC + offset;
988
989 /* Release previous lock if any. */
990 if (pCacheRec->Lock.pvMap)
991 {
992 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
993 pCacheRec->Lock.pvMap = NULL;
994 }
995
996 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
997 if (rc != VINF_SUCCESS)
998 {
999 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1000 return NULL;
1001 }
1002 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1003 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1004 return pHCPtr;
1005}
1006
1007
/**
 * Calculates and fills in all branch targets
 *
 * Drains the patch's jump record tree, resolving every recorded branch to
 * its target inside patch memory (installing function replacement patches
 * for calls when necessary) and writing the dword displacement in place.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pPatch Current patch block pointer
 */
static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
{
    int32_t displ;

    PJUMPREC pRec = 0;
    unsigned nrJumpRecs = 0;

    /*
     * Set all branch targets inside the patch block.
     * We remove all jump records as they are no longer needed afterwards.
     */
    while (true)
    {
        RCPTRTYPE(uint8_t *) pInstrGC;
        RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;

        pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
        if (pRec == 0)
            break;

        nrJumpRecs++;

        /* HC in patch block to GC in patch block. */
        pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);

        if (pRec->opcode == OP_CALL)
        {
            /* Special case: call function replacement patch from this patch block.
             */
            PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
            if (!pFunctionRec)
            {
                int rc;

                /* Target not patched as a function yet; try to install one now. */
                if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
                    rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
                else
                    rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */

                if (RT_FAILURE(rc))
                {
                    uint8_t *pPatchHC;
                    RTRCPTR pPatchGC;
                    RTRCPTR pOrgInstrGC;

                    pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
                    Assert(pOrgInstrGC);

                    /* Failure for some reason -> mark exit point with int 3. */
                    Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));

                    pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
                    Assert(pPatchGC);

                    pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);

                    /* Set a breakpoint at the very beginning of the recompiled instruction */
                    *pPatchHC = 0xCC;

                    continue;
                }
            }
            else
            {
                Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
                pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
            }

            pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
        }
        else
            pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);

        if (pBranchTargetGC == 0)
        {
            AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
            return VERR_PATCHING_REFUSED;
        }
        /* Our jumps *always* have a dword displacement (to make things easier). */
        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
        /* Displacement is relative to the byte following the 4-byte operand. */
        displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
        *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
        Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
    }
    Assert(nrJumpRecs == pPatch->nrJumpRecs);
    Assert(pPatch->JumpTree == 0);
    return VINF_SUCCESS;
}
1103
1104/* Add an illegal instruction record
1105 *
1106 * @param pVM The VM to operate on.
1107 * @param pPatch Patch structure ptr
1108 * @param pInstrGC Guest context pointer to privileged instruction
1109 *
1110 */
1111static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1112{
1113 PAVLPVNODECORE pRec;
1114
1115 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1116 Assert(pRec);
1117 pRec->Key = (AVLPVKEY)pInstrGC;
1118
1119 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1120 Assert(ret); NOREF(ret);
1121 pPatch->pTempInfo->nrIllegalInstr++;
1122}
1123
1124static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1125{
1126 PAVLPVNODECORE pRec;
1127
1128 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1129 if (pRec)
1130 return true;
1131 else
1132 return false;
1133}
1134
1135/**
1136 * Add a patch to guest lookup record
1137 *
1138 * @param pVM The VM to operate on.
1139 * @param pPatch Patch structure ptr
1140 * @param pPatchInstrHC Guest context pointer to patch block
1141 * @param pInstrGC Guest context pointer to privileged instruction
1142 * @param enmType Lookup type
1143 * @param fDirty Dirty flag
1144 *
1145 */
1146 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1147void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1148{
1149 bool ret;
1150 PRECPATCHTOGUEST pPatchToGuestRec;
1151 PRECGUESTTOPATCH pGuestToPatchRec;
1152 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1153
1154 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1155 {
1156 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1157 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1158 return; /* already there */
1159
1160 Assert(!pPatchToGuestRec);
1161 }
1162#ifdef VBOX_STRICT
1163 else
1164 {
1165 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1166 Assert(!pPatchToGuestRec);
1167 }
1168#endif
1169
1170 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1171 Assert(pPatchToGuestRec);
1172 pPatchToGuestRec->Core.Key = PatchOffset;
1173 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1174 pPatchToGuestRec->enmType = enmType;
1175 pPatchToGuestRec->fDirty = fDirty;
1176
1177 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1178 Assert(ret);
1179
1180 /* GC to patch address */
1181 if (enmType == PATM_LOOKUP_BOTHDIR)
1182 {
1183 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1184 if (!pGuestToPatchRec)
1185 {
1186 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1187 pGuestToPatchRec->Core.Key = pInstrGC;
1188 pGuestToPatchRec->PatchOffset = PatchOffset;
1189
1190 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1191 Assert(ret);
1192 }
1193 }
1194
1195 pPatch->nrPatch2GuestRecs++;
1196}
1197
1198
1199/**
1200 * Removes a patch to guest lookup record
1201 *
1202 * @param pVM The VM to operate on.
1203 * @param pPatch Patch structure ptr
1204 * @param pPatchInstrGC Guest context pointer to patch block
1205 */
1206void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1207{
1208 PAVLU32NODECORE pNode;
1209 PAVLU32NODECORE pNode2;
1210 PRECPATCHTOGUEST pPatchToGuestRec;
1211 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1212
1213 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1214 Assert(pPatchToGuestRec);
1215 if (pPatchToGuestRec)
1216 {
1217 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1218 {
1219 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1220
1221 Assert(pGuestToPatchRec->Core.Key);
1222 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1223 Assert(pNode2);
1224 }
1225 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1226 Assert(pNode);
1227
1228 MMR3HeapFree(pPatchToGuestRec);
1229 pPatch->nrPatch2GuestRecs--;
1230 }
1231}
1232
1233
1234/**
1235 * RTAvlPVDestroy callback.
1236 */
1237static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1238{
1239 MMR3HeapFree(pNode);
1240 return 0;
1241}
1242
1243/**
1244 * Empty the specified tree (PV tree, MMR3 heap)
1245 *
1246 * @param pVM The VM to operate on.
1247 * @param ppTree Tree to empty
1248 */
1249void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1250{
1251 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1252}
1253
1254
1255/**
1256 * RTAvlU32Destroy callback.
1257 */
1258static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1259{
1260 MMR3HeapFree(pNode);
1261 return 0;
1262}
1263
1264/**
1265 * Empty the specified tree (U32 tree, MMR3 heap)
1266 *
1267 * @param pVM The VM to operate on.
1268 * @param ppTree Tree to empty
1269 */
1270void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1271{
1272 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1273}
1274
1275
1276/**
1277 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1278 *
1279 * @returns VBox status code.
1280 * @param pVM The VM to operate on.
1281 * @param pCpu CPU disassembly state
1282 * @param pInstrGC Guest context pointer to privileged instruction
1283 * @param pCurInstrGC Guest context pointer to the current instruction
1284 * @param pCacheRec Cache record ptr
1285 *
1286 */
1287static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1288{
1289 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1290 bool fIllegalInstr = false;
1291
1292 //Preliminary heuristics:
1293 //- no call instructions without a fixed displacement between cli and sti/popf
1294 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1295 //- no nested pushf/cli
1296 //- sti/popf should be the (eventual) target of all branches
1297 //- no near or far returns; no int xx, no into
1298 //
1299 // Note: Later on we can impose less stricter guidelines if the need arises
1300
1301 /* Bail out if the patch gets too big. */
1302 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1303 {
1304 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1305 fIllegalInstr = true;
1306 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1307 }
1308 else
1309 {
1310 /* No unconditional jumps or calls without fixed displacements. */
1311 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1312 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1313 )
1314 {
1315 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1316 if ( pCpu->param1.size == 6 /* far call/jmp */
1317 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1318 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1319 )
1320 {
1321 fIllegalInstr = true;
1322 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1323 }
1324 }
1325
1326 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1327 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1328 {
1329 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1330 {
1331 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1332 /* We turn this one into a int 3 callable patch. */
1333 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1334 }
1335 }
1336 else
1337 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1338 if (pPatch->opcode == OP_PUSHF)
1339 {
1340 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1341 {
1342 fIllegalInstr = true;
1343 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1344 }
1345 }
1346
1347 // no far returns
1348 if (pCpu->pCurInstr->opcode == OP_RETF)
1349 {
1350 pPatch->pTempInfo->nrRetInstr++;
1351 fIllegalInstr = true;
1352 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1353 }
1354 else
1355 // no int xx or into either
1356 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1357 {
1358 fIllegalInstr = true;
1359 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1360 }
1361 }
1362
1363 pPatch->cbPatchBlockSize += pCpu->opsize;
1364
1365 /* Illegal instruction -> end of analysis phase for this code block */
1366 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1367 return VINF_SUCCESS;
1368
1369 /* Check for exit points. */
1370 switch (pCpu->pCurInstr->opcode)
1371 {
1372 case OP_SYSEXIT:
1373 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1374
1375 case OP_SYSENTER:
1376 case OP_ILLUD2:
1377 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1378 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1379 return VINF_SUCCESS;
1380
1381 case OP_STI:
1382 case OP_POPF:
1383 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1384 /* If out exit point lies within the generated patch jump, then we have to refuse!! */
1385 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1386 {
1387 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1388 return VERR_PATCHING_REFUSED;
1389 }
1390 if (pPatch->opcode == OP_PUSHF)
1391 {
1392 if (pCpu->pCurInstr->opcode == OP_POPF)
1393 {
1394 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1395 return VINF_SUCCESS;
1396
1397 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1398 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1399 pPatch->flags |= PATMFL_CHECK_SIZE;
1400 }
1401 break; //sti doesn't mark the end of a pushf block; only popf does
1402 }
1403 //else no break
1404 case OP_RETN: /* exit point for function replacement */
1405 return VINF_SUCCESS;
1406
1407 case OP_IRET:
1408 return VINF_SUCCESS; /* exitpoint */
1409
1410 case OP_CPUID:
1411 case OP_CALL:
1412 case OP_JMP:
1413 break;
1414
1415 default:
1416 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1417 {
1418 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1419 return VINF_SUCCESS; /* exit point */
1420 }
1421 break;
1422 }
1423
1424 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1425 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1426 {
1427 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1428 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1429 return VINF_SUCCESS;
1430 }
1431
1432 return VWRN_CONTINUE_ANALYSIS;
1433}
1434
1435/**
1436 * Analyses the instructions inside a function for compliance
1437 *
1438 * @returns VBox status code.
1439 * @param pVM The VM to operate on.
1440 * @param pCpu CPU disassembly state
1441 * @param pInstrGC Guest context pointer to privileged instruction
1442 * @param pCurInstrGC Guest context pointer to the current instruction
1443 * @param pCacheRec Cache record ptr
1444 *
1445 */
1446static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1447{
1448 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1449 bool fIllegalInstr = false;
1450
1451 //Preliminary heuristics:
1452 //- no call instructions
1453 //- ret ends a block
1454
1455 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1456
1457 // bail out if the patch gets too big
1458 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1459 {
1460 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1461 fIllegalInstr = true;
1462 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1463 }
1464 else
1465 {
1466 // no unconditional jumps or calls without fixed displacements
1467 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1468 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1469 )
1470 {
1471 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1472 if ( pCpu->param1.size == 6 /* far call/jmp */
1473 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1474 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1475 )
1476 {
1477 fIllegalInstr = true;
1478 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1479 }
1480 }
1481 else /* no far returns */
1482 if (pCpu->pCurInstr->opcode == OP_RETF)
1483 {
1484 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1485 fIllegalInstr = true;
1486 }
1487 else /* no int xx or into either */
1488 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1489 {
1490 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1491 fIllegalInstr = true;
1492 }
1493
1494 #if 0
1495 ///@todo we can handle certain in/out and privileged instructions in the guest context
1496 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1497 {
1498 Log(("Illegal instructions for function patch!!\n"));
1499 return VERR_PATCHING_REFUSED;
1500 }
1501 #endif
1502 }
1503
1504 pPatch->cbPatchBlockSize += pCpu->opsize;
1505
1506 /* Illegal instruction -> end of analysis phase for this code block */
1507 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1508 {
1509 return VINF_SUCCESS;
1510 }
1511
1512 // Check for exit points
1513 switch (pCpu->pCurInstr->opcode)
1514 {
1515 case OP_ILLUD2:
1516 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1517 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1518 return VINF_SUCCESS;
1519
1520 case OP_IRET:
1521 case OP_SYSEXIT: /* will fault or emulated in GC */
1522 case OP_RETN:
1523 return VINF_SUCCESS;
1524
1525 case OP_POPF:
1526 case OP_STI:
1527 return VWRN_CONTINUE_ANALYSIS;
1528 default:
1529 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1530 {
1531 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1532 return VINF_SUCCESS; /* exit point */
1533 }
1534 return VWRN_CONTINUE_ANALYSIS;
1535 }
1536
1537 return VWRN_CONTINUE_ANALYSIS;
1538}
1539
1540/**
1541 * Recompiles the instructions in a code block
1542 *
1543 * @returns VBox status code.
1544 * @param pVM The VM to operate on.
1545 * @param pCpu CPU disassembly state
1546 * @param pInstrGC Guest context pointer to privileged instruction
1547 * @param pCurInstrGC Guest context pointer to the current instruction
1548 * @param pCacheRec Cache record ptr
1549 *
1550 */
1551static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1552{
1553 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1554 int rc = VINF_SUCCESS;
1555 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1556
1557 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1558
1559 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1560 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1561 {
1562 /*
1563 * Been there, done that; so insert a jump (we don't want to duplicate code)
1564 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1565 */
1566 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1567 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1568 }
1569
1570 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1571 {
1572 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1573 }
1574 else
1575 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1576
1577 if (RT_FAILURE(rc))
1578 return rc;
1579
1580 /* Note: Never do a direct return unless a failure is encountered! */
1581
1582 /* Clear recompilation of next instruction flag; we are doing that right here. */
1583 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1584 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1585
1586 /* Add lookup record for patch to guest address translation */
1587 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1588
1589 /* Update lowest and highest instruction address for this patch */
1590 if (pCurInstrGC < pPatch->pInstrGCLowest)
1591 pPatch->pInstrGCLowest = pCurInstrGC;
1592 else
1593 if (pCurInstrGC > pPatch->pInstrGCHighest)
1594 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1595
1596 /* Illegal instruction -> end of recompile phase for this code block. */
1597 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1598 {
1599 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1600 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1601 goto end;
1602 }
1603
1604 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1605 * Indirect calls are handled below.
1606 */
1607 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1608 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1609 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1610 {
1611 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1612 if (pTargetGC == 0)
1613 {
1614 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1615 return VERR_PATCHING_REFUSED;
1616 }
1617
1618 if (pCpu->pCurInstr->opcode == OP_CALL)
1619 {
1620 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1621 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1622 if (RT_FAILURE(rc))
1623 goto end;
1624 }
1625 else
1626 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1627
1628 if (RT_SUCCESS(rc))
1629 rc = VWRN_CONTINUE_RECOMPILE;
1630
1631 goto end;
1632 }
1633
1634 switch (pCpu->pCurInstr->opcode)
1635 {
1636 case OP_CLI:
1637 {
1638 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1639 * until we've found the proper exit point(s).
1640 */
1641 if ( pCurInstrGC != pInstrGC
1642 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1643 )
1644 {
1645 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1646 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1647 }
1648 /* Set by irq inhibition; no longer valid now. */
1649 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1650
1651 rc = patmPatchGenCli(pVM, pPatch);
1652 if (RT_SUCCESS(rc))
1653 rc = VWRN_CONTINUE_RECOMPILE;
1654 break;
1655 }
1656
1657 case OP_MOV:
1658 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1659 {
1660 /* mov ss, src? */
1661 if ( (pCpu->param1.flags & USE_REG_SEG)
1662 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1663 {
1664 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1665 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1666 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1667 }
1668#if 0 /* necessary for Haiku */
1669 else
1670 if ( (pCpu->param2.flags & USE_REG_SEG)
1671 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1672 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1673 {
1674 /* mov GPR, ss */
1675 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1676 if (RT_SUCCESS(rc))
1677 rc = VWRN_CONTINUE_RECOMPILE;
1678 break;
1679 }
1680#endif
1681 }
1682 goto duplicate_instr;
1683
1684 case OP_POP:
1685 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1686 {
1687 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1688
1689 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1690 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1691 }
1692 goto duplicate_instr;
1693
1694 case OP_STI:
1695 {
1696 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1697
1698 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1699 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1700 {
1701 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1702 fInhibitIRQInstr = true;
1703 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1704 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1705 }
1706 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1707
1708 if (RT_SUCCESS(rc))
1709 {
1710 DISCPUSTATE cpu = *pCpu;
1711 unsigned opsize;
1712 int disret;
1713 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1714
1715 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1716
1717 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1718 { /* Force pNextInstrHC out of scope after using it */
1719 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1720 if (pNextInstrHC == NULL)
1721 {
1722 AssertFailed();
1723 return VERR_PATCHING_REFUSED;
1724 }
1725
1726 // Disassemble the next instruction
1727 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1728 }
1729 if (disret == false)
1730 {
1731 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1732 return VERR_PATCHING_REFUSED;
1733 }
1734 pReturnInstrGC = pNextInstrGC + opsize;
1735
1736 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1737 || pReturnInstrGC <= pInstrGC
1738 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1739 )
1740 {
1741 /* Not an exit point for function duplication patches */
1742 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1743 && RT_SUCCESS(rc))
1744 {
1745 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1746 rc = VWRN_CONTINUE_RECOMPILE;
1747 }
1748 else
1749 rc = VINF_SUCCESS; //exit point
1750 }
1751 else {
1752 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1753 rc = VERR_PATCHING_REFUSED; //not allowed!!
1754 }
1755 }
1756 break;
1757 }
1758
1759 case OP_POPF:
1760 {
1761 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1762
1763 /* Not an exit point for IDT handler or function replacement patches */
1764 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1765 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1766 fGenerateJmpBack = false;
1767
1768 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1769 if (RT_SUCCESS(rc))
1770 {
1771 if (fGenerateJmpBack == false)
1772 {
1773 /* Not an exit point for IDT handler or function replacement patches */
1774 rc = VWRN_CONTINUE_RECOMPILE;
1775 }
1776 else
1777 {
1778 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1779 rc = VINF_SUCCESS; /* exit point! */
1780 }
1781 }
1782 break;
1783 }
1784
1785 case OP_PUSHF:
1786 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1787 if (RT_SUCCESS(rc))
1788 rc = VWRN_CONTINUE_RECOMPILE;
1789 break;
1790
1791 case OP_PUSH:
1792 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1793 {
1794 rc = patmPatchGenPushCS(pVM, pPatch);
1795 if (RT_SUCCESS(rc))
1796 rc = VWRN_CONTINUE_RECOMPILE;
1797 break;
1798 }
1799 goto duplicate_instr;
1800
1801 case OP_IRET:
1802 Log(("IRET at %RRv\n", pCurInstrGC));
1803 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1804 if (RT_SUCCESS(rc))
1805 {
1806 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1807 rc = VINF_SUCCESS; /* exit point by definition */
1808 }
1809 break;
1810
1811 case OP_ILLUD2:
1812 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1813 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1814 if (RT_SUCCESS(rc))
1815 rc = VINF_SUCCESS; /* exit point by definition */
1816 Log(("Illegal opcode (0xf 0xb)\n"));
1817 break;
1818
1819 case OP_CPUID:
1820 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1821 if (RT_SUCCESS(rc))
1822 rc = VWRN_CONTINUE_RECOMPILE;
1823 break;
1824
1825 case OP_STR:
1826 case OP_SLDT:
1827 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1828 if (RT_SUCCESS(rc))
1829 rc = VWRN_CONTINUE_RECOMPILE;
1830 break;
1831
1832 case OP_SGDT:
1833 case OP_SIDT:
1834 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1835 if (RT_SUCCESS(rc))
1836 rc = VWRN_CONTINUE_RECOMPILE;
1837 break;
1838
1839 case OP_RETN:
1840 /* retn is an exit point for function patches */
1841 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1842 if (RT_SUCCESS(rc))
1843 rc = VINF_SUCCESS; /* exit point by definition */
1844 break;
1845
1846 case OP_SYSEXIT:
1847 /* Duplicate it, so it can be emulated in GC (or fault). */
1848 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1849 if (RT_SUCCESS(rc))
1850 rc = VINF_SUCCESS; /* exit point by definition */
1851 break;
1852
1853 case OP_CALL:
1854 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1855 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1856 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1857 */
1858 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1859 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1860 {
1861 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1862 if (RT_SUCCESS(rc))
1863 {
1864 rc = VWRN_CONTINUE_RECOMPILE;
1865 }
1866 break;
1867 }
1868 goto gen_illegal_instr;
1869
1870 case OP_JMP:
1871 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1872 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1873 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1874 */
1875 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1876 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1877 {
1878 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1879 if (RT_SUCCESS(rc))
1880 rc = VINF_SUCCESS; /* end of branch */
1881 break;
1882 }
1883 goto gen_illegal_instr;
1884
1885 case OP_INT3:
1886 case OP_INT:
1887 case OP_INTO:
1888 goto gen_illegal_instr;
1889
1890 case OP_MOV_DR:
1891 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1892 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1893 {
1894 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1895 if (RT_SUCCESS(rc))
1896 rc = VWRN_CONTINUE_RECOMPILE;
1897 break;
1898 }
1899 goto duplicate_instr;
1900
1901 case OP_MOV_CR:
1902 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1903 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1904 {
1905 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1906 if (RT_SUCCESS(rc))
1907 rc = VWRN_CONTINUE_RECOMPILE;
1908 break;
1909 }
1910 goto duplicate_instr;
1911
1912 default:
1913 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1914 {
1915gen_illegal_instr:
1916 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1917 if (RT_SUCCESS(rc))
1918 rc = VINF_SUCCESS; /* exit point by definition */
1919 }
1920 else
1921 {
1922duplicate_instr:
1923 Log(("patmPatchGenDuplicate\n"));
1924 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1925 if (RT_SUCCESS(rc))
1926 rc = VWRN_CONTINUE_RECOMPILE;
1927 }
1928 break;
1929 }
1930
1931end:
1932
1933 if ( !fInhibitIRQInstr
1934 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1935 {
1936 int rc2;
1937 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1938
1939 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1940 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1941 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1942 {
1943 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1944
1945 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1946 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1947 rc = VINF_SUCCESS; /* end of the line */
1948 }
1949 else
1950 {
1951 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1952 }
1953 if (RT_FAILURE(rc2))
1954 rc = rc2;
1955 }
1956
1957 if (RT_SUCCESS(rc))
1958 {
1959 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1960 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1961 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1962 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1963 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1964 )
1965 {
1966 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1967
1968 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1969 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1970
1971 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1972 AssertRC(rc);
1973 }
1974 }
1975 return rc;
1976}
1977
1978
1979#ifdef LOG_ENABLED
1980
/** Add a disasm jump record (temporary, to prevent duplicate analysis)
1982 *
1983 * @param pVM The VM to operate on.
1984 * @param pPatch Patch structure ptr
1985 * @param pInstrGC Guest context pointer to privileged instruction
1986 *
1987 */
1988static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1989{
1990 PAVLPVNODECORE pRec;
1991
1992 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1993 Assert(pRec);
1994 pRec->Key = (AVLPVKEY)pInstrGC;
1995
1996 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
1997 Assert(ret);
1998}
1999
2000/**
2001 * Checks if jump target has been analysed before.
2002 *
 * @returns true if the jump target has been analysed before, false otherwise.
2004 * @param pPatch Patch struct
2005 * @param pInstrGC Jump target
2006 *
2007 */
2008static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2009{
2010 PAVLPVNODECORE pRec;
2011
2012 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2013 if (pRec)
2014 return true;
2015 return false;
2016}
2017
2018/**
2019 * For proper disassembly of the final patch block
2020 *
2021 * @returns VBox status code.
2022 * @param pVM The VM to operate on.
2023 * @param pCpu CPU disassembly state
2024 * @param pInstrGC Guest context pointer to privileged instruction
2025 * @param pCurInstrGC Guest context pointer to the current instruction
2026 * @param pCacheRec Cache record ptr
2027 *
2028 */
2029int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2030{
2031 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2032
2033 if (pCpu->pCurInstr->opcode == OP_INT3)
2034 {
2035 /* Could be an int3 inserted in a call patch. Check to be sure */
2036 DISCPUSTATE cpu;
2037 RTRCPTR pOrgJumpGC;
2038 uint32_t dummy;
2039
2040 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2041 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2042
2043 { /* Force pOrgJumpHC out of scope after using it */
2044 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2045
2046 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2047 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2048 return VINF_SUCCESS;
2049 }
2050 return VWRN_CONTINUE_ANALYSIS;
2051 }
2052
2053 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2054 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2055 {
2056 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2057 return VWRN_CONTINUE_ANALYSIS;
2058 }
2059
2060 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2061 || pCpu->pCurInstr->opcode == OP_INT
2062 || pCpu->pCurInstr->opcode == OP_IRET
2063 || pCpu->pCurInstr->opcode == OP_RETN
2064 || pCpu->pCurInstr->opcode == OP_RETF
2065 )
2066 {
2067 return VINF_SUCCESS;
2068 }
2069
2070 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2071 return VINF_SUCCESS;
2072
2073 return VWRN_CONTINUE_ANALYSIS;
2074}
2075
2076
2077/**
2078 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2079 *
2080 * @returns VBox status code.
2081 * @param pVM The VM to operate on.
2082 * @param pInstrGC Guest context pointer to the initial privileged instruction
2083 * @param pCurInstrGC Guest context pointer to the current instruction
2084 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2085 * @param pCacheRec Cache record ptr
2086 *
2087 */
2088int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2089{
2090 DISCPUSTATE cpu;
2091 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2092 int rc = VWRN_CONTINUE_ANALYSIS;
2093 uint32_t opsize, delta;
2094 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2095 bool disret;
2096 char szOutput[256];
2097
2098 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2099
2100 /* We need this to determine branch targets (and for disassembling). */
2101 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2102
2103 while(rc == VWRN_CONTINUE_ANALYSIS)
2104 {
2105 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2106
2107 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2108 if (pCurInstrHC == NULL)
2109 {
2110 rc = VERR_PATCHING_REFUSED;
2111 goto end;
2112 }
2113
2114 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2115 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2116 {
2117 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2118
2119 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2120 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2121 else
2122 Log(("DIS %s", szOutput));
2123
2124 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2125 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2126 {
2127 rc = VINF_SUCCESS;
2128 goto end;
2129 }
2130 }
2131 else
2132 Log(("DIS: %s", szOutput));
2133
2134 if (disret == false)
2135 {
2136 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2137 rc = VINF_SUCCESS;
2138 goto end;
2139 }
2140
2141 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2142 if (rc != VWRN_CONTINUE_ANALYSIS) {
2143 break; //done!
2144 }
2145
2146 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2147 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2148 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2149 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2150 )
2151 {
2152 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2153 RTRCPTR pOrgTargetGC;
2154
2155 if (pTargetGC == 0)
2156 {
2157 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2158 rc = VERR_PATCHING_REFUSED;
2159 break;
2160 }
2161
2162 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2163 {
2164 //jump back to guest code
2165 rc = VINF_SUCCESS;
2166 goto end;
2167 }
2168 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2169
2170 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2171 {
2172 rc = VINF_SUCCESS;
2173 goto end;
2174 }
2175
2176 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2177 {
2178 /* New jump, let's check it. */
2179 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2180
2181 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2182 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2183 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2184
2185 if (rc != VINF_SUCCESS) {
2186 break; //done!
2187 }
2188 }
2189 if (cpu.pCurInstr->opcode == OP_JMP)
2190 {
2191 /* Unconditional jump; return to caller. */
2192 rc = VINF_SUCCESS;
2193 goto end;
2194 }
2195
2196 rc = VWRN_CONTINUE_ANALYSIS;
2197 }
2198 pCurInstrGC += opsize;
2199 }
2200end:
2201 return rc;
2202}
2203
2204/**
2205 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2206 *
2207 * @returns VBox status code.
2208 * @param pVM The VM to operate on.
2209 * @param pInstrGC Guest context pointer to the initial privileged instruction
2210 * @param pCurInstrGC Guest context pointer to the current instruction
2211 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2212 * @param pCacheRec Cache record ptr
2213 *
2214 */
2215int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2216{
2217 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2218
2219 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2220 /* Free all disasm jump records. */
2221 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2222 return rc;
2223}
2224
2225#endif /* LOG_ENABLED */
2226
2227/**
2228 * Detects it the specified address falls within a 5 byte jump generated for an active patch.
2229 * If so, this patch is permanently disabled.
2230 *
2231 * @param pVM The VM to operate on.
2232 * @param pInstrGC Guest context pointer to instruction
2233 * @param pConflictGC Guest context pointer to check
2234 *
2235 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2236 *
2237 */
2238VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2239{
2240 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2241 if (pTargetPatch)
2242 {
2243 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2244 }
2245 return VERR_PATCH_NO_CONFLICT;
2246}
2247
2248/**
2249 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2250 *
2251 * @returns VBox status code.
2252 * @param pVM The VM to operate on.
2253 * @param pInstrGC Guest context pointer to privileged instruction
2254 * @param pCurInstrGC Guest context pointer to the current instruction
2255 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2256 * @param pCacheRec Cache record ptr
2257 *
2258 */
2259static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2260{
2261 DISCPUSTATE cpu;
2262 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2263 int rc = VWRN_CONTINUE_ANALYSIS;
2264 uint32_t opsize;
2265 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2266 bool disret;
2267#ifdef LOG_ENABLED
2268 char szOutput[256];
2269#endif
2270
2271 while (rc == VWRN_CONTINUE_RECOMPILE)
2272 {
2273 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2274
2275 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2276 if (pCurInstrHC == NULL)
2277 {
2278 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2279 goto end;
2280 }
2281#ifdef LOG_ENABLED
2282 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2283 Log(("Recompile: %s", szOutput));
2284#else
2285 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2286#endif
2287 if (disret == false)
2288 {
2289 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2290
2291 /* Add lookup record for patch to guest address translation */
2292 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2293 patmPatchGenIllegalInstr(pVM, pPatch);
2294 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2295 goto end;
2296 }
2297
2298 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2299 if (rc != VWRN_CONTINUE_RECOMPILE)
2300 {
2301 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2302 if ( rc == VINF_SUCCESS
2303 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2304 {
2305 DISCPUSTATE cpunext;
2306 uint32_t opsizenext;
2307 uint8_t *pNextInstrHC;
2308 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2309
2310 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2311
2312 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2313 * Recompile the next instruction as well
2314 */
2315 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2316 if (pNextInstrHC == NULL)
2317 {
2318 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2319 goto end;
2320 }
2321 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2322 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2323 if (disret == false)
2324 {
2325 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2326 goto end;
2327 }
2328 switch(cpunext.pCurInstr->opcode)
2329 {
2330 case OP_IRET: /* inhibit cleared in generated code */
2331 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2332 case OP_HLT:
2333 break; /* recompile these */
2334
2335 default:
2336 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2337 {
2338 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2339
2340 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2341 AssertRC(rc);
2342 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2343 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2344 }
2345 break;
2346 }
2347
2348 /* Note: after a cli we must continue to a proper exit point */
2349 if (cpunext.pCurInstr->opcode != OP_CLI)
2350 {
2351 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2352 if (RT_SUCCESS(rc))
2353 {
2354 rc = VINF_SUCCESS;
2355 goto end;
2356 }
2357 break;
2358 }
2359 else
2360 rc = VWRN_CONTINUE_RECOMPILE;
2361 }
2362 else
2363 break; /* done! */
2364 }
2365
2366 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2367
2368
2369 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2370 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2371 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2372 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2373 )
2374 {
2375 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2376 if (addr == 0)
2377 {
2378 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2379 rc = VERR_PATCHING_REFUSED;
2380 break;
2381 }
2382
2383 Log(("Jump encountered target %RRv\n", addr));
2384
2385 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2386 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2387 {
2388 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2389 /* First we need to finish this linear code stream until the next exit point. */
2390 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2391 if (RT_FAILURE(rc))
2392 {
2393 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2394 break; //fatal error
2395 }
2396 }
2397
2398 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2399 {
2400 /* New code; let's recompile it. */
2401 Log(("patmRecompileCodeStream continue with jump\n"));
2402
2403 /*
2404 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2405 * this patch so we can continue our analysis
2406 *
2407 * We rely on CSAM to detect and resolve conflicts
2408 */
2409 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2410 if(pTargetPatch)
2411 {
2412 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2413 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2414 }
2415
2416 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2417 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2418 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2419
2420 if(pTargetPatch)
2421 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2422
2423 if (RT_FAILURE(rc))
2424 {
2425 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2426 break; //done!
2427 }
2428 }
2429 /* Always return to caller here; we're done! */
2430 rc = VINF_SUCCESS;
2431 goto end;
2432 }
2433 else
2434 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2435 {
2436 rc = VINF_SUCCESS;
2437 goto end;
2438 }
2439 pCurInstrGC += opsize;
2440 }
2441end:
2442 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2443 return rc;
2444}
2445
2446
2447/**
2448 * Generate the jump from guest to patch code
2449 *
2450 * @returns VBox status code.
2451 * @param pVM The VM to operate on.
2452 * @param pPatch Patch record
 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to register a fixup record for the jump displacement (defaults to true)
 */
static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
{
    uint8_t temp[8];    /* scratch buffer for the jump instruction bytes */
    uint8_t *pPB;       /* HC-mapped address of the guest's privileged instruction */
    int rc;

    /* The generated jump never exceeds the scratch buffer, and the guest code must not be patched yet. */
    Assert(pPatch->cbPatchJump <= sizeof(temp));
    Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    Assert(pPB);

#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
    /* Conflict case: the original instruction is itself a (conditional) jump, so preserve
     * its opcode byte(s) and only rewrite the 32-bit relative displacement. */
    if (pPatch->flags & PATMFL_JUMP_CONFLICT)
    {
        Assert(pPatch->pPatchJumpDestGC);

        if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
        {
            // jmp [PatchCode]
            if (fAddFixup)
            {
                /* Register a relocation for the displacement at offset 1 (after the 1-byte opcode). */
                if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                {
                    Log(("Relocation failed for the jump in the guest code!!\n"));
                    return VERR_PATCHING_REFUSED;
                }
            }

            temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
            /* Relative displacement = target - (instruction address + instruction size). */
            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
        }
        else
        if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
        {
            // jmp [PatchCode]
            if (fAddFixup)
            {
                /* Conditional near jumps have a 2-byte opcode (0x0F 0x8x), so the displacement is at offset 2. */
                if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                {
                    Log(("Relocation failed for the jump in the guest code!!\n"));
                    return VERR_PATCHING_REFUSED;
                }
            }

            temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
            temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
        }
        else
        {
            /* Unexpected patch jump size; refuse rather than corrupt guest code. */
            Assert(0);
            return VERR_PATCHING_REFUSED;
        }
    }
    else
#endif
    {
        /* Normal case: overwrite the privileged instruction with a 5-byte near jmp into the patch. */
        Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);

        // jmp [PatchCode]
        if (fAddFixup)
        {
            if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
            {
                Log(("Relocation failed for the jump in the guest code!!\n"));
                return VERR_PATCHING_REFUSED;
            }
        }
        temp[0] = 0xE9; //jmp
        /* Relative displacement = patch entry - (instruction address + jump size). */
        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
    }
    /* Write the jump bytes into guest memory (marks the page dirty). */
    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
    AssertRC(rc);

    if (rc == VINF_SUCCESS)
        pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;

    return rc;
}
2535
2536/**
2537 * Remove the jump from guest to patch code
2538 *
2539 * @returns VBox status code.
2540 * @param pVM The VM to operate on.
2541 * @param pPatch Patch record
2542 */
static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
{
#ifdef DEBUG
    /* Debug builds: log the disassembly of the patched-over guest bytes before restoring them. */
    DISCPUSTATE cpu;
    char szOutput[256];
    uint32_t opsize, i = 0;
    bool disret;

    while (i < pPatch->cbPrivInstr)
    {
        cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
        disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
        if (disret == false)
            break;

        Log(("Org patch jump: %s", szOutput));
        Assert(opsize);
        i += opsize;
    }
#endif

    /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
    int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
#ifdef DEBUG
    if (rc == VINF_SUCCESS)
    {
        /* Log the restored instructions to verify the write took effect. */
        i = 0;
        while(i < pPatch->cbPrivInstr)
        {
            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
            if (disret == false)
                break;

            Log(("Org instr: %s", szOutput));
            Assert(opsize);
            i += opsize;
        }
    }
#endif
    /* The guest code no longer carries our jump. */
    pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
    return rc;
}
2586
2587/**
2588 * Generate the call from guest to patch code
2589 *
2590 * @returns VBox status code.
2591 * @param pVM The VM to operate on.
2592 * @param pPatch Patch record
 * @param pTargetGC Guest context address of the patch code to call/jump to
 * @param pCacheRec Guest translation cache record
 * @param fAddFixup Whether to register a fixup record for the displacement (defaults to true)
2595 */
2596static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2597{
2598 uint8_t temp[8];
2599 uint8_t *pPB;
2600 int rc;
2601
2602 Assert(pPatch->cbPatchJump <= sizeof(temp));
2603
2604 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2605 Assert(pPB);
2606
2607 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2608
2609 // jmp [PatchCode]
2610 if (fAddFixup)
2611 {
2612 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2613 {
2614 Log(("Relocation failed for the jump in the guest code!!\n"));
2615 return VERR_PATCHING_REFUSED;
2616 }
2617 }
2618
2619 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2620 temp[0] = pPatch->aPrivInstr[0];
2621 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2622
2623 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2624 AssertRC(rc);
2625
2626 return rc;
2627}
2628
2629
2630/**
2631 * Patch cli/sti pushf/popf instruction block at specified location
2632 *
2633 * @returns VBox status code.
2634 * @param pVM The VM to operate on.
2635 * @param pInstrGC Guest context point to privileged instruction
2636 * @param pInstrHC Host context point to privileged instruction
2637 * @param uOpcode Instruction opcode
2638 * @param uOpSize Size of starting instruction
2639 * @param pPatchRec Patch record
2640 *
2641 * @note returns failure if patching is not allowed or possible
2642 *
2643 */
2644VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2645 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2646{
2647 PPATCHINFO pPatch = &pPatchRec->patch;
2648 int rc = VERR_PATCHING_REFUSED;
2649 DISCPUSTATE cpu;
2650 uint32_t orgOffsetPatchMem = ~0;
2651 RTRCPTR pInstrStart;
2652#ifdef LOG_ENABLED
2653 uint32_t opsize;
2654 char szOutput[256];
2655 bool disret;
2656#endif
2657
2658 /* Save original offset (in case of failures later on) */
2659 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2660 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2661
2662 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2663 switch (uOpcode)
2664 {
2665 case OP_MOV:
2666 break;
2667
2668 case OP_CLI:
2669 case OP_PUSHF:
2670 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2671 /* Note: special precautions are taken when disabling and enabling such patches. */
2672 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2673 break;
2674
2675 default:
2676 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2677 {
2678 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2679 return VERR_INVALID_PARAMETER;
2680 }
2681 }
2682
2683 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2684 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2685
2686 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2687 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2688 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2689 )
2690 {
2691 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2692 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2693 rc = VERR_PATCHING_REFUSED;
2694 goto failure;
2695 }
2696
2697 pPatch->nrPatch2GuestRecs = 0;
2698 pInstrStart = pInstrGC;
2699
2700#ifdef PATM_ENABLE_CALL
2701 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2702#endif
2703
2704 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2705 pPatch->uCurPatchOffset = 0;
2706
2707 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2708
2709 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2710 {
2711 Assert(pPatch->flags & PATMFL_INTHANDLER);
2712
2713 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2714 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2715 if (RT_FAILURE(rc))
2716 goto failure;
2717 }
2718
2719 /***************************************************************************************************************************/
2720 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2721 /***************************************************************************************************************************/
2722#ifdef VBOX_WITH_STATISTICS
2723 if (!(pPatch->flags & PATMFL_SYSENTER))
2724 {
2725 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2726 if (RT_FAILURE(rc))
2727 goto failure;
2728 }
2729#endif
2730
2731 PATMP2GLOOKUPREC cacheRec;
2732 RT_ZERO(cacheRec);
2733 cacheRec.pPatch = pPatch;
2734
2735 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2736 /* Free leftover lock if any. */
2737 if (cacheRec.Lock.pvMap)
2738 {
2739 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2740 cacheRec.Lock.pvMap = NULL;
2741 }
2742 if (rc != VINF_SUCCESS)
2743 {
2744 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2745 goto failure;
2746 }
2747
2748 /* Calculated during analysis. */
2749 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2750 {
2751 /* Most likely cause: we encountered an illegal instruction very early on. */
2752 /** @todo could turn it into an int3 callable patch. */
2753 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2754 rc = VERR_PATCHING_REFUSED;
2755 goto failure;
2756 }
2757
2758 /* size of patch block */
2759 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2760
2761
2762 /* Update free pointer in patch memory. */
2763 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2764 /* Round to next 8 byte boundary. */
2765 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2766
2767 /*
2768 * Insert into patch to guest lookup tree
2769 */
2770 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2771 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2772 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2773 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2774 if (!rc)
2775 {
2776 rc = VERR_PATCHING_REFUSED;
2777 goto failure;
2778 }
2779
2780 /* Note that patmr3SetBranchTargets can install additional patches!! */
2781 rc = patmr3SetBranchTargets(pVM, pPatch);
2782 if (rc != VINF_SUCCESS)
2783 {
2784 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2785 goto failure;
2786 }
2787
2788#ifdef LOG_ENABLED
2789 Log(("Patch code ----------------------------------------------------------\n"));
2790 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2791 /* Free leftover lock if any. */
2792 if (cacheRec.Lock.pvMap)
2793 {
2794 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2795 cacheRec.Lock.pvMap = NULL;
2796 }
2797 Log(("Patch code ends -----------------------------------------------------\n"));
2798#endif
2799
2800 /* make a copy of the guest code bytes that will be overwritten */
2801 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2802
2803 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2804 AssertRC(rc);
2805
2806 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2807 {
2808 /*uint8_t ASMInt3 = 0xCC; - unused */
2809
2810 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2811 /* Replace first opcode byte with 'int 3'. */
2812 rc = patmActivateInt3Patch(pVM, pPatch);
2813 if (RT_FAILURE(rc))
2814 goto failure;
2815
2816 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2817 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2818
2819 pPatch->flags &= ~PATMFL_INSTR_HINT;
2820 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2821 }
2822 else
2823 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2824 {
2825 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2826 /* now insert a jump in the guest code */
2827 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2828 AssertRC(rc);
2829 if (RT_FAILURE(rc))
2830 goto failure;
2831
2832 }
2833
2834#ifdef LOG_ENABLED
2835 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2836 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
2837 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2838#endif
2839
2840 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2841 pPatch->pTempInfo->nrIllegalInstr = 0;
2842
2843 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2844
2845 pPatch->uState = PATCH_ENABLED;
2846 return VINF_SUCCESS;
2847
2848failure:
2849 if (pPatchRec->CoreOffset.Key)
2850 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2851
2852 patmEmptyTree(pVM, &pPatch->FixupTree);
2853 pPatch->nrFixups = 0;
2854
2855 patmEmptyTree(pVM, &pPatch->JumpTree);
2856 pPatch->nrJumpRecs = 0;
2857
2858 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2859 pPatch->pTempInfo->nrIllegalInstr = 0;
2860
2861 /* Turn this cli patch into a dummy. */
2862 pPatch->uState = PATCH_REFUSED;
2863 pPatch->pPatchBlockOffset = 0;
2864
2865 // Give back the patch memory we no longer need
2866 Assert(orgOffsetPatchMem != (uint32_t)~0);
2867 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2868
2869 return rc;
2870}
2871
2872/**
2873 * Patch IDT handler
2874 *
2875 * @returns VBox status code.
2876 * @param pVM The VM to operate on.
2877 * @param pInstrGC Guest context point to privileged instruction
2878 * @param uOpSize Size of starting instruction
2879 * @param pPatchRec Patch record
2880 * @param pCacheRec Cache record ptr
2881 *
2882 * @note returns failure if patching is not allowed or possible
2883 *
2884 */
2885static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2886{
2887 PPATCHINFO pPatch = &pPatchRec->patch;
2888 bool disret;
2889 DISCPUSTATE cpuPush, cpuJmp;
2890 uint32_t opsize;
2891 RTRCPTR pCurInstrGC = pInstrGC;
2892 uint8_t *pCurInstrHC, *pInstrHC;
2893 uint32_t orgOffsetPatchMem = ~0;
2894
2895 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2896 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2897
2898 /*
2899 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2900 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2901 * condition here and only patch the common entypoint once.
2902 */
2903 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2904 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2905 Assert(disret);
2906 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2907 {
2908 RTRCPTR pJmpInstrGC;
2909 int rc;
2910 pCurInstrGC += opsize;
2911
2912 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2913 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2914 if ( disret
2915 && cpuJmp.pCurInstr->opcode == OP_JMP
2916 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2917 )
2918 {
2919 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2920 if (pJmpPatch == 0)
2921 {
2922 /* Patch it first! */
2923 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2924 if (rc != VINF_SUCCESS)
2925 goto failure;
2926 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2927 Assert(pJmpPatch);
2928 }
2929 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2930 goto failure;
2931
2932 /* save original offset (in case of failures later on) */
2933 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2934
2935 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2936 pPatch->uCurPatchOffset = 0;
2937 pPatch->nrPatch2GuestRecs = 0;
2938
2939#ifdef VBOX_WITH_STATISTICS
2940 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2941 if (RT_FAILURE(rc))
2942 goto failure;
2943#endif
2944
2945 /* Install fake cli patch (to clear the virtual IF) */
2946 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2947 if (RT_FAILURE(rc))
2948 goto failure;
2949
2950 /* Add lookup record for patch to guest address translation (for the push) */
2951 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2952
2953 /* Duplicate push. */
2954 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2955 if (RT_FAILURE(rc))
2956 goto failure;
2957
2958 /* Generate jump to common entrypoint. */
2959 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2960 if (RT_FAILURE(rc))
2961 goto failure;
2962
2963 /* size of patch block */
2964 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2965
2966 /* Update free pointer in patch memory. */
2967 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2968 /* Round to next 8 byte boundary */
2969 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2970
2971 /* There's no jump from guest to patch code. */
2972 pPatch->cbPatchJump = 0;
2973
2974
2975#ifdef LOG_ENABLED
2976 Log(("Patch code ----------------------------------------------------------\n"));
2977 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
2978 Log(("Patch code ends -----------------------------------------------------\n"));
2979#endif
2980 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2981
2982 /*
2983 * Insert into patch to guest lookup tree
2984 */
2985 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2986 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2987 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2988 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2989
2990 pPatch->uState = PATCH_ENABLED;
2991
2992 return VINF_SUCCESS;
2993 }
2994 }
2995failure:
2996 /* Give back the patch memory we no longer need */
2997 if (orgOffsetPatchMem != (uint32_t)~0)
2998 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2999
3000 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3001}
3002
3003/**
3004 * Install a trampoline to call a guest trap handler directly
3005 *
3006 * @returns VBox status code.
3007 * @param pVM The VM to operate on.
3008 * @param pInstrGC Guest context point to privileged instruction
3009 * @param pPatchRec Patch record
3010 * @param pCacheRec Cache record ptr
3011 *
3012 */
3013static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3014{
3015 PPATCHINFO pPatch = &pPatchRec->patch;
3016 int rc = VERR_PATCHING_REFUSED;
3017 uint32_t orgOffsetPatchMem = ~0;
3018#ifdef LOG_ENABLED
3019 bool disret;
3020 DISCPUSTATE cpu;
3021 uint32_t opsize;
3022 char szOutput[256];
3023#endif
3024
3025 // save original offset (in case of failures later on)
3026 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3027
3028 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3029 pPatch->uCurPatchOffset = 0;
3030 pPatch->nrPatch2GuestRecs = 0;
3031
3032#ifdef VBOX_WITH_STATISTICS
3033 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3034 if (RT_FAILURE(rc))
3035 goto failure;
3036#endif
3037
3038 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3039 if (RT_FAILURE(rc))
3040 goto failure;
3041
3042 /* size of patch block */
3043 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3044
3045 /* Update free pointer in patch memory. */
3046 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3047 /* Round to next 8 byte boundary */
3048 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3049
3050 /* There's no jump from guest to patch code. */
3051 pPatch->cbPatchJump = 0;
3052
3053#ifdef LOG_ENABLED
3054 Log(("Patch code ----------------------------------------------------------\n"));
3055 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3056 Log(("Patch code ends -----------------------------------------------------\n"));
3057#endif
3058
3059#ifdef LOG_ENABLED
3060 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3061 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3062 Log(("TRAP handler patch: %s", szOutput));
3063#endif
3064 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3065
3066 /*
3067 * Insert into patch to guest lookup tree
3068 */
3069 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3070 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3071 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3072 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3073
3074 pPatch->uState = PATCH_ENABLED;
3075 return VINF_SUCCESS;
3076
3077failure:
3078 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3079
3080 /* Turn this cli patch into a dummy. */
3081 pPatch->uState = PATCH_REFUSED;
3082 pPatch->pPatchBlockOffset = 0;
3083
3084 /* Give back the patch memory we no longer need */
3085 Assert(orgOffsetPatchMem != (uint32_t)~0);
3086 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3087
3088 return rc;
3089}
3090
3091
#ifdef LOG_ENABLED
/**
 * Check if the instruction is patched as a common idt handler
 *
 * @returns true or false
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Guest context point to the instruction
 *
 */
static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
{
    /* Shared IDT entrypoints are marked PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT. */
    PPATMPATCHREC pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    return pRec
        && (pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
}
#endif /* LOG_ENABLED */
3111
3112
3113/**
3114 * Duplicates a complete function
3115 *
3116 * @returns VBox status code.
3117 * @param pVM The VM to operate on.
3118 * @param pInstrGC Guest context point to privileged instruction
3119 * @param pPatchRec Patch record
3120 * @param pCacheRec Cache record ptr
3121 *
3122 */
3123static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3124{
3125 PPATCHINFO pPatch = &pPatchRec->patch;
3126 int rc = VERR_PATCHING_REFUSED;
3127 DISCPUSTATE cpu;
3128 uint32_t orgOffsetPatchMem = ~0;
3129
3130 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3131 /* Save original offset (in case of failures later on). */
3132 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3133
3134 /* We will not go on indefinitely with call instruction handling. */
3135 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3136 {
3137 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3138 return VERR_PATCHING_REFUSED;
3139 }
3140
3141 pVM->patm.s.ulCallDepth++;
3142
3143#ifdef PATM_ENABLE_CALL
3144 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3145#endif
3146
3147 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3148
3149 pPatch->nrPatch2GuestRecs = 0;
3150 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3151 pPatch->uCurPatchOffset = 0;
3152
3153 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3154
3155 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3156 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3157 if (RT_FAILURE(rc))
3158 goto failure;
3159
3160#ifdef VBOX_WITH_STATISTICS
3161 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3162 if (RT_FAILURE(rc))
3163 goto failure;
3164#endif
3165
3166 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3167 if (rc != VINF_SUCCESS)
3168 {
3169 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3170 goto failure;
3171 }
3172
3173 //size of patch block
3174 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3175
3176 //update free pointer in patch memory
3177 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3178 /* Round to next 8 byte boundary. */
3179 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3180
3181 pPatch->uState = PATCH_ENABLED;
3182
3183 /*
3184 * Insert into patch to guest lookup tree
3185 */
3186 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3187 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3188 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3189 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3190 if (!rc)
3191 {
3192 rc = VERR_PATCHING_REFUSED;
3193 goto failure;
3194 }
3195
3196 /* Note that patmr3SetBranchTargets can install additional patches!! */
3197 rc = patmr3SetBranchTargets(pVM, pPatch);
3198 if (rc != VINF_SUCCESS)
3199 {
3200 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3201 goto failure;
3202 }
3203
3204#ifdef LOG_ENABLED
3205 Log(("Patch code ----------------------------------------------------------\n"));
3206 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3207 Log(("Patch code ends -----------------------------------------------------\n"));
3208#endif
3209
3210 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3211
3212 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3213 pPatch->pTempInfo->nrIllegalInstr = 0;
3214
3215 pVM->patm.s.ulCallDepth--;
3216 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3217 return VINF_SUCCESS;
3218
3219failure:
3220 if (pPatchRec->CoreOffset.Key)
3221 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3222
3223 patmEmptyTree(pVM, &pPatch->FixupTree);
3224 pPatch->nrFixups = 0;
3225
3226 patmEmptyTree(pVM, &pPatch->JumpTree);
3227 pPatch->nrJumpRecs = 0;
3228
3229 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3230 pPatch->pTempInfo->nrIllegalInstr = 0;
3231
3232 /* Turn this cli patch into a dummy. */
3233 pPatch->uState = PATCH_REFUSED;
3234 pPatch->pPatchBlockOffset = 0;
3235
3236 // Give back the patch memory we no longer need
3237 Assert(orgOffsetPatchMem != (uint32_t)~0);
3238 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3239
3240 pVM->patm.s.ulCallDepth--;
3241 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3242 return rc;
3243}
3244
3245/**
3246 * Creates trampoline code to jump inside an existing patch
3247 *
3248 * @returns VBox status code.
3249 * @param pVM The VM to operate on.
3250 * @param pInstrGC Guest context point to privileged instruction
3251 * @param pPatchRec Patch record
3252 *
3253 */
3254static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3255{
3256 PPATCHINFO pPatch = &pPatchRec->patch;
3257 RTRCPTR pPage, pPatchTargetGC = 0;
3258 uint32_t orgOffsetPatchMem = ~0;
3259 int rc = VERR_PATCHING_REFUSED;
3260
3261 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3262 /* Save original offset (in case of failures later on). */
3263 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3264
3265 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3266 /** @todo we already checked this before */
3267 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3268
3269 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3270 if (pPatchPage)
3271 {
3272 uint32_t i;
3273
3274 for (i=0;i<pPatchPage->cCount;i++)
3275 {
3276 if (pPatchPage->aPatch[i])
3277 {
3278 PPATCHINFO pPatch2 = pPatchPage->aPatch[i];
3279
3280 if ( (pPatch2->flags & PATMFL_DUPLICATE_FUNCTION)
3281 && pPatch2->uState == PATCH_ENABLED)
3282 {
3283 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch2, pInstrGC);
3284 if (pPatchTargetGC)
3285 {
3286 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3287 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch2->Patch2GuestAddrTree, offsetPatch, false);
3288 Assert(pPatchToGuestRec);
3289
3290 pPatchToGuestRec->fJumpTarget = true;
3291 Assert(pPatchTargetGC != pPatch2->pPrivInstrGC);
3292 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch2->pPrivInstrGC));
3293 pPatch2->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3294 break;
3295 }
3296 }
3297 }
3298 }
3299 }
3300 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3301
3302 pPatch->nrPatch2GuestRecs = 0;
3303 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3304 pPatch->uCurPatchOffset = 0;
3305
3306 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3307 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3308 if (RT_FAILURE(rc))
3309 goto failure;
3310
3311#ifdef VBOX_WITH_STATISTICS
3312 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3313 if (RT_FAILURE(rc))
3314 goto failure;
3315#endif
3316
3317 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3318 if (RT_FAILURE(rc))
3319 goto failure;
3320
3321 /*
3322 * Insert into patch to guest lookup tree
3323 */
3324 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3325 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3326 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3327 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3328 if (!rc)
3329 {
3330 rc = VERR_PATCHING_REFUSED;
3331 goto failure;
3332 }
3333
3334 /* size of patch block */
3335 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3336
3337 /* Update free pointer in patch memory. */
3338 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3339 /* Round to next 8 byte boundary */
3340 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3341
3342 /* There's no jump from guest to patch code. */
3343 pPatch->cbPatchJump = 0;
3344
3345 /* Enable the patch. */
3346 pPatch->uState = PATCH_ENABLED;
3347 /* We allow this patch to be called as a function. */
3348 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3349 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3350 return VINF_SUCCESS;
3351
3352failure:
3353 if (pPatchRec->CoreOffset.Key)
3354 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3355
3356 patmEmptyTree(pVM, &pPatch->FixupTree);
3357 pPatch->nrFixups = 0;
3358
3359 patmEmptyTree(pVM, &pPatch->JumpTree);
3360 pPatch->nrJumpRecs = 0;
3361
3362 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3363 pPatch->pTempInfo->nrIllegalInstr = 0;
3364
3365 /* Turn this cli patch into a dummy. */
3366 pPatch->uState = PATCH_REFUSED;
3367 pPatch->pPatchBlockOffset = 0;
3368
3369 // Give back the patch memory we no longer need
3370 Assert(orgOffsetPatchMem != (uint32_t)~0);
3371 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3372
3373 return rc;
3374}
3375
3376
3377/**
3378 * Patch branch target function for call/jump at specified location.
3379 * (in responds to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3380 *
3381 * @returns VBox status code.
3382 * @param pVM The VM to operate on.
3383 * @param pCtx Guest context
3384 *
3385 */
3386VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3387{
3388 RTRCPTR pBranchTarget, pPage;
3389 int rc;
3390 RTRCPTR pPatchTargetGC = 0;
3391
3392 pBranchTarget = pCtx->edx;
3393 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3394
3395 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3396 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3397
3398 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3399 if (pPatchPage)
3400 {
3401 uint32_t i;
3402
3403 for (i=0;i<pPatchPage->cCount;i++)
3404 {
3405 if (pPatchPage->aPatch[i])
3406 {
3407 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3408
3409 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3410 && pPatch->uState == PATCH_ENABLED)
3411 {
3412 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3413 if (pPatchTargetGC)
3414 {
3415 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3416 break;
3417 }
3418 }
3419 }
3420 }
3421 }
3422
3423 if (pPatchTargetGC)
3424 {
3425 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3426 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3427 }
3428 else
3429 {
3430 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3431 }
3432
3433 if (rc == VINF_SUCCESS)
3434 {
3435 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3436 Assert(pPatchTargetGC);
3437 }
3438
3439 if (pPatchTargetGC)
3440 {
3441 pCtx->eax = pPatchTargetGC;
3442 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3443 }
3444 else
3445 {
3446 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3447 pCtx->eax = 0;
3448 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3449 }
3450 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3451 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3452 AssertRC(rc);
3453
3454 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3455 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3456 return VINF_SUCCESS;
3457}
3458
3459/**
3460 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3461 *
3462 * @returns VBox status code.
3463 * @param pVM The VM to operate on.
3464 * @param pCpu Disassembly CPU structure ptr
3465 * @param pInstrGC Guest context point to privileged instruction
3466 * @param pCacheRec Cache record ptr
3467 *
3468 */
static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    int rc = VERR_PATCHING_REFUSED;
    DISCPUSTATE cpu;
    RTRCPTR pTargetGC;
    PPATMPATCHREC pPatchFunction;
    uint32_t opsize;
    bool disret;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
    Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);

    /* Only near relative call/jmp instructions of exactly SIZEOF_NEARJUMP32 bytes can be replaced. */
    if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
    {
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Resolve the branch target; returns 0 for unsupported forms (e.g. far jumps). */
    pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
    if (pTargetGC == 0)
    {
        Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Look up the existing function duplicate for the branch target. */
    pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
    if (pPatchFunction == NULL)
    {
        for(;;)
        {
            /* It could be an indirect call (call -> jmp dest).
             * Note that it's dangerous to assume the jump will never change...
             */
            uint8_t *pTmpInstrHC;

            pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
            Assert(pTmpInstrHC);
            if (pTmpInstrHC == 0)
                break;

            /* Disassemble the instruction at the target; if it's a jmp, follow it one level and retry. */
            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
            if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
                break;

            pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
            if (pTargetGC == 0)
            {
                break;
            }

            pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
            break;
        }
        if (pPatchFunction == 0)
        {
            AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }
    }

    // make a copy of the guest code bytes that will be overwritten
    pPatch->cbPatchJump = SIZEOF_NEARJUMP32;

    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
    AssertRC(rc);

    /* Now replace the original call in the guest code */
    rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("Call patch: %s", szOutput));
#endif

    Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3569
/**
 * Replace the address in an MMIO instruction with the cached version.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction
 * @param pCpu Disassembly CPU structure ptr
 * @param pCacheRec Cache record ptr
 *
 * @note returns failure if patching is not allowed or possible
 *
 */
static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    uint8_t *pPB;
    int rc = VERR_PATCHING_REFUSED;
#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    char szOutput[256];
#endif

    /* The caller must have set up a cached MMIO address in pVM->patm.s.mmio. */
    Assert(pVM->patm.s.mmio.pCachedData);
    if (!pVM->patm.s.mmio.pCachedData)
        goto failure;

    /* Only instructions whose second operand is a 32-bit displacement can be rewritten here. */
    if (pCpu->param2.flags != USE_DISPLACEMENT32)
        goto failure;

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    if (pPB == 0)
        goto failure;

    /* Add relocation record for cached data access. */
    if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch old instruction: %s", szOutput));
#endif

    /* Save original instruction. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);

    pPatch->cbPatchJump = pPatch->cbPrivInstr;   /* bit of a misnomer in this case; size of replacement instruction. */

    /* Replace address with that of the cached item. The displacement occupies the last sizeof(RTRCPTR) bytes of the instruction. */
    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        goto failure;
    }

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif
    /* Consume the cache entry so it cannot be reused for another instruction. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3647
3648
3649/**
3650 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3651 *
3652 * @returns VBox status code.
3653 * @param pVM The VM to operate on.
3654 * @param pInstrGC Guest context point to privileged instruction
3655 * @param pPatch Patch record
3656 *
3657 * @note returns failure if patching is not allowed or possible
3658 *
3659 */
static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
{
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    uint8_t *pInstrHC;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    /* The caller must have set up a cached MMIO address in pVM->patm.s.mmio. */
    AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);

    /* Convert GC to HC address. */
    pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
    AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);

    /* Disassemble mmio instruction. */
    cpu.mode = pPatch->uOpMode;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
    if (disret == false)
    {
        Log(("Disassembly failed (probably page not present) -> return to caller\n"));
        return VERR_PATCHING_REFUSED;
    }

    AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
    if (opsize > MAX_INSTR_SIZE)
        return VERR_PATCHING_REFUSED;
    /* Only instructions whose second operand is a 32-bit displacement can be rewritten here. */
    if (cpu.param2.flags != USE_DISPLACEMENT32)
        return VERR_PATCHING_REFUSED;

    /* Add relocation record for cached data access. */
    if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
    /* Replace address with that of the cached item. Direct write is safe here: the instruction lives in patch memory, not guest pages. */
    *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif

    /* Consume the cache entry so it cannot be reused for another instruction. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    return VINF_SUCCESS;
}
3714
3715/**
3716 * Activates an int3 patch
3717 *
3718 * @returns VBox status code.
3719 * @param pVM The VM to operate on.
3720 * @param pPatch Patch record
3721 */
3722static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3723{
3724 uint8_t ASMInt3 = 0xCC;
3725 int rc;
3726
3727 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3728 Assert(pPatch->uState != PATCH_ENABLED);
3729
3730 /* Replace first opcode byte with 'int 3'. */
3731 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3732 AssertRC(rc);
3733
3734 pPatch->cbPatchJump = sizeof(ASMInt3);
3735
3736 return rc;
3737}
3738
3739/**
3740 * Deactivates an int3 patch
3741 *
3742 * @returns VBox status code.
3743 * @param pVM The VM to operate on.
3744 * @param pPatch Patch record
3745 */
3746static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3747{
3748 uint8_t ASMInt3 = 0xCC;
3749 int rc;
3750
3751 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3752 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3753
3754 /* Restore first opcode byte. */
3755 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3756 AssertRC(rc);
3757 return rc;
3758}
3759
/**
 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction
 * @param pInstrHC Host context pointer to the privileged instruction
 * @param pCpu Disassembly CPU structure ptr
 * @param pPatch Patch record
 *
 * @note returns failure if patching is not allowed or possible
 *
 */
VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
{
    uint8_t ASMInt3 = 0xCC;
    int rc;

    /* Note: Do not use patch memory here! It might be called during patch installation too. */

#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    char szOutput[256];
    uint32_t opsize;

    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("PATMR3PatchInstrInt3: %s", szOutput));
#endif

    /* Save the original instruction so the patch can be deactivated later. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);
    pPatch->cbPatchJump = sizeof(ASMInt3);  /* bit of a misnomer in this case; size of replacement instruction. */

    pPatch->flags |= PATMFL_INT3_REPLACEMENT;

    /* Replace first opcode byte with 'int 3'. */
    rc = patmActivateInt3Patch(pVM, pPatch);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    /* NOTE(review): unlike sibling failure paths, uState is not set to PATCH_REFUSED here;
     * presumably the caller owns the state in this path - confirm before changing. */
    return VERR_PATCHING_REFUSED;
}
3813
3814#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3815/**
3816 * Patch a jump instruction at specified location
3817 *
3818 * @returns VBox status code.
3819 * @param pVM The VM to operate on.
3820 * @param pInstrGC Guest context point to privileged instruction
3821 * @param pInstrHC Host context point to privileged instruction
3822 * @param pCpu Disassembly CPU structure ptr
3823 * @param pPatchRec Patch record
3824 *
3825 * @note returns failure if patching is not allowed or possible
3826 *
3827 */
3828int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3829{
3830 PPATCHINFO pPatch = &pPatchRec->patch;
3831 int rc = VERR_PATCHING_REFUSED;
3832#ifdef LOG_ENABLED
3833 bool disret;
3834 DISCPUSTATE cpu;
3835 uint32_t opsize;
3836 char szOutput[256];
3837#endif
3838
3839 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3840 pPatch->uCurPatchOffset = 0;
3841 pPatch->cbPatchBlockSize = 0;
3842 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3843
3844 /*
3845 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3846 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3847 */
3848 switch (pCpu->pCurInstr->opcode)
3849 {
3850 case OP_JO:
3851 case OP_JNO:
3852 case OP_JC:
3853 case OP_JNC:
3854 case OP_JE:
3855 case OP_JNE:
3856 case OP_JBE:
3857 case OP_JNBE:
3858 case OP_JS:
3859 case OP_JNS:
3860 case OP_JP:
3861 case OP_JNP:
3862 case OP_JL:
3863 case OP_JNL:
3864 case OP_JLE:
3865 case OP_JNLE:
3866 case OP_JMP:
3867 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3868 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3869 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3870 goto failure;
3871
3872 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3873 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3874 goto failure;
3875
3876 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3877 {
3878 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3879 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3880 rc = VERR_PATCHING_REFUSED;
3881 goto failure;
3882 }
3883
3884 break;
3885
3886 default:
3887 goto failure;
3888 }
3889
3890 // make a copy of the guest code bytes that will be overwritten
3891 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3892 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3893 pPatch->cbPatchJump = pCpu->opsize;
3894
3895 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3896 AssertRC(rc);
3897
3898 /* Now insert a jump in the guest code. */
3899 /*
3900 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3901 * references the target instruction in the conflict patch.
3902 */
3903 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3904
3905 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3906 pPatch->pPatchJumpDestGC = pJmpDest;
3907
3908 PATMP2GLOOKUPREC cacheRec;
3909 RT_ZERO(cacheRec);
3910 cacheRec.pPatch = pPatch;
3911
3912 rc = patmGenJumpToPatch(pVM, pPatch, &cacherec, true);
3913 /* Free leftover lock if any. */
3914 if (cacheRec.Lock.pvMap)
3915 {
3916 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3917 cacheRec.Lock.pvMap = NULL;
3918 }
3919 AssertRC(rc);
3920 if (RT_FAILURE(rc))
3921 goto failure;
3922
3923 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3924
3925#ifdef LOG_ENABLED
3926 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3927 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3928 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3929#endif
3930
3931 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3932
3933 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3934
3935 /* Lowest and highest address for write monitoring. */
3936 pPatch->pInstrGCLowest = pInstrGC;
3937 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3938
3939 pPatch->uState = PATCH_ENABLED;
3940 return VINF_SUCCESS;
3941
3942failure:
3943 /* Turn this cli patch into a dummy. */
3944 pPatch->uState = PATCH_REFUSED;
3945
3946 return rc;
3947}
3948#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3949
3950
/**
 * Gives hint to PATM about supervisor guest instructions
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction
 * @param flags Patch flags
 */
3959VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3960{
3961 Assert(pInstrGC);
3962 Assert(flags == PATMFL_CODE32);
3963
3964 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3965 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3966}
3967
/**
 * Patch privileged instruction at specified location
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
 * @param flags Patch flags
 *
 * @note returns failure if patching is not allowed or possible
 */
3978VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3979{
3980 DISCPUSTATE cpu;
3981 R3PTRTYPE(uint8_t *) pInstrHC;
3982 uint32_t opsize;
3983 PPATMPATCHREC pPatchRec;
3984 PCPUMCTX pCtx = 0;
3985 bool disret;
3986 int rc;
3987 PVMCPU pVCpu = VMMGetCpu0(pVM);
3988
3989 if ( !pVM
3990 || pInstrGC == 0
3991 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3992 {
3993 AssertFailed();
3994 return VERR_INVALID_PARAMETER;
3995 }
3996
3997 if (PATMIsEnabled(pVM) == false)
3998 return VERR_PATCHING_REFUSED;
3999
4000 /* Test for patch conflict only with patches that actually change guest code. */
4001 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4002 {
4003 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4004 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4005 if (pConflictPatch != 0)
4006 return VERR_PATCHING_REFUSED;
4007 }
4008
4009 if (!(flags & PATMFL_CODE32))
4010 {
4011 /** @todo Only 32 bits code right now */
4012 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4013 return VERR_NOT_IMPLEMENTED;
4014 }
4015
4016 /* We ran out of patch memory; don't bother anymore. */
4017 if (pVM->patm.s.fOutOfMemory == true)
4018 return VERR_PATCHING_REFUSED;
4019
4020 /* Make sure the code selector is wide open; otherwise refuse. */
4021 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4022 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4023 {
4024 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4025 if (pInstrGCFlat != pInstrGC)
4026 {
4027 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4028 return VERR_PATCHING_REFUSED;
4029 }
4030 }
4031
4032 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
4033 if (!(flags & PATMFL_GUEST_SPECIFIC))
4034 {
4035 /* New code. Make sure CSAM has a go at it first. */
4036 CSAMR3CheckCode(pVM, pInstrGC);
4037 }
4038
4039 /* Note: obsolete */
4040 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4041 && (flags & PATMFL_MMIO_ACCESS))
4042 {
4043 RTRCUINTPTR offset;
4044 void *pvPatchCoreOffset;
4045
4046 /* Find the patch record. */
4047 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4048 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4049 if (pvPatchCoreOffset == NULL)
4050 {
4051 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4052 return VERR_PATCH_NOT_FOUND; //fatal error
4053 }
4054 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4055
4056 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4057 }
4058
4059 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4060
4061 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4062 if (pPatchRec)
4063 {
4064 Assert(!(flags & PATMFL_TRAMPOLINE));
4065
4066 /* Hints about existing patches are ignored. */
4067 if (flags & PATMFL_INSTR_HINT)
4068 return VERR_PATCHING_REFUSED;
4069
4070 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4071 {
4072 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4073 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4074 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4075 }
4076
4077 if (pPatchRec->patch.uState == PATCH_DISABLED)
4078 {
4079 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4080 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4081 {
4082 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4083 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4084 }
4085 else
4086 Log(("Enabling patch %RRv again\n", pInstrGC));
4087
4088 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4089 rc = PATMR3EnablePatch(pVM, pInstrGC);
4090 if (RT_SUCCESS(rc))
4091 return VWRN_PATCH_ENABLED;
4092
4093 return rc;
4094 }
4095 if ( pPatchRec->patch.uState == PATCH_ENABLED
4096 || pPatchRec->patch.uState == PATCH_DIRTY)
4097 {
4098 /*
4099 * The patch might have been overwritten.
4100 */
4101 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4102 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4103 {
4104 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4105 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4106 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4107 {
4108 if (flags & PATMFL_IDTHANDLER)
4109 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4110
4111 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4112 }
4113 }
4114 rc = PATMR3RemovePatch(pVM, pInstrGC);
4115 if (RT_FAILURE(rc))
4116 return VERR_PATCHING_REFUSED;
4117 }
4118 else
4119 {
4120 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4121 /* already tried it once! */
4122 return VERR_PATCHING_REFUSED;
4123 }
4124 }
4125
4126 RTGCPHYS GCPhys;
4127 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4128 if (rc != VINF_SUCCESS)
4129 {
4130 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4131 return rc;
4132 }
4133 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4134 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4135 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4136 {
4137 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4138 return VERR_PATCHING_REFUSED;
4139 }
4140
4141 /* Initialize cache record for guest address translations. */
4142 PATMP2GLOOKUPREC cacheRec;
4143 RT_ZERO(cacheRec);
4144
4145 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4146 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4147
4148 /* Allocate patch record. */
4149 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4150 if (RT_FAILURE(rc))
4151 {
4152 Log(("Out of memory!!!!\n"));
4153 return VERR_NO_MEMORY;
4154 }
4155 pPatchRec->Core.Key = pInstrGC;
4156 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4157 /* Insert patch record into the lookup tree. */
4158 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4159 Assert(rc);
4160
4161 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4162 pPatchRec->patch.flags = flags;
4163 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4164
4165 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4166 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4167
4168 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4169 {
4170 /*
4171 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4172 */
4173 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4174 if (pPatchNear)
4175 {
4176 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4177 {
4178 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4179
4180 pPatchRec->patch.uState = PATCH_UNUSABLE;
4181 /*
4182 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4183 */
4184 return VERR_PATCHING_REFUSED;
4185 }
4186 }
4187 }
4188
4189 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4190 if (pPatchRec->patch.pTempInfo == 0)
4191 {
4192 Log(("Out of memory!!!!\n"));
4193 return VERR_NO_MEMORY;
4194 }
4195
4196 cpu.mode = pPatchRec->patch.uOpMode;
4197 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4198 if (disret == false)
4199 {
4200 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4201 return VERR_PATCHING_REFUSED;
4202 }
4203
4204 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4205 if (opsize > MAX_INSTR_SIZE)
4206 return VERR_PATCHING_REFUSED;
4207
4208 pPatchRec->patch.cbPrivInstr = opsize;
4209 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4210
4211 /* Restricted hinting for now. */
4212 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4213
4214 /* Initialize cache record patch pointer. */
4215 cacheRec.pPatch = &pPatchRec->patch;
4216
4217 /* Allocate statistics slot */
4218 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4219 {
4220 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4221 }
4222 else
4223 {
4224 Log(("WARNING: Patch index wrap around!!\n"));
4225 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4226 }
4227
4228 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4229 {
4230 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4231 }
4232 else
4233 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4234 {
4235 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4236 }
4237 else
4238 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4239 {
4240 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4241 }
4242 else
4243 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4244 {
4245 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4246 }
4247 else
4248 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4249 {
4250 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4251 }
4252 else
4253 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4254 {
4255 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4256 }
4257 else
4258 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4259 {
4260 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4261 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4262
4263 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4264#ifdef VBOX_WITH_STATISTICS
4265 if ( rc == VINF_SUCCESS
4266 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4267 {
4268 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4269 }
4270#endif
4271 }
4272 else
4273 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4274 {
4275 switch (cpu.pCurInstr->opcode)
4276 {
4277 case OP_SYSENTER:
4278 case OP_PUSH:
4279 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4280 if (rc == VINF_SUCCESS)
4281 {
4282 if (rc == VINF_SUCCESS)
4283 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4284 return rc;
4285 }
4286 break;
4287
4288 default:
4289 rc = VERR_NOT_IMPLEMENTED;
4290 break;
4291 }
4292 }
4293 else
4294 {
4295 switch (cpu.pCurInstr->opcode)
4296 {
4297 case OP_SYSENTER:
4298 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4299 if (rc == VINF_SUCCESS)
4300 {
4301 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4302 return VINF_SUCCESS;
4303 }
4304 break;
4305
4306#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4307 case OP_JO:
4308 case OP_JNO:
4309 case OP_JC:
4310 case OP_JNC:
4311 case OP_JE:
4312 case OP_JNE:
4313 case OP_JBE:
4314 case OP_JNBE:
4315 case OP_JS:
4316 case OP_JNS:
4317 case OP_JP:
4318 case OP_JNP:
4319 case OP_JL:
4320 case OP_JNL:
4321 case OP_JLE:
4322 case OP_JNLE:
4323 case OP_JECXZ:
4324 case OP_LOOP:
4325 case OP_LOOPNE:
4326 case OP_LOOPE:
4327 case OP_JMP:
4328 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4329 {
4330 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4331 break;
4332 }
4333 return VERR_NOT_IMPLEMENTED;
4334#endif
4335
4336 case OP_PUSHF:
4337 case OP_CLI:
4338 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4339 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4340 break;
4341
4342 case OP_STR:
4343 case OP_SGDT:
4344 case OP_SLDT:
4345 case OP_SIDT:
4346 case OP_CPUID:
4347 case OP_LSL:
4348 case OP_LAR:
4349 case OP_SMSW:
4350 case OP_VERW:
4351 case OP_VERR:
4352 case OP_IRET:
4353 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4354 break;
4355
4356 default:
4357 return VERR_NOT_IMPLEMENTED;
4358 }
4359 }
4360
4361 if (rc != VINF_SUCCESS)
4362 {
4363 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4364 {
4365 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4366 pPatchRec->patch.nrPatch2GuestRecs = 0;
4367 }
4368 pVM->patm.s.uCurrentPatchIdx--;
4369 }
4370 else
4371 {
4372 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4373 AssertRCReturn(rc, rc);
4374
4375 /* Keep track upper and lower boundaries of patched instructions */
4376 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4377 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4378 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4379 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4380
4381 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4382 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4383
4384 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4385 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4386
4387 rc = VINF_SUCCESS;
4388
4389 /* Patch hints are not enabled by default. Only when the are actually encountered. */
4390 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4391 {
4392 rc = PATMR3DisablePatch(pVM, pInstrGC);
4393 AssertRCReturn(rc, rc);
4394 }
4395
4396#ifdef VBOX_WITH_STATISTICS
4397 /* Register statistics counter */
4398 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4399 {
4400 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4401 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4402#ifndef DEBUG_sandervl
4403 /* Full breakdown for the GUI. */
4404 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4405 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4406 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4407 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4408 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4409 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4410 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4411 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4412 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4413 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4414 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4415 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4416 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4417 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4418 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4419 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4420#endif
4421 }
4422#endif
4423 }
4424 /* Free leftover lock if any. */
4425 if (cacheRec.Lock.pvMap)
4426 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4427 return rc;
4428}
4429
4430/**
4431 * Query instruction size
4432 *
4433 * @returns VBox status code.
4434 * @param pVM The VM to operate on.
4435 * @param pPatch Patch record
4436 * @param pInstrGC Instruction address
4437 */
4438static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4439{
4440 uint8_t *pInstrHC;
4441 PGMPAGEMAPLOCK Lock;
4442
4443 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4444 if (rc == VINF_SUCCESS)
4445 {
4446 DISCPUSTATE cpu;
4447 bool disret;
4448 uint32_t opsize;
4449
4450 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4451 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4452 PGMPhysReleasePageMappingLock(pVM, &Lock);
4453 if (disret)
4454 return opsize;
4455 }
4456 return 0;
4457}
4458
4459/**
4460 * Add patch to page record
4461 *
4462 * @returns VBox status code.
4463 * @param pVM The VM to operate on.
4464 * @param pPage Page address
4465 * @param pPatch Patch record
4466 */
4467int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4468{
4469 PPATMPATCHPAGE pPatchPage;
4470 int rc;
4471
4472 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4473
4474 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4475 if (pPatchPage)
4476 {
4477 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4478 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4479 {
4480 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4481 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4482
4483 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4484 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4485 if (RT_FAILURE(rc))
4486 {
4487 Log(("Out of memory!!!!\n"));
4488 return VERR_NO_MEMORY;
4489 }
4490 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4491 MMHyperFree(pVM, paPatchOld);
4492 }
4493 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4494 pPatchPage->cCount++;
4495 }
4496 else
4497 {
4498 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4499 if (RT_FAILURE(rc))
4500 {
4501 Log(("Out of memory!!!!\n"));
4502 return VERR_NO_MEMORY;
4503 }
4504 pPatchPage->Core.Key = pPage;
4505 pPatchPage->cCount = 1;
4506 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4507
4508 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4509 if (RT_FAILURE(rc))
4510 {
4511 Log(("Out of memory!!!!\n"));
4512 MMHyperFree(pVM, pPatchPage);
4513 return VERR_NO_MEMORY;
4514 }
4515 pPatchPage->aPatch[0] = pPatch;
4516
4517 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4518 Assert(rc);
4519 pVM->patm.s.cPageRecords++;
4520
4521 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4522 }
4523 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4524
4525 /* Get the closest guest instruction (from below) */
4526 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4527 Assert(pGuestToPatchRec);
4528 if (pGuestToPatchRec)
4529 {
4530 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4531 if ( pPatchPage->pLowestAddrGC == 0
4532 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4533 {
4534 RTRCUINTPTR offset;
4535
4536 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4537
4538 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4539 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4540 if (offset && offset < MAX_INSTR_SIZE)
4541 {
4542 /* Get the closest guest instruction (from above) */
4543 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4544
4545 if (pGuestToPatchRec)
4546 {
4547 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4548 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4549 {
4550 pPatchPage->pLowestAddrGC = pPage;
4551 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4552 }
4553 }
4554 }
4555 }
4556 }
4557
4558 /* Get the closest guest instruction (from above) */
4559 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4560 Assert(pGuestToPatchRec);
4561 if (pGuestToPatchRec)
4562 {
4563 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4564 if ( pPatchPage->pHighestAddrGC == 0
4565 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4566 {
4567 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4568 /* Increase by instruction size. */
4569 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4570//// Assert(size);
4571 pPatchPage->pHighestAddrGC += size;
4572 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4573 }
4574 }
4575
4576 return VINF_SUCCESS;
4577}
4578
4579/**
4580 * Remove patch from page record
4581 *
4582 * @returns VBox status code.
4583 * @param pVM The VM to operate on.
4584 * @param pPage Page address
4585 * @param pPatch Patch record
4586 */
4587int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4588{
4589 PPATMPATCHPAGE pPatchPage;
4590 int rc;
4591
4592 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4593 Assert(pPatchPage);
4594
4595 if (!pPatchPage)
4596 return VERR_INVALID_PARAMETER;
4597
4598 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4599
4600 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4601 if (pPatchPage->cCount > 1)
4602 {
4603 uint32_t i;
4604
4605 /* Used by multiple patches */
4606 for (i=0;i<pPatchPage->cCount;i++)
4607 {
4608 if (pPatchPage->aPatch[i] == pPatch)
4609 {
4610 pPatchPage->aPatch[i] = 0;
4611 break;
4612 }
4613 }
4614 /* close the gap between the remaining pointers. */
4615 if (i < pPatchPage->cCount - 1)
4616 {
4617 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4618 }
4619 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4620
4621 pPatchPage->cCount--;
4622 }
4623 else
4624 {
4625 PPATMPATCHPAGE pPatchNode;
4626
4627 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4628
4629 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4630 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4631 Assert(pPatchNode && pPatchNode == pPatchPage);
4632
4633 Assert(pPatchPage->aPatch);
4634 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4635 AssertRC(rc);
4636 rc = MMHyperFree(pVM, pPatchPage);
4637 AssertRC(rc);
4638 pVM->patm.s.cPageRecords--;
4639 }
4640 return VINF_SUCCESS;
4641}
4642
4643/**
4644 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4645 *
4646 * @returns VBox status code.
4647 * @param pVM The VM to operate on.
4648 * @param pPatch Patch record
4649 */
4650int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4651{
4652 int rc;
4653 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4654
4655 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4656 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4657 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4658
4659 /** @todo optimize better (large gaps between current and next used page) */
4660 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4661 {
4662 /* Get the closest guest instruction (from above) */
4663 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4664 if ( pGuestToPatchRec
4665 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4666 )
4667 {
4668 /* Code in page really patched -> add record */
4669 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4670 AssertRC(rc);
4671 }
4672 }
4673 pPatch->flags |= PATMFL_CODE_MONITORED;
4674 return VINF_SUCCESS;
4675}
4676
4677/**
4678 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4679 *
4680 * @returns VBox status code.
4681 * @param pVM The VM to operate on.
4682 * @param pPatch Patch record
4683 */
4684int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4685{
4686 int rc;
4687 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4688
4689 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4690 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4691 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4692
4693 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4694 {
4695 /* Get the closest guest instruction (from above) */
4696 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4697 if ( pGuestToPatchRec
4698 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4699 )
4700 {
4701 /* Code in page really patched -> remove record */
4702 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4703 AssertRC(rc);
4704 }
4705 }
4706 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4707 return VINF_SUCCESS;
4708}
4709
/**
 * Notifies PATM about a (potential) write to code that has been patched.
 *
 * Scans the patch page records covering the write range.  A write that hits
 * the 5-byte patch jump removes the whole patch; a write hitting any other
 * patched instruction replaces the corresponding patch-memory instruction
 * with int3 (marks it dirty) or, past PATM_MAX_CODE_WRITES, marks the entire
 * patch dirty.
 *
 * @returns VBox status code.
 * @param pVM           The VM to operate on.
 * @param GCPtr         GC pointer to write address
 * @param cbWrite       Nr of bytes to write
 *
 */
VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
{
    RTRCUINTPTR          pWritePageStart, pWritePageEnd, pPage;

    Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));

    Assert(VM_IS_EMT(pVM));

    /* Quick boundary check */
    if (    GCPtr < pVM->patm.s.pPatchedInstrGCLowest
        ||  GCPtr > pVM->patm.s.pPatchedInstrGCHighest
       )
        return VINF_SUCCESS;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);

    /* Page-align the write range; a write may span two (or more) pages. */
    pWritePageStart =  (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    pWritePageEnd   = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;

    for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
    {
loop_start:
        /* Restart point: PATMR3RemovePatch/PATMR3MarkDirtyPatch below may delete
         * or reshuffle this page record, so it is re-fetched after each such call. */
        PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
        if (pPatchPage)
        {
            uint32_t i;
            bool fValidPatchWrite = false;

            /* Quick check to see if the write is in the patched part of the page */
            /* NOTE(review): this 'break' aborts the scan of ALL remaining pages of
             * the write, not just this one; for a multi-page write whose first
             * page's patched region ends below GCPtr this looks like it could skip
             * a later overlapping page -- confirm intent before changing. */
            if (    pPatchPage->pLowestAddrGC  > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
                ||  pPatchPage->pHighestAddrGC < GCPtr)
            {
                break;
            }

            for (i=0;i<pPatchPage->cCount;i++)
            {
                if (pPatchPage->aPatch[i])
                {
                    PPATCHINFO pPatch = pPatchPage->aPatch[i];
                    RTRCPTR pPatchInstrGC;
                    //unused: bool    fForceBreak = false;

                    Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
                    /** @todo inefficient and includes redundant checks for multiple pages. */
                    for (uint32_t j=0; j<cbWrite; j++)
                    {
                        RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);

                        if (    pPatch->cbPatchJump
                            &&  pGuestPtrGC >= pPatch->pPrivInstrGC
                            &&  pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
                        {
                            /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
                            Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
                            int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
                            if (rc == VINF_SUCCESS)
                                /* Note: jump back to the start as the pPatchPage has been deleted or changed */
                                goto loop_start;

                            continue;
                        }

                        /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
                        pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
                        if (!pPatchInstrGC)
                        {
                            /* No exact mapping; see whether the write lands inside the
                             * closest lower patched instruction (or in a gap). */
                            RTRCPTR  pClosestInstrGC;
                            uint32_t size;

                            pPatchInstrGC   = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
                            if (pPatchInstrGC)
                            {
                                pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
                                Assert(pClosestInstrGC <= pGuestPtrGC);
                                size            = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
                                /* Check if this is not a write into a gap between two patches */
                                if (pClosestInstrGC + size - 1 < pGuestPtrGC)
                                    pPatchInstrGC = 0;
                            }
                        }
                        if (pPatchInstrGC)
                        {
                            uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC;  /* Offset in memory reserved for PATM. */

                            fValidPatchWrite = true;

                            PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
                            Assert(pPatchToGuestRec);
                            if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
                            {
                                Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));

                                if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
                                {
                                    /* Too many writes to this patch; give up on it entirely. */
                                    LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));

                                    PATMR3MarkDirtyPatch(pVM, pPatch);

                                    /* Note: jump back to the start as the pPatchPage has been deleted or changed */
                                    goto loop_start;
                                }
                                else
                                {
                                    /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
                                    uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);

                                    /* Remember the original opcode so the int3 handler can restore/recompile it. */
                                    pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
                                    pPatchToGuestRec->fDirty   = true;

                                    *pInstrHC = 0xCC;

                                    STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
                                }
                            }
                            /* else already marked dirty */
                        }
                    }
                }
            } /* for each patch */

            if (fValidPatchWrite == false)
            {
                /* Write to a part of the page that either:
                 * - doesn't contain any code (shared code/data); rather unlikely
                 * - old code page that's no longer in active use.
                 */
invalid_write_loop_start:
                pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);

                if (pPatchPage)
                {
                    for (i=0;i<pPatchPage->cCount;i++)
                    {
                        PPATCHINFO pPatch = pPatchPage->aPatch[i];

                        /* NOTE(review): cInvalidWrites is only tested here; presumably it is
                         * incremented elsewhere (GC side?) -- confirm. */
                        if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
                        {
                            /* Note: possibly dangerous assumption that all future writes will be harmless. */
                            if (pPatch->flags & PATMFL_IDTHANDLER)
                            {
                                LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));

                                Assert(pPatch->flags & PATMFL_CODE_MONITORED);
                                int rc = patmRemovePatchPages(pVM, pPatch);
                                AssertRC(rc);
                            }
                            else
                            {
                                LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
                                PATMR3MarkDirtyPatch(pVM, pPatch);
                            }
                            /* Note: jump back to the start as the pPatchPage has been deleted or changed */
                            goto invalid_write_loop_start;
                        }
                    } /* for */
                }
            }
        }
    }
    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
    return VINF_SUCCESS;

}
4883
4884/**
4885 * Disable all patches in a flushed page
4886 *
4887 * @returns VBox status code
4888 * @param pVM The VM to operate on.
4889 * @param addr GC address of the page to flush
4890 */
4891/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4892 */
4893VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4894{
4895 addr &= PAGE_BASE_GC_MASK;
4896
4897 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4898 if (pPatchPage)
4899 {
4900 int i;
4901
4902 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4903 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4904 {
4905 if (pPatchPage->aPatch[i])
4906 {
4907 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4908
4909 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4910 PATMR3MarkDirtyPatch(pVM, pPatch);
4911 }
4912 }
4913 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4914 }
4915 return VINF_SUCCESS;
4916}
4917
4918/**
4919 * Checks if the instructions at the specified address has been patched already.
4920 *
4921 * @returns boolean, patched or not
4922 * @param pVM The VM to operate on.
4923 * @param pInstrGC Guest context pointer to instruction
4924 */
4925VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4926{
4927 PPATMPATCHREC pPatchRec;
4928 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4929 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4930 return true;
4931 return false;
4932}
4933
4934/**
4935 * Query the opcode of the original code that was overwritten by the 5 bytes patch jump
4936 *
4937 * @returns VBox status code.
4938 * @param pVM The VM to operate on.
4939 * @param pInstrGC GC address of instr
4940 * @param pByte opcode byte pointer (OUT)
4941 *
4942 */
4943VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4944{
4945 PPATMPATCHREC pPatchRec;
4946
4947 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4948
4949 /* Shortcut. */
4950 if ( !PATMIsEnabled(pVM)
4951 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4952 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4953 {
4954 return VERR_PATCH_NOT_FOUND;
4955 }
4956
4957 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4958 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4959 if ( pPatchRec
4960 && pPatchRec->patch.uState == PATCH_ENABLED
4961 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4962 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4963 {
4964 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4965 *pByte = pPatchRec->patch.aPrivInstr[offset];
4966
4967 if (pPatchRec->patch.cbPatchJump == 1)
4968 {
4969 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4970 }
4971 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4972 return VINF_SUCCESS;
4973 }
4974 return VERR_PATCH_NOT_FOUND;
4975}
4976
/**
 * Disable patch for privileged instruction at specified location
 *
 * Restores the original guest code (patch jump or int3) when possible,
 * invalidates the patch entry point with int3, and transitions the patch
 * state (DISABLED / DISABLE_PENDING / UNUSABLE) so it can be re-enabled or
 * removed later.
 *
 * @returns VBox status code (VWRN_PATCH_REMOVED if the guest overwrote the
 *          patched code and the patch was removed instead of disabled).
 * @param pVM           The VM to operate on.
 * @param pInstr        Guest context point to privileged instruction
 *
 * @note    returns failure if patching is not allowed or possible
 *
 */
VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec;
    PPATCHINFO    pPatch;

    Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
    pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    if (pPatchRec)
    {
        int rc = VINF_SUCCESS;

        pPatch = &pPatchRec->patch;

        /* Already disabled? */
        if (pPatch->uState == PATCH_DISABLED)
            return VINF_SUCCESS;

        /* Clear the IDT entries for the patch we're disabling. */
        /* Note: very important as we clear IF in the patch itself */
        /** @todo this needs to be changed */
        if (pPatch->flags & PATMFL_IDTHANDLER)
        {
            uint32_t iGate;

            iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
            if (iGate != (uint32_t)~0)
            {
                TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
                /* NOTE(review): cIDTHandlersDisabled is presumably a file-scope
                 * counter (defined elsewhere) used to limit release-log spam to
                 * the first 256 occurrences -- confirm. */
                if (++cIDTHandlersDisabled < 256)
                    LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
            }
        }

        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
        if (    pPatch->pPatchBlockOffset
            &&  pPatch->uState == PATCH_ENABLED)
        {
            Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
            /* Save the first patch-code byte so it can be restored on re-enable. */
            pPatch->bDirtyOpcode   = *PATCHCODE_PTR_HC(pPatch);
            *PATCHCODE_PTR_HC(pPatch) = 0xCC;
        }

        /* IDT or function patches haven't changed any guest code.  */
        if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
        {
            Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
            Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));

            if (pPatch->uState != PATCH_REFUSED)
            {
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
                if (rc == VINF_SUCCESS)
                {
                    /* Expected: a 5-byte near jmp (0xE9 + rel32) into the patch code. */
                    RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);

                    if (    temp[0] != 0xE9 /* jmp opcode */
                        ||  *(RTRCINTPTR *)(&temp[1]) != displ
                       )
                    {
                        Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
                        STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                        /* Remove it completely */
                        pPatch->uState = PATCH_DISABLED;    /* don't call PATMR3DisablePatch again */
                        rc = PATMR3RemovePatch(pVM, pInstrGC);
                        AssertRC(rc);
                        return VWRN_PATCH_REMOVED;
                    }
                    patmRemoveJumpToPatch(pVM, pPatch);
                }
                else
                {
                    /* Guest page not present; the jump can't be removed right now. */
                    Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
                    pPatch->uState = PATCH_DISABLE_PENDING;
                }
            }
            else
            {
                AssertMsgFailed(("Patch was refused!\n"));
                return VERR_PATCH_ALREADY_DISABLED;
            }
        }
        else
        if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
        {
            uint8_t temp[16];

            Assert(pPatch->cbPatchJump < sizeof(temp));

            /* Let's first check if the guest code is still the same. */
            rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
            if (rc == VINF_SUCCESS)
            {
                /* Expected: the int3 (0xCC) we put at the privileged instruction. */
                if (temp[0] != 0xCC)
                {
                    Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
                    STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                    /* Remove it completely */
                    pPatch->uState = PATCH_DISABLED;    /* don't call PATMR3DisablePatch again */
                    rc = PATMR3RemovePatch(pVM, pInstrGC);
                    AssertRC(rc);
                    return VWRN_PATCH_REMOVED;
                }
                patmDeactivateInt3Patch(pVM, pPatch);
            }
        }

        if (rc == VINF_SUCCESS)
        {
            /* Save old state and mark this one as disabled (so it can be enabled later on). */
            if (pPatch->uState == PATCH_DISABLE_PENDING)
            {
                /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
                pPatch->uState    = PATCH_UNUSABLE;
            }
            else
            if (pPatch->uState != PATCH_DIRTY)
            {
                pPatch->uOldState = pPatch->uState;
                pPatch->uState    = PATCH_DISABLED;
            }
            STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
        }

        Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
        return VINF_SUCCESS;
    }
    Log(("Patch not found!\n"));
    return VERR_PATCH_NOT_FOUND;
}
5123
/**
 * Permanently disable patch for privileged instruction at specified location
 *
 * Called when a new patch target conflicts with an existing patch.  Either
 * converts a conflicting cli patch into an int3 patch, installs a jump patch
 * over the conflicting instruction (when PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
 * is defined), or marks the conflicting patch PATCH_UNUSABLE.
 *
 * @returns VBox status code (VERR_PATCH_DISABLED when the conflicting patch
 *          was made unusable).
 * @param pVM           The VM to operate on.
 * @param pInstr        Guest context instruction pointer
 * @param pConflictAddr Guest context pointer which conflicts with specified patch
 * @param pConflictPatch Conflicting patch
 *
 * @note    pConflictAddr appears unused in the code below -- kept for the
 *          caller's interface (TODO confirm).
 */
static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
{
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
    PATCHINFO            patch;
    DISCPUSTATE          cpu;
    R3PTRTYPE(uint8_t *) pInstrHC;
    uint32_t             opsize;
    bool                 disret;
    int                  rc;

    RT_ZERO(patch);
    pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
    cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
    /*
     * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
     * with one that jumps right into the conflict patch.
     * Otherwise we must disable the conflicting patch to avoid serious problems.
     */
    if (    disret == true
        && (pConflictPatch->flags & PATMFL_CODE32)
        && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
        && (cpu.param1.flags & USE_IMMEDIATE32_REL))
    {
        /* Hint patches must be enabled first. */
        if (pConflictPatch->flags & PATMFL_INSTR_HINT)
        {
            Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
            pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
            rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
            Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
            /* Enabling might fail if the patched code has changed in the meantime. */
            if (rc != VINF_SUCCESS)
                return rc;
        }

        rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
        if (RT_SUCCESS(rc))
        {
            Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
            STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
            return VINF_SUCCESS;
        }
    }
#endif

    if (pConflictPatch->opcode == OP_CLI)
    {
        /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
        Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
        int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
        if (rc == VWRN_PATCH_REMOVED)
            return VINF_SUCCESS;
        if (RT_SUCCESS(rc))
        {
            /* Re-enable as an int3 replacement block instead of a patch jump. */
            pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
            pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
            rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
            if (rc == VERR_PATCH_NOT_FOUND)
                return VINF_SUCCESS;    /* removed already */

            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
                return VINF_SUCCESS;
            }
        }
        /* else turned into unusable patch (see below) */
    }
    else
    {
        Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
        int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
        if (rc == VWRN_PATCH_REMOVED)
            return VINF_SUCCESS;
    }

    /* No need to monitor the code anymore. */
    if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
    {
        int rc = patmRemovePatchPages(pVM, pConflictPatch);
        AssertRC(rc);
    }
    pConflictPatch->uState = PATCH_UNUSABLE;
    STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
    return VERR_PATCH_DISABLED;
}
5222
/**
 * Enable a (previously disabled) patch for the privileged instruction at the
 * specified guest location.
 *
 * Re-verifies that the guest code covered by the patch jump has not changed
 * since the patch was created before re-arming the patch jump / int3.
 *
 * @returns VBox status code.
 * @retval  VERR_PATCH_NOT_FOUND if no patch exists for the address or the
 *          guest code changed and the patch had to be removed.
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Guest context pointer to the privileged instruction.
 *
 * @note    returns failure if patching is not allowed or possible
 */
VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec;
    PPATCHINFO pPatch;

    Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
    /* Exact-match lookup; only patches keyed by this privileged instruction qualify. */
    pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    if (pPatchRec)
    {
        int rc = VINF_SUCCESS;

        pPatch = &pPatchRec->patch;

        /* Only patches in the DISABLED state can be (re-)enabled. */
        if (pPatch->uState == PATCH_DISABLED)
        {
            if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
            {
                Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);
                if (rc2 == VINF_SUCCESS)
                {
                    /* Compare against the instruction bytes saved at patch creation time. */
                    if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                    {
                        Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                        STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                        /* Remove it completely */
                        rc = PATMR3RemovePatch(pVM, pInstrGC);
                        AssertRC(rc);
                        return VERR_PATCH_NOT_FOUND;
                    }

                    PATMP2GLOOKUPREC cacheRec;
                    RT_ZERO(cacheRec);
                    cacheRec.pPatch = pPatch;

                    /* Re-write the 5-byte jump from guest code into the patch block. */
                    rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
                    /* Free leftover lock if any. */
                    if (cacheRec.Lock.pvMap)
                    {
                        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
                        cacheRec.Lock.pvMap = NULL;
                    }
                    AssertRC(rc2);
                    if (RT_FAILURE(rc2))
                        return rc2;

#ifdef DEBUG
                    /* Debug builds: disassemble and log the freshly re-installed jump bytes. */
                    {
                        DISCPUSTATE cpu;
                        char szOutput[256];
                        uint32_t opsize, i = 0;
                        bool disret;
                        i = 0;
                        while(i < pPatch->cbPatchJump)
                        {
                            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
                            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
                            Log(("Renewed patch instr: %s", szOutput));
                            i += opsize;
                        }
                    }
#endif
                }
            }
            else
            if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
            {
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);

                if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                {
                    Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                    STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                    rc = PATMR3RemovePatch(pVM, pInstrGC);
                    AssertRC(rc);
                    return VERR_PATCH_NOT_FOUND;
                }

                /* Put the int3 (0xCC) replacement byte back into guest code. */
                rc2 = patmActivateInt3Patch(pVM, pPatch);
                if (RT_FAILURE(rc2))
                    return rc2;
            }

            pPatch->uState = pPatch->uOldState; //restore state

            /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
            if (pPatch->pPatchBlockOffset)
                *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;

            STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
        }
        else
            Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));

        return rc;
    }
    return VERR_PATCH_NOT_FOUND;
}
5343
/**
 * Remove patch for privileged instruction at specified location
 *
 * Tears the patch record down completely: address-tree entries, statistics
 * counters, translation trees and finally the record itself. The patch
 * memory is never reused (see file header note).
 *
 * @returns VBox status code.
 * @retval  VERR_ACCESS_DENIED if the patch may have dependants and
 *          fForceRemove is false.
 * @param   pVM             The VM to operate on.
 * @param   pPatchRec       Patch record
 * @param   fForceRemove    Remove *all* patches, even referenced/duplicated ones
 */
int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
{
    PPATCHINFO pPatch;

    pPatch = &pPatchRec->patch;

    /* Strictly forbidden to remove such patches. There can be dependencies!! */
    if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
    {
        Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
        return VERR_ACCESS_DENIED;
    }
    Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));

    /* Note: NEVER EVER REUSE PATCH MEMORY */
    /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */

    /* Drop the patch-address -> record mapping if a patch block was allocated. */
    if (pPatchRec->patch.pPatchBlockOffset)
    {
        PAVLOU32NODECORE pNode;

        pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
        Assert(pNode);
    }

    /* Stop monitoring the guest pages for writes to this patch. */
    if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
    {
        int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
        AssertRC(rc);
    }

#ifdef VBOX_WITH_STATISTICS
    /* Unregister all per-patch STAM counters before freeing the record they live in. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
    patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
    pPatch->nrPatch2GuestRecs = 0;
    Assert(pPatch->Patch2GuestAddrTree == 0);

    patmEmptyTree(pVM, &pPatch->FixupTree);
    pPatch->nrFixups = 0;
    Assert(pPatch->FixupTree == 0);

    if (pPatchRec->patch.pTempInfo)
        MMR3HeapFree(pPatchRec->patch.pTempInfo);

    /* Note: might fail, because it has already been removed (e.g. during reset). */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);

    /* Free the patch record */
    MMHyperFree(pVM, pPatchRec);
    return VINF_SUCCESS;
}
5425
/**
 * Attempt to refresh the patch by recompiling its entire code block
 *
 * Disables the old patch, installs a fresh one for the same instruction, and
 * chains the old patch block to the new one so stale return addresses still
 * work. On failure the old patch is put back and re-enabled.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatchRec   Patch record
 */
int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO pPatch;
    int rc;
    RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;

    Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));

    pPatch = &pPatchRec->patch;
    /* Only function duplicates and IDT/trap handler patches can be refreshed. */
    AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
    if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
    {
        Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
        return VERR_PATCHING_REFUSED;
    }

    /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */

    rc = PATMR3DisablePatch(pVM, pInstrGC);
    AssertRC(rc);

    /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
#ifdef VBOX_WITH_STATISTICS
    /* Deregister the old patch's STAM counters; the new install registers its own. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */

    /* Attempt to install a new patch. */
    rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
    if (RT_SUCCESS(rc))
    {
        RTRCPTR pPatchTargetGC;
        PPATMPATCHREC pNewPatchRec;

        /* Determine target address in new patch */
        pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
        Assert(pPatchTargetGC);
        if (!pPatchTargetGC)
        {
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }

        /* Reset offset into patch memory to put the next code blocks right at the beginning. */
        pPatch->uCurPatchOffset = 0;

        /* insert jump to new patch in old patch block */
        rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
        if (RT_FAILURE(rc))
            goto failure;

        pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
        Assert(pNewPatchRec); /* can't fail */

        /* Remove old patch (only do that when everything is finished) */
        int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
        AssertRC(rc2);

        /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
        RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);

        LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);

        /* Used by another patch, so don't remove it! */
        pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
    }

failure:
    if (RT_FAILURE(rc))
    {
        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));

        /* Remove the new inactive patch */
        rc = PATMR3RemovePatch(pVM, pInstrGC);
        AssertRC(rc);

        /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
        RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);

        /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
        int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
        AssertRC(rc2);

        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
    }
    return rc;
}
5542
5543/**
5544 * Find patch for privileged instruction at specified location
5545 *
5546 * @returns Patch structure pointer if found; else NULL
5547 * @param pVM The VM to operate on.
5548 * @param pInstr Guest context point to instruction that might lie within 5 bytes of an existing patch jump
5549 * @param fIncludeHints Include hinted patches or not
5550 *
5551 */
5552PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5553{
5554 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5555 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5556 if (pPatchRec)
5557 {
5558 if ( pPatchRec->patch.uState == PATCH_ENABLED
5559 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5560 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5561 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5562 {
5563 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5564 return &pPatchRec->patch;
5565 }
5566 else
5567 if ( fIncludeHints
5568 && pPatchRec->patch.uState == PATCH_DISABLED
5569 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5570 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5571 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5572 {
5573 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5574 return &pPatchRec->patch;
5575 }
5576 }
5577 return NULL;
5578}
5579
5580/**
5581 * Checks whether the GC address is inside a generated patch jump
5582 *
5583 * @returns true -> yes, false -> no
5584 * @param pVM The VM to operate on.
5585 * @param pAddr Guest context address
5586 * @param pPatchAddr Guest context patch address (if true)
5587 */
5588VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5589{
5590 RTRCPTR addr;
5591 PPATCHINFO pPatch;
5592
5593 if (PATMIsEnabled(pVM) == false)
5594 return false;
5595
5596 if (pPatchAddr == NULL)
5597 pPatchAddr = &addr;
5598
5599 *pPatchAddr = 0;
5600
5601 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5602 if (pPatch)
5603 *pPatchAddr = pPatch->pPrivInstrGC;
5604
5605 return *pPatchAddr == 0 ? false : true;
5606}
5607
5608/**
5609 * Remove patch for privileged instruction at specified location
5610 *
5611 * @returns VBox status code.
5612 * @param pVM The VM to operate on.
5613 * @param pInstr Guest context point to privileged instruction
5614 *
5615 * @note returns failure if patching is not allowed or possible
5616 *
5617 */
5618VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5619{
5620 PPATMPATCHREC pPatchRec;
5621
5622 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5623 if (pPatchRec)
5624 {
5625 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5626 if (rc == VWRN_PATCH_REMOVED)
5627 return VINF_SUCCESS;
5628
5629 return PATMRemovePatch(pVM, pPatchRec, false);
5630 }
5631 AssertFailed();
5632 return VERR_PATCH_NOT_FOUND;
5633}
5634
/**
 * Mark patch as dirty
 *
 * Plants a breakpoint at the patch entry, restores the replaced guest
 * instruction, stops code monitoring and flips the patch into the DIRTY
 * state so it is no longer executed.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 *
 * @note    returns failure if patching is not allowed or possible
 */
VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
{
    if (pPatch->pPatchBlockOffset)
    {
        Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
        /* Save the first opcode byte so it can be restored when re-enabling. */
        pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
        *PATCHCODE_PTR_HC(pPatch) = 0xCC;
    }

    STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
    /* Put back the replaced instruction. */
    int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;

    /* Note: we don't restore patch pages for patches that are not enabled! */
    /* Note: be careful when changing this behaviour!! */

    /* The patch pages are no longer marked for self-modifying code detection */
    if (pPatch->flags & PATMFL_CODE_MONITORED)
    {
        rc = patmRemovePatchPages(pVM, pPatch);
        AssertRCReturn(rc, rc);
    }
    pPatch->uState = PATCH_DIRTY;

    /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
    CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;

    return VINF_SUCCESS;
}
5676
5677/**
5678 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5679 *
5680 * @returns VBox status code.
5681 * @param pVM The VM to operate on.
5682 * @param pPatch Patch block structure pointer
5683 * @param pPatchGC GC address in patch block
5684 */
5685RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5686{
5687 Assert(pPatch->Patch2GuestAddrTree);
5688 /* Get the closest record from below. */
5689 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5690 if (pPatchToGuestRec)
5691 return pPatchToGuestRec->pOrgInstrGC;
5692
5693 return 0;
5694}
5695
5696/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5697 *
5698 * @returns corresponding GC pointer in patch block
5699 * @param pVM The VM to operate on.
5700 * @param pPatch Current patch block pointer
5701 * @param pInstrGC Guest context pointer to privileged instruction
5702 *
5703 */
5704RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5705{
5706 if (pPatch->Guest2PatchAddrTree)
5707 {
5708 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5709 if (pGuestToPatchRec)
5710 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5711 }
5712
5713 return 0;
5714}
5715
5716/* Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5717 *
5718 * @returns corresponding GC pointer in patch block
5719 * @param pVM The VM to operate on.
5720 * @param pPatch Current patch block pointer
5721 * @param pInstrGC Guest context pointer to privileged instruction
5722 *
5723 */
5724RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5725{
5726 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5727 if (pGuestToPatchRec)
5728 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5729
5730 return 0;
5731}
5732
5733/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5734 *
5735 * @returns corresponding GC pointer in patch block
5736 * @param pVM The VM to operate on.
5737 * @param pInstrGC Guest context pointer to privileged instruction
5738 *
5739 */
5740VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5741{
5742 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5743 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5744 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5745 else
5746 return 0;
5747}
5748
/**
 * Query the corresponding GC instruction pointer from a pointer inside the
 * patch block itself
 *
 * Also classifies the translation so the caller knows whether it is safe to
 * resume at the returned guest address.
 *
 * @returns original GC instruction pointer or 0 if not found
 * @param   pVM         The VM to operate on.
 * @param   pPatchGC    GC address in patch block
 * @param   pEnmState   State of the translated address (out, optional)
 */
VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
{
    PPATMPATCHREC pPatchRec;
    void *pvPatchCoreOffset;
    RTRCPTR pPrivInstrGC;

    Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
    /* Find the patch record owning this patch-memory offset. */
    pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
    if (pvPatchCoreOffset == 0)
    {
        Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
        return 0;
    }
    pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
    pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
    if (pEnmState)
    {
        AssertMsg(pPrivInstrGC && (   pPatchRec->patch.uState == PATCH_ENABLED
                                   || pPatchRec->patch.uState == PATCH_DIRTY
                                   || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
                                   || pPatchRec->patch.uState == PATCH_UNUSABLE),
                  ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));

        /* No translation or a dead patch -> translation failed. */
        if (    !pPrivInstrGC
            ||  pPatchRec->patch.uState == PATCH_UNUSABLE
            ||  pPatchRec->patch.uState == PATCH_REFUSED)
        {
            pPrivInstrGC = 0;
            *pEnmState = PATMTRANS_FAILED;
        }
        else
        /* Instruction following an interrupt-shadowing one; must be handled specially. */
        if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
        {
            *pEnmState = PATMTRANS_INHIBITIRQ;
        }
        else
        /* Address falls inside the 5-byte jump this patch wrote over guest code. */
        if (    pPatchRec->patch.uState == PATCH_ENABLED
            && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
            &&  pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
            &&  pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
        {
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        /* Address falls inside the patch jump of some other active patch. */
        if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
        {
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
        {
            *pEnmState = PATMTRANS_PATCHSTART;
        }
        else
            *pEnmState = PATMTRANS_SAFE;
    }
    return pPrivInstrGC;
}
5816
5817/**
5818 * Returns the GC pointer of the patch for the specified GC address
5819 *
5820 * @returns VBox status code.
5821 * @param pVM The VM to operate on.
5822 * @param pAddrGC Guest context address
5823 */
5824VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5825{
5826 PPATMPATCHREC pPatchRec;
5827
5828 /* Find the patch record. */
5829 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5830 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5831 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5832 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5833 else
5834 return 0;
5835}
5836
/**
 * Attempt to recover dirty instructions
 *
 * Works in two passes: first it walks forward from the faulting patch
 * instruction collecting all adjacent dirty (guest-modified) instructions,
 * then it re-disassembles the current guest code and copies it 1:1 back into
 * the patch block, provided every new instruction is harmless. On any
 * failure the whole dirty range is filled with int3 breakpoints and a full
 * patch refresh may be attempted.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   pCtx                CPU context
 * @param   pPatch              Patch record
 * @param   pPatchToGuestRec    Patch to guest address record
 * @param   pEip                GC pointer of trapping instruction
 */
static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
{
    DISCPUSTATE CpuOld, CpuNew;
    uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
    int rc;
    RTRCPTR pCurInstrGC, pCurPatchInstrGC;
    uint32_t cbDirty;                               /* total size of the dirty instruction run (bytes) */
    PRECPATCHTOGUEST pRec;
    RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));

    pRec = pPatchToGuestRec;
    pCurInstrGC = pOrgInstrGC;
    pCurPatchInstrGC = pEip;
    cbDirty = 0;
    pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

    /* Find all adjacent dirty instructions */
    while (true)
    {
        if (pRec->fJumpTarget)
        {
            LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
            pRec->fDirty = false;
            return VERR_PATCHING_REFUSED;
        }

        /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
        pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
        *pCurPatchInstrHC = pRec->u8DirtyOpcode;

        /* Only harmless instructions are acceptable. */
        rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
        if (    RT_FAILURE(rc)
            || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
        {
            /* Still count this last instruction (or at least one byte) as dirty. */
            if (RT_SUCCESS(rc))
                cbDirty += CpuOld.opsize;
            else
            if (!cbDirty)
                cbDirty = 1;
            break;
        }

#ifdef DEBUG
        char szBuf[256];
        DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szBuf, sizeof(szBuf), NULL);
        Log(("DIRTY: %s\n", szBuf));
#endif
        /* Mark as clean; if we fail we'll let it always fault. */
        pRec->fDirty = false;

        /* Remove old lookup record. */
        patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
        pPatchToGuestRec = NULL;

        pCurPatchInstrGC += CpuOld.opsize;
        cbDirty += CpuOld.opsize;

        /* Let's see if there's another dirty instruction right after. */
        pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
        if (!pRec || !pRec->fDirty)
            break;  /* no more dirty instructions */

        /* In case of complex instructions the next guest instruction could be quite far off. */
        pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
    }

    /* Second pass: copy the current guest code back over the dirty range,
     * but only if the scan above ended on a harmless instruction. */
    if (   RT_SUCCESS(rc)
        && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
       )
    {
        uint32_t cbLeft;

        pCurPatchInstrHC = pPatchInstrHC;
        pCurPatchInstrGC = pEip;
        cbLeft = cbDirty;

        while (cbLeft && RT_SUCCESS(rc))
        {
            bool fValidInstr;

            rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);

            fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
            if (    !fValidInstr
                && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
               )
            {
                RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);

                if (    pTargetGC >= pOrgInstrGC
                    &&  pTargetGC <= pOrgInstrGC + cbDirty
                   )
                {
                    /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
                    fValidInstr = true;
                }
            }

            /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
            if (   rc == VINF_SUCCESS
                && CpuNew.opsize <= cbLeft /* must still fit */
                && fValidInstr
               )
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s\n", szBuf));
#endif

                /* Copy the new instruction. */
                rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
                AssertRC(rc);

                /* Add a new lookup record for the duplicated instruction. */
                patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
            }
            else
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s (FAILED)\n", szBuf));
#endif
                /* Restore the old lookup record for the duplicated instruction. */
                patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);

                /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
                rc = VERR_PATCHING_REFUSED;
                break;
            }
            pCurInstrGC += CpuNew.opsize;
            pCurPatchInstrHC += CpuNew.opsize;
            pCurPatchInstrGC += CpuNew.opsize;
            cbLeft -= CpuNew.opsize;

            /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
            if (!cbLeft)
            {
                /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
                if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
                {
                    pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
                    if (pRec)
                    {
                        unsigned cbFiller  = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
                        uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

                        Assert(!pRec->fDirty);

                        Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
                        if (cbFiller >= SIZEOF_NEARJUMP32)
                        {
                            /* Enough room for a near jump (0xE9) over the leftover gap. */
                            pPatchFillHC[0] = 0xE9;
                            *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
#ifdef DEBUG
                            char szBuf[256];
                            DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                               szBuf, sizeof(szBuf), NULL);
                            Log(("FILL: %s\n", szBuf));
#endif
                        }
                        else
                        {
                            /* Gap too small for a jump; pad with NOPs instead. */
                            for (unsigned i = 0; i < cbFiller; i++)
                            {
                                pPatchFillHC[i] = 0x90; /* NOP */
#ifdef DEBUG
                                char szBuf[256];
                                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
                                                   DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
                                Log(("FILL: %s\n", szBuf));
#endif
                            }
                        }
                    }
                }
            }
        }
    }
    else
        rc = VERR_PATCHING_REFUSED;

    if (RT_SUCCESS(rc))
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
    }
    else
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
        Assert(cbDirty);

        /* Mark the whole instruction stream with breakpoints. */
        if (cbDirty)
            memset(pPatchInstrHC, 0xCC, cbDirty);

        /* For refreshable patch types, try recompiling the whole block instead. */
        if (   pVM->patm.s.fOutOfMemory == false
            && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
        {
            rc = patmR3RefreshPatch(pVM, pPatch);
            if (RT_FAILURE(rc))
            {
                LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
            }
            /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
            rc = VERR_PATCHING_REFUSED;
        }
    }
    return rc;
}
6063
6064/**
6065 * Handle trap inside patch code
6066 *
6067 * @returns VBox status code.
6068 * @param pVM The VM to operate on.
6069 * @param pCtx CPU context
6070 * @param pEip GC pointer of trapping instruction
6071 * @param ppNewEip GC pointer to new instruction
6072 */
6073VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6074{
6075 PPATMPATCHREC pPatch = 0;
6076 void *pvPatchCoreOffset;
6077 RTRCUINTPTR offset;
6078 RTRCPTR pNewEip;
6079 int rc ;
6080 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6081 PVMCPU pVCpu = VMMGetCpu0(pVM);
6082
6083 Assert(pVM->cCpus == 1);
6084
6085 pNewEip = 0;
6086 *ppNewEip = 0;
6087
6088 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6089
6090 /* Find the patch record. */
6091 /* Note: there might not be a patch to guest translation record (global function) */
6092 offset = pEip - pVM->patm.s.pPatchMemGC;
6093 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6094 if (pvPatchCoreOffset)
6095 {
6096 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6097
6098 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6099
6100 if (pPatch->patch.uState == PATCH_DIRTY)
6101 {
6102 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6103 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6104 {
6105 /* Function duplication patches set fPIF to 1 on entry */
6106 pVM->patm.s.pGCStateHC->fPIF = 1;
6107 }
6108 }
6109 else
6110 if (pPatch->patch.uState == PATCH_DISABLED)
6111 {
6112 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6113 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6114 {
6115 /* Function duplication patches set fPIF to 1 on entry */
6116 pVM->patm.s.pGCStateHC->fPIF = 1;
6117 }
6118 }
6119 else
6120 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6121 {
6122 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6123
6124 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6125 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6126 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6127 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6128 }
6129
6130 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6131 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6132
6133 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6134 pPatch->patch.cTraps++;
6135 PATM_STAT_FAULT_INC(&pPatch->patch);
6136 }
6137 else
6138 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6139
6140 /* Check if we were interrupted in PATM generated instruction code. */
6141 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6142 {
6143 DISCPUSTATE Cpu;
6144 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6145 AssertRC(rc);
6146
6147 if ( rc == VINF_SUCCESS
6148 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6149 || Cpu.pCurInstr->opcode == OP_PUSH
6150 || Cpu.pCurInstr->opcode == OP_CALL)
6151 )
6152 {
6153 uint64_t fFlags;
6154
6155 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6156
6157 if (Cpu.pCurInstr->opcode == OP_PUSH)
6158 {
6159 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6160 if ( rc == VINF_SUCCESS
6161 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6162 {
6163 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6164
6165 /* Reset the PATM stack. */
6166 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6167
6168 pVM->patm.s.pGCStateHC->fPIF = 1;
6169
6170 Log(("Faulting push -> go back to the original instruction\n"));
6171
6172 /* continue at the original instruction */
6173 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6174 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6175 return VINF_SUCCESS;
6176 }
6177 }
6178
6179 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6180 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6181 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6182 if (rc == VINF_SUCCESS)
6183 {
6184 /* The guest page *must* be present. */
6185 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6186 if ( rc == VINF_SUCCESS
6187 && (fFlags & X86_PTE_P))
6188 {
6189 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6190 return VINF_PATCH_CONTINUE;
6191 }
6192 }
6193 }
6194 else
6195 if (pPatch->patch.pPrivInstrGC == pNewEip)
6196 {
6197 /* Invalidated patch or first instruction overwritten.
6198 * We can ignore the fPIF state in this case.
6199 */
6200 /* Reset the PATM stack. */
6201 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6202
6203 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6204
6205 pVM->patm.s.pGCStateHC->fPIF = 1;
6206
6207 /* continue at the original instruction */
6208 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6209 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6210 return VINF_SUCCESS;
6211 }
6212
6213 char szBuf[256];
6214 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6215
6216 /* Very bad. We crashed in emitted code. Probably stack? */
6217 if (pPatch)
6218 {
6219 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6220 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6221 }
6222 else
6223 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6224 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6225 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6226 }
6227
6228 /* From here on, we must have a valid patch to guest translation. */
6229 if (pvPatchCoreOffset == 0)
6230 {
6231 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6232 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6233 return VERR_PATCH_NOT_FOUND;
6234 }
6235
6236 /* Take care of dirty/changed instructions. */
6237 if (pPatchToGuestRec->fDirty)
6238 {
6239 Assert(pPatchToGuestRec->Core.Key == offset);
6240 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6241
6242 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6243 if (RT_SUCCESS(rc))
6244 {
6245 /* Retry the current instruction. */
6246 pNewEip = pEip;
6247 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6248 }
6249 else
6250 {
6251 /* Reset the PATM stack. */
6252 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6253
6254 rc = VINF_SUCCESS; /* Continue at original instruction. */
6255 }
6256
6257 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6258 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6259 return rc;
6260 }
6261
6262#ifdef VBOX_STRICT
6263 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6264 {
6265 DISCPUSTATE cpu;
6266 bool disret;
6267 uint32_t opsize;
6268 PATMP2GLOOKUPREC cacheRec;
6269 RT_ZERO(cacheRec);
6270 cacheRec.pPatch = &pPatch->patch;
6271
6272 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6273 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6274 if (cacheRec.Lock.pvMap)
6275 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6276
6277 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6278 {
6279 RTRCPTR retaddr;
6280 PCPUMCTX pCtx2;
6281
6282 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6283
6284 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6285 AssertRC(rc);
6286
6287 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6288 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6289 }
6290 }
6291#endif
6292
6293 /* Return original address, correct by subtracting the CS base address. */
6294 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6295
6296 /* Reset the PATM stack. */
6297 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6298
6299 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6300 {
6301 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6302 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6303#ifdef VBOX_STRICT
6304 DISCPUSTATE cpu;
6305 bool disret;
6306 uint32_t opsize;
6307 PATMP2GLOOKUPREC cacheRec;
6308 RT_ZERO(cacheRec);
6309 cacheRec.pPatch = &pPatch->patch;
6310
6311 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6312 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6313 if (cacheRec.Lock.pvMap)
6314 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6315
6316 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6317 {
6318 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6319 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6320 if (cacheRec.Lock.pvMap)
6321 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6322
6323 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6324 }
6325#endif
6326 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6327 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6328 }
6329
6330 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6331#ifdef LOG_ENABLED
6332 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6333#endif
6334 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6335 {
6336 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6337 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6338 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6339 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6340 return VERR_PATCH_DISABLED;
6341 }
6342
6343#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6344 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6345 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6346 {
6347 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6348 //we are only wasting time, back out the patch
6349 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6350 pTrapRec->pNextPatchInstr = 0;
6351 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6352 return VERR_PATCH_DISABLED;
6353 }
6354#endif
6355
6356 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6357 return VINF_SUCCESS;
6358}
6359
6360
6361/**
6362 * Handle page-fault in monitored page
6363 *
6364 * @returns VBox status code.
6365 * @param pVM The VM to operate on.
6366 */
6367VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6368{
6369 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6370
6371 addr &= PAGE_BASE_GC_MASK;
6372
6373 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6374 AssertRC(rc); NOREF(rc);
6375
6376 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6377 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6378 {
6379 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6380 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6381 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6382 if (rc == VWRN_PATCH_REMOVED)
6383 return VINF_SUCCESS;
6384
6385 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6386
6387 if (addr == pPatchRec->patch.pPrivInstrGC)
6388 addr++;
6389 }
6390
6391 for(;;)
6392 {
6393 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6394
6395 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6396 break;
6397
6398 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6399 {
6400 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6401 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6402 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6403 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6404 }
6405 addr = pPatchRec->patch.pPrivInstrGC + 1;
6406 }
6407
6408 pVM->patm.s.pvFaultMonitor = 0;
6409 return VINF_SUCCESS;
6410}
6411
6412
6413#ifdef VBOX_WITH_STATISTICS
6414
6415static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6416{
6417 if (pPatch->flags & PATMFL_SYSENTER)
6418 {
6419 return "SYSENT";
6420 }
6421 else
6422 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6423 {
6424 static char szTrap[16];
6425 uint32_t iGate;
6426
6427 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6428 if (iGate < 256)
6429 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6430 else
6431 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6432 return szTrap;
6433 }
6434 else
6435 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6436 return "DUPFUNC";
6437 else
6438 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6439 return "FUNCCALL";
6440 else
6441 if (pPatch->flags & PATMFL_TRAMPOLINE)
6442 return "TRAMP";
6443 else
6444 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6445}
6446
6447static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6448{
6449 switch(pPatch->uState)
6450 {
6451 case PATCH_ENABLED:
6452 return "ENA";
6453 case PATCH_DISABLED:
6454 return "DIS";
6455 case PATCH_DIRTY:
6456 return "DIR";
6457 case PATCH_UNUSABLE:
6458 return "UNU";
6459 case PATCH_REFUSED:
6460 return "REF";
6461 case PATCH_DISABLE_PENDING:
6462 return "DIP";
6463 default:
6464 AssertFailed();
6465 return " ";
6466 }
6467}
6468
6469/**
6470 * Resets the sample.
6471 * @param pVM The VM handle.
6472 * @param pvSample The sample registered using STAMR3RegisterCallback.
6473 */
6474static void patmResetStat(PVM pVM, void *pvSample)
6475{
6476 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6477 Assert(pPatch);
6478
6479 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6480 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6481}
6482
6483/**
6484 * Prints the sample into the buffer.
6485 *
6486 * @param pVM The VM handle.
6487 * @param pvSample The sample registered using STAMR3RegisterCallback.
6488 * @param pszBuf The buffer to print into.
6489 * @param cchBuf The size of the buffer.
6490 */
6491static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6492{
6493 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6494 Assert(pPatch);
6495
6496 Assert(pPatch->uState != PATCH_REFUSED);
6497 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6498
6499 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6500 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6501 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6502}
6503
6504/**
6505 * Returns the GC address of the corresponding patch statistics counter
6506 *
6507 * @returns Stat address
6508 * @param pVM The VM to operate on.
6509 * @param pPatch Patch structure
6510 */
6511RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6512{
6513 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6514 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6515}
6516
6517#endif /* VBOX_WITH_STATISTICS */
6518
6519#ifdef VBOX_WITH_DEBUGGER
6520/**
6521 * The '.patmoff' command.
6522 *
6523 * @returns VBox status.
6524 * @param pCmd Pointer to the command descriptor (as registered).
6525 * @param pCmdHlp Pointer to command helper functions.
6526 * @param pVM Pointer to the current VM (if any).
6527 * @param paArgs Pointer to (readonly) array of arguments.
6528 * @param cArgs Number of arguments in the array.
6529 */
6530static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6531{
6532 /*
6533 * Validate input.
6534 */
6535 if (!pVM)
6536 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6537
6538 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6539 PATMR3AllowPatching(pVM, false);
6540 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6541}
6542
6543/**
6544 * The '.patmon' command.
6545 *
6546 * @returns VBox status.
6547 * @param pCmd Pointer to the command descriptor (as registered).
6548 * @param pCmdHlp Pointer to command helper functions.
6549 * @param pVM Pointer to the current VM (if any).
6550 * @param paArgs Pointer to (readonly) array of arguments.
6551 * @param cArgs Number of arguments in the array.
6552 */
6553static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6554{
6555 /*
6556 * Validate input.
6557 */
6558 if (!pVM)
6559 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6560
6561 PATMR3AllowPatching(pVM, true);
6562 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6563 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6564}
6565#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette