VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 39034

Last change on this file since 39034 was 39034, checked in by vboxsync, 13 years ago

VMM,INTNET: Addressing unused variable warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 253.3 KB
Line 
1/* $Id: PATM.cpp 39034 2011-10-19 11:43:52Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
/**
 * Refresh trampoline patch state.
 *
 * Groups the VM handle, the trampoline patch record and the new target
 * patch record.  Presumably passed as user data to a tree-walk / refresh
 * callback defined elsewhere in this file — confirm against the callers.
 */
typedef struct PATMREFRESHPATCH
{
    /** Pointer to the VM structure. */
    PVM         pVM;
    /** The trampoline patch record. */
    PPATCHINFO  pPatchTrampoline;
    /** The new patch we want to jump to. */
    PPATCHINFO  pPatchRec;
} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71/*******************************************************************************
72* Internal Functions *
73*******************************************************************************/
74
75static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
76static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
77static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
78
79#ifdef LOG_ENABLED // keep gcc quiet
80static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
81#endif
82#ifdef VBOX_WITH_STATISTICS
83static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
84static void patmResetStat(PVM pVM, void *pvSample);
85static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
86#endif
87
88#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
89#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
90
91static int patmReinit(PVM pVM);
92static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
93
94#ifdef VBOX_WITH_DEBUGGER
95static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
96static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
97static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
98
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
103 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
104 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
105};
106#endif
107
108/* Don't want to break saved states, so put it here as a global variable. */
109static unsigned int cIDTHandlersDisabled = 0;
110
/**
 * Initializes the PATM.
 *
 * Allocates the hypervisor memory block used by PATM and carves it up as:
 *   [patch memory][spare page][GC stack][GC state page][statistics]
 * (see the pointer arithmetic below), runs patmReinit(), registers the
 * saved-state unit, the debugger commands and the statistics counters.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) PATMR3Init(PVM pVM)
{
    int rc;

    Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));

    /* These values can't change as they are hardcoded in patch code (old saved states!) */
    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
    AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));

    /* PATMInterruptFlag is defined in assembly; make sure it matches the C-side FF bits. */
    AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
                     ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));

    /* Allocate patch memory and GC patch state memory. */
    pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
    /* Add another page in case the generated code is much larger than expected. */
    /** @todo bad safety precaution */
    rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
    if (RT_FAILURE(rc))
    {
        Log(("MMHyperAlloc failed with %Rrc\n", rc));
        return rc;
    }
    pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

    /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
    pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
    pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

    /*
     * Hypervisor memory for GC status data (read/write)
     *
     * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
     * Note2: This doesn't really belong here, but we need access to it for relocation purposes
     *
     */
    Assert(sizeof(PATMGCSTATE) < PAGE_SIZE);    /* Note: hardcoded dependencies on this exist. */
    pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
    pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);

    /* Hypervisor memory for patch statistics */
    pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
    pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

    /* Memory for patch lookup trees. */
    rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
    AssertRCReturn(rc, rc);
    pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
    /* Check CFGM option. */
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
    if (RT_FAILURE(rc))
# ifdef PATM_DISABLE_ALL
        pVM->fPATMEnabled = false;
# else
        pVM->fPATMEnabled = true;
# endif
#endif

    rc = patmReinit(pVM);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register save and load state notifiers.
     * NOTE(review): the registered unit size covers patch memory, stack and GC
     * state but not PATM_STAT_MEMSIZE — this appears intentional (statistics
     * are not saved) but confirm against patmR3Save.
     */
    rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
                               NULL, NULL, NULL,
                               NULL, patmR3Save, NULL,
                               NULL, patmR3Load, NULL);
    AssertRCReturn(rc, rc);

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool s_fRegisteredCmds = false;  /* register the global commands only once per process */
    if (!s_fRegisteredCmds)
    {
        int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc2))
            s_fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
    STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
    STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
    STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
    STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
    STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");

    STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
    STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");

    STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");

    STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
    STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
    STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
    STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
    STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");

    STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
    STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");

    STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
    STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");

    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
    STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");

    STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");

    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");

    STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
    STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");

    STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");

    STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");

    STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");

    STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
#endif /* VBOX_WITH_STATISTICS */

    /* Log the sizes of the generated code templates (defined in PATMA). */
    Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
    Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
    Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
    Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
    Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
    Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
    Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
    Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));

    return rc;
}
286
287/**
288 * Finalizes HMA page attributes.
289 *
290 * @returns VBox status code.
291 * @param pVM The VM handle.
292 */
293VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
294{
295 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
296 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
297 if (RT_FAILURE(rc))
298 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
299
300 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
301 if (RT_FAILURE(rc))
302 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
303
304 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
305 if (RT_FAILURE(rc))
306 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
307
308 return rc;
309}
310
/**
 * (Re)initializes PATM
 *
 * Clears the GC state, stack, statistics and patch memory areas, resets the
 * lookup trees and bookkeeping fields, and regenerates the global helper
 * functions as a fake "global" patch record.
 *
 * Called from PATMR3Init() and PATMR3Reset(); assumes the hypervisor memory
 * was already allocated by PATMR3Init().
 *
 * @returns VBox status code.
 * @param   pVM     The VM.
 */
static int patmReinit(PVM pVM)
{
    int rc;

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
    AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));

    /*
     * Setup any fixed pointers and offsets.
     */
    pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);

#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
#ifndef PATM_DISABLE_ALL
    pVM->fPATMEnabled = true;
#endif
#endif

    Assert(pVM->patm.s.pGCStateHC);
    memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));

    Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
    pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;  /* guest starts with interrupts enabled */

    Assert(pVM->patm.s.pGCStackHC);
    /* NOTE(review): only PAGE_SIZE bytes are cleared here although the stack
       area spans PATM_STACK_TOTAL_SIZE — confirm whether the remainder is
       intentionally left untouched. */
    memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
    pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;  /* private stack pointer starts at the top */
    pVM->patm.s.pGCStateHC->fPIF = 1;    /* PATM Interrupt Flag */

    Assert(pVM->patm.s.pStatsHC);
    memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
    AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));

    Assert(pVM->patm.s.pPatchMemHC);
    Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
    memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
    AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));

    /* Needed for future patching of sldt/sgdt/sidt/str etc. */
    pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));

    Assert(pVM->patm.s.PatchLookupTreeHC);
    Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));

    /*
     * (Re)Initialize PATM structure
     */
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
    pVM->patm.s.offPatchMem          = 16;  /* don't start with zero here */
    pVM->patm.s.uCurrentPatchIdx     = 1;   /* Index zero is a dummy */
    pVM->patm.s.pvFaultMonitor       = 0;
    pVM->patm.s.deltaReloc           = 0;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest    = ~0;
    pVM->patm.s.pPatchedInstrGCHighest   = 0;

    pVM->patm.s.PatchLookupTreeHC->PatchTree            = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage      = 0;

    pVM->patm.s.pfnSysEnterPatchGC = 0;
    pVM->patm.s.pfnSysEnterGC = 0;

    pVM->patm.s.fOutOfMemory = false;

    pVM->patm.s.pfnHelperCallGC = 0;

    /* Generate all global functions to be used by future patches. */
    /* We generate a fake patch in order to use the existing code for relocation. */
    rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
    if (RT_FAILURE(rc))
    {
        Log(("Out of memory!!!!\n"));
        return VERR_NO_MEMORY;
    }
    pVM->patm.s.pGlobalPatchRec->patch.flags             = PATMFL_GLOBAL_FUNCTIONS;
    pVM->patm.s.pGlobalPatchRec->patch.uState            = PATCH_ENABLED;
    pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;

    rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
    AssertRC(rc);

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
    /* Round to next 8 byte boundary. */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
    return rc;
}
412
413
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * The PATM will update the addresses used by the switcher.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) PATMR3Relocate(PVM pVM)
{
    /* Compute the delta from the old GC state address to the new one;
       all other PATM GC pointers shift by the same amount. */
    RTRCPTR     GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
    RTRCINTPTR  delta = GCPtrNew - pVM->patm.s.pGCStateGC;

    Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
    if (delta)
    {
        PCPUMCTX pCtx;

        /* Update CPUMCTX guest context pointer. */
        pVM->patm.s.pCPUMCtxGC   += delta;

        /* Must be set before walking the trees: RelocatePatches reads it. */
        pVM->patm.s.deltaReloc = delta;

        RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);

        pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

        /* If we are running patch code right now, then also adjust EIP. */
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            pCtx->eip += delta;

        /* Re-derive all GC views of the hypervisor areas from their HC pointers. */
        pVM->patm.s.pGCStateGC  = GCPtrNew;
        pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

        pVM->patm.s.pGCStackGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

        pVM->patm.s.pStatsGC    = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

        pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

        if (pVM->patm.s.pfnSysEnterPatchGC)
            pVM->patm.s.pfnSysEnterPatchGC += delta;

        /* Deal with the global patch functions. */
        pVM->patm.s.pfnHelperCallGC += delta;
        pVM->patm.s.pfnHelperRetGC  += delta;
        pVM->patm.s.pfnHelperIretGC += delta;
        pVM->patm.s.pfnHelperJumpGC += delta;

        /* The global patch record is not in the lookup tree, so relocate it explicitly. */
        RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
    }
}
467
468
469/**
470 * Terminates the PATM.
471 *
472 * Termination means cleaning up and freeing all resources,
473 * the VM it self is at this point powered off or suspended.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM to operate on.
477 */
478VMMR3DECL(int) PATMR3Term(PVM pVM)
479{
480 /* Memory was all allocated from the two MM heaps and requires no freeing. */
481 return VINF_SUCCESS;
482}
483
484
485/**
486 * PATM reset callback.
487 *
488 * @returns VBox status code.
489 * @param pVM The VM which is reset.
490 */
491VMMR3DECL(int) PATMR3Reset(PVM pVM)
492{
493 Log(("PATMR3Reset\n"));
494
495 /* Free all patches. */
496 while (true)
497 {
498 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
499 if (pPatchRec)
500 {
501 PATMRemovePatch(pVM, pPatchRec, true);
502 }
503 else
504 break;
505 }
506 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
507 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
508 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
509 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
510
511 int rc = patmReinit(pVM);
512 if (RT_SUCCESS(rc))
513 rc = PATMR3InitFinalize(pVM); /* paranoia */
514
515 return rc;
516}
517
518/**
519 * Read callback for disassembly function; supports reading bytes that cross a page boundary
520 *
521 * @returns VBox status code.
522 * @param pSrc GC source pointer
523 * @param pDest HC destination pointer
524 * @param size Number of bytes to read
525 * @param pvUserdata Callback specific user data (pCpu)
526 *
527 */
528int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
529{
530 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
531 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
532 int orgsize = size;
533
534 Assert(size);
535 if (size == 0)
536 return VERR_INVALID_PARAMETER;
537
538 /*
539 * Trap/interrupt handler typically call common code on entry. Which might already have patches inserted.
540 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
541 */
542 /** @todo could change in the future! */
543 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
544 {
545 for (int i=0;i<orgsize;i++)
546 {
547 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
548 if (RT_SUCCESS(rc))
549 {
550 pSrc++;
551 pDest++;
552 size--;
553 }
554 else break;
555 }
556 if (size == 0)
557 return VINF_SUCCESS;
558#ifdef VBOX_STRICT
559 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
560 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
561 {
562 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
563 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
564 }
565#endif
566 }
567
568 if ( !pDisInfo->pInstrHC
569 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1)
570 && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc)))
571 {
572 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, pSrc));
573 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
574 }
575 else
576 {
577 Assert(pDisInfo->pInstrHC);
578
579 uint8_t *pInstrHC = pDisInfo->pInstrHC;
580
581 Assert(pInstrHC);
582
583 /* pInstrHC is the base address; adjust according to the GC pointer. */
584 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
585
586 memcpy(pDest, (void *)pInstrHC, size);
587 }
588
589 return VINF_SUCCESS;
590}
591
592/**
593 * Callback function for RTAvloU32DoWithAll
594 *
595 * Updates all fixups in the patches
596 *
597 * @returns VBox status code.
598 * @param pNode Current node
599 * @param pParam The VM to operate on.
600 */
601static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
602{
603 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
604 PVM pVM = (PVM)pParam;
605 RTRCINTPTR delta;
606#ifdef LOG_ENABLED
607 DISCPUSTATE cpu;
608 char szOutput[256];
609 uint32_t opsize;
610 bool disret;
611#endif
612 int rc;
613
614 /* Nothing to do if the patch is not active. */
615 if (pPatch->patch.uState == PATCH_REFUSED)
616 return 0;
617
618#ifdef LOG_ENABLED
619 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
620 {
621 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
622 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
623 Log(("Org patch jump: %s", szOutput));
624 }
625#endif
626
627 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
628 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
629
630 /*
631 * Apply fixups
632 */
633 PRELOCREC pRec = 0;
634 AVLPVKEY key = 0;
635
636 while (true)
637 {
638 /* Get the record that's closest from above */
639 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
640 if (pRec == 0)
641 break;
642
643 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
644
645 switch (pRec->uType)
646 {
647 case FIXUP_ABSOLUTE:
648 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
649 if ( !pRec->pSource
650 || PATMIsPatchGCAddr(pVM, pRec->pSource))
651 {
652 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
653 }
654 else
655 {
656 uint8_t curInstr[15];
657 uint8_t oldInstr[15];
658 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
659
660 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
661
662 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
663 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
664
665 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
666 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
667
668 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
669
670 if ( rc == VERR_PAGE_NOT_PRESENT
671 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
672 {
673 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
674
675 Log(("PATM: Patch page not present -> check later!\n"));
676 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
677 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
678 }
679 else
680 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
681 {
682 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
683 /*
684 * Disable patch; this is not a good solution
685 */
686 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
687 pPatch->patch.uState = PATCH_DISABLED;
688 }
689 else
690 if (RT_SUCCESS(rc))
691 {
692 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
693 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
694 AssertRC(rc);
695 }
696 }
697 break;
698
699 case FIXUP_REL_JMPTOPATCH:
700 {
701 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
702
703 if ( pPatch->patch.uState == PATCH_ENABLED
704 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
705 {
706 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
707 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
708 RTRCPTR pJumpOffGC;
709 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
710 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
711
712#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
713 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
714#else
715 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
716#endif
717
718 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
719#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
720 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
721 {
722 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
723
724 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
725 oldJump[0] = pPatch->patch.aPrivInstr[0];
726 oldJump[1] = pPatch->patch.aPrivInstr[1];
727 *(RTRCUINTPTR *)&oldJump[2] = displOld;
728 }
729 else
730#endif
731 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
732 {
733 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
734 oldJump[0] = 0xE9;
735 *(RTRCUINTPTR *)&oldJump[1] = displOld;
736 }
737 else
738 {
739 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
740 continue; //this should never happen!!
741 }
742 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
743
744 /*
745 * Read old patch jump and compare it to the one we previously installed
746 */
747 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
748 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
749
750 if ( rc == VERR_PAGE_NOT_PRESENT
751 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
752 {
753 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
754
755 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
756 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
757 }
758 else
759 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
760 {
761 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
762 /*
763 * Disable patch; this is not a good solution
764 */
765 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
766 pPatch->patch.uState = PATCH_DISABLED;
767 }
768 else
769 if (RT_SUCCESS(rc))
770 {
771 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
772 AssertRC(rc);
773 }
774 else
775 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
776 }
777 else
778 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
779
780 pRec->pDest = pTarget;
781 break;
782 }
783
784 case FIXUP_REL_JMPTOGUEST:
785 {
786 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
787 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
788
789 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
790 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
791 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
792 pRec->pSource = pSource;
793 break;
794 }
795
796 default:
797 AssertMsg(0, ("Invalid fixup type!!\n"));
798 return VERR_INVALID_PARAMETER;
799 }
800 }
801
802#ifdef LOG_ENABLED
803 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
804 {
805 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
806 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
807 Log(("Rel patch jump: %s", szOutput));
808 }
809#endif
810 return 0;
811}
812
813/**
814 * \#PF Handler callback for virtual access handler ranges.
815 *
816 * Important to realize that a physical page in a range can have aliases, and
817 * for ALL and WRITE handlers these will also trigger.
818 *
819 * @returns VINF_SUCCESS if the handler have carried out the operation.
820 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
821 * @param pVM VM Handle.
822 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
823 * @param pvPtr The HC mapping of that address.
824 * @param pvBuf What the guest is reading/writing.
825 * @param cbBuf How much it's reading/writing.
826 * @param enmAccessType The access type.
827 * @param pvUser User argument.
828 */
829DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
830{
831 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
832 /** @todo could be the wrong virtual address (alias) */
833 pVM->patm.s.pvFaultMonitor = GCPtr;
834 PATMR3HandleMonitoredPage(pVM);
835 return VINF_PGM_HANDLER_DO_DEFAULT;
836}
837
838
#ifdef VBOX_WITH_DEBUGGER
/**
 * RTAvloU32DoWithAll enumeration callback.
 *
 * Re-enables the patch record currently being visited.
 *
 * @returns 0 (continue enumeration).
 * @param   pNode   Current node; actually a PPATMPATCHREC whose key is the
 *                  guest address of the patched instruction.
 * @param   pVM     The VM to operate on (passed as an opaque pointer).
 */
static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
{
    PPATMPATCHREC pPatchRec = (PPATMPATCHREC)pNode;
    PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatchRec->Core.Key);
    return 0;
}
#endif /* VBOX_WITH_DEBUGGER */
857
858
#ifdef VBOX_WITH_DEBUGGER
/**
 * RTAvloU32DoWithAll enumeration callback.
 *
 * Disables the patch record currently being visited.
 *
 * @returns 0 (continue enumeration).
 * @param   pNode   Current node; actually a PPATMPATCHREC whose key is the
 *                  guest address of the patched instruction.
 * @param   pVM     The VM to operate on (passed as an opaque pointer).
 */
static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
{
    PPATMPATCHREC pPatchRec = (PPATMPATCHREC)pNode;
    PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatchRec->Core.Key);
    return 0;
}
#endif
877
878/**
879 * Returns the host context pointer and size of the patch memory block
880 *
881 * @returns VBox status code.
882 * @param pVM The VM to operate on.
883 * @param pcb Size of the patch memory block
884 */
885VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
886{
887 if (pcb)
888 *pcb = pVM->patm.s.cbPatchMem;
889
890 return pVM->patm.s.pPatchMemHC;
891}
892
893
894/**
895 * Returns the guest context pointer and size of the patch memory block
896 *
897 * @returns VBox status code.
898 * @param pVM The VM to operate on.
899 * @param pcb Size of the patch memory block
900 */
901VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
902{
903 if (pcb)
904 *pcb = pVM->patm.s.cbPatchMem;
905
906 return pVM->patm.s.pPatchMemGC;
907}
908
909
910/**
911 * Returns the host context pointer of the GC context structure
912 *
913 * @returns VBox status code.
914 * @param pVM The VM to operate on.
915 */
916VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
917{
918 return pVM->patm.s.pGCStateHC;
919}
920
921
922/**
923 * Checks whether the HC address is part of our patch region
924 *
925 * @returns VBox status code.
926 * @param pVM The VM to operate on.
927 * @param pAddrGC Guest context address
928 */
929VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
930{
931 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
932}
933
934
935/**
936 * Allows or disallow patching of privileged instructions executed by the guest OS
937 *
938 * @returns VBox status code.
939 * @param pVM The VM to operate on.
940 * @param fAllowPatching Allow/disallow patching
941 */
942VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
943{
944 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
945 return VINF_SUCCESS;
946}
947
948/**
949 * Convert a GC patch block pointer to a HC patch pointer
950 *
951 * @returns HC pointer or NULL if it's not a GC patch pointer
952 * @param pVM The VM to operate on.
953 * @param pAddrGC GC pointer
954 */
955VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
956{
957 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
958 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
959 else
960 return NULL;
961}
962
963/**
964 * Query PATM state (enabled/disabled)
965 *
966 * @returns 0 - disabled, 1 - enabled
967 * @param pVM The VM to operate on.
968 */
969VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
970{
971 return pVM->fPATMEnabled;
972}
973
974
975/**
976 * Convert guest context address to host context pointer
977 *
978 * @returns VBox status code.
979 * @param pVM The VM to operate on.
980 * @param pCacheRec Address conversion cache record
981 * @param pGCPtr Guest context pointer
982 *
983 * @returns Host context pointer or NULL in case of an error
984 *
985 */
986R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
987{
988 int rc;
989 R3PTRTYPE(uint8_t *) pHCPtr;
990 uint32_t offset;
991
992 if (PATMIsPatchGCAddr(pVM, pGCPtr))
993 {
994 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
995 Assert(pPatch);
996 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
997 }
998
999 offset = pGCPtr & PAGE_OFFSET_MASK;
1000 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1001 return pCacheRec->pPageLocStartHC + offset;
1002
1003 /* Release previous lock if any. */
1004 if (pCacheRec->Lock.pvMap)
1005 {
1006 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1007 pCacheRec->Lock.pvMap = NULL;
1008 }
1009
1010 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1011 if (rc != VINF_SUCCESS)
1012 {
1013 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1014 return NULL;
1015 }
1016 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1017 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1018 return pHCPtr;
1019}
1020
1021
/**
 * Calculates and fills in all branch targets
 *
 * Walks (and consumes) the patch's jump record tree, resolving each recorded
 * branch to its target inside patch memory and writing the final dword
 * displacement into the generated code.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_PATCHING_REFUSED when a
 *          target cannot be resolved).
 * @param pVM The VM to operate on.
 * @param pPatch Current patch block pointer
 *
 */
static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
{
    int32_t displ;

    PJUMPREC pRec = 0;
    unsigned nrJumpRecs = 0;

    /*
     * Set all branch targets inside the patch block.
     * We remove all jump records as they are no longer needed afterwards.
     */
    while (true)
    {
        RCPTRTYPE(uint8_t *) pInstrGC;
        RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;

        /* Remove-best-fit with key 0 pops an arbitrary record until the tree is empty. */
        pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
        if (pRec == 0)
            break;

        nrJumpRecs++;

        /* HC in patch block to GC in patch block. */
        pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);

        if (pRec->opcode == OP_CALL)
        {
            /* Special case: call function replacement patch from this patch block.
             */
            PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
            if (!pFunctionRec)
            {
                int rc;

                /* No function patch yet; try to install one now (refused if the
                   target already exists as a normal, non-function patch). */
                if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
                    rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
                else
                    rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */

                if (RT_FAILURE(rc))
                {
                    uint8_t *pPatchHC;
                    RTRCPTR pPatchGC;
                    RTRCPTR pOrgInstrGC;

                    pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
                    Assert(pOrgInstrGC);

                    /* Failure for some reason -> mark exit point with int 3. */
                    Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));

                    pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
                    Assert(pPatchGC);

                    pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);

                    /* Set a breakpoint at the very beginning of the recompiled instruction */
                    *pPatchHC = 0xCC;

                    continue;
                }
            }
            else
            {
                Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
                pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
            }

            pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
        }
        else
            pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);

        if (pBranchTargetGC == 0)
        {
            AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
            return VERR_PATCHING_REFUSED;
        }
        /* Our jumps *always* have a dword displacement (to make things easier). */
        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
        /* Displacement is relative to the end of the 4-byte operand. */
        displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
        *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
        Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
    }
    Assert(nrJumpRecs == pPatch->nrJumpRecs);
    Assert(pPatch->JumpTree == 0);
    return VINF_SUCCESS;
}
1117
1118/* Add an illegal instruction record
1119 *
1120 * @param pVM The VM to operate on.
1121 * @param pPatch Patch structure ptr
1122 * @param pInstrGC Guest context pointer to privileged instruction
1123 *
1124 */
1125static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1126{
1127 PAVLPVNODECORE pRec;
1128
1129 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1130 Assert(pRec);
1131 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1132
1133 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1134 Assert(ret); NOREF(ret);
1135 pPatch->pTempInfo->nrIllegalInstr++;
1136}
1137
1138static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1139{
1140 PAVLPVNODECORE pRec;
1141
1142 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1143 if (pRec)
1144 return true;
1145 else
1146 return false;
1147}
1148
1149/**
1150 * Add a patch to guest lookup record
1151 *
1152 * @param pVM The VM to operate on.
1153 * @param pPatch Patch structure ptr
1154 * @param pPatchInstrHC Guest context pointer to patch block
1155 * @param pInstrGC Guest context pointer to privileged instruction
1156 * @param enmType Lookup type
1157 * @param fDirty Dirty flag
1158 *
1159 */
1160 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1161void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1162{
1163 bool ret;
1164 PRECPATCHTOGUEST pPatchToGuestRec;
1165 PRECGUESTTOPATCH pGuestToPatchRec;
1166 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1167
1168 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1169 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1170
1171 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1172 {
1173 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1174 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1175 return; /* already there */
1176
1177 Assert(!pPatchToGuestRec);
1178 }
1179#ifdef VBOX_STRICT
1180 else
1181 {
1182 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1183 Assert(!pPatchToGuestRec);
1184 }
1185#endif
1186
1187 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1188 Assert(pPatchToGuestRec);
1189 pPatchToGuestRec->Core.Key = PatchOffset;
1190 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1191 pPatchToGuestRec->enmType = enmType;
1192 pPatchToGuestRec->fDirty = fDirty;
1193
1194 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1195 Assert(ret);
1196
1197 /* GC to patch address */
1198 if (enmType == PATM_LOOKUP_BOTHDIR)
1199 {
1200 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1201 if (!pGuestToPatchRec)
1202 {
1203 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1204 pGuestToPatchRec->Core.Key = pInstrGC;
1205 pGuestToPatchRec->PatchOffset = PatchOffset;
1206
1207 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1208 Assert(ret);
1209 }
1210 }
1211
1212 pPatch->nrPatch2GuestRecs++;
1213}
1214
1215
1216/**
1217 * Removes a patch to guest lookup record
1218 *
1219 * @param pVM The VM to operate on.
1220 * @param pPatch Patch structure ptr
1221 * @param pPatchInstrGC Guest context pointer to patch block
1222 */
1223void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1224{
1225 PAVLU32NODECORE pNode;
1226 PAVLU32NODECORE pNode2;
1227 PRECPATCHTOGUEST pPatchToGuestRec;
1228 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1229
1230 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1231 Assert(pPatchToGuestRec);
1232 if (pPatchToGuestRec)
1233 {
1234 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1235 {
1236 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1237
1238 Assert(pGuestToPatchRec->Core.Key);
1239 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1240 Assert(pNode2);
1241 }
1242 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1243 Assert(pNode);
1244
1245 MMR3HeapFree(pPatchToGuestRec);
1246 pPatch->nrPatch2GuestRecs--;
1247 }
1248}
1249
1250
1251/**
1252 * RTAvlPVDestroy callback.
1253 */
1254static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1255{
1256 MMR3HeapFree(pNode);
1257 return 0;
1258}
1259
1260/**
1261 * Empty the specified tree (PV tree, MMR3 heap)
1262 *
1263 * @param pVM The VM to operate on.
1264 * @param ppTree Tree to empty
1265 */
1266void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1267{
1268 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1269}
1270
1271
1272/**
1273 * RTAvlU32Destroy callback.
1274 */
1275static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1276{
1277 MMR3HeapFree(pNode);
1278 return 0;
1279}
1280
1281/**
1282 * Empty the specified tree (U32 tree, MMR3 heap)
1283 *
1284 * @param pVM The VM to operate on.
1285 * @param ppTree Tree to empty
1286 */
1287void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1288{
1289 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1290}
1291
1292
1293/**
1294 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1295 *
1296 * @returns VBox status code.
1297 * @param pVM The VM to operate on.
1298 * @param pCpu CPU disassembly state
1299 * @param pInstrGC Guest context pointer to privileged instruction
1300 * @param pCurInstrGC Guest context pointer to the current instruction
1301 * @param pCacheRec Cache record ptr
1302 *
1303 */
1304static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1305{
1306 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1307 bool fIllegalInstr = false;
1308
1309 /*
1310 * Preliminary heuristics:
1311 *- no call instructions without a fixed displacement between cli and sti/popf
1312 *- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1313 *- no nested pushf/cli
1314 *- sti/popf should be the (eventual) target of all branches
1315 *- no near or far returns; no int xx, no into
1316 *
1317 * Note: Later on we can impose less stricter guidelines if the need arises
1318 */
1319
1320 /* Bail out if the patch gets too big. */
1321 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1322 {
1323 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1324 fIllegalInstr = true;
1325 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1326 }
1327 else
1328 {
1329 /* No unconditional jumps or calls without fixed displacements. */
1330 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1331 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1332 )
1333 {
1334 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1335 if ( pCpu->param1.size == 6 /* far call/jmp */
1336 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1337 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1338 )
1339 {
1340 fIllegalInstr = true;
1341 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1342 }
1343 }
1344
1345 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1346 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1347 {
1348 if ( pCurInstrGC > pPatch->pPrivInstrGC
1349 && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1350 {
1351 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1352 /* We turn this one into a int 3 callable patch. */
1353 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1354 }
1355 }
1356 else
1357 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1358 if (pPatch->opcode == OP_PUSHF)
1359 {
1360 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1361 {
1362 fIllegalInstr = true;
1363 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1364 }
1365 }
1366
1367 /* no far returns */
1368 if (pCpu->pCurInstr->opcode == OP_RETF)
1369 {
1370 pPatch->pTempInfo->nrRetInstr++;
1371 fIllegalInstr = true;
1372 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1373 }
1374 else if ( pCpu->pCurInstr->opcode == OP_INT3
1375 || pCpu->pCurInstr->opcode == OP_INT
1376 || pCpu->pCurInstr->opcode == OP_INTO)
1377 {
1378 /* No int xx or into either. */
1379 fIllegalInstr = true;
1380 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1381 }
1382 }
1383
1384 pPatch->cbPatchBlockSize += pCpu->opsize;
1385
1386 /* Illegal instruction -> end of analysis phase for this code block */
1387 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1388 return VINF_SUCCESS;
1389
1390 /* Check for exit points. */
1391 switch (pCpu->pCurInstr->opcode)
1392 {
1393 case OP_SYSEXIT:
1394 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1395
1396 case OP_SYSENTER:
1397 case OP_ILLUD2:
1398 /* This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more. */
1399 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1400 return VINF_SUCCESS;
1401
1402 case OP_STI:
1403 case OP_POPF:
1404 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1405 /* If out exit point lies within the generated patch jump, then we have to refuse!! */
1406 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1407 {
1408 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1409 return VERR_PATCHING_REFUSED;
1410 }
1411 if (pPatch->opcode == OP_PUSHF)
1412 {
1413 if (pCpu->pCurInstr->opcode == OP_POPF)
1414 {
1415 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1416 return VINF_SUCCESS;
1417
1418 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1419 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1420 pPatch->flags |= PATMFL_CHECK_SIZE;
1421 }
1422 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1423 }
1424 /* else: fall through. */
1425 case OP_RETN: /* exit point for function replacement */
1426 return VINF_SUCCESS;
1427
1428 case OP_IRET:
1429 return VINF_SUCCESS; /* exitpoint */
1430
1431 case OP_CPUID:
1432 case OP_CALL:
1433 case OP_JMP:
1434 break;
1435
1436 default:
1437 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1438 {
1439 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1440 return VINF_SUCCESS; /* exit point */
1441 }
1442 break;
1443 }
1444
1445 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1446 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1447 {
1448 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1449 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1450 return VINF_SUCCESS;
1451 }
1452
1453 return VWRN_CONTINUE_ANALYSIS;
1454}
1455
1456/**
1457 * Analyses the instructions inside a function for compliance
1458 *
1459 * @returns VBox status code.
1460 * @param pVM The VM to operate on.
1461 * @param pCpu CPU disassembly state
1462 * @param pInstrGC Guest context pointer to privileged instruction
1463 * @param pCurInstrGC Guest context pointer to the current instruction
1464 * @param pCacheRec Cache record ptr
1465 *
1466 */
1467static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1468{
1469 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1470 bool fIllegalInstr = false;
1471
1472 //Preliminary heuristics:
1473 //- no call instructions
1474 //- ret ends a block
1475
1476 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1477
1478 // bail out if the patch gets too big
1479 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1480 {
1481 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 else
1486 {
1487 // no unconditional jumps or calls without fixed displacements
1488 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1489 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1490 )
1491 {
1492 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1493 if ( pCpu->param1.size == 6 /* far call/jmp */
1494 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1495 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1496 )
1497 {
1498 fIllegalInstr = true;
1499 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1500 }
1501 }
1502 else /* no far returns */
1503 if (pCpu->pCurInstr->opcode == OP_RETF)
1504 {
1505 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1506 fIllegalInstr = true;
1507 }
1508 else /* no int xx or into either */
1509 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1510 {
1511 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1512 fIllegalInstr = true;
1513 }
1514
1515 #if 0
1516 ///@todo we can handle certain in/out and privileged instructions in the guest context
1517 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1518 {
1519 Log(("Illegal instructions for function patch!!\n"));
1520 return VERR_PATCHING_REFUSED;
1521 }
1522 #endif
1523 }
1524
1525 pPatch->cbPatchBlockSize += pCpu->opsize;
1526
1527 /* Illegal instruction -> end of analysis phase for this code block */
1528 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1529 {
1530 return VINF_SUCCESS;
1531 }
1532
1533 // Check for exit points
1534 switch (pCpu->pCurInstr->opcode)
1535 {
1536 case OP_ILLUD2:
1537 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1538 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1539 return VINF_SUCCESS;
1540
1541 case OP_IRET:
1542 case OP_SYSEXIT: /* will fault or emulated in GC */
1543 case OP_RETN:
1544 return VINF_SUCCESS;
1545
1546 case OP_POPF:
1547 case OP_STI:
1548 return VWRN_CONTINUE_ANALYSIS;
1549 default:
1550 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1551 {
1552 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1553 return VINF_SUCCESS; /* exit point */
1554 }
1555 return VWRN_CONTINUE_ANALYSIS;
1556 }
1557
1558 return VWRN_CONTINUE_ANALYSIS;
1559}
1560
1561/**
1562 * Recompiles the instructions in a code block
1563 *
1564 * @returns VBox status code.
1565 * @param pVM The VM to operate on.
1566 * @param pCpu CPU disassembly state
1567 * @param pInstrGC Guest context pointer to privileged instruction
1568 * @param pCurInstrGC Guest context pointer to the current instruction
1569 * @param pCacheRec Cache record ptr
1570 *
1571 */
1572static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1573{
1574 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1575 int rc = VINF_SUCCESS;
1576 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1577
1578 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1579
1580 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1581 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1582 {
1583 /*
1584 * Been there, done that; so insert a jump (we don't want to duplicate code)
1585 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1586 */
1587 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1588 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1589 }
1590
1591 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1592 {
1593 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1594 }
1595 else
1596 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1597
1598 if (RT_FAILURE(rc))
1599 return rc;
1600
1601 /* Note: Never do a direct return unless a failure is encountered! */
1602
1603 /* Clear recompilation of next instruction flag; we are doing that right here. */
1604 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1605 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1606
1607 /* Add lookup record for patch to guest address translation */
1608 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1609
1610 /* Update lowest and highest instruction address for this patch */
1611 if (pCurInstrGC < pPatch->pInstrGCLowest)
1612 pPatch->pInstrGCLowest = pCurInstrGC;
1613 else
1614 if (pCurInstrGC > pPatch->pInstrGCHighest)
1615 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1616
1617 /* Illegal instruction -> end of recompile phase for this code block. */
1618 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1619 {
1620 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1621 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1622 goto end;
1623 }
1624
1625 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1626 * Indirect calls are handled below.
1627 */
1628 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1629 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1630 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1631 {
1632 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1633 if (pTargetGC == 0)
1634 {
1635 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1636 return VERR_PATCHING_REFUSED;
1637 }
1638
1639 if (pCpu->pCurInstr->opcode == OP_CALL)
1640 {
1641 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1642 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1643 if (RT_FAILURE(rc))
1644 goto end;
1645 }
1646 else
1647 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1648
1649 if (RT_SUCCESS(rc))
1650 rc = VWRN_CONTINUE_RECOMPILE;
1651
1652 goto end;
1653 }
1654
1655 switch (pCpu->pCurInstr->opcode)
1656 {
1657 case OP_CLI:
1658 {
1659 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1660 * until we've found the proper exit point(s).
1661 */
1662 if ( pCurInstrGC != pInstrGC
1663 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1664 )
1665 {
1666 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1667 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1668 }
1669 /* Set by irq inhibition; no longer valid now. */
1670 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1671
1672 rc = patmPatchGenCli(pVM, pPatch);
1673 if (RT_SUCCESS(rc))
1674 rc = VWRN_CONTINUE_RECOMPILE;
1675 break;
1676 }
1677
1678 case OP_MOV:
1679 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1680 {
1681 /* mov ss, src? */
1682 if ( (pCpu->param1.flags & USE_REG_SEG)
1683 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1684 {
1685 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1686 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1687 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1688 }
1689#if 0 /* necessary for Haiku */
1690 else
1691 if ( (pCpu->param2.flags & USE_REG_SEG)
1692 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1693 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1694 {
1695 /* mov GPR, ss */
1696 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1697 if (RT_SUCCESS(rc))
1698 rc = VWRN_CONTINUE_RECOMPILE;
1699 break;
1700 }
1701#endif
1702 }
1703 goto duplicate_instr;
1704
1705 case OP_POP:
1706 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1707 {
1708 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1709
1710 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1711 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1712 }
1713 goto duplicate_instr;
1714
1715 case OP_STI:
1716 {
1717 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1718
1719 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1720 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1721 {
1722 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1723 fInhibitIRQInstr = true;
1724 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1725 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1726 }
1727 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1728
1729 if (RT_SUCCESS(rc))
1730 {
1731 DISCPUSTATE cpu = *pCpu;
1732 unsigned opsize;
1733 int disret;
1734 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1735
1736 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1737
1738 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1739 { /* Force pNextInstrHC out of scope after using it */
1740 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1741 if (pNextInstrHC == NULL)
1742 {
1743 AssertFailed();
1744 return VERR_PATCHING_REFUSED;
1745 }
1746
1747 // Disassemble the next instruction
1748 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1749 }
1750 if (disret == false)
1751 {
1752 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1753 return VERR_PATCHING_REFUSED;
1754 }
1755 pReturnInstrGC = pNextInstrGC + opsize;
1756
1757 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1758 || pReturnInstrGC <= pInstrGC
1759 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1760 )
1761 {
1762 /* Not an exit point for function duplication patches */
1763 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1764 && RT_SUCCESS(rc))
1765 {
1766 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1767 rc = VWRN_CONTINUE_RECOMPILE;
1768 }
1769 else
1770 rc = VINF_SUCCESS; //exit point
1771 }
1772 else {
1773 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1774 rc = VERR_PATCHING_REFUSED; //not allowed!!
1775 }
1776 }
1777 break;
1778 }
1779
1780 case OP_POPF:
1781 {
1782 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1783
1784 /* Not an exit point for IDT handler or function replacement patches */
1785 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1786 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1787 fGenerateJmpBack = false;
1788
1789 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1790 if (RT_SUCCESS(rc))
1791 {
1792 if (fGenerateJmpBack == false)
1793 {
1794 /* Not an exit point for IDT handler or function replacement patches */
1795 rc = VWRN_CONTINUE_RECOMPILE;
1796 }
1797 else
1798 {
1799 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1800 rc = VINF_SUCCESS; /* exit point! */
1801 }
1802 }
1803 break;
1804 }
1805
1806 case OP_PUSHF:
1807 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1808 if (RT_SUCCESS(rc))
1809 rc = VWRN_CONTINUE_RECOMPILE;
1810 break;
1811
1812 case OP_PUSH:
1813 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1814 {
1815 rc = patmPatchGenPushCS(pVM, pPatch);
1816 if (RT_SUCCESS(rc))
1817 rc = VWRN_CONTINUE_RECOMPILE;
1818 break;
1819 }
1820 goto duplicate_instr;
1821
1822 case OP_IRET:
1823 Log(("IRET at %RRv\n", pCurInstrGC));
1824 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1825 if (RT_SUCCESS(rc))
1826 {
1827 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1828 rc = VINF_SUCCESS; /* exit point by definition */
1829 }
1830 break;
1831
1832 case OP_ILLUD2:
1833 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1834 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1835 if (RT_SUCCESS(rc))
1836 rc = VINF_SUCCESS; /* exit point by definition */
1837 Log(("Illegal opcode (0xf 0xb)\n"));
1838 break;
1839
1840 case OP_CPUID:
1841 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1842 if (RT_SUCCESS(rc))
1843 rc = VWRN_CONTINUE_RECOMPILE;
1844 break;
1845
1846 case OP_STR:
1847 case OP_SLDT:
1848 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1849 if (RT_SUCCESS(rc))
1850 rc = VWRN_CONTINUE_RECOMPILE;
1851 break;
1852
1853 case OP_SGDT:
1854 case OP_SIDT:
1855 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1856 if (RT_SUCCESS(rc))
1857 rc = VWRN_CONTINUE_RECOMPILE;
1858 break;
1859
1860 case OP_RETN:
1861 /* retn is an exit point for function patches */
1862 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1863 if (RT_SUCCESS(rc))
1864 rc = VINF_SUCCESS; /* exit point by definition */
1865 break;
1866
1867 case OP_SYSEXIT:
1868 /* Duplicate it, so it can be emulated in GC (or fault). */
1869 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1870 if (RT_SUCCESS(rc))
1871 rc = VINF_SUCCESS; /* exit point by definition */
1872 break;
1873
1874 case OP_CALL:
1875 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1876 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1877 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1878 */
1879 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1880 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1881 {
1882 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1883 if (RT_SUCCESS(rc))
1884 {
1885 rc = VWRN_CONTINUE_RECOMPILE;
1886 }
1887 break;
1888 }
1889 goto gen_illegal_instr;
1890
1891 case OP_JMP:
1892 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1893 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1894 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1895 */
1896 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1897 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1898 {
1899 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1900 if (RT_SUCCESS(rc))
1901 rc = VINF_SUCCESS; /* end of branch */
1902 break;
1903 }
1904 goto gen_illegal_instr;
1905
1906 case OP_INT3:
1907 case OP_INT:
1908 case OP_INTO:
1909 goto gen_illegal_instr;
1910
1911 case OP_MOV_DR:
1912 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1913 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1914 {
1915 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1916 if (RT_SUCCESS(rc))
1917 rc = VWRN_CONTINUE_RECOMPILE;
1918 break;
1919 }
1920 goto duplicate_instr;
1921
1922 case OP_MOV_CR:
1923 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1924 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1925 {
1926 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1927 if (RT_SUCCESS(rc))
1928 rc = VWRN_CONTINUE_RECOMPILE;
1929 break;
1930 }
1931 goto duplicate_instr;
1932
1933 default:
1934 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1935 {
1936gen_illegal_instr:
1937 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1938 if (RT_SUCCESS(rc))
1939 rc = VINF_SUCCESS; /* exit point by definition */
1940 }
1941 else
1942 {
1943duplicate_instr:
1944 Log(("patmPatchGenDuplicate\n"));
1945 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1946 if (RT_SUCCESS(rc))
1947 rc = VWRN_CONTINUE_RECOMPILE;
1948 }
1949 break;
1950 }
1951
1952end:
1953
1954 if ( !fInhibitIRQInstr
1955 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1956 {
1957 int rc2;
1958 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1959
1960 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1961 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1962 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1963 {
1964 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1965
1966 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1967 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1968 rc = VINF_SUCCESS; /* end of the line */
1969 }
1970 else
1971 {
1972 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1973 }
1974 if (RT_FAILURE(rc2))
1975 rc = rc2;
1976 }
1977
1978 if (RT_SUCCESS(rc))
1979 {
1980 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1981 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1982 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1983 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1984 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1985 )
1986 {
1987 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1988
1989 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1990 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1991
1992 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1993 AssertRC(rc);
1994 }
1995 }
1996 return rc;
1997}
1998
1999
2000#ifdef LOG_ENABLED
2001
2002/* Add a disasm jump record (temporary for prevent duplicate analysis)
2003 *
2004 * @param pVM The VM to operate on.
2005 * @param pPatch Patch structure ptr
2006 * @param pInstrGC Guest context pointer to privileged instruction
2007 *
2008 */
2009static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2010{
2011 PAVLPVNODECORE pRec;
2012
2013 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2014 Assert(pRec);
2015 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2016
2017 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2018 Assert(ret);
2019}
2020
2021/**
2022 * Checks if jump target has been analysed before.
2023 *
2024 * @returns VBox status code.
2025 * @param pPatch Patch struct
2026 * @param pInstrGC Jump target
2027 *
2028 */
2029static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2030{
2031 PAVLPVNODECORE pRec;
2032
2033 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2034 if (pRec)
2035 return true;
2036 return false;
2037}
2038
2039/**
2040 * For proper disassembly of the final patch block
2041 *
2042 * @returns VBox status code.
2043 * @param pVM The VM to operate on.
2044 * @param pCpu CPU disassembly state
2045 * @param pInstrGC Guest context pointer to privileged instruction
2046 * @param pCurInstrGC Guest context pointer to the current instruction
2047 * @param pCacheRec Cache record ptr
2048 *
2049 */
2050int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2051{
2052 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2053
2054 if (pCpu->pCurInstr->opcode == OP_INT3)
2055 {
2056 /* Could be an int3 inserted in a call patch. Check to be sure */
2057 DISCPUSTATE cpu;
2058 RTRCPTR pOrgJumpGC;
2059 uint32_t dummy;
2060
2061 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2062 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2063
2064 { /* Force pOrgJumpHC out of scope after using it */
2065 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2066
2067 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2068 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2069 return VINF_SUCCESS;
2070 }
2071 return VWRN_CONTINUE_ANALYSIS;
2072 }
2073
2074 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2075 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2076 {
2077 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2078 return VWRN_CONTINUE_ANALYSIS;
2079 }
2080
2081 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2082 || pCpu->pCurInstr->opcode == OP_INT
2083 || pCpu->pCurInstr->opcode == OP_IRET
2084 || pCpu->pCurInstr->opcode == OP_RETN
2085 || pCpu->pCurInstr->opcode == OP_RETF
2086 )
2087 {
2088 return VINF_SUCCESS;
2089 }
2090
2091 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2092 return VINF_SUCCESS;
2093
2094 return VWRN_CONTINUE_ANALYSIS;
2095}
2096
2097
2098/**
2099 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2100 *
2101 * @returns VBox status code.
2102 * @param pVM The VM to operate on.
2103 * @param pInstrGC Guest context pointer to the initial privileged instruction
2104 * @param pCurInstrGC Guest context pointer to the current instruction
2105 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2106 * @param pCacheRec Cache record ptr
2107 *
2108 */
2109int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2110{
2111 DISCPUSTATE cpu;
2112 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2113 int rc = VWRN_CONTINUE_ANALYSIS;
2114 uint32_t opsize, delta;
2115 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2116 bool disret;
2117 char szOutput[256];
2118
2119 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2120
2121 /* We need this to determine branch targets (and for disassembling). */
2122 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2123
2124 while(rc == VWRN_CONTINUE_ANALYSIS)
2125 {
2126 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2127
2128 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2129 if (pCurInstrHC == NULL)
2130 {
2131 rc = VERR_PATCHING_REFUSED;
2132 goto end;
2133 }
2134
2135 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2136 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2137 {
2138 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2139
2140 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2141 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2142 else
2143 Log(("DIS %s", szOutput));
2144
2145 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2146 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2147 {
2148 rc = VINF_SUCCESS;
2149 goto end;
2150 }
2151 }
2152 else
2153 Log(("DIS: %s", szOutput));
2154
2155 if (disret == false)
2156 {
2157 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2158 rc = VINF_SUCCESS;
2159 goto end;
2160 }
2161
2162 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2163 if (rc != VWRN_CONTINUE_ANALYSIS) {
2164 break; //done!
2165 }
2166
2167 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2168 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2169 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2170 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2171 )
2172 {
2173 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2174 RTRCPTR pOrgTargetGC;
2175
2176 if (pTargetGC == 0)
2177 {
2178 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2179 rc = VERR_PATCHING_REFUSED;
2180 break;
2181 }
2182
2183 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2184 {
2185 //jump back to guest code
2186 rc = VINF_SUCCESS;
2187 goto end;
2188 }
2189 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2190
2191 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2192 {
2193 rc = VINF_SUCCESS;
2194 goto end;
2195 }
2196
2197 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2198 {
2199 /* New jump, let's check it. */
2200 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2201
2202 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2203 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2204 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2205
2206 if (rc != VINF_SUCCESS) {
2207 break; //done!
2208 }
2209 }
2210 if (cpu.pCurInstr->opcode == OP_JMP)
2211 {
2212 /* Unconditional jump; return to caller. */
2213 rc = VINF_SUCCESS;
2214 goto end;
2215 }
2216
2217 rc = VWRN_CONTINUE_ANALYSIS;
2218 }
2219 pCurInstrGC += opsize;
2220 }
2221end:
2222 return rc;
2223}
2224
2225/**
2226 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2227 *
2228 * @returns VBox status code.
2229 * @param pVM The VM to operate on.
2230 * @param pInstrGC Guest context pointer to the initial privileged instruction
2231 * @param pCurInstrGC Guest context pointer to the current instruction
2232 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2233 * @param pCacheRec Cache record ptr
2234 *
2235 */
2236int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2237{
2238 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2239
2240 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2241 /* Free all disasm jump records. */
2242 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2243 return rc;
2244}
2245
2246#endif /* LOG_ENABLED */
2247
2248/**
2249 * Detects it the specified address falls within a 5 byte jump generated for an active patch.
2250 * If so, this patch is permanently disabled.
2251 *
2252 * @param pVM The VM to operate on.
2253 * @param pInstrGC Guest context pointer to instruction
2254 * @param pConflictGC Guest context pointer to check
2255 *
2256 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2257 *
2258 */
2259VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2260{
2261 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2262 if (pTargetPatch)
2263 {
2264 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2265 }
2266 return VERR_PATCH_NO_CONFLICT;
2267}
2268
2269/**
2270 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2271 *
2272 * @returns VBox status code.
2273 * @param pVM The VM to operate on.
2274 * @param pInstrGC Guest context pointer to privileged instruction
2275 * @param pCurInstrGC Guest context pointer to the current instruction
2276 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2277 * @param pCacheRec Cache record ptr
2278 *
2279 */
2280static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2281{
2282 DISCPUSTATE cpu;
2283 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2284 int rc = VWRN_CONTINUE_ANALYSIS;
2285 uint32_t opsize;
2286 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2287 bool disret;
2288#ifdef LOG_ENABLED
2289 char szOutput[256];
2290#endif
2291
2292 while (rc == VWRN_CONTINUE_RECOMPILE)
2293 {
2294 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2295
2296 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2297 if (pCurInstrHC == NULL)
2298 {
2299 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2300 goto end;
2301 }
2302#ifdef LOG_ENABLED
2303 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2304 Log(("Recompile: %s", szOutput));
2305#else
2306 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2307#endif
2308 if (disret == false)
2309 {
2310 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2311
2312 /* Add lookup record for patch to guest address translation */
2313 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2314 patmPatchGenIllegalInstr(pVM, pPatch);
2315 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2316 goto end;
2317 }
2318
2319 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2320 if (rc != VWRN_CONTINUE_RECOMPILE)
2321 {
2322 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2323 if ( rc == VINF_SUCCESS
2324 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2325 {
2326 DISCPUSTATE cpunext;
2327 uint32_t opsizenext;
2328 uint8_t *pNextInstrHC;
2329 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2330
2331 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2332
2333 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2334 * Recompile the next instruction as well
2335 */
2336 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2337 if (pNextInstrHC == NULL)
2338 {
2339 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2340 goto end;
2341 }
2342 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2343 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2344 if (disret == false)
2345 {
2346 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2347 goto end;
2348 }
2349 switch(cpunext.pCurInstr->opcode)
2350 {
2351 case OP_IRET: /* inhibit cleared in generated code */
2352 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2353 case OP_HLT:
2354 break; /* recompile these */
2355
2356 default:
2357 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2358 {
2359 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2360
2361 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2362 AssertRC(rc);
2363 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2364 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2365 }
2366 break;
2367 }
2368
2369 /* Note: after a cli we must continue to a proper exit point */
2370 if (cpunext.pCurInstr->opcode != OP_CLI)
2371 {
2372 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2373 if (RT_SUCCESS(rc))
2374 {
2375 rc = VINF_SUCCESS;
2376 goto end;
2377 }
2378 break;
2379 }
2380 else
2381 rc = VWRN_CONTINUE_RECOMPILE;
2382 }
2383 else
2384 break; /* done! */
2385 }
2386
2387 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2388
2389
2390 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2391 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2392 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2393 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2394 )
2395 {
2396 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2397 if (addr == 0)
2398 {
2399 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2400 rc = VERR_PATCHING_REFUSED;
2401 break;
2402 }
2403
2404 Log(("Jump encountered target %RRv\n", addr));
2405
2406 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2407 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2408 {
2409 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2410 /* First we need to finish this linear code stream until the next exit point. */
2411 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2412 if (RT_FAILURE(rc))
2413 {
2414 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2415 break; //fatal error
2416 }
2417 }
2418
2419 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2420 {
2421 /* New code; let's recompile it. */
2422 Log(("patmRecompileCodeStream continue with jump\n"));
2423
2424 /*
2425 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2426 * this patch so we can continue our analysis
2427 *
2428 * We rely on CSAM to detect and resolve conflicts
2429 */
2430 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2431 if(pTargetPatch)
2432 {
2433 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2434 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2435 }
2436
2437 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2438 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2439 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2440
2441 if(pTargetPatch)
2442 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2443
2444 if (RT_FAILURE(rc))
2445 {
2446 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2447 break; //done!
2448 }
2449 }
2450 /* Always return to caller here; we're done! */
2451 rc = VINF_SUCCESS;
2452 goto end;
2453 }
2454 else
2455 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2456 {
2457 rc = VINF_SUCCESS;
2458 goto end;
2459 }
2460 pCurInstrGC += opsize;
2461 }
2462end:
2463 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2464 return rc;
2465}
2466
2467
2468/**
2469 * Generate the jump from guest to patch code
2470 *
2471 * @returns VBox status code.
2472 * @param pVM The VM to operate on.
2473 * @param pPatch Patch record
2474 * @param pCacheRec Guest translation lookup cache record
2475 */
2476static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2477{
2478 uint8_t temp[8];
2479 uint8_t *pPB;
2480 int rc;
2481
2482 Assert(pPatch->cbPatchJump <= sizeof(temp));
2483 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2484
2485 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2486 Assert(pPB);
2487
2488#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2489 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2490 {
2491 Assert(pPatch->pPatchJumpDestGC);
2492
2493 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2494 {
2495 // jmp [PatchCode]
2496 if (fAddFixup)
2497 {
2498 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2499 {
2500 Log(("Relocation failed for the jump in the guest code!!\n"));
2501 return VERR_PATCHING_REFUSED;
2502 }
2503 }
2504
2505 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2506 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2507 }
2508 else
2509 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2510 {
2511 // jmp [PatchCode]
2512 if (fAddFixup)
2513 {
2514 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2515 {
2516 Log(("Relocation failed for the jump in the guest code!!\n"));
2517 return VERR_PATCHING_REFUSED;
2518 }
2519 }
2520
2521 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2522 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2523 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2524 }
2525 else
2526 {
2527 Assert(0);
2528 return VERR_PATCHING_REFUSED;
2529 }
2530 }
2531 else
2532#endif
2533 {
2534 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2535
2536 // jmp [PatchCode]
2537 if (fAddFixup)
2538 {
2539 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2540 {
2541 Log(("Relocation failed for the jump in the guest code!!\n"));
2542 return VERR_PATCHING_REFUSED;
2543 }
2544 }
2545 temp[0] = 0xE9; //jmp
2546 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2547 }
2548 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2549 AssertRC(rc);
2550
2551 if (rc == VINF_SUCCESS)
2552 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2553
2554 return rc;
2555}
2556
2557/**
2558 * Remove the jump from guest to patch code
2559 *
2560 * @returns VBox status code.
2561 * @param pVM The VM to operate on.
2562 * @param pPatch Patch record
2563 */
2564static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2565{
2566#ifdef DEBUG
2567 DISCPUSTATE cpu;
2568 char szOutput[256];
2569 uint32_t opsize, i = 0;
2570 bool disret;
2571
2572 while (i < pPatch->cbPrivInstr)
2573 {
2574 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2575 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2576 if (disret == false)
2577 break;
2578
2579 Log(("Org patch jump: %s", szOutput));
2580 Assert(opsize);
2581 i += opsize;
2582 }
2583#endif
2584
2585 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2586 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2587#ifdef DEBUG
2588 if (rc == VINF_SUCCESS)
2589 {
2590 i = 0;
2591 while(i < pPatch->cbPrivInstr)
2592 {
2593 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2594 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2595 if (disret == false)
2596 break;
2597
2598 Log(("Org instr: %s", szOutput));
2599 Assert(opsize);
2600 i += opsize;
2601 }
2602 }
2603#endif
2604 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2605 return rc;
2606}
2607
2608/**
2609 * Generate the call from guest to patch code
2610 *
2611 * @returns VBox status code.
2612 * @param pVM The VM to operate on.
2613 * @param pPatch Patch record
2614 * @param pInstrHC HC address where to insert the jump
2615 * @param pCacheRec Guest translation cache record
2616 */
static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
{
    uint8_t temp[8];    /* scratch for the replacement call/jmp instruction bytes */
    uint8_t *pPB;       /* HC-mapped pointer to the guest bytes being replaced */
    int rc;

    Assert(pPatch->cbPatchJump <= sizeof(temp));

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    Assert(pPB);

    Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);

    // jmp [PatchCode]
    if (fAddFixup)
    {
        /* Register a fixup so the 32-bit relative displacement can be re-relocated
         * if patch memory moves; &pPB[1] points at the displacement field. */
        if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
        {
            Log(("Relocation failed for the jump in the guest code!!\n"));
            return VERR_PATCHING_REFUSED;
        }
    }

    /* Keep the original opcode byte (E8 call / E9 jmp) and only rewrite the target. */
    Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
    temp[0] = pPatch->aPrivInstr[0];
    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address

    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
    AssertRC(rc);

    return rc;
}
2649
2650
2651/**
2652 * Patch cli/sti pushf/popf instruction block at specified location
2653 *
2654 * @returns VBox status code.
2655 * @param pVM The VM to operate on.
2656 * @param pInstrGC Guest context point to privileged instruction
2657 * @param pInstrHC Host context point to privileged instruction
2658 * @param uOpcode Instruction opcode
2659 * @param uOpSize Size of starting instruction
2660 * @param pPatchRec Patch record
2661 *
2662 * @note returns failure if patching is not allowed or possible
2663 *
2664 */
VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
                                uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO pPatch = &pPatchRec->patch;
    int rc = VERR_PATCHING_REFUSED;
    DISCPUSTATE cpu;
    uint32_t orgOffsetPatchMem = ~0;    /* sentinel; overwritten below before any failure path */
    RTRCPTR pInstrStart;
    bool fInserted;
#ifdef LOG_ENABLED
    uint32_t opsize;
    char szOutput[256];
    bool disret;
#endif

    /* Save original offset (in case of failures later on) */
    /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
    orgOffsetPatchMem = pVM->patm.s.offPatchMem;

    Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
    switch (uOpcode)
    {
        case OP_MOV:
            break;

        case OP_CLI:
        case OP_PUSHF:
            /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
            /* Note: special precautions are taken when disabling and enabling such patches. */
            pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
            break;

        default:
            /* Any other opcode is only acceptable for IDT handler patches. */
            if (!(pPatch->flags & PATMFL_IDTHANDLER))
            {
                AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
                return VERR_INVALID_PARAMETER;
            }
    }

    /* Handlers and int3 replacement blocks are entered directly; everything else needs a jump patched into the guest code. */
    if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
        pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;

    /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
    if (    (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
        &&  PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
       )
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
        Log(("Patch jump would cross page boundary -> refuse!!\n"));
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    pPatch->nrPatch2GuestRecs = 0;
    pInstrStart = pInstrGC;

#ifdef PATM_ENABLE_CALL
    pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
#endif

    /* Claim the current free spot in patch memory for this block. */
    pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
    pPatch->uCurPatchOffset = 0;

    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;

    if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
    {
        Assert(pPatch->flags & PATMFL_INTHANDLER);

        /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
        rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
        if (RT_FAILURE(rc))
            goto failure;
    }

    /***************************************************************************************************************************/
    /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
    /***************************************************************************************************************************/
#ifdef VBOX_WITH_STATISTICS
    if (!(pPatch->flags & PATMFL_SYSENTER))
    {
        rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
        if (RT_FAILURE(rc))
            goto failure;
    }
#endif

    PATMP2GLOOKUPREC cacheRec;
    RT_ZERO(cacheRec);
    cacheRec.pPatch = pPatch;

    /* Recompile the guest instruction stream into patch memory. */
    rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
    /* Free leftover lock if any. */
    if (cacheRec.Lock.pvMap)
    {
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
        cacheRec.Lock.pvMap = NULL;
    }
    if (rc != VINF_SUCCESS)
    {
        Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
        goto failure;
    }

    /* Calculated during analysis. */
    if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
    {
        /* Most likely cause: we encountered an illegal instruction very early on. */
        /** @todo could turn it into an int3 callable patch. */
        Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* size of patch block */
    pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;


    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
    /* Round to next 8 byte boundary. */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);

    /*
     * Insert into patch to guest lookup tree
     */
    LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
    pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
    fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
    AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
    if (!fInserted)
    {
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Note that patmr3SetBranchTargets can install additional patches!! */
    rc = patmr3SetBranchTargets(pVM, pPatch);
    if (rc != VINF_SUCCESS)
    {
        Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
        goto failure;
    }

#ifdef LOG_ENABLED
    Log(("Patch code ----------------------------------------------------------\n"));
    patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
    /* Free leftover lock if any. */
    if (cacheRec.Lock.pvMap)
    {
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
        cacheRec.Lock.pvMap = NULL;
    }
    Log(("Patch code ends -----------------------------------------------------\n"));
#endif

    /* make a copy of the guest code bytes that will be overwritten */
    pPatch->cbPatchJump = SIZEOF_NEARJUMP32;

    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
    AssertRC(rc);

    if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
    {
        /*uint8_t ASMInt3 = 0xCC; - unused */

        Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
        /* Replace first opcode byte with 'int 3'. */
        rc = patmActivateInt3Patch(pVM, pPatch);
        if (RT_FAILURE(rc))
            goto failure;

        /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
        pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;

        pPatch->flags &= ~PATMFL_INSTR_HINT;
        STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
    }
    else
    if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
    {
        Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
        /* now insert a jump in the guest code */
        rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
        AssertRC(rc);
        if (RT_FAILURE(rc))
            goto failure;

    }

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
    Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
#endif

    /* Analysis scratch data is no longer needed on success. */
    patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
    pPatch->pTempInfo->nrIllegalInstr = 0;

    Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Unwind everything that may have been set up above; the record becomes a refused dummy. */
    if (pPatchRec->CoreOffset.Key)
        RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);

    patmEmptyTree(pVM, &pPatch->FixupTree);
    pPatch->nrFixups = 0;

    patmEmptyTree(pVM, &pPatch->JumpTree);
    pPatch->nrJumpRecs = 0;

    patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
    pPatch->pTempInfo->nrIllegalInstr = 0;

    /* Turn this cli patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;
    pPatch->pPatchBlockOffset = 0;

    // Give back the patch memory we no longer need
    Assert(orgOffsetPatchMem != (uint32_t)~0);
    pVM->patm.s.offPatchMem = orgOffsetPatchMem;

    return rc;
}
2893
2894/**
2895 * Patch IDT handler
2896 *
2897 * @returns VBox status code.
2898 * @param pVM The VM to operate on.
2899 * @param pInstrGC Guest context point to privileged instruction
2900 * @param uOpSize Size of starting instruction
2901 * @param pPatchRec Patch record
2902 * @param pCacheRec Cache record ptr
2903 *
2904 * @note returns failure if patching is not allowed or possible
2905 *
2906 */
static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = &pPatchRec->patch;
    bool disret;
    DISCPUSTATE cpuPush, cpuJmp;
    uint32_t opsize;
    RTRCPTR pCurInstrGC = pInstrGC;
    uint8_t *pCurInstrHC, *pInstrHC;
    uint32_t orgOffsetPatchMem = ~0;    /* sentinel: only restore patch memory on failure if we actually claimed any */

    pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
    AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);

    /*
     * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
     * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
     * condition here and only patch the common entrypoint once.
     */
    cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
    Assert(disret);
    if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
    {
        RTRCPTR pJmpInstrGC;
        int rc;
        pCurInstrGC += opsize;

        /* NOTE(review): pCurInstrHC is not advanced alongside pCurInstrGC here;
         * presumably PATMR3DISInstr resolves from the GC address — confirm. */
        cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
        disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
        if (    disret
            &&  cpuJmp.pCurInstr->opcode == OP_JMP
            &&  (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
           )
        {
            bool fInserted;
            /* Reuse an existing patch for the common entrypoint, installing one first if needed. */
            PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
            if (pJmpPatch == 0)
            {
                /* Patch it first! */
                rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
                if (rc != VINF_SUCCESS)
                    goto failure;
                pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
                Assert(pJmpPatch);
            }
            if (pJmpPatch->patch.uState != PATCH_ENABLED)
                goto failure;

            /* save original offset (in case of failures later on) */
            orgOffsetPatchMem = pVM->patm.s.offPatchMem;

            pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
            pPatch->uCurPatchOffset   = 0;
            pPatch->nrPatch2GuestRecs = 0;

#ifdef VBOX_WITH_STATISTICS
            rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
            if (RT_FAILURE(rc))
                goto failure;
#endif

            /* Install fake cli patch (to clear the virtual IF) */
            rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
            if (RT_FAILURE(rc))
                goto failure;

            /* Add lookup record for patch to guest address translation (for the push) */
            patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);

            /* Duplicate push. */
            rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
            if (RT_FAILURE(rc))
                goto failure;

            /* Generate jump to common entrypoint. */
            rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
            if (RT_FAILURE(rc))
                goto failure;

            /* size of patch block */
            pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;

            /* Update free pointer in patch memory. */
            pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
            /* Round to next 8 byte boundary */
            pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);

            /* There's no jump from guest to patch code. */
            pPatch->cbPatchJump = 0;


#ifdef LOG_ENABLED
            Log(("Patch code ----------------------------------------------------------\n"));
            patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
            Log(("Patch code ends -----------------------------------------------------\n"));
#endif
            Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));

            /*
             * Insert into patch to guest lookup tree
             */
            LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
            pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
            fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
            AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));

            pPatch->uState = PATCH_ENABLED;

            return VINF_SUCCESS;
        }
    }
failure:
    /* Give back the patch memory we no longer need */
    if (orgOffsetPatchMem != (uint32_t)~0)
        pVM->patm.s.offPatchMem = orgOffsetPatchMem;

    /* Fall back to patching the full handler as an ordinary block. */
    return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
}
3025
3026/**
3027 * Install a trampoline to call a guest trap handler directly
3028 *
3029 * @returns VBox status code.
3030 * @param pVM The VM to operate on.
3031 * @param pInstrGC Guest context point to privileged instruction
3032 * @param pPatchRec Patch record
3033 * @param pCacheRec Cache record ptr
3034 *
3035 */
static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = &pPatchRec->patch;
    int rc = VERR_PATCHING_REFUSED;
    uint32_t orgOffsetPatchMem = ~0;
    bool fInserted;
#ifdef LOG_ENABLED
    bool disret;
    DISCPUSTATE cpu;
    uint32_t opsize;
    char szOutput[256];
#endif

    // save original offset (in case of failures later on)
    orgOffsetPatchMem = pVM->patm.s.offPatchMem;

    /* Claim the current free spot in patch memory for the trampoline. */
    pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
    pPatch->uCurPatchOffset   = 0;
    pPatch->nrPatch2GuestRecs = 0;

#ifdef VBOX_WITH_STATISTICS
    rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
    if (RT_FAILURE(rc))
        goto failure;
#endif

    /* Generate the trap entry code that transfers control to the guest handler. */
    rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
    if (RT_FAILURE(rc))
        goto failure;

    /* size of patch block */
    pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
    /* Round to next 8 byte boundary */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);

    /* There's no jump from guest to patch code. */
    pPatch->cbPatchJump = 0;

#ifdef LOG_ENABLED
    Log(("Patch code ----------------------------------------------------------\n"));
    patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
    Log(("Patch code ends -----------------------------------------------------\n"));
#endif

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("TRAP handler patch: %s", szOutput));
#endif
    Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));

    /*
     * Insert into patch to guest lookup tree
     */
    LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
    pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
    fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
    AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));

    /* Turn this cli patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;
    pPatch->pPatchBlockOffset = 0;

    /* Give back the patch memory we no longer need */
    Assert(orgOffsetPatchMem != (uint32_t)~0);
    pVM->patm.s.offPatchMem = orgOffsetPatchMem;

    return rc;
}
3114
3115
3116#ifdef LOG_ENABLED
3117/**
3118 * Check if the instruction is patched as a common idt handler
3119 *
3120 * @returns true or false
3121 * @param pVM The VM to operate on.
3122 * @param pInstrGC Guest context point to the instruction
3123 *
3124 */
3125static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3126{
3127 PPATMPATCHREC pRec;
3128
3129 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3130 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3131 return true;
3132 return false;
3133}
#endif // LOG_ENABLED
3135
3136
3137/**
3138 * Duplicates a complete function
3139 *
3140 * @returns VBox status code.
3141 * @param pVM The VM to operate on.
3142 * @param pInstrGC Guest context point to privileged instruction
3143 * @param pPatchRec Patch record
3144 * @param pCacheRec Cache record ptr
3145 *
3146 */
3147static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3148{
3149 PPATCHINFO pPatch = &pPatchRec->patch;
3150 int rc = VERR_PATCHING_REFUSED;
3151 DISCPUSTATE cpu;
3152 uint32_t orgOffsetPatchMem = ~0;
3153 bool fInserted;
3154
3155 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3156 /* Save original offset (in case of failures later on). */
3157 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3158
3159 /* We will not go on indefinitely with call instruction handling. */
3160 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3161 {
3162 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3163 return VERR_PATCHING_REFUSED;
3164 }
3165
3166 pVM->patm.s.ulCallDepth++;
3167
3168#ifdef PATM_ENABLE_CALL
3169 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3170#endif
3171
3172 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3173
3174 pPatch->nrPatch2GuestRecs = 0;
3175 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3176 pPatch->uCurPatchOffset = 0;
3177
3178 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3179
3180 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3181 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3182 if (RT_FAILURE(rc))
3183 goto failure;
3184
3185#ifdef VBOX_WITH_STATISTICS
3186 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3187 if (RT_FAILURE(rc))
3188 goto failure;
3189#endif
3190
3191 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3192 if (rc != VINF_SUCCESS)
3193 {
3194 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3195 goto failure;
3196 }
3197
3198 //size of patch block
3199 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3200
3201 //update free pointer in patch memory
3202 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3203 /* Round to next 8 byte boundary. */
3204 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3205
3206 pPatch->uState = PATCH_ENABLED;
3207
3208 /*
3209 * Insert into patch to guest lookup tree
3210 */
3211 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3212 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3213 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3214 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3215 if (!fInserted)
3216 {
3217 rc = VERR_PATCHING_REFUSED;
3218 goto failure;
3219 }
3220
3221 /* Note that patmr3SetBranchTargets can install additional patches!! */
3222 rc = patmr3SetBranchTargets(pVM, pPatch);
3223 if (rc != VINF_SUCCESS)
3224 {
3225 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3226 goto failure;
3227 }
3228
3229#ifdef LOG_ENABLED
3230 Log(("Patch code ----------------------------------------------------------\n"));
3231 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3232 Log(("Patch code ends -----------------------------------------------------\n"));
3233#endif
3234
3235 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3236
3237 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3238 pPatch->pTempInfo->nrIllegalInstr = 0;
3239
3240 pVM->patm.s.ulCallDepth--;
3241 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3242 return VINF_SUCCESS;
3243
3244failure:
3245 if (pPatchRec->CoreOffset.Key)
3246 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3247
3248 patmEmptyTree(pVM, &pPatch->FixupTree);
3249 pPatch->nrFixups = 0;
3250
3251 patmEmptyTree(pVM, &pPatch->JumpTree);
3252 pPatch->nrJumpRecs = 0;
3253
3254 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3255 pPatch->pTempInfo->nrIllegalInstr = 0;
3256
3257 /* Turn this cli patch into a dummy. */
3258 pPatch->uState = PATCH_REFUSED;
3259 pPatch->pPatchBlockOffset = 0;
3260
3261 // Give back the patch memory we no longer need
3262 Assert(orgOffsetPatchMem != (uint32_t)~0);
3263 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3264
3265 pVM->patm.s.ulCallDepth--;
3266 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3267 return rc;
3268}
3269
3270/**
3271 * Creates trampoline code to jump inside an existing patch
3272 *
3273 * @returns VBox status code.
3274 * @param pVM The VM to operate on.
3275 * @param pInstrGC Guest context point to privileged instruction
3276 * @param pPatchRec Patch record
3277 *
3278 */
static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO  pPatch = &pPatchRec->patch;
    RTRCPTR     pPage, pPatchTargetGC = 0;
    uint32_t    orgOffsetPatchMem = ~0;
    int         rc = VERR_PATCHING_REFUSED;
    PPATCHINFO  pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
    PTRAMPREC   pTrampRec = NULL; /**< Trampoline record used to find the patch. */
    bool        fInserted = false;

    Log(("patmCreateTrampoline %RRv\n", pInstrGC));
    /* Save original offset (in case of failures later on). */
    orgOffsetPatchMem = pVM->patm.s.offPatchMem;

    /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
    /** @todo we already checked this before */
    pPage = pInstrGC & PAGE_BASE_GC_MASK;

    /* Scan all patches registered on the target's page for an enabled function
     * duplicate that contains recompiled code for pInstrGC. */
    PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
    if (pPatchPage)
    {
        uint32_t i;

        for (i=0;i<pPatchPage->cCount;i++)
        {
            if (pPatchPage->aPatch[i])
            {
                pPatchToJmp = pPatchPage->aPatch[i];

                if (    (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
                    &&  pPatchToJmp->uState == PATCH_ENABLED)
                {
                    pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
                    if (pPatchTargetGC)
                    {
                        uint32_t         offsetPatch      = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
                        PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
                        Assert(pPatchToGuestRec);

                        /* Mark the target record so the jump destination survives patch refreshes. */
                        pPatchToGuestRec->fJumpTarget = true;
                        Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
                        Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
                        break;
                    }
                }
            }
        }
    }
    AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);

    /*
     * Only record the trampoline patch if this is the first patch to the target
     * or we recorded other patches already.
     * The goal is to refuse refreshing function duplicates if the guest
     * modifies code after a saved state was loaded because it is not possible
     * to save the relation between trampoline and target without changing the
     * saved state version.
     */
    if (    !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
        ||  pPatchToJmp->pTrampolinePatchesHead)
    {
        pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
        pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
        if (!pTrampRec)
            return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */

        pTrampRec->pPatchTrampoline = pPatchRec;
    }

    /* Claim the current free spot in patch memory for the trampoline. */
    pPatch->nrPatch2GuestRecs = 0;
    pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
    pPatch->uCurPatchOffset   = 0;

    /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
    rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
    if (RT_FAILURE(rc))
        goto failure;

#ifdef VBOX_WITH_STATISTICS
    rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
    if (RT_FAILURE(rc))
        goto failure;
#endif

    /* Generate the jump into the existing patch's code. */
    rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
    if (RT_FAILURE(rc))
        goto failure;

    /*
     * Insert into patch to guest lookup tree
     */
    LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
    pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
    fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
    AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
    if (!fInserted)
    {
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* size of patch block */
    pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
    /* Round to next 8 byte boundary */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);

    /* There's no jump from guest to patch code. */
    pPatch->cbPatchJump = 0;

    /* Enable the patch. */
    pPatch->uState = PATCH_ENABLED;
    /* We allow this patch to be called as a function. */
    pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;

    if (pTrampRec)
    {
        /* Link the trampoline into the target patch's list so refreshes can find it. */
        pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
        pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
    }
    STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
    return VINF_SUCCESS;

failure:
    /* Unwind everything set up above; the record becomes a refused dummy. */
    if (pPatchRec->CoreOffset.Key)
        RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);

    patmEmptyTree(pVM, &pPatch->FixupTree);
    pPatch->nrFixups = 0;

    patmEmptyTree(pVM, &pPatch->JumpTree);
    pPatch->nrJumpRecs = 0;

    patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
    pPatch->pTempInfo->nrIllegalInstr = 0;

    /* Turn this cli patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;
    pPatch->pPatchBlockOffset = 0;

    // Give back the patch memory we no longer need
    Assert(orgOffsetPatchMem != (uint32_t)~0);
    pVM->patm.s.offPatchMem = orgOffsetPatchMem;

    if (pTrampRec)
        MMR3HeapFree(pTrampRec);

    return rc;
}
3430
3431
3432/**
3433 * Patch branch target function for call/jump at specified location.
3434 * (in responds to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3435 *
3436 * @returns VBox status code.
3437 * @param pVM The VM to operate on.
3438 * @param pCtx Guest context
3439 *
3440 */
VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
{
    RTRCPTR     pBranchTarget, pPage;
    int         rc;
    RTRCPTR     pPatchTargetGC = 0;

    /* Calling convention from patch code: edx holds the branch target, edi the
     * patch return address; results are passed back in eax. */
    pBranchTarget = pCtx->edx;
    pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);

    /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
    pPage = pBranchTarget & PAGE_BASE_GC_MASK;

    PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
    if (pPatchPage)
    {
        uint32_t i;

        for (i=0;i<pPatchPage->cCount;i++)
        {
            if (pPatchPage->aPatch[i])
            {
                PPATCHINFO pPatch = pPatchPage->aPatch[i];

                if (    (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
                    &&  pPatch->uState == PATCH_ENABLED)
                {
                    pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
                    if (pPatchTargetGC)
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
                        break;
                    }
                }
            }
        }
    }

    if (pPatchTargetGC)
    {
        /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
        rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
    }
    else
    {
        /* No existing duplicate covers the target; duplicate the whole function. */
        rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
    }

    if (rc == VINF_SUCCESS)
    {
        pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
        Assert(pPatchTargetGC);
    }

    if (pPatchTargetGC)
    {
        pCtx->eax = pPatchTargetGC;
        pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;   /* make it relative */
    }
    else
    {
        /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
        pCtx->eax = 0;
        STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
    }
    Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
    rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
    AssertRC(rc);

    /* Skip the illegal-instruction marker that triggered this request and resume. */
    pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
    /* NOTE(review): the success counter is bumped even when the request failed
     * (eax == 0) — possibly intentional "request handled" semantics; confirm. */
    STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
    return VINF_SUCCESS;
}
3513
/**
 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
 *
 * Used for PATMFL_REPLACE_FUNCTION_CALL patches: the near call/jump in the
 * guest code is rewritten so it branches straight into the already generated
 * function duplicate in patch memory.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCpu        Disassembly CPU structure ptr (must describe a near call/jmp).
 * @param   pInstrGC    Guest context pointer to the call/jump instruction.
 * @param   pCacheRec   Cache record ptr (carries the patch record in pPatch).
 *
 */
static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    int rc = VERR_PATCHING_REFUSED;
    DISCPUSTATE cpu;
    RTRCPTR pTargetGC;
    PPATMPATCHREC pPatchFunction;
    uint32_t opsize;
    bool disret;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
    Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);

    /* Only 32-bit near calls/jumps can be handled here. */
    if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
    {
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Resolve the relative branch target; 0 means a branch type we can't resolve. */
    pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
    if (pTargetGC == 0)
    {
        Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Look up the function duplicate patch for the resolved target address. */
    pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
    if (pPatchFunction == NULL)
    {
        for(;;)
        {
            /* It could be an indirect call (call -> jmp dest).
             * Note that it's dangerous to assume the jump will never change...
             */
            uint8_t *pTmpInstrHC;

            pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
            Assert(pTmpInstrHC);
            if (pTmpInstrHC == 0)
                break;

            /* Disassemble the instruction at the target; if it's a jmp, follow it one level. */
            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
            if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
                break;

            pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
            if (pTargetGC == 0)
            {
                break;
            }

            pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
            break;  /* deliberately follows only a single level of indirection */
        }
        if (pPatchFunction == 0)
        {
            AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }
    }

    // make a copy of the guest code bytes that will be overwritten
    pPatch->cbPatchJump = SIZEOF_NEARJUMP32;

    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
    AssertRC(rc);

    /* Now replace the original call in the guest code */
    rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("Call patch: %s", szOutput));
#endif

    Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3624
/**
 * Replace the address in an MMIO instruction with the cached version.
 *
 * Rewrites the 32-bit displacement of the MMIO access (the instruction's
 * second operand) so it points at PATM's cached data copy, and registers an
 * absolute fixup so the address survives relocation.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Guest context pointer to the privileged instruction.
 * @param   pCpu        Disassembly CPU structure ptr.
 * @param   pCacheRec   Cache record ptr (carries the patch record in pPatch).
 *
 * @note    returns failure if patching is not allowed or possible
 *
 */
static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    uint8_t *pPB;
    int rc = VERR_PATCHING_REFUSED;
#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    char szOutput[256];
#endif

    /* A cached data address must have been recorded (pVM->patm.s.mmio) before we get here. */
    Assert(pVM->patm.s.mmio.pCachedData);
    if (!pVM->patm.s.mmio.pCachedData)
        goto failure;

    /* Only instructions whose second operand is a 32-bit displacement can be rewritten. */
    if (pCpu->param2.flags != USE_DISPLACEMENT32)
        goto failure;

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    if (pPB == 0)
        goto failure;

    /* Add relocation record for cached data access. */
    if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch old instruction: %s", szOutput));
#endif

    /* Save original instruction. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);

    pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */

    /* Replace address with that of the cached item. */
    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        goto failure;
    }

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif
    /* The cached entry has been consumed; clear it so it can't be reused by accident. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3702
3703
/**
 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
 *
 * Unlike patmPatchMMIOInstr this operates on code that already lives in patch
 * memory, so the displacement is rewritten directly through the HC mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Patch context pointer to the instruction.
 * @param   pPatch      Patch record.
 *
 * @note    returns failure if patching is not allowed or possible
 *
 */
static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
{
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    uint8_t *pInstrHC;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);

    /* Convert GC to HC address. */
    pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
    AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);

    /* Disassemble mmio instruction. */
    cpu.mode = pPatch->uOpMode;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
    if (disret == false)
    {
        Log(("Disassembly failed (probably page not present) -> return to caller\n"));
        return VERR_PATCHING_REFUSED;
    }

    AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
    if (opsize > MAX_INSTR_SIZE)
        return VERR_PATCHING_REFUSED;
    /* Only instructions whose second operand is a 32-bit displacement can be rewritten. */
    if (cpu.param2.flags != USE_DISPLACEMENT32)
        return VERR_PATCHING_REFUSED;

    /* Add relocation record for cached data access. */
    /* NOTE(review): only four arguments here vs. six in patmPatchMMIOInstr - presumably relies on
     * default values for the source/dest parameters; confirm against the patmPatchAddReloc32 declaration. */
    if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
    /* Replace address with that of the cached item. */
    *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif

    /* The cached entry has been consumed; clear it. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    return VINF_SUCCESS;
}
3769
3770/**
3771 * Activates an int3 patch
3772 *
3773 * @returns VBox status code.
3774 * @param pVM The VM to operate on.
3775 * @param pPatch Patch record
3776 */
3777static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3778{
3779 uint8_t ASMInt3 = 0xCC;
3780 int rc;
3781
3782 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3783 Assert(pPatch->uState != PATCH_ENABLED);
3784
3785 /* Replace first opcode byte with 'int 3'. */
3786 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3787 AssertRC(rc);
3788
3789 pPatch->cbPatchJump = sizeof(ASMInt3);
3790
3791 return rc;
3792}
3793
3794/**
3795 * Deactivates an int3 patch
3796 *
3797 * @returns VBox status code.
3798 * @param pVM The VM to operate on.
3799 * @param pPatch Patch record
3800 */
3801static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3802{
3803 uint8_t ASMInt3 = 0xCC;
3804 int rc;
3805
3806 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3807 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3808
3809 /* Restore first opcode byte. */
3810 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3811 AssertRC(rc);
3812 return rc;
3813}
3814
/**
 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
 *
 * Saves the original instruction bytes, then writes an int3 over the first
 * opcode byte; the breakpoint is dispatched dynamically at runtime.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Guest context point to privileged instruction
 * @param   pInstrHC    Host context point to privileged instruction
 * @param   pCpu        Disassembly CPU structure ptr
 * @param   pPatch      Patch record
 *
 * @note    returns failure if patching is not allowed or possible
 *
 */
VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
{
    uint8_t ASMInt3 = 0xCC;
    int rc;

    /* Note: Do not use patch memory here! It might called during patch installation too. */

#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    char szOutput[256];
    uint32_t opsize;

    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("PATMR3PatchInstrInt3: %s", szOutput));
#endif

    /* Save the original instruction. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);
    pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */

    pPatch->flags |= PATMFL_INT3_REPLACEMENT;

    /* Replace first opcode byte with 'int 3'. */
    rc = patmActivateInt3Patch(pVM, pPatch);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    return VERR_PATCHING_REFUSED;
}
3868
3869#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3870/**
3871 * Patch a jump instruction at specified location
3872 *
3873 * @returns VBox status code.
3874 * @param pVM The VM to operate on.
3875 * @param pInstrGC Guest context point to privileged instruction
3876 * @param pInstrHC Host context point to privileged instruction
3877 * @param pCpu Disassembly CPU structure ptr
3878 * @param pPatchRec Patch record
3879 *
3880 * @note returns failure if patching is not allowed or possible
3881 *
3882 */
3883int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3884{
3885 PPATCHINFO pPatch = &pPatchRec->patch;
3886 int rc = VERR_PATCHING_REFUSED;
3887#ifdef LOG_ENABLED
3888 bool disret;
3889 DISCPUSTATE cpu;
3890 uint32_t opsize;
3891 char szOutput[256];
3892#endif
3893
3894 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3895 pPatch->uCurPatchOffset = 0;
3896 pPatch->cbPatchBlockSize = 0;
3897 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3898
3899 /*
3900 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3901 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3902 */
3903 switch (pCpu->pCurInstr->opcode)
3904 {
3905 case OP_JO:
3906 case OP_JNO:
3907 case OP_JC:
3908 case OP_JNC:
3909 case OP_JE:
3910 case OP_JNE:
3911 case OP_JBE:
3912 case OP_JNBE:
3913 case OP_JS:
3914 case OP_JNS:
3915 case OP_JP:
3916 case OP_JNP:
3917 case OP_JL:
3918 case OP_JNL:
3919 case OP_JLE:
3920 case OP_JNLE:
3921 case OP_JMP:
3922 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3923 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3924 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3925 goto failure;
3926
3927 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3928 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3929 goto failure;
3930
3931 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3932 {
3933 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3934 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3935 rc = VERR_PATCHING_REFUSED;
3936 goto failure;
3937 }
3938
3939 break;
3940
3941 default:
3942 goto failure;
3943 }
3944
3945 // make a copy of the guest code bytes that will be overwritten
3946 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3947 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3948 pPatch->cbPatchJump = pCpu->opsize;
3949
3950 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3951 AssertRC(rc);
3952
3953 /* Now insert a jump in the guest code. */
3954 /*
3955 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3956 * references the target instruction in the conflict patch.
3957 */
3958 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3959
3960 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3961 pPatch->pPatchJumpDestGC = pJmpDest;
3962
3963 PATMP2GLOOKUPREC cacheRec;
3964 RT_ZERO(cacheRec);
3965 cacheRec.pPatch = pPatch;
3966
3967 rc = patmGenJumpToPatch(pVM, pPatch, &cacherec, true);
3968 /* Free leftover lock if any. */
3969 if (cacheRec.Lock.pvMap)
3970 {
3971 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3972 cacheRec.Lock.pvMap = NULL;
3973 }
3974 AssertRC(rc);
3975 if (RT_FAILURE(rc))
3976 goto failure;
3977
3978 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3979
3980#ifdef LOG_ENABLED
3981 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3982 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3983 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3984#endif
3985
3986 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3987
3988 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3989
3990 /* Lowest and highest address for write monitoring. */
3991 pPatch->pInstrGCLowest = pInstrGC;
3992 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3993
3994 pPatch->uState = PATCH_ENABLED;
3995 return VINF_SUCCESS;
3996
3997failure:
3998 /* Turn this cli patch into a dummy. */
3999 pPatch->uState = PATCH_REFUSED;
4000
4001 return rc;
4002}
4003#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4004
4005
4006/**
4007 * Gives hint to PATM about supervisor guest instructions
4008 *
4009 * @returns VBox status code.
4010 * @param pVM The VM to operate on.
4011 * @param pInstr Guest context point to privileged instruction
4012 * @param flags Patch flags
4013 */
4014VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4015{
4016 Assert(pInstrGC);
4017 Assert(flags == PATMFL_CODE32);
4018
4019 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4020 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4021}
4022
4023/**
4024 * Patch privileged instruction at specified location
4025 *
4026 * @returns VBox status code.
4027 * @param pVM The VM to operate on.
4028 * @param pInstr Guest context point to privileged instruction (0:32 flat address)
4029 * @param flags Patch flags
4030 *
4031 * @note returns failure if patching is not allowed or possible
4032 */
4033VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4034{
4035 DISCPUSTATE cpu;
4036 R3PTRTYPE(uint8_t *) pInstrHC;
4037 uint32_t opsize;
4038 PPATMPATCHREC pPatchRec;
4039 PCPUMCTX pCtx = 0;
4040 bool disret;
4041 int rc;
4042 PVMCPU pVCpu = VMMGetCpu0(pVM);
4043
4044 if ( !pVM
4045 || pInstrGC == 0
4046 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4047 {
4048 AssertFailed();
4049 return VERR_INVALID_PARAMETER;
4050 }
4051
4052 if (PATMIsEnabled(pVM) == false)
4053 return VERR_PATCHING_REFUSED;
4054
4055 /* Test for patch conflict only with patches that actually change guest code. */
4056 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4057 {
4058 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4059 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4060 if (pConflictPatch != 0)
4061 return VERR_PATCHING_REFUSED;
4062 }
4063
4064 if (!(flags & PATMFL_CODE32))
4065 {
4066 /** @todo Only 32 bits code right now */
4067 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4068 return VERR_NOT_IMPLEMENTED;
4069 }
4070
4071 /* We ran out of patch memory; don't bother anymore. */
4072 if (pVM->patm.s.fOutOfMemory == true)
4073 return VERR_PATCHING_REFUSED;
4074
4075 /* Make sure the code selector is wide open; otherwise refuse. */
4076 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4077 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4078 {
4079 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4080 if (pInstrGCFlat != pInstrGC)
4081 {
4082 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4083 return VERR_PATCHING_REFUSED;
4084 }
4085 }
4086
4087 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
4088 if (!(flags & PATMFL_GUEST_SPECIFIC))
4089 {
4090 /* New code. Make sure CSAM has a go at it first. */
4091 CSAMR3CheckCode(pVM, pInstrGC);
4092 }
4093
4094 /* Note: obsolete */
4095 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4096 && (flags & PATMFL_MMIO_ACCESS))
4097 {
4098 RTRCUINTPTR offset;
4099 void *pvPatchCoreOffset;
4100
4101 /* Find the patch record. */
4102 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4103 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4104 if (pvPatchCoreOffset == NULL)
4105 {
4106 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4107 return VERR_PATCH_NOT_FOUND; //fatal error
4108 }
4109 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4110
4111 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4112 }
4113
4114 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4115
4116 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4117 if (pPatchRec)
4118 {
4119 Assert(!(flags & PATMFL_TRAMPOLINE));
4120
4121 /* Hints about existing patches are ignored. */
4122 if (flags & PATMFL_INSTR_HINT)
4123 return VERR_PATCHING_REFUSED;
4124
4125 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4126 {
4127 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4128 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4129 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4130 }
4131
4132 if (pPatchRec->patch.uState == PATCH_DISABLED)
4133 {
4134 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4135 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4136 {
4137 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4138 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4139 }
4140 else
4141 Log(("Enabling patch %RRv again\n", pInstrGC));
4142
4143 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4144 rc = PATMR3EnablePatch(pVM, pInstrGC);
4145 if (RT_SUCCESS(rc))
4146 return VWRN_PATCH_ENABLED;
4147
4148 return rc;
4149 }
4150 if ( pPatchRec->patch.uState == PATCH_ENABLED
4151 || pPatchRec->patch.uState == PATCH_DIRTY)
4152 {
4153 /*
4154 * The patch might have been overwritten.
4155 */
4156 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4157 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4158 {
4159 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4160 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4161 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4162 {
4163 if (flags & PATMFL_IDTHANDLER)
4164 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4165
4166 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4167 }
4168 }
4169 rc = PATMR3RemovePatch(pVM, pInstrGC);
4170 if (RT_FAILURE(rc))
4171 return VERR_PATCHING_REFUSED;
4172 }
4173 else
4174 {
4175 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4176 /* already tried it once! */
4177 return VERR_PATCHING_REFUSED;
4178 }
4179 }
4180
4181 RTGCPHYS GCPhys;
4182 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4183 if (rc != VINF_SUCCESS)
4184 {
4185 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4186 return rc;
4187 }
4188 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4189 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4190 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4191 {
4192 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4193 return VERR_PATCHING_REFUSED;
4194 }
4195
4196 /* Initialize cache record for guest address translations. */
4197 bool fInserted;
4198 PATMP2GLOOKUPREC cacheRec;
4199 RT_ZERO(cacheRec);
4200
4201 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4202 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4203
4204 /* Allocate patch record. */
4205 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4206 if (RT_FAILURE(rc))
4207 {
4208 Log(("Out of memory!!!!\n"));
4209 return VERR_NO_MEMORY;
4210 }
4211 pPatchRec->Core.Key = pInstrGC;
4212 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4213 /* Insert patch record into the lookup tree. */
4214 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4215 Assert(fInserted);
4216
4217 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4218 pPatchRec->patch.flags = flags;
4219 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4220 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4221
4222 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4223 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4224
4225 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4226 {
4227 /*
4228 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4229 */
4230 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4231 if (pPatchNear)
4232 {
4233 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4234 {
4235 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4236
4237 pPatchRec->patch.uState = PATCH_UNUSABLE;
4238 /*
4239 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4240 */
4241 return VERR_PATCHING_REFUSED;
4242 }
4243 }
4244 }
4245
4246 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4247 if (pPatchRec->patch.pTempInfo == 0)
4248 {
4249 Log(("Out of memory!!!!\n"));
4250 return VERR_NO_MEMORY;
4251 }
4252
4253 cpu.mode = pPatchRec->patch.uOpMode;
4254 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4255 if (disret == false)
4256 {
4257 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4258 return VERR_PATCHING_REFUSED;
4259 }
4260
4261 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4262 if (opsize > MAX_INSTR_SIZE)
4263 return VERR_PATCHING_REFUSED;
4264
4265 pPatchRec->patch.cbPrivInstr = opsize;
4266 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4267
4268 /* Restricted hinting for now. */
4269 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4270
4271 /* Initialize cache record patch pointer. */
4272 cacheRec.pPatch = &pPatchRec->patch;
4273
4274 /* Allocate statistics slot */
4275 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4276 {
4277 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4278 }
4279 else
4280 {
4281 Log(("WARNING: Patch index wrap around!!\n"));
4282 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4283 }
4284
4285 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4286 {
4287 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4288 }
4289 else
4290 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4291 {
4292 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4293 }
4294 else
4295 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4296 {
4297 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4298 }
4299 else
4300 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4301 {
4302 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4303 }
4304 else
4305 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4306 {
4307 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4308 }
4309 else
4310 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4311 {
4312 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4313 }
4314 else
4315 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4316 {
4317 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4318 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4319
4320 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4321#ifdef VBOX_WITH_STATISTICS
4322 if ( rc == VINF_SUCCESS
4323 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4324 {
4325 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4326 }
4327#endif
4328 }
4329 else
4330 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4331 {
4332 switch (cpu.pCurInstr->opcode)
4333 {
4334 case OP_SYSENTER:
4335 case OP_PUSH:
4336 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4337 if (rc == VINF_SUCCESS)
4338 {
4339 if (rc == VINF_SUCCESS)
4340 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4341 return rc;
4342 }
4343 break;
4344
4345 default:
4346 rc = VERR_NOT_IMPLEMENTED;
4347 break;
4348 }
4349 }
4350 else
4351 {
4352 switch (cpu.pCurInstr->opcode)
4353 {
4354 case OP_SYSENTER:
4355 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4356 if (rc == VINF_SUCCESS)
4357 {
4358 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4359 return VINF_SUCCESS;
4360 }
4361 break;
4362
4363#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4364 case OP_JO:
4365 case OP_JNO:
4366 case OP_JC:
4367 case OP_JNC:
4368 case OP_JE:
4369 case OP_JNE:
4370 case OP_JBE:
4371 case OP_JNBE:
4372 case OP_JS:
4373 case OP_JNS:
4374 case OP_JP:
4375 case OP_JNP:
4376 case OP_JL:
4377 case OP_JNL:
4378 case OP_JLE:
4379 case OP_JNLE:
4380 case OP_JECXZ:
4381 case OP_LOOP:
4382 case OP_LOOPNE:
4383 case OP_LOOPE:
4384 case OP_JMP:
4385 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4386 {
4387 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4388 break;
4389 }
4390 return VERR_NOT_IMPLEMENTED;
4391#endif
4392
4393 case OP_PUSHF:
4394 case OP_CLI:
4395 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4396 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4397 break;
4398
4399 case OP_STR:
4400 case OP_SGDT:
4401 case OP_SLDT:
4402 case OP_SIDT:
4403 case OP_CPUID:
4404 case OP_LSL:
4405 case OP_LAR:
4406 case OP_SMSW:
4407 case OP_VERW:
4408 case OP_VERR:
4409 case OP_IRET:
4410 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4411 break;
4412
4413 default:
4414 return VERR_NOT_IMPLEMENTED;
4415 }
4416 }
4417
4418 if (rc != VINF_SUCCESS)
4419 {
4420 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4421 {
4422 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4423 pPatchRec->patch.nrPatch2GuestRecs = 0;
4424 }
4425 pVM->patm.s.uCurrentPatchIdx--;
4426 }
4427 else
4428 {
4429 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4430 AssertRCReturn(rc, rc);
4431
4432 /* Keep track upper and lower boundaries of patched instructions */
4433 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4434 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4435 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4436 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4437
4438 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4439 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4440
4441 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4442 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4443
4444 rc = VINF_SUCCESS;
4445
4446 /* Patch hints are not enabled by default. Only when the are actually encountered. */
4447 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4448 {
4449 rc = PATMR3DisablePatch(pVM, pInstrGC);
4450 AssertRCReturn(rc, rc);
4451 }
4452
4453#ifdef VBOX_WITH_STATISTICS
4454 /* Register statistics counter */
4455 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4456 {
4457 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4458 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4459#ifndef DEBUG_sandervl
4460 /* Full breakdown for the GUI. */
4461 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4462 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4463 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4464 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4465 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4466 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4467 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4468 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4469 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4470 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4471 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4472 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4473 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4474 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4475 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4476 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4477#endif
4478 }
4479#endif
4480 }
4481 /* Free leftover lock if any. */
4482 if (cacheRec.Lock.pvMap)
4483 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4484 return rc;
4485}
4486
4487/**
4488 * Query instruction size
4489 *
4490 * @returns VBox status code.
4491 * @param pVM The VM to operate on.
4492 * @param pPatch Patch record
4493 * @param pInstrGC Instruction address
4494 */
4495static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4496{
4497 uint8_t *pInstrHC;
4498 PGMPAGEMAPLOCK Lock;
4499
4500 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4501 if (rc == VINF_SUCCESS)
4502 {
4503 DISCPUSTATE cpu;
4504 bool disret;
4505 uint32_t opsize;
4506
4507 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4508 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4509 PGMPhysReleasePageMappingLock(pVM, &Lock);
4510 if (disret)
4511 return opsize;
4512 }
4513 return 0;
4514}
4515
4516/**
4517 * Add patch to page record
4518 *
4519 * @returns VBox status code.
4520 * @param pVM The VM to operate on.
4521 * @param pPage Page address
4522 * @param pPatch Patch record
4523 */
4524int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4525{
4526 PPATMPATCHPAGE pPatchPage;
4527 int rc;
4528
4529 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4530
4531 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4532 if (pPatchPage)
4533 {
4534 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4535 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4536 {
4537 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4538 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4539
4540 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4541 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4542 if (RT_FAILURE(rc))
4543 {
4544 Log(("Out of memory!!!!\n"));
4545 return VERR_NO_MEMORY;
4546 }
4547 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4548 MMHyperFree(pVM, paPatchOld);
4549 }
4550 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4551 pPatchPage->cCount++;
4552 }
4553 else
4554 {
4555 bool fInserted;
4556
4557 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4558 if (RT_FAILURE(rc))
4559 {
4560 Log(("Out of memory!!!!\n"));
4561 return VERR_NO_MEMORY;
4562 }
4563 pPatchPage->Core.Key = pPage;
4564 pPatchPage->cCount = 1;
4565 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4566
4567 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4568 if (RT_FAILURE(rc))
4569 {
4570 Log(("Out of memory!!!!\n"));
4571 MMHyperFree(pVM, pPatchPage);
4572 return VERR_NO_MEMORY;
4573 }
4574 pPatchPage->aPatch[0] = pPatch;
4575
4576 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4577 Assert(fInserted);
4578 pVM->patm.s.cPageRecords++;
4579
4580 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4581 }
4582 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4583
4584 /* Get the closest guest instruction (from below) */
4585 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4586 Assert(pGuestToPatchRec);
4587 if (pGuestToPatchRec)
4588 {
4589 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4590 if ( pPatchPage->pLowestAddrGC == 0
4591 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4592 {
4593 RTRCUINTPTR offset;
4594
4595 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4596
4597 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4598 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4599 if (offset && offset < MAX_INSTR_SIZE)
4600 {
4601 /* Get the closest guest instruction (from above) */
4602 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4603
4604 if (pGuestToPatchRec)
4605 {
4606 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4607 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4608 {
4609 pPatchPage->pLowestAddrGC = pPage;
4610 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4611 }
4612 }
4613 }
4614 }
4615 }
4616
4617 /* Get the closest guest instruction (from above) */
4618 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4619 Assert(pGuestToPatchRec);
4620 if (pGuestToPatchRec)
4621 {
4622 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4623 if ( pPatchPage->pHighestAddrGC == 0
4624 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4625 {
4626 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4627 /* Increase by instruction size. */
4628 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4629//// Assert(size);
4630 pPatchPage->pHighestAddrGC += size;
4631 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4632 }
4633 }
4634
4635 return VINF_SUCCESS;
4636}
4637
4638/**
4639 * Remove patch from page record
4640 *
4641 * @returns VBox status code.
4642 * @param pVM The VM to operate on.
4643 * @param pPage Page address
4644 * @param pPatch Patch record
4645 */
4646int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4647{
4648 PPATMPATCHPAGE pPatchPage;
4649 int rc;
4650
4651 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4652 Assert(pPatchPage);
4653
4654 if (!pPatchPage)
4655 return VERR_INVALID_PARAMETER;
4656
4657 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4658
4659 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4660 if (pPatchPage->cCount > 1)
4661 {
4662 uint32_t i;
4663
4664 /* Used by multiple patches */
4665 for (i=0;i<pPatchPage->cCount;i++)
4666 {
4667 if (pPatchPage->aPatch[i] == pPatch)
4668 {
4669 pPatchPage->aPatch[i] = 0;
4670 break;
4671 }
4672 }
4673 /* close the gap between the remaining pointers. */
4674 if (i < pPatchPage->cCount - 1)
4675 {
4676 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4677 }
4678 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4679
4680 pPatchPage->cCount--;
4681 }
4682 else
4683 {
4684 PPATMPATCHPAGE pPatchNode;
4685
4686 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4687
4688 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4689 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4690 Assert(pPatchNode && pPatchNode == pPatchPage);
4691
4692 Assert(pPatchPage->aPatch);
4693 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4694 AssertRC(rc);
4695 rc = MMHyperFree(pVM, pPatchPage);
4696 AssertRC(rc);
4697 pVM->patm.s.cPageRecords--;
4698 }
4699 return VINF_SUCCESS;
4700}
4701
4702/**
4703 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4704 *
4705 * @returns VBox status code.
4706 * @param pVM The VM to operate on.
4707 * @param pPatch Patch record
4708 */
4709int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4710{
4711 int rc;
4712 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4713
4714 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4715 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4716 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4717
4718 /** @todo optimize better (large gaps between current and next used page) */
4719 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4720 {
4721 /* Get the closest guest instruction (from above) */
4722 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4723 if ( pGuestToPatchRec
4724 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4725 )
4726 {
4727 /* Code in page really patched -> add record */
4728 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4729 AssertRC(rc);
4730 }
4731 }
4732 pPatch->flags |= PATMFL_CODE_MONITORED;
4733 return VINF_SUCCESS;
4734}
4735
4736/**
4737 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4738 *
4739 * @returns VBox status code.
4740 * @param pVM The VM to operate on.
4741 * @param pPatch Patch record
4742 */
4743int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4744{
4745 int rc;
4746 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4747
4748 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4749 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4750 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4751
4752 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4753 {
4754 /* Get the closest guest instruction (from above) */
4755 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4756 if ( pGuestToPatchRec
4757 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4758 )
4759 {
4760 /* Code in page really patched -> remove record */
4761 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4762 AssertRC(rc);
4763 }
4764 }
4765 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4766 return VINF_SUCCESS;
4767}
4768
4769/**
4770 * Notifies PATM about a (potential) write to code that has been patched.
4771 *
4772 * @returns VBox status code.
4773 * @param pVM The VM to operate on.
4774 * @param GCPtr GC pointer to write address
4775 * @param cbWrite Nr of bytes to write
4776 *
4777 */
4778VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4779{
4780 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4781
4782 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4783
4784 Assert(VM_IS_EMT(pVM));
4785
4786 /* Quick boundary check */
4787 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4788 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4789 )
4790 return VINF_SUCCESS;
4791
4792 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4793
4794 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4795 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4796
4797 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4798 {
4799loop_start:
4800 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4801 if (pPatchPage)
4802 {
4803 uint32_t i;
4804 bool fValidPatchWrite = false;
4805
4806 /* Quick check to see if the write is in the patched part of the page */
4807 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4808 || pPatchPage->pHighestAddrGC < GCPtr)
4809 {
4810 break;
4811 }
4812
4813 for (i=0;i<pPatchPage->cCount;i++)
4814 {
4815 if (pPatchPage->aPatch[i])
4816 {
4817 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4818 RTRCPTR pPatchInstrGC;
4819 //unused: bool fForceBreak = false;
4820
4821 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4822 /** @todo inefficient and includes redundant checks for multiple pages. */
4823 for (uint32_t j=0; j<cbWrite; j++)
4824 {
4825 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4826
4827 if ( pPatch->cbPatchJump
4828 && pGuestPtrGC >= pPatch->pPrivInstrGC
4829 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4830 {
4831 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4832 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4833 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4834 if (rc == VINF_SUCCESS)
4835 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4836 goto loop_start;
4837
4838 continue;
4839 }
4840
4841 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4842 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4843 if (!pPatchInstrGC)
4844 {
4845 RTRCPTR pClosestInstrGC;
4846 uint32_t size;
4847
4848 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4849 if (pPatchInstrGC)
4850 {
4851 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4852 Assert(pClosestInstrGC <= pGuestPtrGC);
4853 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4854 /* Check if this is not a write into a gap between two patches */
4855 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4856 pPatchInstrGC = 0;
4857 }
4858 }
4859 if (pPatchInstrGC)
4860 {
4861 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4862
4863 fValidPatchWrite = true;
4864
4865 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4866 Assert(pPatchToGuestRec);
4867 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4868 {
4869 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4870
4871 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4872 {
4873 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4874
4875 PATMR3MarkDirtyPatch(pVM, pPatch);
4876
4877 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4878 goto loop_start;
4879 }
4880 else
4881 {
4882 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4883 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4884
4885 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4886 pPatchToGuestRec->fDirty = true;
4887
4888 *pInstrHC = 0xCC;
4889
4890 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4891 }
4892 }
4893 /* else already marked dirty */
4894 }
4895 }
4896 }
4897 } /* for each patch */
4898
4899 if (fValidPatchWrite == false)
4900 {
4901 /* Write to a part of the page that either:
4902 * - doesn't contain any code (shared code/data); rather unlikely
4903 * - old code page that's no longer in active use.
4904 */
4905invalid_write_loop_start:
4906 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4907
4908 if (pPatchPage)
4909 {
4910 for (i=0;i<pPatchPage->cCount;i++)
4911 {
4912 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4913
4914 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4915 {
4916 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4917 if (pPatch->flags & PATMFL_IDTHANDLER)
4918 {
4919 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4920
4921 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4922 int rc = patmRemovePatchPages(pVM, pPatch);
4923 AssertRC(rc);
4924 }
4925 else
4926 {
4927 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4928 PATMR3MarkDirtyPatch(pVM, pPatch);
4929 }
4930 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4931 goto invalid_write_loop_start;
4932 }
4933 } /* for */
4934 }
4935 }
4936 }
4937 }
4938 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4939 return VINF_SUCCESS;
4940
4941}
4942
4943/**
4944 * Disable all patches in a flushed page
4945 *
4946 * @returns VBox status code
4947 * @param pVM The VM to operate on.
4948 * @param addr GC address of the page to flush
4949 */
4950/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4951 */
4952VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4953{
4954 addr &= PAGE_BASE_GC_MASK;
4955
4956 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4957 if (pPatchPage)
4958 {
4959 int i;
4960
4961 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4962 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4963 {
4964 if (pPatchPage->aPatch[i])
4965 {
4966 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4967
4968 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4969 PATMR3MarkDirtyPatch(pVM, pPatch);
4970 }
4971 }
4972 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4973 }
4974 return VINF_SUCCESS;
4975}
4976
4977/**
4978 * Checks if the instructions at the specified address has been patched already.
4979 *
4980 * @returns boolean, patched or not
4981 * @param pVM The VM to operate on.
4982 * @param pInstrGC Guest context pointer to instruction
4983 */
4984VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4985{
4986 PPATMPATCHREC pPatchRec;
4987 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4988 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4989 return true;
4990 return false;
4991}
4992
4993/**
4994 * Query the opcode of the original code that was overwritten by the 5 bytes patch jump
4995 *
4996 * @returns VBox status code.
4997 * @param pVM The VM to operate on.
4998 * @param pInstrGC GC address of instr
4999 * @param pByte opcode byte pointer (OUT)
5000 *
5001 */
5002VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5003{
5004 PPATMPATCHREC pPatchRec;
5005
5006 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5007
5008 /* Shortcut. */
5009 if ( !PATMIsEnabled(pVM)
5010 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5011 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5012 {
5013 return VERR_PATCH_NOT_FOUND;
5014 }
5015
5016 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5017 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5018 if ( pPatchRec
5019 && pPatchRec->patch.uState == PATCH_ENABLED
5020 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5021 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5022 {
5023 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5024 *pByte = pPatchRec->patch.aPrivInstr[offset];
5025
5026 if (pPatchRec->patch.cbPatchJump == 1)
5027 {
5028 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5029 }
5030 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5031 return VINF_SUCCESS;
5032 }
5033 return VERR_PATCH_NOT_FOUND;
5034}
5035
5036/**
5037 * Disable patch for privileged instruction at specified location
5038 *
5039 * @returns VBox status code.
5040 * @param pVM The VM to operate on.
5041 * @param pInstr Guest context point to privileged instruction
5042 *
5043 * @note returns failure if patching is not allowed or possible
5044 *
5045 */
5046VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5047{
5048 PPATMPATCHREC pPatchRec;
5049 PPATCHINFO pPatch;
5050
5051 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5052 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5053 if (pPatchRec)
5054 {
5055 int rc = VINF_SUCCESS;
5056
5057 pPatch = &pPatchRec->patch;
5058
5059 /* Already disabled? */
5060 if (pPatch->uState == PATCH_DISABLED)
5061 return VINF_SUCCESS;
5062
5063 /* Clear the IDT entries for the patch we're disabling. */
5064 /* Note: very important as we clear IF in the patch itself */
5065 /** @todo this needs to be changed */
5066 if (pPatch->flags & PATMFL_IDTHANDLER)
5067 {
5068 uint32_t iGate;
5069
5070 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5071 if (iGate != (uint32_t)~0)
5072 {
5073 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5074 if (++cIDTHandlersDisabled < 256)
5075 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5076 }
5077 }
5078
5079 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
5080 if ( pPatch->pPatchBlockOffset
5081 && pPatch->uState == PATCH_ENABLED)
5082 {
5083 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5084 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5085 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5086 }
5087
5088 /* IDT or function patches haven't changed any guest code. */
5089 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5090 {
5091 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5092 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5093
5094 if (pPatch->uState != PATCH_REFUSED)
5095 {
5096 uint8_t temp[16];
5097
5098 Assert(pPatch->cbPatchJump < sizeof(temp));
5099
5100 /* Let's first check if the guest code is still the same. */
5101 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5102 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5103 if (rc == VINF_SUCCESS)
5104 {
5105 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5106
5107 if ( temp[0] != 0xE9 /* jmp opcode */
5108 || *(RTRCINTPTR *)(&temp[1]) != displ
5109 )
5110 {
5111 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5112 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5113 /* Remove it completely */
5114 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5115 rc = PATMR3RemovePatch(pVM, pInstrGC);
5116 AssertRC(rc);
5117 return VWRN_PATCH_REMOVED;
5118 }
5119 patmRemoveJumpToPatch(pVM, pPatch);
5120 }
5121 else
5122 {
5123 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5124 pPatch->uState = PATCH_DISABLE_PENDING;
5125 }
5126 }
5127 else
5128 {
5129 AssertMsgFailed(("Patch was refused!\n"));
5130 return VERR_PATCH_ALREADY_DISABLED;
5131 }
5132 }
5133 else
5134 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5135 {
5136 uint8_t temp[16];
5137
5138 Assert(pPatch->cbPatchJump < sizeof(temp));
5139
5140 /* Let's first check if the guest code is still the same. */
5141 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5142 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5143 if (rc == VINF_SUCCESS)
5144 {
5145 if (temp[0] != 0xCC)
5146 {
5147 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5148 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5149 /* Remove it completely */
5150 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5151 rc = PATMR3RemovePatch(pVM, pInstrGC);
5152 AssertRC(rc);
5153 return VWRN_PATCH_REMOVED;
5154 }
5155 patmDeactivateInt3Patch(pVM, pPatch);
5156 }
5157 }
5158
5159 if (rc == VINF_SUCCESS)
5160 {
5161 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5162 if (pPatch->uState == PATCH_DISABLE_PENDING)
5163 {
5164 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5165 pPatch->uState = PATCH_UNUSABLE;
5166 }
5167 else
5168 if (pPatch->uState != PATCH_DIRTY)
5169 {
5170 pPatch->uOldState = pPatch->uState;
5171 pPatch->uState = PATCH_DISABLED;
5172 }
5173 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5174 }
5175
5176 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5177 return VINF_SUCCESS;
5178 }
5179 Log(("Patch not found!\n"));
5180 return VERR_PATCH_NOT_FOUND;
5181}
5182
5183/**
5184 * Permanently disable patch for privileged instruction at specified location
5185 *
5186 * @returns VBox status code.
5187 * @param pVM The VM to operate on.
5188 * @param pInstr Guest context instruction pointer
5189 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5190 * @param pConflictPatch Conflicting patch
5191 *
5192 */
5193static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5194{
5195#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5196 PATCHINFO patch;
5197 DISCPUSTATE cpu;
5198 R3PTRTYPE(uint8_t *) pInstrHC;
5199 uint32_t opsize;
5200 bool disret;
5201 int rc;
5202
5203 RT_ZERO(patch);
5204 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5205 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5206 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5207 /*
5208 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5209 * with one that jumps right into the conflict patch.
5210 * Otherwise we must disable the conflicting patch to avoid serious problems.
5211 */
5212 if ( disret == true
5213 && (pConflictPatch->flags & PATMFL_CODE32)
5214 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5215 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5216 {
5217 /* Hint patches must be enabled first. */
5218 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5219 {
5220 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5221 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5222 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5223 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5224 /* Enabling might fail if the patched code has changed in the meantime. */
5225 if (rc != VINF_SUCCESS)
5226 return rc;
5227 }
5228
5229 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5230 if (RT_SUCCESS(rc))
5231 {
5232 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5233 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5234 return VINF_SUCCESS;
5235 }
5236 }
5237#endif
5238
5239 if (pConflictPatch->opcode == OP_CLI)
5240 {
5241 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5242 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5243 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5244 if (rc == VWRN_PATCH_REMOVED)
5245 return VINF_SUCCESS;
5246 if (RT_SUCCESS(rc))
5247 {
5248 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5249 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5250 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5251 if (rc == VERR_PATCH_NOT_FOUND)
5252 return VINF_SUCCESS; /* removed already */
5253
5254 AssertRC(rc);
5255 if (RT_SUCCESS(rc))
5256 {
5257 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5258 return VINF_SUCCESS;
5259 }
5260 }
5261 /* else turned into unusable patch (see below) */
5262 }
5263 else
5264 {
5265 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5266 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5267 if (rc == VWRN_PATCH_REMOVED)
5268 return VINF_SUCCESS;
5269 }
5270
5271 /* No need to monitor the code anymore. */
5272 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5273 {
5274 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5275 AssertRC(rc);
5276 }
5277 pConflictPatch->uState = PATCH_UNUSABLE;
5278 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5279 return VERR_PATCH_DISABLED;
5280}
5281
5282/**
5283 * Enable patch for privileged instruction at specified location
5284 *
5285 * @returns VBox status code.
5286 * @param pVM The VM to operate on.
5287 * @param pInstr Guest context point to privileged instruction
5288 *
5289 * @note returns failure if patching is not allowed or possible
5290 *
5291 */
5292VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5293{
5294 PPATMPATCHREC pPatchRec;
5295 PPATCHINFO pPatch;
5296
5297 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5298 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5299 if (pPatchRec)
5300 {
5301 int rc = VINF_SUCCESS;
5302
5303 pPatch = &pPatchRec->patch;
5304
5305 if (pPatch->uState == PATCH_DISABLED)
5306 {
5307 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5308 {
5309 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5310 uint8_t temp[16];
5311
5312 Assert(pPatch->cbPatchJump < sizeof(temp));
5313
5314 /* Let's first check if the guest code is still the same. */
5315 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5316 AssertRC(rc2);
5317 if (rc2 == VINF_SUCCESS)
5318 {
5319 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5320 {
5321 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5322 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5323 /* Remove it completely */
5324 rc = PATMR3RemovePatch(pVM, pInstrGC);
5325 AssertRC(rc);
5326 return VERR_PATCH_NOT_FOUND;
5327 }
5328
5329 PATMP2GLOOKUPREC cacheRec;
5330 RT_ZERO(cacheRec);
5331 cacheRec.pPatch = pPatch;
5332
5333 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5334 /* Free leftover lock if any. */
5335 if (cacheRec.Lock.pvMap)
5336 {
5337 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5338 cacheRec.Lock.pvMap = NULL;
5339 }
5340 AssertRC(rc2);
5341 if (RT_FAILURE(rc2))
5342 return rc2;
5343
5344#ifdef DEBUG
5345 {
5346 DISCPUSTATE cpu;
5347 char szOutput[256];
5348 uint32_t opsize, i = 0;
5349 bool disret;
5350 i = 0;
5351 while(i < pPatch->cbPatchJump)
5352 {
5353 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5354 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
5355 Log(("Renewed patch instr: %s", szOutput));
5356 i += opsize;
5357 }
5358 }
5359#endif
5360 }
5361 }
5362 else
5363 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5364 {
5365 uint8_t temp[16];
5366
5367 Assert(pPatch->cbPatchJump < sizeof(temp));
5368
5369 /* Let's first check if the guest code is still the same. */
5370 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5371 AssertRC(rc2);
5372
5373 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5374 {
5375 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5376 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5377 rc = PATMR3RemovePatch(pVM, pInstrGC);
5378 AssertRC(rc);
5379 return VERR_PATCH_NOT_FOUND;
5380 }
5381
5382 rc2 = patmActivateInt3Patch(pVM, pPatch);
5383 if (RT_FAILURE(rc2))
5384 return rc2;
5385 }
5386
5387 pPatch->uState = pPatch->uOldState; //restore state
5388
5389 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5390 if (pPatch->pPatchBlockOffset)
5391 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5392
5393 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5394 }
5395 else
5396 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5397
5398 return rc;
5399 }
5400 return VERR_PATCH_NOT_FOUND;
5401}
5402
5403/**
5404 * Remove patch for privileged instruction at specified location
5405 *
5406 * @returns VBox status code.
5407 * @param pVM The VM to operate on.
5408 * @param pPatchRec Patch record
5409 * @param fForceRemove Remove *all* patches
5410 */
5411int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5412{
5413 PPATCHINFO pPatch;
5414
5415 pPatch = &pPatchRec->patch;
5416
5417 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5418 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5419 {
5420 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5421 return VERR_ACCESS_DENIED;
5422 }
5423 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5424
5425 /* Note: NEVER EVER REUSE PATCH MEMORY */
5426 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5427
5428 if (pPatchRec->patch.pPatchBlockOffset)
5429 {
5430 PAVLOU32NODECORE pNode;
5431
5432 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5433 Assert(pNode);
5434 }
5435
5436 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5437 {
5438 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5439 AssertRC(rc);
5440 }
5441
5442#ifdef VBOX_WITH_STATISTICS
5443 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5444 {
5445 STAMR3Deregister(pVM, &pPatchRec->patch);
5446#ifndef DEBUG_sandervl
5447 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5448 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5449 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5450 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5451 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5452 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5453 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5454 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5455 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5456 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5457 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5458 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5459 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5460 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5461#endif
5462 }
5463#endif
5464
5465 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5466 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5467 pPatch->nrPatch2GuestRecs = 0;
5468 Assert(pPatch->Patch2GuestAddrTree == 0);
5469
5470 patmEmptyTree(pVM, &pPatch->FixupTree);
5471 pPatch->nrFixups = 0;
5472 Assert(pPatch->FixupTree == 0);
5473
5474 if (pPatchRec->patch.pTempInfo)
5475 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5476
5477 /* Note: might fail, because it has already been removed (e.g. during reset). */
5478 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5479
5480 /* Free the patch record */
5481 MMHyperFree(pVM, pPatchRec);
5482 return VINF_SUCCESS;
5483}
5484
5485/**
5486 * RTAvlU32DoWithAll() worker.
5487 * Checks whether the current trampoline instruction is the jump to the target patch
5488 * and updates the displacement to jump to the new target.
5489 *
5490 * @returns VBox status code.
5491 * @retval VERR_ALREADY_EXISTS if the jump was found.
5492 * @param pNode The current patch to guest record to check.
5493 * @param pvUser The refresh state.
5494 */
5495static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5496{
5497 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5498 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5499 PVM pVM = pRefreshPatchState->pVM;
5500
5501 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5502
5503 /*
5504 * Check if the patch instruction starts with a jump.
5505 * ASSUMES that there is no other patch to guest record that starts
5506 * with a jump.
5507 */
5508 if (*pPatchInstr == 0xE9)
5509 {
5510 /* Jump found, update the displacement. */
5511 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5512 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5513 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5514
5515 LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
5516 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5517
5518 *(uint32_t *)&pPatchInstr[1] = displ;
5519 return VERR_ALREADY_EXISTS; /** @todo better return code */
5520 }
5521
5522 return VINF_SUCCESS;
5523}
5524
5525/**
5526 * Attempt to refresh the patch by recompiling its entire code block
5527 *
5528 * @returns VBox status code.
5529 * @param pVM The VM to operate on.
5530 * @param pPatchRec Patch record
5531 */
5532int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5533{
5534 PPATCHINFO pPatch;
5535 int rc;
5536 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5537 PTRAMPREC pTrampolinePatchesHead = NULL;
5538
5539 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5540
5541 pPatch = &pPatchRec->patch;
5542 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5543 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5544 {
5545 if (!pPatch->pTrampolinePatchesHead)
5546 {
5547 /*
5548 * It is sometimes possible that there are trampoline patches to this patch
5549 * but they are not recorded (after a saved state load for example).
5550 * Refuse to refresh those patches.
5551 * Can hurt performance in theory if the patched code is modified by the guest
5552 * and is executed often. However most of the time states are saved after the guest
5553 * code was modified and is not updated anymore afterwards so this shouldn't be a
5554 * big problem.
5555 */
5556 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5557 return VERR_PATCHING_REFUSED;
5558 }
5559 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5560 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5561 }
5562
5563 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5564
5565 rc = PATMR3DisablePatch(pVM, pInstrGC);
5566 AssertRC(rc);
5567
5568 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5569 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5570#ifdef VBOX_WITH_STATISTICS
5571 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5572 {
5573 STAMR3Deregister(pVM, &pPatchRec->patch);
5574#ifndef DEBUG_sandervl
5575 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5576 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5577 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5578 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5579 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5580 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5581 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5582 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5583 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5584 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5585 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5586 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5587 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5588 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5589#endif
5590 }
5591#endif
5592
5593 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5594
5595 /* Attempt to install a new patch. */
5596 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5597 if (RT_SUCCESS(rc))
5598 {
5599 RTRCPTR pPatchTargetGC;
5600 PPATMPATCHREC pNewPatchRec;
5601
5602 /* Determine target address in new patch */
5603 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5604 Assert(pPatchTargetGC);
5605 if (!pPatchTargetGC)
5606 {
5607 rc = VERR_PATCHING_REFUSED;
5608 goto failure;
5609 }
5610
5611 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5612 pPatch->uCurPatchOffset = 0;
5613
5614 /* insert jump to new patch in old patch block */
5615 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5616 if (RT_FAILURE(rc))
5617 goto failure;
5618
5619 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5620 Assert(pNewPatchRec); /* can't fail */
5621
5622 /* Remove old patch (only do that when everything is finished) */
5623 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5624 AssertRC(rc2);
5625
5626 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5627 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5628 Assert(fInserted); NOREF(fInserted);
5629
5630 LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5631 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5632
5633 /* Used by another patch, so don't remove it! */
5634 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5635
5636 if (pTrampolinePatchesHead)
5637 {
5638 /* Update all trampoline patches to jump to the new patch. */
5639 PTRAMPREC pTrampRec = NULL;
5640 PATMREFRESHPATCH RefreshPatch;
5641
5642 RefreshPatch.pVM = pVM;
5643 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5644
5645 pTrampRec = pTrampolinePatchesHead;
5646
5647 while (pTrampRec)
5648 {
5649 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5650
5651 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5652 /*
5653 * We have to find the right patch2guest record because there might be others
5654 * for statistics.
5655 */
5656 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5657 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5658 Assert(rc == VERR_ALREADY_EXISTS);
5659 rc = VINF_SUCCESS;
5660 pTrampRec = pTrampRec->pNext;
5661 }
5662 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5663 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5664 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5665 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5666 }
5667 }
5668
5669failure:
5670 if (RT_FAILURE(rc))
5671 {
5672 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
5673
5674 /* Remove the new inactive patch */
5675 rc = PATMR3RemovePatch(pVM, pInstrGC);
5676 AssertRC(rc);
5677
5678 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5679 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5680 Assert(fInserted); NOREF(fInserted);
5681
5682 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5683 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5684 AssertRC(rc2);
5685
5686 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5687 }
5688 return rc;
5689}
5690
5691/**
5692 * Find patch for privileged instruction at specified location
5693 *
5694 * @returns Patch structure pointer if found; else NULL
5695 * @param pVM The VM to operate on.
5696 * @param pInstr Guest context point to instruction that might lie within 5 bytes of an existing patch jump
5697 * @param fIncludeHints Include hinted patches or not
5698 *
5699 */
5700PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5701{
5702 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5703 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5704 if (pPatchRec)
5705 {
5706 if ( pPatchRec->patch.uState == PATCH_ENABLED
5707 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5708 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5709 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5710 {
5711 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5712 return &pPatchRec->patch;
5713 }
5714 else
5715 if ( fIncludeHints
5716 && pPatchRec->patch.uState == PATCH_DISABLED
5717 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5718 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5719 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5720 {
5721 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5722 return &pPatchRec->patch;
5723 }
5724 }
5725 return NULL;
5726}
5727
5728/**
5729 * Checks whether the GC address is inside a generated patch jump
5730 *
5731 * @returns true -> yes, false -> no
5732 * @param pVM The VM to operate on.
5733 * @param pAddr Guest context address
5734 * @param pPatchAddr Guest context patch address (if true)
5735 */
5736VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5737{
5738 RTRCPTR addr;
5739 PPATCHINFO pPatch;
5740
5741 if (PATMIsEnabled(pVM) == false)
5742 return false;
5743
5744 if (pPatchAddr == NULL)
5745 pPatchAddr = &addr;
5746
5747 *pPatchAddr = 0;
5748
5749 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5750 if (pPatch)
5751 *pPatchAddr = pPatch->pPrivInstrGC;
5752
5753 return *pPatchAddr == 0 ? false : true;
5754}
5755
5756/**
5757 * Remove patch for privileged instruction at specified location
5758 *
5759 * @returns VBox status code.
5760 * @param pVM The VM to operate on.
5761 * @param pInstr Guest context point to privileged instruction
5762 *
5763 * @note returns failure if patching is not allowed or possible
5764 *
5765 */
5766VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5767{
5768 PPATMPATCHREC pPatchRec;
5769
5770 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5771 if (pPatchRec)
5772 {
5773 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5774 if (rc == VWRN_PATCH_REMOVED)
5775 return VINF_SUCCESS;
5776
5777 return PATMRemovePatch(pVM, pPatchRec, false);
5778 }
5779 AssertFailed();
5780 return VERR_PATCH_NOT_FOUND;
5781}
5782
5783/**
5784 * Mark patch as dirty
5785 *
5786 * @returns VBox status code.
5787 * @param pVM The VM to operate on.
5788 * @param pPatch Patch record
5789 *
5790 * @note returns failure if patching is not allowed or possible
5791 *
5792 */
5793VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5794{
5795 if (pPatch->pPatchBlockOffset)
5796 {
5797 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5798 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5799 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5800 }
5801
5802 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5803 /* Put back the replaced instruction. */
5804 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5805 if (rc == VWRN_PATCH_REMOVED)
5806 return VINF_SUCCESS;
5807
5808 /* Note: we don't restore patch pages for patches that are not enabled! */
5809 /* Note: be careful when changing this behaviour!! */
5810
5811 /* The patch pages are no longer marked for self-modifying code detection */
5812 if (pPatch->flags & PATMFL_CODE_MONITORED)
5813 {
5814 rc = patmRemovePatchPages(pVM, pPatch);
5815 AssertRCReturn(rc, rc);
5816 }
5817 pPatch->uState = PATCH_DIRTY;
5818
5819 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5820 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5821
5822 return VINF_SUCCESS;
5823}
5824
5825/**
5826 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5827 *
5828 * @returns VBox status code.
5829 * @param pVM The VM to operate on.
5830 * @param pPatch Patch block structure pointer
5831 * @param pPatchGC GC address in patch block
5832 */
5833RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5834{
5835 Assert(pPatch->Patch2GuestAddrTree);
5836 /* Get the closest record from below. */
5837 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5838 if (pPatchToGuestRec)
5839 return pPatchToGuestRec->pOrgInstrGC;
5840
5841 return 0;
5842}
5843
5844/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5845 *
5846 * @returns corresponding GC pointer in patch block
5847 * @param pVM The VM to operate on.
5848 * @param pPatch Current patch block pointer
5849 * @param pInstrGC Guest context pointer to privileged instruction
5850 *
5851 */
5852RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5853{
5854 if (pPatch->Guest2PatchAddrTree)
5855 {
5856 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5857 if (pGuestToPatchRec)
5858 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5859 }
5860
5861 return 0;
5862}
5863
5864/* Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5865 *
5866 * @returns corresponding GC pointer in patch block
5867 * @param pVM The VM to operate on.
5868 * @param pPatch Current patch block pointer
5869 * @param pInstrGC Guest context pointer to privileged instruction
5870 *
5871 */
5872RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5873{
5874 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5875 if (pGuestToPatchRec)
5876 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5877
5878 return 0;
5879}
5880
5881/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5882 *
5883 * @returns corresponding GC pointer in patch block
5884 * @param pVM The VM to operate on.
5885 * @param pInstrGC Guest context pointer to privileged instruction
5886 *
5887 */
5888VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5889{
5890 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5891 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5892 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5893 else
5894 return 0;
5895}
5896
5897/**
5898 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5899 *
5900 * @returns original GC instruction pointer or 0 if not found
5901 * @param pVM The VM to operate on.
5902 * @param pPatchGC GC address in patch block
5903 * @param pEnmState State of the translated address (out)
5904 *
5905 */
5906VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5907{
5908 PPATMPATCHREC pPatchRec;
5909 void *pvPatchCoreOffset;
5910 RTRCPTR pPrivInstrGC;
5911
5912 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5913 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5914 if (pvPatchCoreOffset == 0)
5915 {
5916 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5917 return 0;
5918 }
5919 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5920 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5921 if (pEnmState)
5922 {
5923 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5924 || pPatchRec->patch.uState == PATCH_DIRTY
5925 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5926 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5927 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5928
5929 if ( !pPrivInstrGC
5930 || pPatchRec->patch.uState == PATCH_UNUSABLE
5931 || pPatchRec->patch.uState == PATCH_REFUSED)
5932 {
5933 pPrivInstrGC = 0;
5934 *pEnmState = PATMTRANS_FAILED;
5935 }
5936 else
5937 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5938 {
5939 *pEnmState = PATMTRANS_INHIBITIRQ;
5940 }
5941 else
5942 if ( pPatchRec->patch.uState == PATCH_ENABLED
5943 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5944 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5945 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5946 {
5947 *pEnmState = PATMTRANS_OVERWRITTEN;
5948 }
5949 else
5950 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5951 {
5952 *pEnmState = PATMTRANS_OVERWRITTEN;
5953 }
5954 else
5955 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5956 {
5957 *pEnmState = PATMTRANS_PATCHSTART;
5958 }
5959 else
5960 *pEnmState = PATMTRANS_SAFE;
5961 }
5962 return pPrivInstrGC;
5963}
5964
5965/**
5966 * Returns the GC pointer of the patch for the specified GC address
5967 *
5968 * @returns VBox status code.
5969 * @param pVM The VM to operate on.
5970 * @param pAddrGC Guest context address
5971 */
5972VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5973{
5974 PPATMPATCHREC pPatchRec;
5975
5976 /* Find the patch record. */
5977 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5978 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5979 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5980 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5981 else
5982 return 0;
5983}
5984
5985/**
5986 * Attempt to recover dirty instructions
5987 *
5988 * @returns VBox status code.
5989 * @param pVM The VM to operate on.
5990 * @param pCtx CPU context
5991 * @param pPatch Patch record
5992 * @param pPatchToGuestRec Patch to guest address record
5993 * @param pEip GC pointer of trapping instruction
5994 */
5995static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5996{
5997 DISCPUSTATE CpuOld, CpuNew;
5998 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5999 int rc;
6000 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6001 uint32_t cbDirty;
6002 PRECPATCHTOGUEST pRec;
6003 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6004 PVMCPU pVCpu = VMMGetCpu0(pVM);
6005 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6006
6007 pRec = pPatchToGuestRec;
6008 pCurInstrGC = pOrgInstrGC;
6009 pCurPatchInstrGC = pEip;
6010 cbDirty = 0;
6011 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6012
6013 /* Find all adjacent dirty instructions */
6014 while (true)
6015 {
6016 if (pRec->fJumpTarget)
6017 {
6018 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6019 pRec->fDirty = false;
6020 return VERR_PATCHING_REFUSED;
6021 }
6022
6023 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6024 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6025 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6026
6027 /* Only harmless instructions are acceptable. */
6028 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6029 if ( RT_FAILURE(rc)
6030 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
6031 {
6032 if (RT_SUCCESS(rc))
6033 cbDirty += CpuOld.opsize;
6034 else
6035 if (!cbDirty)
6036 cbDirty = 1;
6037 break;
6038 }
6039
6040#ifdef DEBUG
6041 char szBuf[256];
6042 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6043 szBuf, sizeof(szBuf), NULL);
6044 Log(("DIRTY: %s\n", szBuf));
6045#endif
6046 /* Mark as clean; if we fail we'll let it always fault. */
6047 pRec->fDirty = false;
6048
6049 /* Remove old lookup record. */
6050 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6051 pPatchToGuestRec = NULL;
6052
6053 pCurPatchInstrGC += CpuOld.opsize;
6054 cbDirty += CpuOld.opsize;
6055
6056 /* Let's see if there's another dirty instruction right after. */
6057 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6058 if (!pRec || !pRec->fDirty)
6059 break; /* no more dirty instructions */
6060
6061 /* In case of complex instructions the next guest instruction could be quite far off. */
6062 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6063 }
6064
6065 if ( RT_SUCCESS(rc)
6066 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
6067 )
6068 {
6069 uint32_t cbLeft;
6070
6071 pCurPatchInstrHC = pPatchInstrHC;
6072 pCurPatchInstrGC = pEip;
6073 cbLeft = cbDirty;
6074
6075 while (cbLeft && RT_SUCCESS(rc))
6076 {
6077 bool fValidInstr;
6078
6079 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6080
6081 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
6082 if ( !fValidInstr
6083 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
6084 )
6085 {
6086 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6087
6088 if ( pTargetGC >= pOrgInstrGC
6089 && pTargetGC <= pOrgInstrGC + cbDirty
6090 )
6091 {
6092 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6093 fValidInstr = true;
6094 }
6095 }
6096
6097 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6098 if ( rc == VINF_SUCCESS
6099 && CpuNew.opsize <= cbLeft /* must still fit */
6100 && fValidInstr
6101 )
6102 {
6103#ifdef DEBUG
6104 char szBuf[256];
6105 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6106 szBuf, sizeof(szBuf), NULL);
6107 Log(("NEW: %s\n", szBuf));
6108#endif
6109
6110 /* Copy the new instruction. */
6111 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
6112 AssertRC(rc);
6113
6114 /* Add a new lookup record for the duplicated instruction. */
6115 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6116 }
6117 else
6118 {
6119#ifdef DEBUG
6120 char szBuf[256];
6121 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6122 szBuf, sizeof(szBuf), NULL);
6123 Log(("NEW: %s (FAILED)\n", szBuf));
6124#endif
6125 /* Restore the old lookup record for the duplicated instruction. */
6126 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6127
6128 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6129 rc = VERR_PATCHING_REFUSED;
6130 break;
6131 }
6132 pCurInstrGC += CpuNew.opsize;
6133 pCurPatchInstrHC += CpuNew.opsize;
6134 pCurPatchInstrGC += CpuNew.opsize;
6135 cbLeft -= CpuNew.opsize;
6136
6137 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6138 if (!cbLeft)
6139 {
6140 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6141 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6142 {
6143 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6144 if (pRec)
6145 {
6146 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6147 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6148
6149 Assert(!pRec->fDirty);
6150
6151 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6152 if (cbFiller >= SIZEOF_NEARJUMP32)
6153 {
6154 pPatchFillHC[0] = 0xE9;
6155 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6156#ifdef DEBUG
6157 char szBuf[256];
6158 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6159 szBuf, sizeof(szBuf), NULL);
6160 Log(("FILL: %s\n", szBuf));
6161#endif
6162 }
6163 else
6164 {
6165 for (unsigned i = 0; i < cbFiller; i++)
6166 {
6167 pPatchFillHC[i] = 0x90; /* NOP */
6168#ifdef DEBUG
6169 char szBuf[256];
6170 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6171 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6172 Log(("FILL: %s\n", szBuf));
6173#endif
6174 }
6175 }
6176 }
6177 }
6178 }
6179 }
6180 }
6181 else
6182 rc = VERR_PATCHING_REFUSED;
6183
6184 if (RT_SUCCESS(rc))
6185 {
6186 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6187 }
6188 else
6189 {
6190 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6191 Assert(cbDirty);
6192
6193 /* Mark the whole instruction stream with breakpoints. */
6194 if (cbDirty)
6195 memset(pPatchInstrHC, 0xCC, cbDirty);
6196
6197 if ( pVM->patm.s.fOutOfMemory == false
6198 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6199 {
6200 rc = patmR3RefreshPatch(pVM, pPatch);
6201 if (RT_FAILURE(rc))
6202 {
6203 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6204 }
6205 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6206 rc = VERR_PATCHING_REFUSED;
6207 }
6208 }
6209 return rc;
6210}
6211
6212/**
6213 * Handle trap inside patch code
6214 *
6215 * @returns VBox status code.
6216 * @param pVM The VM to operate on.
6217 * @param pCtx CPU context
6218 * @param pEip GC pointer of trapping instruction
6219 * @param ppNewEip GC pointer to new instruction
6220 */
6221VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6222{
6223 PPATMPATCHREC pPatch = 0;
6224 void *pvPatchCoreOffset;
6225 RTRCUINTPTR offset;
6226 RTRCPTR pNewEip;
6227 int rc ;
6228 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6229 PVMCPU pVCpu = VMMGetCpu0(pVM);
6230
6231 Assert(pVM->cCpus == 1);
6232
6233 pNewEip = 0;
6234 *ppNewEip = 0;
6235
6236 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6237
6238 /* Find the patch record. */
6239 /* Note: there might not be a patch to guest translation record (global function) */
6240 offset = pEip - pVM->patm.s.pPatchMemGC;
6241 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6242 if (pvPatchCoreOffset)
6243 {
6244 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6245
6246 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6247
6248 if (pPatch->patch.uState == PATCH_DIRTY)
6249 {
6250 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6251 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6252 {
6253 /* Function duplication patches set fPIF to 1 on entry */
6254 pVM->patm.s.pGCStateHC->fPIF = 1;
6255 }
6256 }
6257 else
6258 if (pPatch->patch.uState == PATCH_DISABLED)
6259 {
6260 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6261 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6262 {
6263 /* Function duplication patches set fPIF to 1 on entry */
6264 pVM->patm.s.pGCStateHC->fPIF = 1;
6265 }
6266 }
6267 else
6268 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6269 {
6270 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6271
6272 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6273 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6274 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6275 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6276 }
6277
6278 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6279 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6280
6281 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6282 pPatch->patch.cTraps++;
6283 PATM_STAT_FAULT_INC(&pPatch->patch);
6284 }
6285 else
6286 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6287
6288 /* Check if we were interrupted in PATM generated instruction code. */
6289 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6290 {
6291 DISCPUSTATE Cpu;
6292 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6293 AssertRC(rc);
6294
6295 if ( rc == VINF_SUCCESS
6296 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6297 || Cpu.pCurInstr->opcode == OP_PUSH
6298 || Cpu.pCurInstr->opcode == OP_CALL)
6299 )
6300 {
6301 uint64_t fFlags;
6302
6303 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6304
6305 if (Cpu.pCurInstr->opcode == OP_PUSH)
6306 {
6307 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6308 if ( rc == VINF_SUCCESS
6309 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6310 {
6311 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6312
6313 /* Reset the PATM stack. */
6314 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6315
6316 pVM->patm.s.pGCStateHC->fPIF = 1;
6317
6318 Log(("Faulting push -> go back to the original instruction\n"));
6319
6320 /* continue at the original instruction */
6321 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6322 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6323 return VINF_SUCCESS;
6324 }
6325 }
6326
6327 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6328 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6329 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6330 if (rc == VINF_SUCCESS)
6331 {
6332 /* The guest page *must* be present. */
6333 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6334 if ( rc == VINF_SUCCESS
6335 && (fFlags & X86_PTE_P))
6336 {
6337 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6338 return VINF_PATCH_CONTINUE;
6339 }
6340 }
6341 }
6342 else
6343 if (pPatch->patch.pPrivInstrGC == pNewEip)
6344 {
6345 /* Invalidated patch or first instruction overwritten.
6346 * We can ignore the fPIF state in this case.
6347 */
6348 /* Reset the PATM stack. */
6349 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6350
6351 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6352
6353 pVM->patm.s.pGCStateHC->fPIF = 1;
6354
6355 /* continue at the original instruction */
6356 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6357 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6358 return VINF_SUCCESS;
6359 }
6360
6361 char szBuf[256];
6362 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6363
6364 /* Very bad. We crashed in emitted code. Probably stack? */
6365 if (pPatch)
6366 {
6367 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6368 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6369 }
6370 else
6371 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6372 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6373 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6374 }
6375
6376 /* From here on, we must have a valid patch to guest translation. */
6377 if (pvPatchCoreOffset == 0)
6378 {
6379 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6380 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6381 return VERR_PATCH_NOT_FOUND;
6382 }
6383
6384 /* Take care of dirty/changed instructions. */
6385 if (pPatchToGuestRec->fDirty)
6386 {
6387 Assert(pPatchToGuestRec->Core.Key == offset);
6388 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6389
6390 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6391 if (RT_SUCCESS(rc))
6392 {
6393 /* Retry the current instruction. */
6394 pNewEip = pEip;
6395 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6396 }
6397 else
6398 {
6399 /* Reset the PATM stack. */
6400 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6401
6402 rc = VINF_SUCCESS; /* Continue at original instruction. */
6403 }
6404
6405 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6406 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6407 return rc;
6408 }
6409
6410#ifdef VBOX_STRICT
6411 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6412 {
6413 DISCPUSTATE cpu;
6414 bool disret;
6415 uint32_t opsize;
6416 PATMP2GLOOKUPREC cacheRec;
6417 RT_ZERO(cacheRec);
6418 cacheRec.pPatch = &pPatch->patch;
6419
6420 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6421 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6422 if (cacheRec.Lock.pvMap)
6423 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6424
6425 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6426 {
6427 RTRCPTR retaddr;
6428 PCPUMCTX pCtx2;
6429
6430 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6431
6432 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6433 AssertRC(rc);
6434
6435 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6436 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6437 }
6438 }
6439#endif
6440
6441 /* Return original address, correct by subtracting the CS base address. */
6442 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6443
6444 /* Reset the PATM stack. */
6445 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6446
6447 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6448 {
6449 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6450 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6451#ifdef VBOX_STRICT
6452 DISCPUSTATE cpu;
6453 bool disret;
6454 uint32_t opsize;
6455 PATMP2GLOOKUPREC cacheRec;
6456 RT_ZERO(cacheRec);
6457 cacheRec.pPatch = &pPatch->patch;
6458
6459 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6460 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6461 if (cacheRec.Lock.pvMap)
6462 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6463
6464 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6465 {
6466 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6467 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6468 if (cacheRec.Lock.pvMap)
6469 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6470
6471 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6472 }
6473#endif
6474 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6475 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6476 }
6477
6478 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6479#ifdef LOG_ENABLED
6480 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6481#endif
6482 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6483 {
6484 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6485 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6486 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6487 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6488 return VERR_PATCH_DISABLED;
6489 }
6490
6491#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6492 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6493 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6494 {
6495 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6496 //we are only wasting time, back out the patch
6497 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6498 pTrapRec->pNextPatchInstr = 0;
6499 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6500 return VERR_PATCH_DISABLED;
6501 }
6502#endif
6503
6504 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6505 return VINF_SUCCESS;
6506}
6507
6508
6509/**
6510 * Handle page-fault in monitored page
6511 *
6512 * @returns VBox status code.
6513 * @param pVM The VM to operate on.
6514 */
6515VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6516{
6517 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6518
6519 addr &= PAGE_BASE_GC_MASK;
6520
6521 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6522 AssertRC(rc); NOREF(rc);
6523
6524 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6525 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6526 {
6527 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6528 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6529 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6530 if (rc == VWRN_PATCH_REMOVED)
6531 return VINF_SUCCESS;
6532
6533 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6534
6535 if (addr == pPatchRec->patch.pPrivInstrGC)
6536 addr++;
6537 }
6538
6539 for(;;)
6540 {
6541 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6542
6543 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6544 break;
6545
6546 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6547 {
6548 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6549 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6550 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6551 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6552 }
6553 addr = pPatchRec->patch.pPrivInstrGC + 1;
6554 }
6555
6556 pVM->patm.s.pvFaultMonitor = 0;
6557 return VINF_SUCCESS;
6558}
6559
6560
6561#ifdef VBOX_WITH_STATISTICS
6562
6563static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6564{
6565 if (pPatch->flags & PATMFL_SYSENTER)
6566 {
6567 return "SYSENT";
6568 }
6569 else
6570 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6571 {
6572 static char szTrap[16];
6573 uint32_t iGate;
6574
6575 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6576 if (iGate < 256)
6577 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6578 else
6579 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6580 return szTrap;
6581 }
6582 else
6583 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6584 return "DUPFUNC";
6585 else
6586 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6587 return "FUNCCALL";
6588 else
6589 if (pPatch->flags & PATMFL_TRAMPOLINE)
6590 return "TRAMP";
6591 else
6592 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6593}
6594
6595static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6596{
6597 switch(pPatch->uState)
6598 {
6599 case PATCH_ENABLED:
6600 return "ENA";
6601 case PATCH_DISABLED:
6602 return "DIS";
6603 case PATCH_DIRTY:
6604 return "DIR";
6605 case PATCH_UNUSABLE:
6606 return "UNU";
6607 case PATCH_REFUSED:
6608 return "REF";
6609 case PATCH_DISABLE_PENDING:
6610 return "DIP";
6611 default:
6612 AssertFailed();
6613 return " ";
6614 }
6615}
6616
6617/**
6618 * Resets the sample.
6619 * @param pVM The VM handle.
6620 * @param pvSample The sample registered using STAMR3RegisterCallback.
6621 */
6622static void patmResetStat(PVM pVM, void *pvSample)
6623{
6624 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6625 Assert(pPatch);
6626
6627 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6628 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6629}
6630
6631/**
6632 * Prints the sample into the buffer.
6633 *
6634 * @param pVM The VM handle.
6635 * @param pvSample The sample registered using STAMR3RegisterCallback.
6636 * @param pszBuf The buffer to print into.
6637 * @param cchBuf The size of the buffer.
6638 */
6639static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6640{
6641 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6642 Assert(pPatch);
6643
6644 Assert(pPatch->uState != PATCH_REFUSED);
6645 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6646
6647 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6648 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6649 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6650}
6651
6652/**
6653 * Returns the GC address of the corresponding patch statistics counter
6654 *
6655 * @returns Stat address
6656 * @param pVM The VM to operate on.
6657 * @param pPatch Patch structure
6658 */
6659RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6660{
6661 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6662 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6663}
6664
6665#endif /* VBOX_WITH_STATISTICS */
6666
6667#ifdef VBOX_WITH_DEBUGGER
6668/**
6669 * The '.patmoff' command.
6670 *
6671 * @returns VBox status.
6672 * @param pCmd Pointer to the command descriptor (as registered).
6673 * @param pCmdHlp Pointer to command helper functions.
6674 * @param pVM Pointer to the current VM (if any).
6675 * @param paArgs Pointer to (readonly) array of arguments.
6676 * @param cArgs Number of arguments in the array.
6677 */
6678static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6679{
6680 /*
6681 * Validate input.
6682 */
6683 if (!pVM)
6684 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6685
6686 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6687 PATMR3AllowPatching(pVM, false);
6688 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6689}
6690
6691/**
6692 * The '.patmon' command.
6693 *
6694 * @returns VBox status.
6695 * @param pCmd Pointer to the command descriptor (as registered).
6696 * @param pCmdHlp Pointer to command helper functions.
6697 * @param pVM Pointer to the current VM (if any).
6698 * @param paArgs Pointer to (readonly) array of arguments.
6699 * @param cArgs Number of arguments in the array.
6700 */
6701static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6702{
6703 /*
6704 * Validate input.
6705 */
6706 if (!pVM)
6707 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6708
6709 PATMR3AllowPatching(pVM, true);
6710 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6711 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6712}
6713#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette