VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 50575

Last change on this file since 50575 was 50575, checked in by vboxsync on 2014-02-25

VMM: Added SSMR3RegisterStub and used it to provide saved state load-exec stubs for ignoring CSAM and PATM state when VBOX_WITH_RAW_MODE isn't defined or when HM is active.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 260.5 KB
1/* $Id: PATM.cpp 50575 2014-02-25 13:07:16Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/hm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/cfgm.h>
36#include <VBox/param.h>
37#include <VBox/vmm/selm.h>
38#include <VBox/vmm/csam.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/uvm.h>
44#include <VBox/dbg.h>
45#include <VBox/err.h>
46#include <VBox/log.h>
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <VBox/dis.h>
50#include <VBox/disopcode.h>
51#include "internal/pgm.h"
52
53#include <iprt/string.h>
54#include "PATMA.h"
55
56//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
57//#define PATM_DISABLE_ALL
58
59/**
60 * Refresh trampoline patch state.
61 */
62typedef struct PATMREFRESHPATCH
63{
64 /** Pointer to the VM structure. */
65 PVM pVM;
66 /** The trampoline patch record. */
67 PPATCHINFO pPatchTrampoline;
68 /** The new patch we want to jump to. */
69 PPATCHINFO pPatchRec;
70} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
71
72
73#define PATMREAD_RAWCODE 1 /* read code as-is */
74#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
75#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
76
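/*
 * Minimal sketch of how these read flags are typically combined before being
 * handed to the disassembly wrappers further down (patmR3DisInstr and friends).
 * The fPatchChecked condition and the local variables are hypothetical; pVM,
 * pPatch and pCurInstrGC are assumed to be in scope at the call site.
 */
#if 0 /* example only */
    DISCPUSTATE CpuState;
    uint32_t    cbInstr;
    uint32_t    fReadFlags = PATMREAD_ORGCODE;   /* see the original guest bytes, not patched ones */
    if (fPatchChecked)
        fReadFlags |= PATMREAD_NOCHECK;          /* caller already ruled out patch conflicts */
    bool fOk = patmR3DisInstr(pVM, pPatch, pCurInstrGC, NULL, fReadFlags, &CpuState, &cbInstr);
#endif
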
77/*
78 * Private structure used during disassembly
79 */
80typedef struct
81{
82 PVM pVM;
83 PPATCHINFO pPatchInfo;
84 R3PTRTYPE(uint8_t *) pbInstrHC;
85 RTRCPTR pInstrGC;
86 uint32_t fReadFlags;
87} PATMDISASM, *PPATMDISASM;
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
107#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
108#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
109
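/*
 * Minimal sketch of the address translation performed by the two macros above,
 * using a made-up offset of 0x123 into the patch memory block.
 */
#if 0 /* example only */
    uint8_t *pInstrHC = pVM->patm.s.pPatchMemHC + 0x123;
    RTRCPTR  pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pInstrHC);    /* == pPatchMemGC + 0x123 */
    Assert(patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC) == pInstrHC);    /* the mapping round-trips */
#endif
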
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
120/** Command descriptors. */
121static const DBGCCMD g_aCmds[] =
122{
123 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
124 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
125 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
126};
127#endif
128
129/* Don't want to break saved states, so put it here as a global variable. */
130static unsigned int cIDTHandlersDisabled = 0;
131
132/**
133 * Initializes the PATM.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 */
138VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
139{
140 int rc;
141
142 /*
143 * We only need a saved state dummy loader if HM is enabled.
144 */
145 if (HMIsEnabled(pVM))
146 {
147 pVM->fPATMEnabled = false;
148 return SSMR3RegisterStub(pVM, "PATM", 0);
149 }
150
151 /*
152 * Raw-mode.
153 */
154 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
155
156 /* These values can't change as they are hardcoded in patch code (old saved states!) */
157 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
158 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
159 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
160 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
161
162 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
163 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
164
165 /* Allocate patch memory and GC patch state memory. */
166 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
167 /* Add another page in case the generated code is much larger than expected. */
168 /** @todo bad safety precaution */
169 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
170 if (RT_FAILURE(rc))
171 {
172 Log(("MMHyperAlloc failed with %Rrc\n", rc));
173 return rc;
174 }
175 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
176
177 /* PATM stack page for call instruction execution. (Two parts: one for our private stack and one to store the original return address.) */
178 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
179 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
180
181 patmR3DbgInit(pVM);
182
183 /*
184 * Hypervisor memory for GC status data (read/write)
185 *
186 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
187 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
188 *
189 */
190 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
191 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
192 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
193
194 /* Hypervisor memory for patch statistics */
195 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
196 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
197
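 /*
  * Sketch of the layout of the single hyper-heap block allocated above,
  * derived from the pointer arithmetic in this function:
  *
  *   +0                                               pPatchMemHC / pPatchMemGC
  *   +PATCH_MEMORY_SIZE                               slack page (safety margin)
  *   +PATCH_MEMORY_SIZE + PAGE_SIZE                   pGCStackHC  / pGCStackGC
  *   + ...             + PATM_STACK_TOTAL_SIZE        pGCStateHC  / pGCStateGC
  *   + ...             + PAGE_SIZE                    pStatsHC    / pStatsGC
  */
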
198 /* Memory for patch lookup trees. */
199 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
200 AssertRCReturn(rc, rc);
201 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
202
203#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
204 /* Check CFGM option. */
205 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
206 if (RT_FAILURE(rc))
207# ifdef PATM_DISABLE_ALL
208 pVM->fPATMEnabled = false;
209# else
210 pVM->fPATMEnabled = true;
211# endif
212#endif
213
214 rc = patmReinit(pVM);
215 AssertRC(rc);
216 if (RT_FAILURE(rc))
217 return rc;
218
219 /*
220 * Register save and load state notifiers.
221 */
222 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
223 NULL, NULL, NULL,
224 NULL, patmR3Save, NULL,
225 NULL, patmR3Load, NULL);
226 AssertRCReturn(rc, rc);
227
228#ifdef VBOX_WITH_DEBUGGER
229 /*
230 * Debugger commands.
231 */
232 static bool s_fRegisteredCmds = false;
233 if (!s_fRegisteredCmds)
234 {
235 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
236 if (RT_SUCCESS(rc2))
237 s_fRegisteredCmds = true;
238 }
239#endif
240
241#ifdef VBOX_WITH_STATISTICS
242 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
243 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
244 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
245 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
246 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
247 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
248 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
249 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
252 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
253
254 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
255 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
256 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
257
258 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
259 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
260 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
261 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
262 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
263
264 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
265 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
266
267 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
268 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
269
270 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
271 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
272 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
273
274 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
275 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
276 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
277
278 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
279 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
280
281 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
282 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
283 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
285
286 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
287 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
288
289 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
290 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
291
292 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
293 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
294 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
295
296 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
297 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
298 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
299 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
300
301 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
302 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
303 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
304 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
306
307 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
308#endif /* VBOX_WITH_STATISTICS */
309
310 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
311 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
312 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
313 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
314 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
315 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
316 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
317 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
318
319 return rc;
320}
321
322/**
323 * Finalizes HMA page attributes.
324 *
325 * @returns VBox status code.
326 * @param pVM Pointer to the VM.
327 */
328VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
329{
330 if (HMIsEnabled(pVM))
331 return VINF_SUCCESS;
332
333 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
334 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
335 if (RT_FAILURE(rc))
336 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
337
338 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
339 if (RT_FAILURE(rc))
340 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
341
342 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
343 if (RT_FAILURE(rc))
344 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
345
346 return rc;
347}
348
349/**
350 * (Re)initializes PATM
351 *
352 * @param pVM The VM.
353 */
354static int patmReinit(PVM pVM)
355{
356 int rc;
357
358 /*
359 * Assert alignment and sizes.
360 */
361 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
362 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
363
364 /*
365 * Setup any fixed pointers and offsets.
366 */
367 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
368
369#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
370#ifndef PATM_DISABLE_ALL
371 pVM->fPATMEnabled = true;
372#endif
373#endif
374
375 Assert(pVM->patm.s.pGCStateHC);
376 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
377 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
378
379 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
380 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
381
382 Assert(pVM->patm.s.pGCStackHC);
383 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
384 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
385 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
386 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
387
388 Assert(pVM->patm.s.pStatsHC);
389 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
390 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
391
392 Assert(pVM->patm.s.pPatchMemHC);
393 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
394 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
395 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
396
397 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
398 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
399
400 Assert(pVM->patm.s.PatchLookupTreeHC);
401 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
402
403 /*
404 * (Re)Initialize PATM structure
405 */
406 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
407 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
408 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
409 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
410 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
411 pVM->patm.s.pvFaultMonitor = 0;
412 pVM->patm.s.deltaReloc = 0;
413
414 /* Lowest and highest patched instruction */
415 pVM->patm.s.pPatchedInstrGCLowest = ~0;
416 pVM->patm.s.pPatchedInstrGCHighest = 0;
417
418 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
419 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
420 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
421
422 pVM->patm.s.pfnSysEnterPatchGC = 0;
423 pVM->patm.s.pfnSysEnterGC = 0;
424
425 pVM->patm.s.fOutOfMemory = false;
426
427 pVM->patm.s.pfnHelperCallGC = 0;
428 patmR3DbgReset(pVM);
429
430 /* Generate all global functions to be used by future patches. */
431 /* We generate a fake patch in order to use the existing code for relocation. */
432 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
433 if (RT_FAILURE(rc))
434 {
435 Log(("Out of memory!!!!\n"));
436 return VERR_NO_MEMORY;
437 }
438 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
439 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
440 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
441
442 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
443 AssertRC(rc);
444
445 /* Update free pointer in patch memory. */
446 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
447 /* Round to next 8 byte boundary. */
448 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
449
450
451 return rc;
452}
453
454
455/**
456 * Applies relocations to data and code managed by this
457 * component. This function will be called at init and
458 * whenever the VMM needs to relocate itself inside the GC.
459 *
460 * The PATM will update the addresses used by the switcher.
461 *
462 * @param pVM The VM.
463 */
464VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
465{
466 if (HMIsEnabled(pVM))
467 return;
468
469 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
470 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
471
472 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
473 if (delta)
474 {
475 PCPUMCTX pCtx;
476
477 /* Update CPUMCTX guest context pointer. */
478 pVM->patm.s.pCPUMCtxGC += delta;
479
480 pVM->patm.s.deltaReloc = delta;
481
482 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
483
484 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
485
486 /* If we are running patch code right now, then also adjust EIP. */
487 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
488 pCtx->eip += delta;
489
490 pVM->patm.s.pGCStateGC = GCPtrNew;
491 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
492
493 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
494
495 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
496
497 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
498
499 if (pVM->patm.s.pfnSysEnterPatchGC)
500 pVM->patm.s.pfnSysEnterPatchGC += delta;
501
502 /* Deal with the global patch functions. */
503 pVM->patm.s.pfnHelperCallGC += delta;
504 pVM->patm.s.pfnHelperRetGC += delta;
505 pVM->patm.s.pfnHelperIretGC += delta;
506 pVM->patm.s.pfnHelperJumpGC += delta;
507
508 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
509 }
510}
511
512
513/**
514 * Terminates the PATM.
515 *
516 * Termination means cleaning up and freeing all resources;
517 * the VM itself is at this point powered off or suspended.
518 *
519 * @returns VBox status code.
520 * @param pVM Pointer to the VM.
521 */
522VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
523{
524 if (HMIsEnabled(pVM))
525 return VINF_SUCCESS;
526
527 patmR3DbgTerm(pVM);
528
529 /* Memory was all allocated from the two MM heaps and requires no freeing. */
530 return VINF_SUCCESS;
531}
532
533
534/**
535 * PATM reset callback.
536 *
537 * @returns VBox status code.
538 * @param pVM The VM which is reset.
539 */
540VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
541{
542 Log(("PATMR3Reset\n"));
543 if (HMIsEnabled(pVM))
544 return VINF_SUCCESS;
545
546 /* Free all patches. */
547 for (;;)
548 {
549 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
550 if (pPatchRec)
551 patmR3RemovePatch(pVM, pPatchRec, true);
552 else
553 break;
554 }
555 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
556 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
557 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
558 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
559
560 int rc = patmReinit(pVM);
561 if (RT_SUCCESS(rc))
562 rc = PATMR3InitFinalize(pVM); /* paranoia */
563
564 return rc;
565}
566
567/**
568 * @callback_method_impl{FNDISREADBYTES}
569 */
570static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
571{
572 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
573
574/** @todo change this to read more! */
575 /*
576 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
577 * As we currently don't support calling patch code from patch code, we let the disassembler read the original opcode bytes instead.
578 */
579 /** @todo could change in the future! */
580 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
581 {
582 size_t cbRead = cbMaxRead;
583 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
584 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
585 if (RT_SUCCESS(rc))
586 {
587 if (cbRead >= cbMinRead)
588 {
589 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
590 return VINF_SUCCESS;
591 }
592
593 cbMinRead -= (uint8_t)cbRead;
594 cbMaxRead -= (uint8_t)cbRead;
595 offInstr += (uint8_t)cbRead;
596 uSrcAddr += cbRead;
597 }
598
599#ifdef VBOX_STRICT
600 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
601 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
602 {
603 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
604 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
605 }
606#endif
607 }
608
609 int rc = VINF_SUCCESS;
610 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
611 if ( !pDisInfo->pbInstrHC
612 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
613 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
614 {
615 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
616 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
617 offInstr += cbMinRead;
618 }
619 else
620 {
621 /*
622 * pbInstrHC is the base address; adjust according to the GC pointer.
623 *
624 * Try to read the maximum number of bytes here. Since the disassembler only
625 * ever uses these bytes for the current instruction, it doesn't matter
626 * much if we accidentally read the start of the next instruction even
627 * if it happens to be a patch jump or int3.
628 */
629 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
630 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
631
632 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
633 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
634 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
635 if (cbToRead > cbMaxRead)
636 cbToRead = cbMaxRead;
637
638 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
639 offInstr += (uint8_t)cbToRead;
640 }
641
642 pDis->cbCachedInstr = offInstr;
643 return rc;
644}
645
646
647DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
648 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
649{
650 PATMDISASM disinfo;
651 disinfo.pVM = pVM;
652 disinfo.pPatchInfo = pPatch;
653 disinfo.pbInstrHC = pbInstrHC;
654 disinfo.pInstrGC = InstrGCPtr32;
655 disinfo.fReadFlags = fReadFlags;
656 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
657 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
658 patmReadBytes, &disinfo,
659 pCpu, pcbInstr, pszOutput, cbOutput));
660}
661
662
663DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
664 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
665{
666 PATMDISASM disinfo;
667 disinfo.pVM = pVM;
668 disinfo.pPatchInfo = pPatch;
669 disinfo.pbInstrHC = pbInstrHC;
670 disinfo.pInstrGC = InstrGCPtr32;
671 disinfo.fReadFlags = fReadFlags;
672 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
673 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
674 patmReadBytes, &disinfo,
675 pCpu, pcbInstr));
676}
677
678
679DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
680 uint32_t fReadFlags,
681 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
682{
683 PATMDISASM disinfo;
684 disinfo.pVM = pVM;
685 disinfo.pPatchInfo = pPatch;
686 disinfo.pbInstrHC = pbInstrHC;
687 disinfo.pInstrGC = InstrGCPtr32;
688 disinfo.fReadFlags = fReadFlags;
689 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
690 pCpu, pcbInstr));
691}
692
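/*
 * Minimal sketch of walking guest code with the wrappers above. The starting
 * address and the instruction-count limit are hypothetical; pVM and pPatch are
 * assumed to be in scope.
 */
#if 0 /* example only */
    DISCPUSTATE CpuState;
    uint32_t    cbInstr;
    RTGCPTR32   CurInstrGC = pInstrGC;                   /* hypothetical starting point */
    for (unsigned i = 0; i < 16; i++)
    {
        if (!patmR3DisInstr(pVM, pPatch, CurInstrGC, NULL /* let patmReadBytes fetch the bytes */,
                            PATMREAD_ORGCODE, &CpuState, &cbInstr))
            break;
        CurInstrGC += cbInstr;                           /* advance past the decoded instruction */
    }
#endif
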
693#ifdef LOG_ENABLED
694# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
695 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
696# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
697 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
698
699# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
700 do { \
701 if (LogIsEnabled()) \
702 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
703 } while (0)
704
705static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
706 const char *pszComment1, const char *pszComment2)
707{
708 DISCPUSTATE DisState;
709 char szOutput[128];
710 szOutput[0] = '\0';
711 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
712 &DisState, NULL, szOutput, sizeof(szOutput));
713 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
714}
715
716#else
717# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
718# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
719# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
720#endif
721
722
723/**
724 * Callback function for RTAvloU32DoWithAll
725 *
726 * Updates all fixups in the patches
727 *
728 * @returns VBox status code.
729 * @param pNode Current node
730 * @param pParam Pointer to the VM.
731 */
732static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
733{
734 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
735 PVM pVM = (PVM)pParam;
736 RTRCINTPTR delta;
737 int rc;
738
739 /* Nothing to do if the patch is not active. */
740 if (pPatch->patch.uState == PATCH_REFUSED)
741 return 0;
742
743 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
744 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
745
746 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
747 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
748
749 /*
750 * Apply fixups
751 */
752 PRELOCREC pRec = 0;
753 AVLPVKEY key = 0;
754
755 while (true)
756 {
757 /* Get the record that's closest from above */
758 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
759 if (pRec == 0)
760 break;
761
762 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
763
764 switch (pRec->uType)
765 {
766 case FIXUP_ABSOLUTE:
767 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
768 if ( !pRec->pSource
769 || PATMIsPatchGCAddr(pVM, pRec->pSource))
770 {
771 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
772 }
773 else
774 {
775 uint8_t curInstr[15];
776 uint8_t oldInstr[15];
777 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
778
779 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
780
781 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
782 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
783
784 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
785 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
786
787 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
788
789 if ( rc == VERR_PAGE_NOT_PRESENT
790 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
791 {
792 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
793
794 Log(("PATM: Patch page not present -> check later!\n"));
795 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
796 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
797 }
798 else
799 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
800 {
801 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
802 /*
803 * Disable patch; this is not a good solution
804 */
805 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
806 pPatch->patch.uState = PATCH_DISABLED;
807 }
808 else
809 if (RT_SUCCESS(rc))
810 {
811 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
812 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
813 AssertRC(rc);
814 }
815 }
816 break;
817
818 case FIXUP_REL_JMPTOPATCH:
819 {
820 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
821
822 if ( pPatch->patch.uState == PATCH_ENABLED
823 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
824 {
825 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
826 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
827 RTRCPTR pJumpOffGC;
828 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
829 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
830
831#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
832 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
833#else
834 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
835#endif
836
837 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
838#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
839 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
840 {
841 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
842
843 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
844 oldJump[0] = pPatch->patch.aPrivInstr[0];
845 oldJump[1] = pPatch->patch.aPrivInstr[1];
846 *(RTRCUINTPTR *)&oldJump[2] = displOld;
847 }
848 else
849#endif
850 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
851 {
852 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
853 oldJump[0] = 0xE9;
854 *(RTRCUINTPTR *)&oldJump[1] = displOld;
855 }
856 else
857 {
858 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
859 continue; //this should never happen!!
860 }
861 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
862
863 /*
864 * Read old patch jump and compare it to the one we previously installed
865 */
866 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
867 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
868
869 if ( rc == VERR_PAGE_NOT_PRESENT
870 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
871 {
872 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
873
874 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
875 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
876 }
877 else
878 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
879 {
880 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
881 /*
882 * Disable patch; this is not a good solution
883 */
884 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
885 pPatch->patch.uState = PATCH_DISABLED;
886 }
887 else
888 if (RT_SUCCESS(rc))
889 {
890 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
891 AssertRC(rc);
892 }
893 else
894 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
895 }
896 else
897 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
898
899 pRec->pDest = pTarget;
900 break;
901 }
902
903 case FIXUP_REL_JMPTOGUEST:
904 {
905 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
906 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
907
908 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
909 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
910 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
911 pRec->pSource = pSource;
912 break;
913 }
914
915 default:
916 AssertMsg(0, ("Invalid fixup type!!\n"));
917 return VERR_INVALID_PARAMETER;
918 }
919 }
920
921 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
922 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
923 return 0;
924}
925
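/*
 * Worked example of the relative fixup math above, with made-up addresses.
 * A FIXUP_REL_JMPTOGUEST record stores the rel32 displacement "target - source",
 * where the source lives in patch memory and therefore moves with the patch
 * block, while the guest target stays put:
 *
 *   before: pSource = 0xa0001000 (patch mem), pDest = 0x80402000 (guest)
 *           displ   = pDest - pSource = 0xe0401000
 *   after a relocation by delta = +0x1000:
 *           pSource = 0xa0002000,  displ = pDest - pSource = 0xe0400000
 *
 * FIXUP_REL_JMPTOPATCH is the mirror case: the jump sits in guest code (fixed
 * source) and only its target in patch memory is shifted by delta.
 */
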
926/**
927 * \#PF Handler callback for virtual access handler ranges.
928 *
929 * Important to realize that a physical page in a range can have aliases, and
930 * for ALL and WRITE handlers these will also trigger.
931 *
932 * @returns VINF_SUCCESS if the handler has carried out the operation.
933 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
934 * @param pVM Pointer to the VM.
935 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
936 * @param pvPtr The HC mapping of that address.
937 * @param pvBuf What the guest is reading/writing.
938 * @param cbBuf How much it's reading/writing.
939 * @param enmAccessType The access type.
940 * @param pvUser User argument.
941 */
942DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
943 PGMACCESSTYPE enmAccessType, void *pvUser)
944{
945 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
946 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
947
948 /** @todo could be the wrong virtual address (alias) */
949 pVM->patm.s.pvFaultMonitor = GCPtr;
950 PATMR3HandleMonitoredPage(pVM);
951 return VINF_PGM_HANDLER_DO_DEFAULT;
952}
953
954#ifdef VBOX_WITH_DEBUGGER
955
956/**
957 * Callback function for RTAvloU32DoWithAll
958 *
959 * Enables the patch that's being enumerated
960 *
961 * @returns 0 (continue enumeration).
962 * @param pNode Current node
963 * @param pVM Pointer to the VM.
964 */
965static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
966{
967 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
968
969 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
970 return 0;
971}
972
973
974/**
975 * Callback function for RTAvloU32DoWithAll
976 *
977 * Disables the patch that's being enumerated
978 *
979 * @returns 0 (continue enumeration).
980 * @param pNode Current node
981 * @param pVM Pointer to the VM.
982 */
983static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
984{
985 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
986
987 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
988 return 0;
989}
990
991#endif /* VBOX_WITH_DEBUGGER */
992#ifdef UNUSED_FUNCTIONS
993
994/**
995 * Returns the host context pointer and size of the patch memory block
996 *
997 * @returns Host context pointer.
998 * @param pVM Pointer to the VM.
999 * @param pcb Size of the patch memory block
1000 * @internal
1001 */
1002VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
1003{
1004 AssertReturn(!HMIsEnabled(pVM), NULL);
1005 if (pcb)
1006 *pcb = pVM->patm.s.cbPatchMem;
1007 return pVM->patm.s.pPatchMemHC;
1008}
1009
1010
1011/**
1012 * Returns the guest context pointer and size of the patch memory block
1013 *
1014 * @returns Guest context pointer.
1015 * @param pVM Pointer to the VM.
1016 * @param pcb Size of the patch memory block
1017 */
1018VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
1019{
1020 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
1021 if (pcb)
1022 *pcb = pVM->patm.s.cbPatchMem;
1023 return pVM->patm.s.pPatchMemGC;
1024}
1025
1026#endif /* UNUSED_FUNCTIONS */
1027
1028/**
1029 * Returns the host context pointer of the GC context structure
1030 *
1031 * @returns Host context pointer to the GC state structure.
1032 * @param pVM Pointer to the VM.
1033 */
1034VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1035{
1036 AssertReturn(!HMIsEnabled(pVM), NULL);
1037 return pVM->patm.s.pGCStateHC;
1038}
1039
1040
1041#ifdef UNUSED_FUNCTION
1042/**
1043 * Checks whether the HC address is part of our patch region
1044 *
1045 * @returns true/false.
1046 * @param pVM Pointer to the VM.
1047 * @param pAddrHC Host context ring-3 address to check.
1048 */
1049VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1050{
1051 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1052 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1053}
1054#endif
1055
1056
1057/**
1058 * Allows or disallows patching of privileged instructions executed by the guest OS.
1059 *
1060 * @returns VBox status code.
1061 * @param pUVM The user mode VM handle.
1062 * @param fAllowPatching Allow/disallow patching
1063 */
1064VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1065{
1066 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1067 PVM pVM = pUVM->pVM;
1068 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1069
1070 if (!HMIsEnabled(pVM))
1071 pVM->fPATMEnabled = fAllowPatching;
1072 else
1073 Assert(!pVM->fPATMEnabled);
1074 return VINF_SUCCESS;
1075}
1076
1077
1078/**
1079 * Checks if the patch manager is enabled or not.
1080 *
1081 * @returns true if enabled, false if not (or if invalid handle).
1082 * @param pUVM The user mode VM handle.
1083 */
1084VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1085{
1086 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1087 PVM pVM = pUVM->pVM;
1088 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1089 return PATMIsEnabled(pVM);
1090}
1091
1092
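/*
 * Minimal usage sketch for the two APIs above, assuming a valid user-mode VM
 * handle 'pUVM' is already available.
 */
#if 0 /* example only */
    if (PATMR3IsEnabled(pUVM))
        LogRel(("PATM: patching is currently enabled\n"));
    int rc = PATMR3AllowPatching(pUVM, false /* fAllowPatching */);
    AssertRC(rc);
#endif
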
1093/**
1094 * Converts a GC patch block pointer to an HC patch pointer.
1095 *
1096 * @returns HC pointer or NULL if it's not a GC patch pointer
1097 * @param pVM Pointer to the VM.
1098 * @param pAddrGC GC pointer
1099 */
1100VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1101{
1102 AssertReturn(!HMIsEnabled(pVM), NULL);
1103 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1104 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1105 return NULL;
1106}
1107
1108
1109/**
1110 * Converts a guest context address to a host context pointer.
1111 *
1112 * @returns Host context pointer or NULL in case of an error.
1113 * @param pVM Pointer to the VM.
1114 * @param pCacheRec Address conversion cache record
1115 * @param pGCPtr Guest context pointer
1116 *
1117 * @note The previous page mapping lock in the cache record (if any) is
1118 * released here; the most recently taken lock stays in the cache record.
1119 */
1120R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1121{
1122 int rc;
1123 R3PTRTYPE(uint8_t *) pHCPtr;
1124 uint32_t offset;
1125
1126 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1127 {
1128 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1129 Assert(pPatch);
1130 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1131 }
1132
1133 offset = pGCPtr & PAGE_OFFSET_MASK;
1134 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1135 return pCacheRec->pPageLocStartHC + offset;
1136
1137 /* Release previous lock if any. */
1138 if (pCacheRec->Lock.pvMap)
1139 {
1140 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1141 pCacheRec->Lock.pvMap = NULL;
1142 }
1143
1144 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1145 if (rc != VINF_SUCCESS)
1146 {
1147 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1148 return NULL;
1149 }
1150 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1151 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1152 return pHCPtr;
1153}
1154
1155
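/*
 * Minimal sketch of how a caller can use the translation cache above; the
 * zero-initialization and the final lock release mirror the pattern visible in
 * this function, but the surrounding code is hypothetical (pVM, pPatch and
 * pInstrGC are assumed to be in scope).
 */
#if 0 /* example only */
    PATMP2GLOOKUPREC cacheRec;
    RT_ZERO(cacheRec);
    cacheRec.pPatch = pPatch;                            /* needed for patch-memory addresses */

    uint8_t *pbInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
    /* ... use pbInstrHC, possibly translating more addresses on the same page ... */

    if (cacheRec.Lock.pvMap)                             /* drop the last page mapping lock */
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
#endif
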
1156/**
1157 * Calculates and fills in all branch targets
1158 *
1159 * @returns VBox status code.
1160 * @param pVM Pointer to the VM.
1161 * @param pPatch Current patch block pointer
1162 *
1163 */
1164static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1165{
1166 int32_t displ;
1167
1168 PJUMPREC pRec = 0;
1169 unsigned nrJumpRecs = 0;
1170
1171 /*
1172 * Set all branch targets inside the patch block.
1173 * We remove all jump records as they are no longer needed afterwards.
1174 */
1175 while (true)
1176 {
1177 RCPTRTYPE(uint8_t *) pInstrGC;
1178 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1179
1180 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1181 if (pRec == 0)
1182 break;
1183
1184 nrJumpRecs++;
1185
1186 /* HC in patch block to GC in patch block. */
1187 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1188
1189 if (pRec->opcode == OP_CALL)
1190 {
1191 /* Special case: call function replacement patch from this patch block.
1192 */
1193 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1194 if (!pFunctionRec)
1195 {
1196 int rc;
1197
1198 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1199 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1200 else
1201 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1202
1203 if (RT_FAILURE(rc))
1204 {
1205 uint8_t *pPatchHC;
1206 RTRCPTR pPatchGC;
1207 RTRCPTR pOrgInstrGC;
1208
1209 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1210 Assert(pOrgInstrGC);
1211
1212 /* Failure for some reason -> mark exit point with int 3. */
1213 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1214
1215 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1216 Assert(pPatchGC);
1217
1218 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1219
1220 /* Set a breakpoint at the very beginning of the recompiled instruction */
1221 *pPatchHC = 0xCC;
1222
1223 continue;
1224 }
1225 }
1226 else
1227 {
1228 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1229 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1230 }
1231
1232 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1233 }
1234 else
1235 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1236
1237 if (pBranchTargetGC == 0)
1238 {
1239 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1240 return VERR_PATCHING_REFUSED;
1241 }
1242 /* Our jumps *always* have a dword displacement (to make things easier). */
1243 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1244 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1245 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1246 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1247 }
1248 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1249 Assert(pPatch->JumpTree == 0);
1250 return VINF_SUCCESS;
1251}
1252
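/*
 * Worked example of the displacement formula above for a generated 5-byte E9
 * jump (made-up addresses): with the jump at patch address 0xa0001000,
 * offDispl = 1 and a branch target of 0xa0001100, the stored dword is
 *
 *     displ = 0xa0001100 - (0xa0001000 + 1 + 4) = 0xfb
 *
 * i.e. relative to the first byte after the displacement field.
 */
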
1253/**
1254 * Add an illegal instruction record
1255 *
1256 * @param pVM Pointer to the VM.
1257 * @param pPatch Patch structure ptr
1258 * @param pInstrGC Guest context pointer to privileged instruction
1259 *
1260 */
1261static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1262{
1263 PAVLPVNODECORE pRec;
1264
1265 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1266 Assert(pRec);
1267 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1268
1269 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1270 Assert(ret); NOREF(ret);
1271 pPatch->pTempInfo->nrIllegalInstr++;
1272}
1273
1274static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1275{
1276 PAVLPVNODECORE pRec;
1277
1278 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1279 if (pRec)
1280 return true;
1281 else
1282 return false;
1283}
1284
1285/**
1286 * Add a patch to guest lookup record
1287 *
1288 * @param pVM Pointer to the VM.
1289 * @param pPatch Patch structure ptr
1290 * @param pPatchInstrHC Host context pointer to the patch instruction
1291 * @param pInstrGC Guest context pointer to privileged instruction
1292 * @param enmType Lookup type
1293 * @param fDirty Dirty flag
1294 *
1295 * @note Be extremely careful with this function. Make absolutely sure the guest
1296 * address is correct! (to avoid executing instructions twice!)
1297 */
1298void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1299{
1300 bool ret;
1301 PRECPATCHTOGUEST pPatchToGuestRec;
1302 PRECGUESTTOPATCH pGuestToPatchRec;
1303 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1304
1305 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1306 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1307
1308 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1309 {
1310 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1311 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1312 return; /* already there */
1313
1314 Assert(!pPatchToGuestRec);
1315 }
1316#ifdef VBOX_STRICT
1317 else
1318 {
1319 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1320 Assert(!pPatchToGuestRec);
1321 }
1322#endif
1323
1324 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1325 Assert(pPatchToGuestRec);
1326 pPatchToGuestRec->Core.Key = PatchOffset;
1327 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1328 pPatchToGuestRec->enmType = enmType;
1329 pPatchToGuestRec->fDirty = fDirty;
1330
1331 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1332 Assert(ret);
1333
1334 /* GC to patch address */
1335 if (enmType == PATM_LOOKUP_BOTHDIR)
1336 {
1337 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1338 if (!pGuestToPatchRec)
1339 {
1340 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1341 pGuestToPatchRec->Core.Key = pInstrGC;
1342 pGuestToPatchRec->PatchOffset = PatchOffset;
1343
1344 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1345 Assert(ret);
1346 }
1347 }
1348
1349 pPatch->nrPatch2GuestRecs++;
1350}
1351
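/*
 * Minimal sketch of querying the two lookup trees maintained above; the inputs
 * 'offPatch' and 'GuestInstrGC' are hypothetical, pPatch is assumed in scope.
 */
#if 0 /* example only */
    /* Patch offset -> original guest address. */
    PRECPATCHTOGUEST pP2G = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, offPatch);
    RTRCPTR GCPtrOrgInstr = pP2G ? pP2G->pOrgInstrGC : NIL_RTRCPTR;

    /* Guest address -> patch offset; only present for PATM_LOOKUP_BOTHDIR records. */
    PRECGUESTTOPATCH pG2P = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, GuestInstrGC);
    uint32_t offPatchMem = pG2P ? pG2P->PatchOffset : UINT32_MAX;
#endif
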
1352
1353/**
1354 * Removes a patch to guest lookup record
1355 *
1356 * @param pVM Pointer to the VM.
1357 * @param pPatch Patch structure ptr
1358 * @param pPatchInstrGC Guest context pointer to patch block
1359 */
1360void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1361{
1362 PAVLU32NODECORE pNode;
1363 PAVLU32NODECORE pNode2;
1364 PRECPATCHTOGUEST pPatchToGuestRec;
1365 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1366
1367 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1368 Assert(pPatchToGuestRec);
1369 if (pPatchToGuestRec)
1370 {
1371 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1372 {
1373 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1374
1375 Assert(pGuestToPatchRec->Core.Key);
1376 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1377 Assert(pNode2);
1378 }
1379 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1380 Assert(pNode);
1381
1382 MMR3HeapFree(pPatchToGuestRec);
1383 pPatch->nrPatch2GuestRecs--;
1384 }
1385}
1386
1387
1388/**
1389 * RTAvlPVDestroy callback.
1390 */
1391static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1392{
1393 MMR3HeapFree(pNode);
1394 return 0;
1395}
1396
1397/**
1398 * Empty the specified tree (PV tree, MMR3 heap)
1399 *
1400 * @param pVM Pointer to the VM.
1401 * @param ppTree Tree to empty
1402 */
1403static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1404{
1405 NOREF(pVM);
1406 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1407}
1408
1409
1410/**
1411 * RTAvlU32Destroy callback.
1412 */
1413static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1414{
1415 MMR3HeapFree(pNode);
1416 return 0;
1417}
1418
1419/**
1420 * Empty the specified tree (U32 tree, MMR3 heap)
1421 *
1422 * @param pVM Pointer to the VM.
1423 * @param ppTree Tree to empty
1424 */
1425static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1426{
1427 NOREF(pVM);
1428 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1429}
1430
1431
1432/**
1433 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1434 *
1435 * @returns VBox status code.
1436 * @param pVM Pointer to the VM.
1437 * @param pCpu CPU disassembly state
1438 * @param pInstrGC Guest context pointer to privileged instruction
1439 * @param pCurInstrGC Guest context pointer to the current instruction
1440 * @param pCacheRec Cache record ptr
1441 *
1442 */
1443static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1444{
1445 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1446 bool fIllegalInstr = false;
1447
1448 /*
1449 * Preliminary heuristics:
1450 * - no call instructions without a fixed displacement between cli and sti/popf
1451 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1452 * - no nested pushf/cli
1453 * - sti/popf should be the (eventual) target of all branches
1454 * - no near or far returns; no int xx, no into
1455 *
1456 * Note: Later on we can impose less strict guidelines if the need arises.
1457 */
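 /* Illustration (addresses invented): the eventual patch jump overwrites SIZEOF_NEARJUMP32 (5)
  * bytes starting at the privileged instruction, e.g. for a one byte cli:
  *
  *     C0101234  FA              cli            ; replaced
  *     C0101235  ..              ...            ; next 4 bytes overwritten as well
  *  -> C0101234  E9 xx xx xx xx  jmp PatchCode
  *
  * which is why branches into, or exit points within, those trailing bytes must be refused. */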
1458
1459 /* Bail out if the patch gets too big. */
1460 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1461 {
1462 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1463 fIllegalInstr = true;
1464 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1465 }
1466 else
1467 {
1468 /* No unconditional jumps or calls without fixed displacements. */
1469 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1470 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1471 )
1472 {
1473 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1474 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1475 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1476 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1477 )
1478 {
1479 fIllegalInstr = true;
1480 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1481 }
1482 }
1483
1484 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1485 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1486 {
1487 if ( pCurInstrGC > pPatch->pPrivInstrGC
1488 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1489 {
1490 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1491 /* We turn this one into an int 3 callable patch. */
1492 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1493 }
1494 }
1495 else
1496 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1497 if (pPatch->opcode == OP_PUSHF)
1498 {
1499 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1500 {
1501 fIllegalInstr = true;
1502 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1503 }
1504 }
1505
1506 /* no far returns */
1507 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1508 {
1509 pPatch->pTempInfo->nrRetInstr++;
1510 fIllegalInstr = true;
1511 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1512 }
1513 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1514 || pCpu->pCurInstr->uOpcode == OP_INT
1515 || pCpu->pCurInstr->uOpcode == OP_INTO)
1516 {
1517 /* No int xx or into either. */
1518 fIllegalInstr = true;
1519 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1520 }
1521 }
1522
1523 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1524
1525 /* Illegal instruction -> end of analysis phase for this code block */
1526 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1527 return VINF_SUCCESS;
1528
1529 /* Check for exit points. */
1530 switch (pCpu->pCurInstr->uOpcode)
1531 {
1532 case OP_SYSEXIT:
1533 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1534
1535 case OP_SYSENTER:
1536 case OP_ILLUD2:
1537 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1538 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1539 return VINF_SUCCESS;
1540
1541 case OP_STI:
1542 case OP_POPF:
1543 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1544 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1545 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1546 {
1547 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1548 return VERR_PATCHING_REFUSED;
1549 }
1550 if (pPatch->opcode == OP_PUSHF)
1551 {
1552 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1553 {
1554 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1555 return VINF_SUCCESS;
1556
1557 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1558 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1559 pPatch->flags |= PATMFL_CHECK_SIZE;
1560 }
1561 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1562 }
1563 /* else: fall through. */
1564 case OP_RETN: /* exit point for function replacement */
1565 return VINF_SUCCESS;
1566
1567 case OP_IRET:
1568 return VINF_SUCCESS; /* exitpoint */
1569
1570 case OP_CPUID:
1571 case OP_CALL:
1572 case OP_JMP:
1573 break;
1574
1575#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1576 case OP_STR:
1577 break;
1578#endif
1579
1580 default:
1581 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1582 {
1583 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1584 return VINF_SUCCESS; /* exit point */
1585 }
1586 break;
1587 }
1588
1589 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1590 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1591 {
1592 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1593 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1594 return VINF_SUCCESS;
1595 }
1596
1597 return VWRN_CONTINUE_ANALYSIS;
1598}
1599
1600/**
1601 * Analyses the instructions inside a function for compliance
1602 *
1603 * @returns VBox status code.
1604 * @param pVM Pointer to the VM.
1605 * @param pCpu CPU disassembly state
1606 * @param pInstrGC Guest context pointer to privileged instruction
1607 * @param pCurInstrGC Guest context pointer to the current instruction
1608 * @param pCacheRec Cache record ptr
1609 *
1610 */
1611static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1612{
1613 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1614 bool fIllegalInstr = false;
1615 NOREF(pInstrGC);
1616
1617 //Preliminary heuristics:
1618 //- no call instructions
1619 //- ret ends a block
1620
1621 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1622
1623 // bail out if the patch gets too big
1624 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1625 {
1626 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1627 fIllegalInstr = true;
1628 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1629 }
1630 else
1631 {
1632 // no unconditional jumps or calls without fixed displacements
1633 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1634 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1635 )
1636 {
1637 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1638 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1639 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1640 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1641 )
1642 {
1643 fIllegalInstr = true;
1644 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1645 }
1646 }
1647 else /* no far returns */
1648 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1649 {
1650 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1651 fIllegalInstr = true;
1652 }
1653 else /* no int xx or into either */
1654 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1655 {
1656 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1657 fIllegalInstr = true;
1658 }
1659
1660 #if 0
1661 ///@todo we can handle certain in/out and privileged instructions in the guest context
1662 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1663 {
1664 Log(("Illegal instructions for function patch!!\n"));
1665 return VERR_PATCHING_REFUSED;
1666 }
1667 #endif
1668 }
1669
1670 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1671
1672 /* Illegal instruction -> end of analysis phase for this code block */
1673 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1674 {
1675 return VINF_SUCCESS;
1676 }
1677
1678 // Check for exit points
1679 switch (pCpu->pCurInstr->uOpcode)
1680 {
1681 case OP_ILLUD2:
1682 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1683 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1684 return VINF_SUCCESS;
1685
1686 case OP_IRET:
1687 case OP_SYSEXIT: /* will fault or emulated in GC */
1688 case OP_RETN:
1689 return VINF_SUCCESS;
1690
1691#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1692 case OP_STR:
1693 break;
1694#endif
1695
1696 case OP_POPF:
1697 case OP_STI:
1698 return VWRN_CONTINUE_ANALYSIS;
1699 default:
1700 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1701 {
1702 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1703 return VINF_SUCCESS; /* exit point */
1704 }
1705 return VWRN_CONTINUE_ANALYSIS;
1706 }
1707
1708 return VWRN_CONTINUE_ANALYSIS;
1709}
1710
1711/**
1712 * Recompiles the instructions in a code block
1713 *
1714 * @returns VBox status code.
1715 * @param pVM Pointer to the VM.
1716 * @param pCpu CPU disassembly state
1717 * @param pInstrGC Guest context pointer to privileged instruction
1718 * @param pCurInstrGC Guest context pointer to the current instruction
1719 * @param pCacheRec Cache record ptr
1720 *
1721 */
1722static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1723{
1724 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1725 int rc = VINF_SUCCESS;
1726 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1727
1728 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1729
1730 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1731 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1732 {
1733 /*
1734 * Been there, done that; so insert a jump (we don't want to duplicate code)
1735 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1736 */
1737 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1738 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1739 }
1740
1741 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1742 {
1743 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1744 }
1745 else
1746 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1747
1748 if (RT_FAILURE(rc))
1749 return rc;
1750
1751 /* Note: Never do a direct return unless a failure is encountered! */
1752
1753 /* Clear recompilation of next instruction flag; we are doing that right here. */
1754 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1755 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1756
1757 /* Add lookup record for patch to guest address translation */
1758 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1759
1760 /* Update lowest and highest instruction address for this patch */
1761 if (pCurInstrGC < pPatch->pInstrGCLowest)
1762 pPatch->pInstrGCLowest = pCurInstrGC;
1763 else
1764 if (pCurInstrGC > pPatch->pInstrGCHighest)
1765 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1766
1767 /* Illegal instruction -> end of recompile phase for this code block. */
1768 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1769 {
1770 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1771 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1772 goto end;
1773 }
1774
1775 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1776 * Indirect calls are handled below.
1777 */
1778 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1779 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1780 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1781 {
1782 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1783 if (pTargetGC == 0)
1784 {
1785 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1786 return VERR_PATCHING_REFUSED;
1787 }
1788
1789 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1790 {
1791 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1792 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1793 if (RT_FAILURE(rc))
1794 goto end;
1795 }
1796 else
1797 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1798
1799 if (RT_SUCCESS(rc))
1800 rc = VWRN_CONTINUE_RECOMPILE;
1801
1802 goto end;
1803 }
1804
1805 switch (pCpu->pCurInstr->uOpcode)
1806 {
1807 case OP_CLI:
1808 {
1809 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1810 * until we've found the proper exit point(s).
1811 */
1812 if ( pCurInstrGC != pInstrGC
1813 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1814 )
1815 {
1816 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1817 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1818 }
1819 /* Set by irq inhibition; no longer valid now. */
1820 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1821
1822 rc = patmPatchGenCli(pVM, pPatch);
1823 if (RT_SUCCESS(rc))
1824 rc = VWRN_CONTINUE_RECOMPILE;
1825 break;
1826 }
1827
1828 case OP_MOV:
1829 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1830 {
1831 /* mov ss, src? */
1832 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1833 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1834 {
1835 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1836 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1837 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1838 }
1839#if 0 /* necessary for Haiku */
1840 else
1841 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1842 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1843 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1844 {
1845 /* mov GPR, ss */
1846 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1847 if (RT_SUCCESS(rc))
1848 rc = VWRN_CONTINUE_RECOMPILE;
1849 break;
1850 }
1851#endif
1852 }
1853 goto duplicate_instr;
1854
1855 case OP_POP:
1856 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1857 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1858 {
1859 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1860
1861 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1862 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1863 }
1864 goto duplicate_instr;
1865
1866 case OP_STI:
1867 {
1868 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1869
1870 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1871 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1872 {
1873 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1874 fInhibitIRQInstr = true;
1875 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1876 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1877 }
1878 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1879
1880 if (RT_SUCCESS(rc))
1881 {
1882 DISCPUSTATE cpu = *pCpu;
1883 unsigned cbInstr;
1884 int disret;
1885 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1886
1887 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1888
1889 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1890 { /* Force pNextInstrHC out of scope after using it */
1891 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1892 if (pNextInstrHC == NULL)
1893 {
1894 AssertFailed();
1895 return VERR_PATCHING_REFUSED;
1896 }
1897
1898 // Disassemble the next instruction
1899 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1900 }
1901 if (disret == false)
1902 {
1903 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1904 return VERR_PATCHING_REFUSED;
1905 }
1906 pReturnInstrGC = pNextInstrGC + cbInstr;
1907
1908 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1909 || pReturnInstrGC <= pInstrGC
1910 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1911 )
1912 {
1913 /* Not an exit point for function duplication patches */
1914 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1915 && RT_SUCCESS(rc))
1916 {
1917 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1918 rc = VWRN_CONTINUE_RECOMPILE;
1919 }
1920 else
1921 rc = VINF_SUCCESS; //exit point
1922 }
1923 else {
1924 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1925 rc = VERR_PATCHING_REFUSED; //not allowed!!
1926 }
1927 }
1928 break;
1929 }
1930
1931 case OP_POPF:
1932 {
1933 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1934
1935 /* Not an exit point for IDT handler or function replacement patches */
1936 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1937 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1938 fGenerateJmpBack = false;
1939
1940 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1941 if (RT_SUCCESS(rc))
1942 {
1943 if (fGenerateJmpBack == false)
1944 {
1945 /* Not an exit point for IDT handler or function replacement patches */
1946 rc = VWRN_CONTINUE_RECOMPILE;
1947 }
1948 else
1949 {
1950 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1951 rc = VINF_SUCCESS; /* exit point! */
1952 }
1953 }
1954 break;
1955 }
1956
1957 case OP_PUSHF:
1958 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1959 if (RT_SUCCESS(rc))
1960 rc = VWRN_CONTINUE_RECOMPILE;
1961 break;
1962
1963 case OP_PUSH:
1964 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1965 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1966 {
1967 rc = patmPatchGenPushCS(pVM, pPatch);
1968 if (RT_SUCCESS(rc))
1969 rc = VWRN_CONTINUE_RECOMPILE;
1970 break;
1971 }
1972 goto duplicate_instr;
1973
1974 case OP_IRET:
1975 Log(("IRET at %RRv\n", pCurInstrGC));
1976 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1977 if (RT_SUCCESS(rc))
1978 {
1979 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1980 rc = VINF_SUCCESS; /* exit point by definition */
1981 }
1982 break;
1983
1984 case OP_ILLUD2:
1985 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1986 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1987 if (RT_SUCCESS(rc))
1988 rc = VINF_SUCCESS; /* exit point by definition */
1989 Log(("Illegal opcode (0xf 0xb)\n"));
1990 break;
1991
1992 case OP_CPUID:
1993 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1994 if (RT_SUCCESS(rc))
1995 rc = VWRN_CONTINUE_RECOMPILE;
1996 break;
1997
1998 case OP_STR:
1999#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
2000 /* Now safe because our shadow TR entry is identical to the guest's. */
2001 goto duplicate_instr;
2002#endif
2003 case OP_SLDT:
2004 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2005 if (RT_SUCCESS(rc))
2006 rc = VWRN_CONTINUE_RECOMPILE;
2007 break;
2008
2009 case OP_SGDT:
2010 case OP_SIDT:
2011 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2012 if (RT_SUCCESS(rc))
2013 rc = VWRN_CONTINUE_RECOMPILE;
2014 break;
2015
2016 case OP_RETN:
2017 /* retn is an exit point for function patches */
2018 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2019 if (RT_SUCCESS(rc))
2020 rc = VINF_SUCCESS; /* exit point by definition */
2021 break;
2022
2023 case OP_SYSEXIT:
2024 /* Duplicate it, so it can be emulated in GC (or fault). */
2025 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2026 if (RT_SUCCESS(rc))
2027 rc = VINF_SUCCESS; /* exit point by definition */
2028 break;
2029
2030 case OP_CALL:
2031 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2032 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2033 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2034 */
2035 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2036 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2037 {
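                /* Note: 0xDEADBEEF is just a dummy target here; the final argument presumably marks
                 * this as an indirect call whose real target is only known at run time, so no fixed
                 * branch target is recorded. */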
2038 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2039 if (RT_SUCCESS(rc))
2040 {
2041 rc = VWRN_CONTINUE_RECOMPILE;
2042 }
2043 break;
2044 }
2045 goto gen_illegal_instr;
2046
2047 case OP_JMP:
2048 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2049 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2050 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2051 */
2052 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2053 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2054 {
2055 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2056 if (RT_SUCCESS(rc))
2057 rc = VINF_SUCCESS; /* end of branch */
2058 break;
2059 }
2060 goto gen_illegal_instr;
2061
2062 case OP_INT3:
2063 case OP_INT:
2064 case OP_INTO:
2065 goto gen_illegal_instr;
2066
2067 case OP_MOV_DR:
2068 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2069 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2070 {
2071 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2072 if (RT_SUCCESS(rc))
2073 rc = VWRN_CONTINUE_RECOMPILE;
2074 break;
2075 }
2076 goto duplicate_instr;
2077
2078 case OP_MOV_CR:
2079 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2080 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2081 {
2082 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2083 if (RT_SUCCESS(rc))
2084 rc = VWRN_CONTINUE_RECOMPILE;
2085 break;
2086 }
2087 goto duplicate_instr;
2088
2089 default:
2090 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2091 {
2092gen_illegal_instr:
2093 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2094 if (RT_SUCCESS(rc))
2095 rc = VINF_SUCCESS; /* exit point by definition */
2096 }
2097 else
2098 {
2099duplicate_instr:
2100 Log(("patmPatchGenDuplicate\n"));
2101 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2102 if (RT_SUCCESS(rc))
2103 rc = VWRN_CONTINUE_RECOMPILE;
2104 }
2105 break;
2106 }
2107
2108end:
2109
2110 if ( !fInhibitIRQInstr
2111 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2112 {
2113 int rc2;
2114 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2115
2116 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2117 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2118 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2119 {
2120 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2121
2122 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2123 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2124 rc = VINF_SUCCESS; /* end of the line */
2125 }
2126 else
2127 {
2128 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2129 }
2130 if (RT_FAILURE(rc2))
2131 rc = rc2;
2132 }
2133
2134 if (RT_SUCCESS(rc))
2135 {
2136 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2137 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2138 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2139 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2140 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2141 )
2142 {
2143 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2144
2145 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2146 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2147
2148 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2149 AssertRC(rc);
2150 }
2151 }
2152 return rc;
2153}
2154
2155
2156#ifdef LOG_ENABLED
2157
2158/**
2159 * Adds a disasm jump record (temporary; used to prevent duplicate analysis)
2160 *
2161 * @param pVM Pointer to the VM.
2162 * @param pPatch Patch structure ptr
2163 * @param pInstrGC Guest context pointer to privileged instruction
2164 *
2165 */
2166static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2167{
2168 PAVLPVNODECORE pRec;
2169
2170 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2171 Assert(pRec);
2172 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2173
2174 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2175 Assert(ret);
2176}
2177
2178/**
2179 * Checks if jump target has been analysed before.
2180 *
2181 * @returns VBox status code.
2182 * @param pPatch Patch struct
2183 * @param pInstrGC Jump target
2184 *
2185 */
2186static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2187{
2188 PAVLPVNODECORE pRec;
2189
2190 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2191 if (pRec)
2192 return true;
2193 return false;
2194}
2195
2196/**
2197 * For proper disassembly of the final patch block
2198 *
2199 * @returns VBox status code.
2200 * @param pVM Pointer to the VM.
2201 * @param pCpu CPU disassembly state
2202 * @param pInstrGC Guest context pointer to privileged instruction
2203 * @param pCurInstrGC Guest context pointer to the current instruction
2204 * @param pCacheRec Cache record ptr
2205 *
2206 */
2207int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2208{
2209 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2210 NOREF(pInstrGC);
2211
2212 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2213 {
2214 /* Could be an int3 inserted in a call patch. Check to be sure */
2215 DISCPUSTATE cpu;
2216 RTRCPTR pOrgJumpGC;
2217
2218 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2219
2220 { /* Force pOrgJumpHC out of scope after using it */
2221 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2222
2223 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2224 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2225 return VINF_SUCCESS;
2226 }
2227 return VWRN_CONTINUE_ANALYSIS;
2228 }
2229
2230 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2231 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2232 {
2233 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2234 return VWRN_CONTINUE_ANALYSIS;
2235 }
2236
2237 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2238 || pCpu->pCurInstr->uOpcode == OP_INT
2239 || pCpu->pCurInstr->uOpcode == OP_IRET
2240 || pCpu->pCurInstr->uOpcode == OP_RETN
2241 || pCpu->pCurInstr->uOpcode == OP_RETF
2242 )
2243 {
2244 return VINF_SUCCESS;
2245 }
2246
2247 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2248 return VINF_SUCCESS;
2249
2250 return VWRN_CONTINUE_ANALYSIS;
2251}
2252
2253
2254/**
2255 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2256 *
2257 * @returns VBox status code.
2258 * @param pVM Pointer to the VM.
2259 * @param pInstrGC Guest context pointer to the initial privileged instruction
2260 * @param pCurInstrGC Guest context pointer to the current instruction
2261 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2262 * @param pCacheRec Cache record ptr
2263 *
2264 */
2265int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2266{
2267 DISCPUSTATE cpu;
2268 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2269 int rc = VWRN_CONTINUE_ANALYSIS;
2270 uint32_t cbInstr, delta;
2271 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2272 bool disret;
2273 char szOutput[256];
2274
2275 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2276
2277 /* We need this to determine branch targets (and for disassembling). */
2278 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2279
2280 while (rc == VWRN_CONTINUE_ANALYSIS)
2281 {
2282 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2283 if (pCurInstrHC == NULL)
2284 {
2285 rc = VERR_PATCHING_REFUSED;
2286 goto end;
2287 }
2288
2289 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2290 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2291 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2292 {
2293 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2294
2295 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2296 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2297 else
2298 Log(("DIS %s", szOutput));
2299
2300 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2301 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2302 {
2303 rc = VINF_SUCCESS;
2304 goto end;
2305 }
2306 }
2307 else
2308 Log(("DIS: %s", szOutput));
2309
2310 if (disret == false)
2311 {
2312 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2313 rc = VINF_SUCCESS;
2314 goto end;
2315 }
2316
2317 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2318 if (rc != VWRN_CONTINUE_ANALYSIS) {
2319 break; //done!
2320 }
2321
2322 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2323 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2324 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2325 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2326 )
2327 {
2328 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2329 RTRCPTR pOrgTargetGC;
2330
2331 if (pTargetGC == 0)
2332 {
2333 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2334 rc = VERR_PATCHING_REFUSED;
2335 break;
2336 }
2337
2338 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2339 {
2340 //jump back to guest code
2341 rc = VINF_SUCCESS;
2342 goto end;
2343 }
2344 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2345
2346 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2347 {
2348 rc = VINF_SUCCESS;
2349 goto end;
2350 }
2351
2352 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2353 {
2354 /* New jump, let's check it. */
2355 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2356
2357 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2358 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2359 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2360
2361 if (rc != VINF_SUCCESS) {
2362 break; //done!
2363 }
2364 }
2365 if (cpu.pCurInstr->uOpcode == OP_JMP)
2366 {
2367 /* Unconditional jump; return to caller. */
2368 rc = VINF_SUCCESS;
2369 goto end;
2370 }
2371
2372 rc = VWRN_CONTINUE_ANALYSIS;
2373 }
2374 pCurInstrGC += cbInstr;
2375 }
2376end:
2377 return rc;
2378}
2379
2380/**
2381 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2382 *
2383 * @returns VBox status code.
2384 * @param pVM Pointer to the VM.
2385 * @param pInstrGC Guest context pointer to the initial privileged instruction
2386 * @param pCurInstrGC Guest context pointer to the current instruction
2387 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2388 * @param pCacheRec Cache record ptr
2389 *
2390 */
2391int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2392{
2393 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2394
2395 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2396 /* Free all disasm jump records. */
2397 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2398 return rc;
2399}
2400
2401#endif /* LOG_ENABLED */
2402
2403/**
2404 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2405 * If so, this patch is permanently disabled.
2406 *
2407 * @param pVM Pointer to the VM.
2408 * @param pInstrGC Guest context pointer to instruction
2409 * @param pConflictGC Guest context pointer to check
2410 *
2411 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2412 *
2413 */
2414VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2415{
2416 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2417 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2418 if (pTargetPatch)
2419 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2420 return VERR_PATCH_NO_CONFLICT;
2421}
2422
2423/**
2424 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2425 *
2426 * @returns VBox status code.
2427 * @param pVM Pointer to the VM.
2428 * @param pInstrGC Guest context pointer to privileged instruction
2429 * @param pCurInstrGC Guest context pointer to the current instruction
2430 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2431 * @param pCacheRec Cache record ptr
2432 *
2433 */
2434static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2435{
2436 DISCPUSTATE cpu;
2437 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2438 int rc = VWRN_CONTINUE_ANALYSIS;
2439 uint32_t cbInstr;
2440 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2441 bool disret;
2442#ifdef LOG_ENABLED
2443 char szOutput[256];
2444#endif
2445
2446 while (rc == VWRN_CONTINUE_RECOMPILE)
2447 {
2448 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2449 if (pCurInstrHC == NULL)
2450 {
2451 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2452 goto end;
2453 }
2454#ifdef LOG_ENABLED
2455 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2456 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2457 Log(("Recompile: %s", szOutput));
2458#else
2459 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2460#endif
2461 if (disret == false)
2462 {
2463 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2464
2465 /* Add lookup record for patch to guest address translation */
2466 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2467 patmPatchGenIllegalInstr(pVM, pPatch);
2468 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2469 goto end;
2470 }
2471
2472 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2473 if (rc != VWRN_CONTINUE_RECOMPILE)
2474 {
2475 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2476 if ( rc == VINF_SUCCESS
2477 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2478 {
2479 DISCPUSTATE cpunext;
2480 uint32_t opsizenext;
2481 uint8_t *pNextInstrHC;
2482 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2483
2484 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2485
2486 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2487 * Recompile the next instruction as well
2488 */
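                /* Typical guest sequences this is meant to handle appear to be "sti; hlt" (idle loop)
                 * and "sti; sysexit"; these, plus iret, are exactly the opcodes recompiled by the
                 * switch below. */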
2489 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2490 if (pNextInstrHC == NULL)
2491 {
2492 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2493 goto end;
2494 }
2495 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2496 if (disret == false)
2497 {
2498 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2499 goto end;
2500 }
2501 switch(cpunext.pCurInstr->uOpcode)
2502 {
2503 case OP_IRET: /* inhibit cleared in generated code */
2504 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2505 case OP_HLT:
2506 break; /* recompile these */
2507
2508 default:
2509 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2510 {
2511 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2512
2513 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2514 AssertRC(rc);
2515 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2516 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2517 }
2518 break;
2519 }
2520
2521 /* Note: after a cli we must continue to a proper exit point */
2522 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2523 {
2524 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2525 if (RT_SUCCESS(rc))
2526 {
2527 rc = VINF_SUCCESS;
2528 goto end;
2529 }
2530 break;
2531 }
2532 else
2533 rc = VWRN_CONTINUE_RECOMPILE;
2534 }
2535 else
2536 break; /* done! */
2537 }
2538
2539 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2540
2541
2542 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2543 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2544 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2545 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2546 )
2547 {
2548 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2549 if (addr == 0)
2550 {
2551 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2552 rc = VERR_PATCHING_REFUSED;
2553 break;
2554 }
2555
2556 Log(("Jump encountered target %RRv\n", addr));
2557
2558 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2559 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2560 {
2561 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2562 /* First we need to finish this linear code stream until the next exit point. */
2563 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2564 if (RT_FAILURE(rc))
2565 {
2566 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2567 break; //fatal error
2568 }
2569 }
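            /* At this point the fall-through path of a conditional branch has been recompiled by the
             * recursive call above; the code below handles the branch target itself, again recursively,
             * temporarily disabling any active patch installed at that target. */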
2570
2571 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2572 {
2573 /* New code; let's recompile it. */
2574 Log(("patmRecompileCodeStream continue with jump\n"));
2575
2576 /*
2577 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2578 * this patch so we can continue our analysis
2579 *
2580 * We rely on CSAM to detect and resolve conflicts
2581 */
2582 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2583 if(pTargetPatch)
2584 {
2585 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2586 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2587 }
2588
2589 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2590 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2591 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2592
2593 if(pTargetPatch)
2594 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2595
2596 if (RT_FAILURE(rc))
2597 {
2598 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2599 break; //done!
2600 }
2601 }
2602 /* Always return to caller here; we're done! */
2603 rc = VINF_SUCCESS;
2604 goto end;
2605 }
2606 else
2607 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2608 {
2609 rc = VINF_SUCCESS;
2610 goto end;
2611 }
2612 pCurInstrGC += cbInstr;
2613 }
2614end:
2615 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2616 return rc;
2617}
2618
2619
2620/**
2621 * Generate the jump from guest to patch code
2622 *
2623 * @returns VBox status code.
2624 * @param pVM Pointer to the VM.
2625 * @param pPatch Patch record
2626 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to add a fixup record for the patched jump (defaults to true)
2627 */
2628static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2629{
2630 uint8_t temp[8];
2631 uint8_t *pPB;
2632 int rc;
2633
2634 Assert(pPatch->cbPatchJump <= sizeof(temp));
2635 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2636
2637 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2638 Assert(pPB);
2639
2640#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2641 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2642 {
2643 Assert(pPatch->pPatchJumpDestGC);
2644
2645 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2646 {
2647 // jmp [PatchCode]
2648 if (fAddFixup)
2649 {
2650 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2651 {
2652 Log(("Relocation failed for the jump in the guest code!!\n"));
2653 return VERR_PATCHING_REFUSED;
2654 }
2655 }
2656
2657 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2658 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2659 }
2660 else
2661 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2662 {
2663 // jmp [PatchCode]
2664 if (fAddFixup)
2665 {
2666 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2667 {
2668 Log(("Relocation failed for the jump in the guest code!!\n"));
2669 return VERR_PATCHING_REFUSED;
2670 }
2671 }
2672
2673 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2674 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2675 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2676 }
2677 else
2678 {
2679 Assert(0);
2680 return VERR_PATCHING_REFUSED;
2681 }
2682 }
2683 else
2684#endif
2685 {
2686 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2687
2688 // jmp [PatchCode]
2689 if (fAddFixup)
2690 {
2691 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2692 {
2693 Log(("Relocation failed for the jump in the guest code!!\n"));
2694 return VERR_PATCHING_REFUSED;
2695 }
2696 }
2697 temp[0] = 0xE9; //jmp
2698 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2699 }
2700 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2701 AssertRC(rc);
2702
2703 if (rc == VINF_SUCCESS)
2704 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2705
2706 return rc;
2707}
2708
2709/**
2710 * Remove the jump from guest to patch code
2711 *
2712 * @returns VBox status code.
2713 * @param pVM Pointer to the VM.
2714 * @param pPatch Patch record
2715 */
2716static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2717{
2718#ifdef DEBUG
2719 DISCPUSTATE cpu;
2720 char szOutput[256];
2721 uint32_t cbInstr, i = 0;
2722 bool disret;
2723
2724 while (i < pPatch->cbPrivInstr)
2725 {
2726 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2727 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2728 if (disret == false)
2729 break;
2730
2731 Log(("Org patch jump: %s", szOutput));
2732 Assert(cbInstr);
2733 i += cbInstr;
2734 }
2735#endif
2736
2737 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2738 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2739#ifdef DEBUG
2740 if (rc == VINF_SUCCESS)
2741 {
2742 i = 0;
2743 while (i < pPatch->cbPrivInstr)
2744 {
2745 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2746 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2747 if (disret == false)
2748 break;
2749
2750 Log(("Org instr: %s", szOutput));
2751 Assert(cbInstr);
2752 i += cbInstr;
2753 }
2754 }
2755#endif
2756 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2757 return rc;
2758}
2759
2760/**
2761 * Generate the call from guest to patch code
2762 *
2763 * @returns VBox status code.
2764 * @param pVM Pointer to the VM.
2765 * @param pPatch Patch record
2766 * @param pTargetGC Guest context address of the patch code to call/jump to
2767 * @param pCacheRec Guest translation cache record
 * @param fAddFixup Whether to add a fixup record for the patched jump (defaults to true)
2768 */
2769static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2770{
2771 uint8_t temp[8];
2772 uint8_t *pPB;
2773 int rc;
2774
2775 Assert(pPatch->cbPatchJump <= sizeof(temp));
2776
2777 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2778 Assert(pPB);
2779
2780 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2781
2782 // jmp [PatchCode]
2783 if (fAddFixup)
2784 {
2785 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2786 {
2787 Log(("Relocation failed for the jump in the guest code!!\n"));
2788 return VERR_PATCHING_REFUSED;
2789 }
2790 }
2791
2792 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2793 temp[0] = pPatch->aPrivInstr[0];
2794 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2795
2796 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2797 AssertRC(rc);
2798
2799 return rc;
2800}
2801
2802
2803/**
2804 * Patch cli/sti pushf/popf instruction block at specified location
2805 *
2806 * @returns VBox status code.
2807 * @param pVM Pointer to the VM.
2808 * @param pInstrGC Guest context pointer to privileged instruction
2809 * @param pInstrHC Host context pointer to privileged instruction
2810 * @param uOpcode Instruction opcode
2811 * @param uOpSize Size of starting instruction
2812 * @param pPatchRec Patch record
2813 *
2814 * @note returns failure if patching is not allowed or possible
2815 *
2816 */
2817static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2818 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2819{
2820 PPATCHINFO pPatch = &pPatchRec->patch;
2821 int rc = VERR_PATCHING_REFUSED;
2822 uint32_t orgOffsetPatchMem = ~0;
2823 RTRCPTR pInstrStart;
2824 bool fInserted;
2825 NOREF(pInstrHC); NOREF(uOpSize);
2826
2827 /* Save original offset (in case of failures later on) */
2828 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2829 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2830
2831 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2832 switch (uOpcode)
2833 {
2834 case OP_MOV:
2835 break;
2836
2837 case OP_CLI:
2838 case OP_PUSHF:
2839 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2840 /* Note: special precautions are taken when disabling and enabling such patches. */
2841 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2842 break;
2843
2844 default:
2845 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2846 {
2847 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2848 return VERR_INVALID_PARAMETER;
2849 }
2850 }
2851
2852 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2853 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2854
2855 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
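 /* (Illustration, address invented: a cli at 0xC0100FFE would need the 5 byte jmp to cover
  * 0xC0100FFE..0xC0101002, straddling two guest pages; presumably the guest-code write and
  * page monitoring logic assume a single page, so such cases are refused.) */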
2856 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2857 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2858 )
2859 {
2860 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2861 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2862 rc = VERR_PATCHING_REFUSED;
2863 goto failure;
2864 }
2865
2866 pPatch->nrPatch2GuestRecs = 0;
2867 pInstrStart = pInstrGC;
2868
2869#ifdef PATM_ENABLE_CALL
2870 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2871#endif
2872
2873 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2874 pPatch->uCurPatchOffset = 0;
2875
2876 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2877 {
2878 Assert(pPatch->flags & PATMFL_INTHANDLER);
2879
2880 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2881 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2882 if (RT_FAILURE(rc))
2883 goto failure;
2884 }
2885
2886 /***************************************************************************************************************************/
2887 /* Note: We can't insert *any* code before a sysenter handler; some Linux guests have an invalid stack at this point!!!!! */
2888 /***************************************************************************************************************************/
2889#ifdef VBOX_WITH_STATISTICS
2890 if (!(pPatch->flags & PATMFL_SYSENTER))
2891 {
2892 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2893 if (RT_FAILURE(rc))
2894 goto failure;
2895 }
2896#endif
2897
2898 PATMP2GLOOKUPREC cacheRec;
2899 RT_ZERO(cacheRec);
2900 cacheRec.pPatch = pPatch;
2901
2902 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2903 /* Free leftover lock if any. */
2904 if (cacheRec.Lock.pvMap)
2905 {
2906 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2907 cacheRec.Lock.pvMap = NULL;
2908 }
2909 if (rc != VINF_SUCCESS)
2910 {
2911 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2912 goto failure;
2913 }
2914
2915 /* Calculated during analysis. */
2916 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2917 {
2918 /* Most likely cause: we encountered an illegal instruction very early on. */
2919 /** @todo could turn it into an int3 callable patch. */
2920 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2921 rc = VERR_PATCHING_REFUSED;
2922 goto failure;
2923 }
2924
2925 /* size of patch block */
2926 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2927
2928
2929 /* Update free pointer in patch memory. */
2930 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2931 /* Round to next 8 byte boundary. */
2932 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2933
2934 /*
2935 * Insert into patch to guest lookup tree
2936 */
2937 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2938 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2939 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2940 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2941 if (!fInserted)
2942 {
2943 rc = VERR_PATCHING_REFUSED;
2944 goto failure;
2945 }
2946
2947 /* Note that patmr3SetBranchTargets can install additional patches!! */
2948 rc = patmr3SetBranchTargets(pVM, pPatch);
2949 if (rc != VINF_SUCCESS)
2950 {
2951 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2952 goto failure;
2953 }
2954
2955#ifdef LOG_ENABLED
2956 Log(("Patch code ----------------------------------------------------------\n"));
2957 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2958 /* Free leftover lock if any. */
2959 if (cacheRec.Lock.pvMap)
2960 {
2961 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2962 cacheRec.Lock.pvMap = NULL;
2963 }
2964 Log(("Patch code ends -----------------------------------------------------\n"));
2965#endif
2966
2967 /* make a copy of the guest code bytes that will be overwritten */
2968 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2969
2970 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2971 AssertRC(rc);
2972
2973 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2974 {
2975 /*uint8_t bASMInt3 = 0xCC; - unused */
2976
2977 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2978 /* Replace first opcode byte with 'int 3'. */
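        /* An int 3 replacement only touches a single guest byte (0xCC), so it can be used where the
         * 5 byte patch jump cannot be installed safely (cf. the dangerous-jump case in the analysis
         * callback); the #BP trap presumably gets dispatched into the patch code instead. */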
2979 rc = patmActivateInt3Patch(pVM, pPatch);
2980 if (RT_FAILURE(rc))
2981 goto failure;
2982
2983 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2984 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2985
2986 pPatch->flags &= ~PATMFL_INSTR_HINT;
2987 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2988 }
2989 else
2990 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2991 {
2992 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2993 /* now insert a jump in the guest code */
2994 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2995 AssertRC(rc);
2996 if (RT_FAILURE(rc))
2997 goto failure;
2998
2999 }
3000
3001 patmR3DbgAddPatch(pVM, pPatchRec);
3002
3003 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3004
3005 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3006 pPatch->pTempInfo->nrIllegalInstr = 0;
3007
3008 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3009
3010 pPatch->uState = PATCH_ENABLED;
3011 return VINF_SUCCESS;
3012
3013failure:
3014 if (pPatchRec->CoreOffset.Key)
3015 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3016
3017 patmEmptyTree(pVM, &pPatch->FixupTree);
3018 pPatch->nrFixups = 0;
3019
3020 patmEmptyTree(pVM, &pPatch->JumpTree);
3021 pPatch->nrJumpRecs = 0;
3022
3023 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3024 pPatch->pTempInfo->nrIllegalInstr = 0;
3025
3026 /* Turn this cli patch into a dummy. */
3027 pPatch->uState = PATCH_REFUSED;
3028 pPatch->pPatchBlockOffset = 0;
3029
3030 // Give back the patch memory we no longer need
3031 Assert(orgOffsetPatchMem != (uint32_t)~0);
3032 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3033
3034 return rc;
3035}
3036
3037/**
3038 * Patch IDT handler
3039 *
3040 * @returns VBox status code.
3041 * @param pVM Pointer to the VM.
3042 * @param pInstrGC Guest context pointer to privileged instruction
3043 * @param uOpSize Size of starting instruction
3044 * @param pPatchRec Patch record
3045 * @param pCacheRec Cache record ptr
3046 *
3047 * @note returns failure if patching is not allowed or possible
3048 *
3049 */
3050static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3051{
3052 PPATCHINFO pPatch = &pPatchRec->patch;
3053 bool disret;
3054 DISCPUSTATE cpuPush, cpuJmp;
3055 uint32_t cbInstr;
3056 RTRCPTR pCurInstrGC = pInstrGC;
3057 uint8_t *pCurInstrHC, *pInstrHC;
3058 uint32_t orgOffsetPatchMem = ~0;
3059
3060 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3061 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3062
3063 /*
3064 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3065 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3066 * condition here and only patch the common entrypoint once.
3067 */
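 /* Illustrative guest pattern this catches (Linux style):
  *     push  <handler specific value>
  *     jmp   common_entrypoint
  * The push is duplicated into each handler patch, while the jmp is redirected to the single
  * patch generated for the common entrypoint. */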
3068 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3069 Assert(disret);
3070 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3071 {
3072 RTRCPTR pJmpInstrGC;
3073 int rc;
3074 pCurInstrGC += cbInstr;
3075
3076 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3077 if ( disret
3078 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3079 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3080 )
3081 {
3082 bool fInserted;
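 /* Look up the patch for the common entrypoint this handler jumps to; if that entrypoint
    has not been patched yet, it is installed on the fly below. */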
3083 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3084 if (pJmpPatch == 0)
3085 {
3086 /* Patch it first! */
3087 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3088 if (rc != VINF_SUCCESS)
3089 goto failure;
3090 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3091 Assert(pJmpPatch);
3092 }
3093 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3094 goto failure;
3095
3096 /* save original offset (in case of failures later on) */
3097 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3098
3099 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3100 pPatch->uCurPatchOffset = 0;
3101 pPatch->nrPatch2GuestRecs = 0;
3102
3103#ifdef VBOX_WITH_STATISTICS
3104 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3105 if (RT_FAILURE(rc))
3106 goto failure;
3107#endif
3108
3109 /* Install fake cli patch (to clear the virtual IF) */
3110 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3111 if (RT_FAILURE(rc))
3112 goto failure;
3113
3114 /* Add lookup record for patch to guest address translation (for the push) */
3115 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3116
3117 /* Duplicate push. */
3118 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3119 if (RT_FAILURE(rc))
3120 goto failure;
3121
3122 /* Generate jump to common entrypoint. */
3123 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3124 if (RT_FAILURE(rc))
3125 goto failure;
3126
3127 /* size of patch block */
3128 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3129
3130 /* Update free pointer in patch memory. */
3131 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3132 /* Round to next 8 byte boundary */
3133 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3134
3135 /* There's no jump from guest to patch code. */
3136 pPatch->cbPatchJump = 0;
3137
3138
3139#ifdef LOG_ENABLED
3140 Log(("Patch code ----------------------------------------------------------\n"));
3141 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3142 Log(("Patch code ends -----------------------------------------------------\n"));
3143#endif
3144 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3145
3146 /*
3147 * Insert into patch to guest lookup tree
3148 */
3149 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3150 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3151 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3152 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3153 patmR3DbgAddPatch(pVM, pPatchRec);
3154
3155 pPatch->uState = PATCH_ENABLED;
3156
3157 return VINF_SUCCESS;
3158 }
3159 }
3160failure:
3161 /* Give back the patch memory we no longer need */
3162 if (orgOffsetPatchMem != (uint32_t)~0)
3163 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3164
3165 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3166}
3167
3168/**
3169 * Install a trampoline to call a guest trap handler directly
3170 *
3171 * @returns VBox status code.
3172 * @param pVM Pointer to the VM.
3173 * @param pInstrGC Guest context pointer to privileged instruction
3174 * @param pPatchRec Patch record
3175 * @param pCacheRec Cache record ptr
3176 *
3177 */
3178static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3179{
3180 PPATCHINFO pPatch = &pPatchRec->patch;
3181 int rc = VERR_PATCHING_REFUSED;
3182 uint32_t orgOffsetPatchMem = ~0;
3183 bool fInserted;
3184
3185 // save original offset (in case of failures later on)
3186 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3187
3188 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3189 pPatch->uCurPatchOffset = 0;
3190 pPatch->nrPatch2GuestRecs = 0;
3191
3192#ifdef VBOX_WITH_STATISTICS
3193 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3194 if (RT_FAILURE(rc))
3195 goto failure;
3196#endif
3197
3198 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3199 if (RT_FAILURE(rc))
3200 goto failure;
3201
3202 /* size of patch block */
3203 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3204
3205 /* Update free pointer in patch memory. */
3206 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3207 /* Round to next 8 byte boundary */
3208 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3209
3210 /* There's no jump from guest to patch code. */
3211 pPatch->cbPatchJump = 0;
3212
3213#ifdef LOG_ENABLED
3214 Log(("Patch code ----------------------------------------------------------\n"));
3215 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3216 Log(("Patch code ends -----------------------------------------------------\n"));
3217#endif
3218 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3219 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3220
3221 /*
3222 * Insert into patch to guest lookup tree
3223 */
3224 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3225 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3226 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3227 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3228 patmR3DbgAddPatch(pVM, pPatchRec);
3229
3230 pPatch->uState = PATCH_ENABLED;
3231 return VINF_SUCCESS;
3232
3233failure:
3234 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3235
3236 /* Turn this cli patch into a dummy. */
3237 pPatch->uState = PATCH_REFUSED;
3238 pPatch->pPatchBlockOffset = 0;
3239
3240 /* Give back the patch memory we no longer need */
3241 Assert(orgOffsetPatchMem != (uint32_t)~0);
3242 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3243
3244 return rc;
3245}
3246
3247
3248#ifdef LOG_ENABLED
3249/**
3250 * Check if the instruction is patched as a common idt handler
3251 *
3252 * @returns true or false
3253 * @param pVM Pointer to the VM.
3254 * @param pInstrGC Guest context pointer to the instruction
3255 *
3256 */
3257static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3258{
3259 PPATMPATCHREC pRec;
3260
3261 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3262 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3263 return true;
3264 return false;
3265}
3266#endif //LOG_ENABLED
3267
3268
3269/**
3270 * Duplicates a complete function
3271 *
3272 * @returns VBox status code.
3273 * @param pVM Pointer to the VM.
3274 * @param pInstrGC Guest context pointer to privileged instruction
3275 * @param pPatchRec Patch record
3276 * @param pCacheRec Cache record ptr
3277 *
3278 */
3279static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3280{
3281 PPATCHINFO pPatch = &pPatchRec->patch;
3282 int rc = VERR_PATCHING_REFUSED;
3283 uint32_t orgOffsetPatchMem = ~0;
3284 bool fInserted;
3285
3286 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3287 /* Save original offset (in case of failures later on). */
3288 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3289
3290 /* We will not go on indefinitely with call instruction handling. */
3291 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3292 {
3293 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3294 return VERR_PATCHING_REFUSED;
3295 }
3296
3297 pVM->patm.s.ulCallDepth++;
3298
3299#ifdef PATM_ENABLE_CALL
3300 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3301#endif
3302
3303 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3304
3305 pPatch->nrPatch2GuestRecs = 0;
3306 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3307 pPatch->uCurPatchOffset = 0;
3308
3309 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3310 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3311 if (RT_FAILURE(rc))
3312 goto failure;
3313
3314#ifdef VBOX_WITH_STATISTICS
3315 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3316 if (RT_FAILURE(rc))
3317 goto failure;
3318#endif
3319
3320 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3321 if (rc != VINF_SUCCESS)
3322 {
3323 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3324 goto failure;
3325 }
3326
3327 //size of patch block
3328 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3329
3330 //update free pointer in patch memory
3331 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3332 /* Round to next 8 byte boundary. */
3333 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3334
3335 pPatch->uState = PATCH_ENABLED;
3336
3337 /*
3338 * Insert into patch to guest lookup tree
3339 */
3340 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3341 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3342 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3343 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3344 if (!fInserted)
3345 {
3346 rc = VERR_PATCHING_REFUSED;
3347 goto failure;
3348 }
3349
3350 /* Note that patmr3SetBranchTargets can install additional patches!! */
3351 rc = patmr3SetBranchTargets(pVM, pPatch);
3352 if (rc != VINF_SUCCESS)
3353 {
3354 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3355 goto failure;
3356 }
3357
3358 patmR3DbgAddPatch(pVM, pPatchRec);
3359
3360#ifdef LOG_ENABLED
3361 Log(("Patch code ----------------------------------------------------------\n"));
3362 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3363 Log(("Patch code ends -----------------------------------------------------\n"));
3364#endif
3365
3366 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3367
3368 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3369 pPatch->pTempInfo->nrIllegalInstr = 0;
3370
3371 pVM->patm.s.ulCallDepth--;
3372 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3373 return VINF_SUCCESS;
3374
3375failure:
3376 if (pPatchRec->CoreOffset.Key)
3377 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3378
3379 patmEmptyTree(pVM, &pPatch->FixupTree);
3380 pPatch->nrFixups = 0;
3381
3382 patmEmptyTree(pVM, &pPatch->JumpTree);
3383 pPatch->nrJumpRecs = 0;
3384
3385 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3386 pPatch->pTempInfo->nrIllegalInstr = 0;
3387
3388 /* Turn this cli patch into a dummy. */
3389 pPatch->uState = PATCH_REFUSED;
3390 pPatch->pPatchBlockOffset = 0;
3391
3392 // Give back the patch memory we no longer need
3393 Assert(orgOffsetPatchMem != (uint32_t)~0);
3394 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3395
3396 pVM->patm.s.ulCallDepth--;
3397 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3398 return rc;
3399}
3400
3401/**
3402 * Creates trampoline code to jump inside an existing patch
3403 *
3404 * @returns VBox status code.
3405 * @param pVM Pointer to the VM.
3406 * @param pInstrGC Guest context pointer to privileged instruction
3407 * @param pPatchRec Patch record
3408 *
3409 */
3410static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3411{
3412 PPATCHINFO pPatch = &pPatchRec->patch;
3413 RTRCPTR pPage, pPatchTargetGC = 0;
3414 uint32_t orgOffsetPatchMem = ~0;
3415 int rc = VERR_PATCHING_REFUSED;
3416 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3417 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3418 bool fInserted = false;
3419
3420 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3421 /* Save original offset (in case of failures later on). */
3422 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3423
3424 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3425 /** @todo we already checked this before */
3426 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3427
3428 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3429 if (pPatchPage)
3430 {
3431 uint32_t i;
3432
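 /* Walk all patches registered for this page and pick an enabled function duplicate that
    already contains recompiled code for pInstrGC. */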
3433 for (i=0;i<pPatchPage->cCount;i++)
3434 {
3435 if (pPatchPage->papPatch[i])
3436 {
3437 pPatchToJmp = pPatchPage->papPatch[i];
3438
3439 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3440 && pPatchToJmp->uState == PATCH_ENABLED)
3441 {
3442 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3443 if (pPatchTargetGC)
3444 {
3445 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3446 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3447 Assert(pPatchToGuestRec);
3448
3449 pPatchToGuestRec->fJumpTarget = true;
3450 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3451 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3452 break;
3453 }
3454 }
3455 }
3456 }
3457 }
3458 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3459
3460 /*
3461 * Only record the trampoline patch if this is the first patch to the target
3462 * or we recorded other patches already.
3463 * The goal is to refuse refreshing function duplicates if the guest
3464 * modifies code after a saved state was loaded because it is not possible
3465 * to save the relation between trampoline and target without changing the
3466 * saved state version.
3467 */
3468 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3469 || pPatchToJmp->pTrampolinePatchesHead)
3470 {
3471 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3472 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3473 if (!pTrampRec)
3474 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3475
3476 pTrampRec->pPatchTrampoline = pPatchRec;
3477 }
3478
3479 pPatch->nrPatch2GuestRecs = 0;
3480 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3481 pPatch->uCurPatchOffset = 0;
3482
3483 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3484 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3485 if (RT_FAILURE(rc))
3486 goto failure;
3487
3488#ifdef VBOX_WITH_STATISTICS
3489 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3490 if (RT_FAILURE(rc))
3491 goto failure;
3492#endif
3493
3494 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3495 if (RT_FAILURE(rc))
3496 goto failure;
3497
3498 /*
3499 * Insert into patch to guest lookup tree
3500 */
3501 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3502 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3503 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3504 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3505 if (!fInserted)
3506 {
3507 rc = VERR_PATCHING_REFUSED;
3508 goto failure;
3509 }
3510 patmR3DbgAddPatch(pVM, pPatchRec);
3511
3512 /* size of patch block */
3513 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3514
3515 /* Update free pointer in patch memory. */
3516 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3517 /* Round to next 8 byte boundary */
3518 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3519
3520 /* There's no jump from guest to patch code. */
3521 pPatch->cbPatchJump = 0;
3522
3523 /* Enable the patch. */
3524 pPatch->uState = PATCH_ENABLED;
3525 /* We allow this patch to be called as a function. */
3526 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3527
3528 if (pTrampRec)
3529 {
3530 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3531 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3532 }
3533 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3534 return VINF_SUCCESS;
3535
3536failure:
3537 if (pPatchRec->CoreOffset.Key)
3538 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3539
3540 patmEmptyTree(pVM, &pPatch->FixupTree);
3541 pPatch->nrFixups = 0;
3542
3543 patmEmptyTree(pVM, &pPatch->JumpTree);
3544 pPatch->nrJumpRecs = 0;
3545
3546 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3547 pPatch->pTempInfo->nrIllegalInstr = 0;
3548
3549 /* Turn this cli patch into a dummy. */
3550 pPatch->uState = PATCH_REFUSED;
3551 pPatch->pPatchBlockOffset = 0;
3552
3553 // Give back the patch memory we no longer need
3554 Assert(orgOffsetPatchMem != (uint32_t)~0);
3555 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3556
3557 if (pTrampRec)
3558 MMR3HeapFree(pTrampRec);
3559
3560 return rc;
3561}
3562
3563
3564/**
3565 * Patch branch target function for call/jump at specified location.
3566 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3567 *
3568 * @returns VBox status code.
3569 * @param pVM Pointer to the VM.
3570 * @param pCtx Pointer to the guest CPU context.
3571 *
3572 */
3573VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3574{
3575 RTRCPTR pBranchTarget, pPage;
3576 int rc;
3577 RTRCPTR pPatchTargetGC = 0;
3578 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
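 /* The patch code that triggered this request passes its arguments in guest registers:
    edx holds the branch target, edi the lookup cache entry (inside patch memory) to update,
    and eax receives the relative patch address of the duplicate, or 0 on failure. */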
3579
3580 pBranchTarget = pCtx->edx;
3581 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3582
3583 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3584 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3585
3586 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3587 if (pPatchPage)
3588 {
3589 uint32_t i;
3590
3591 for (i=0;i<pPatchPage->cCount;i++)
3592 {
3593 if (pPatchPage->papPatch[i])
3594 {
3595 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3596
3597 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3598 && pPatch->uState == PATCH_ENABLED)
3599 {
3600 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3601 if (pPatchTargetGC)
3602 {
3603 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3604 break;
3605 }
3606 }
3607 }
3608 }
3609 }
3610
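 /* If the target already lies inside an existing function duplicate, a trampoline into it
    suffices; otherwise the whole function is duplicated. */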
3611 if (pPatchTargetGC)
3612 {
3613 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3614 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3615 }
3616 else
3617 {
3618 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3619 }
3620
3621 if (rc == VINF_SUCCESS)
3622 {
3623 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3624 Assert(pPatchTargetGC);
3625 }
3626
3627 if (pPatchTargetGC)
3628 {
3629 pCtx->eax = pPatchTargetGC;
3630 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3631 }
3632 else
3633 {
3634 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3635 pCtx->eax = 0;
3636 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3637 }
3638 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3639 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3640 AssertRC(rc);
3641
3642 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3643 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3644 return VINF_SUCCESS;
3645}
3646
3647/**
3648 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3649 *
3650 * @returns VBox status code.
3651 * @param pVM Pointer to the VM.
3652 * @param pCpu Disassembly CPU structure ptr
3653 * @param pInstrGC Guest context pointer to privileged instruction
3654 * @param pCacheRec Cache record ptr
3655 *
3656 */
3657static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3658{
3659 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3660 int rc = VERR_PATCHING_REFUSED;
3661 DISCPUSTATE cpu;
3662 RTRCPTR pTargetGC;
3663 PPATMPATCHREC pPatchFunction;
3664 uint32_t cbInstr;
3665 bool disret;
3666
3667 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3668 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3669
3670 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3671 {
3672 rc = VERR_PATCHING_REFUSED;
3673 goto failure;
3674 }
3675
3676 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3677 if (pTargetGC == 0)
3678 {
3679 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3680 rc = VERR_PATCHING_REFUSED;
3681 goto failure;
3682 }
3683
3684 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3685 if (pPatchFunction == NULL)
3686 {
3687 for(;;)
3688 {
3689 /* It could be an indirect call (call -> jmp dest).
3690 * Note that it's dangerous to assume the jump will never change...
3691 */
3692 uint8_t *pTmpInstrHC;
3693
3694 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3695 Assert(pTmpInstrHC);
3696 if (pTmpInstrHC == 0)
3697 break;
3698
3699 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3700 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3701 break;
3702
3703 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3704 if (pTargetGC == 0)
3705 {
3706 break;
3707 }
3708
3709 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3710 break;
3711 }
3712 if (pPatchFunction == 0)
3713 {
3714 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3715 rc = VERR_PATCHING_REFUSED;
3716 goto failure;
3717 }
3718 }
3719
3720 // make a copy of the guest code bytes that will be overwritten
3721 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3722
3723 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3724 AssertRC(rc);
3725
3726 /* Now replace the original call in the guest code */
3727 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3728 AssertRC(rc);
3729 if (RT_FAILURE(rc))
3730 goto failure;
3731
3732 /* Lowest and highest address for write monitoring. */
3733 pPatch->pInstrGCLowest = pInstrGC;
3734 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3735 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3736
3737 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3738
3739 pPatch->uState = PATCH_ENABLED;
3740 return VINF_SUCCESS;
3741
3742failure:
3743 /* Turn this patch into a dummy. */
3744 pPatch->uState = PATCH_REFUSED;
3745
3746 return rc;
3747}
3748
3749/**
3750 * Replace the address in an MMIO instruction with the cached version.
3751 *
3752 * @returns VBox status code.
3753 * @param pVM Pointer to the VM.
3754 * @param pInstrGC Guest context pointer to privileged instruction
3755 * @param pCpu Disassembly CPU structure ptr
3756 * @param pCacheRec Cache record ptr
3757 *
3758 * @note Returns failure if patching is not allowed or not possible.
3759 *
3760 */
3761static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3762{
3763 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3764 uint8_t *pPB;
3765 int rc = VERR_PATCHING_REFUSED;
3766
3767 Assert(pVM->patm.s.mmio.pCachedData);
3768 if (!pVM->patm.s.mmio.pCachedData)
3769 goto failure;
3770
3771 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3772 goto failure;
3773
3774 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3775 if (pPB == 0)
3776 goto failure;
3777
3778 /* Add relocation record for cached data access. */
3779 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3780 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3781 {
3782 Log(("Relocation failed for cached mmio address!!\n"));
3783 return VERR_PATCHING_REFUSED;
3784 }
3785 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3786
3787 /* Save original instruction. */
3788 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3789 AssertRC(rc);
3790
3791 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3792
3793 /* Replace address with that of the cached item. */
3794 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3795 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3796 AssertRC(rc);
3797 if (RT_FAILURE(rc))
3798 {
3799 goto failure;
3800 }
3801
3802 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3803 pVM->patm.s.mmio.pCachedData = 0;
3804 pVM->patm.s.mmio.GCPhys = 0;
3805 pPatch->uState = PATCH_ENABLED;
3806 return VINF_SUCCESS;
3807
3808failure:
3809 /* Turn this patch into a dummy. */
3810 pPatch->uState = PATCH_REFUSED;
3811
3812 return rc;
3813}
3814
3815
3816/**
3817 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3818 *
3819 * @returns VBox status code.
3820 * @param pVM Pointer to the VM.
3821 * @param pInstrGC Guest context pointer to privileged instruction
3822 * @param pPatch Patch record
3823 *
3824 * @note Returns failure if patching is not allowed or not possible.
3825 *
3826 */
3827static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3828{
3829 DISCPUSTATE cpu;
3830 uint32_t cbInstr;
3831 bool disret;
3832 uint8_t *pInstrHC;
3833
3834 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3835
3836 /* Convert GC to HC address. */
3837 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3838 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3839
3840 /* Disassemble mmio instruction. */
3841 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3842 &cpu, &cbInstr);
3843 if (disret == false)
3844 {
3845 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3846 return VERR_PATCHING_REFUSED;
3847 }
3848
3849 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3850 if (cbInstr > MAX_INSTR_SIZE)
3851 return VERR_PATCHING_REFUSED;
3852 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3853 return VERR_PATCHING_REFUSED;
3854
3855 /* Add relocation record for cached data access. */
3856 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3857 {
3858 Log(("Relocation failed for cached mmio address!!\n"));
3859 return VERR_PATCHING_REFUSED;
3860 }
3861 /* Replace address with that of the cached item. */
3862 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3863
3864 /* Lowest and highest address for write monitoring. */
3865 pPatch->pInstrGCLowest = pInstrGC;
3866 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3867
3868 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3869 pVM->patm.s.mmio.pCachedData = 0;
3870 pVM->patm.s.mmio.GCPhys = 0;
3871 return VINF_SUCCESS;
3872}
3873
3874/**
3875 * Activates an int3 patch
3876 *
3877 * @returns VBox status code.
3878 * @param pVM Pointer to the VM.
3879 * @param pPatch Patch record
3880 */
3881static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3882{
3883 uint8_t bASMInt3 = 0xCC;
3884 int rc;
3885
3886 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3887 Assert(pPatch->uState != PATCH_ENABLED);
3888
3889 /* Replace first opcode byte with 'int 3'. */
3890 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3891 AssertRC(rc);
3892
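 /* Note: cbPatchJump is a bit of a misnomer here; it records the size of the replacement,
    which is the single int3 byte. */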
3893 pPatch->cbPatchJump = sizeof(bASMInt3);
3894
3895 return rc;
3896}
3897
3898/**
3899 * Deactivates an int3 patch
3900 *
3901 * @returns VBox status code.
3902 * @param pVM Pointer to the VM.
3903 * @param pPatch Patch record
3904 */
3905static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3906{
3907 uint8_t ASMInt3 = 0xCC;
3908 int rc;
3909
3910 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3911 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3912
3913 /* Restore first opcode byte. */
3914 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3915 AssertRC(rc);
3916 return rc;
3917}
3918
3919/**
3920 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3921 * in the raw-mode context.
3922 *
3923 * @returns VBox status code.
3924 * @param pVM Pointer to the VM.
3925 * @param pInstrGC Guest context pointer to privileged instruction
3926 * @param pInstrHC Host context pointer to privileged instruction
3927 * @param pCpu Disassembly CPU structure ptr
3928 * @param pPatch Patch record
3929 *
3930 * @note Returns failure if patching is not allowed or not possible.
3931 *
3932 */
3933int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3934{
3935 uint8_t bASMInt3 = 0xCC;
3936 int rc;
3937
3938 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3939 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3940
3941 /* Save the original instruction. */
3942 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3943 AssertRC(rc);
3944 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3945
3946 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3947
3948 /* Replace first opcode byte with 'int 3'. */
3949 rc = patmActivateInt3Patch(pVM, pPatch);
3950 if (RT_FAILURE(rc))
3951 goto failure;
3952
3953 /* Lowest and highest address for write monitoring. */
3954 pPatch->pInstrGCLowest = pInstrGC;
3955 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3956
3957 pPatch->uState = PATCH_ENABLED;
3958 return VINF_SUCCESS;
3959
3960failure:
3961 /* Turn this patch into a dummy. */
3962 return VERR_PATCHING_REFUSED;
3963}
3964
3965#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3966/**
3967 * Patch a jump instruction at specified location
3968 *
3969 * @returns VBox status code.
3970 * @param pVM Pointer to the VM.
3971 * @param pInstrGC Guest context pointer to privileged instruction
3972 * @param pInstrHC Host context pointer to privileged instruction
3973 * @param pCpu Disassembly CPU structure ptr
3974 * @param pPatchRec Patch record
3975 *
3976 * @note Returns failure if patching is not allowed or not possible.
3977 *
3978 */
3979int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3980{
3981 PPATCHINFO pPatch = &pPatchRec->patch;
3982 int rc = VERR_PATCHING_REFUSED;
3983
3984 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3985 pPatch->uCurPatchOffset = 0;
3986 pPatch->cbPatchBlockSize = 0;
3987 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3988
3989 /*
3990 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3991 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3992 */
3993 switch (pCpu->pCurInstr->uOpcode)
3994 {
3995 case OP_JO:
3996 case OP_JNO:
3997 case OP_JC:
3998 case OP_JNC:
3999 case OP_JE:
4000 case OP_JNE:
4001 case OP_JBE:
4002 case OP_JNBE:
4003 case OP_JS:
4004 case OP_JNS:
4005 case OP_JP:
4006 case OP_JNP:
4007 case OP_JL:
4008 case OP_JNL:
4009 case OP_JLE:
4010 case OP_JNLE:
4011 case OP_JMP:
4012 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4013 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4014 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4015 goto failure;
4016
4017 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4018 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4019 goto failure;
4020
4021 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4022 {
4023 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4024 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4025 rc = VERR_PATCHING_REFUSED;
4026 goto failure;
4027 }
4028
4029 break;
4030
4031 default:
4032 goto failure;
4033 }
4034
4035 // make a copy of the guest code bytes that will be overwritten
4036 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4037 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4038 pPatch->cbPatchJump = pCpu->cbInstr;
4039
4040 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4041 AssertRC(rc);
4042
4043 /* Now insert a jump in the guest code. */
4044 /*
4045 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4046 * references the target instruction in the conflict patch.
4047 */
4048 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4049
4050 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4051 pPatch->pPatchJumpDestGC = pJmpDest;
4052
4053 PATMP2GLOOKUPREC cacheRec;
4054 RT_ZERO(cacheRec);
4055 cacheRec.pPatch = pPatch;
4056
4057 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4058 /* Free leftover lock if any. */
4059 if (cacheRec.Lock.pvMap)
4060 {
4061 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4062 cacheRec.Lock.pvMap = NULL;
4063 }
4064 AssertRC(rc);
4065 if (RT_FAILURE(rc))
4066 goto failure;
4067
4068 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4069
4070 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4071 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4072
4073 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4074
4075 /* Lowest and highest address for write monitoring. */
4076 pPatch->pInstrGCLowest = pInstrGC;
4077 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4078
4079 pPatch->uState = PATCH_ENABLED;
4080 return VINF_SUCCESS;
4081
4082failure:
4083 /* Turn this cli patch into a dummy. */
4084 pPatch->uState = PATCH_REFUSED;
4085
4086 return rc;
4087}
4088#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4089
4090
4091/**
4092 * Gives hint to PATM about supervisor guest instructions
4093 *
4094 * @returns VBox status code.
4095 * @param pVM Pointer to the VM.
4096 * @param pInstrGC Guest context pointer to privileged instruction
4097 * @param flags Patch flags
4098 */
4099VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4100{
4101 Assert(pInstrGC);
4102 Assert(flags == PATMFL_CODE32);
4103
4104 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4105 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4106}
4107
4108/**
4109 * Patch privileged instruction at specified location
4110 *
4111 * @returns VBox status code.
4112 * @param pVM Pointer to the VM.
4113 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4114 * @param flags Patch flags
4115 *
4116 * @note Returns failure if patching is not allowed or not possible.
4117 */
4118VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4119{
4120 DISCPUSTATE cpu;
4121 R3PTRTYPE(uint8_t *) pInstrHC;
4122 uint32_t cbInstr;
4123 PPATMPATCHREC pPatchRec;
4124 PCPUMCTX pCtx = 0;
4125 bool disret;
4126 int rc;
4127 PVMCPU pVCpu = VMMGetCpu0(pVM);
4128 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4129
4130 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4131
4132 if ( !pVM
4133 || pInstrGC == 0
4134 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4135 {
4136 AssertFailed();
4137 return VERR_INVALID_PARAMETER;
4138 }
4139
4140 if (PATMIsEnabled(pVM) == false)
4141 return VERR_PATCHING_REFUSED;
4142
4143 /* Test for patch conflict only with patches that actually change guest code. */
4144 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4145 {
4146 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4147 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4148 if (pConflictPatch != 0)
4149 return VERR_PATCHING_REFUSED;
4150 }
4151
4152 if (!(flags & PATMFL_CODE32))
4153 {
4154 /** @todo Only 32 bits code right now */
4155 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4156 return VERR_NOT_IMPLEMENTED;
4157 }
4158
4159 /* We ran out of patch memory; don't bother anymore. */
4160 if (pVM->patm.s.fOutOfMemory == true)
4161 return VERR_PATCHING_REFUSED;
4162
4163#if 1 /* DONT COMMIT ENABLED! */
4164 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4165 if ( 0
4166 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4167 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4168 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4169 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4170 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4171 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4172 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4173 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4174 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4175 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4176 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4177 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4178 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4179 || pInstrGC == 0x80014447 /* KfLowerIrql */
4180 || 0)
4181 {
4182 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4183 return VERR_PATCHING_REFUSED;
4184 }
4185#endif
4186
4187 /* Make sure the code selector is wide open; otherwise refuse. */
4188 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4189 if (CPUMGetGuestCPL(pVCpu) == 0)
4190 {
4191 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4192 if (pInstrGCFlat != pInstrGC)
4193 {
4194 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4195 return VERR_PATCHING_REFUSED;
4196 }
4197 }
4198
4199 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4200 if (!(flags & PATMFL_GUEST_SPECIFIC))
4201 {
4202 /* New code. Make sure CSAM has a go at it first. */
4203 CSAMR3CheckCode(pVM, pInstrGC);
4204 }
4205
4206 /* Note: obsolete */
4207 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4208 && (flags & PATMFL_MMIO_ACCESS))
4209 {
4210 RTRCUINTPTR offset;
4211 void *pvPatchCoreOffset;
4212
4213 /* Find the patch record. */
4214 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4215 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4216 if (pvPatchCoreOffset == NULL)
4217 {
4218 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4219 return VERR_PATCH_NOT_FOUND; //fatal error
4220 }
4221 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4222
4223 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4224 }
4225
4226 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4227
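 /* Check whether a patch record already exists for this instruction; re-enabling and
    overwritten patches are handled below. */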
4228 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4229 if (pPatchRec)
4230 {
4231 Assert(!(flags & PATMFL_TRAMPOLINE));
4232
4233 /* Hints about existing patches are ignored. */
4234 if (flags & PATMFL_INSTR_HINT)
4235 return VERR_PATCHING_REFUSED;
4236
4237 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4238 {
4239 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4240 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4241 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4242 }
4243
4244 if (pPatchRec->patch.uState == PATCH_DISABLED)
4245 {
4246 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4247 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4248 {
4249 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4250 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4251 }
4252 else
4253 Log(("Enabling patch %RRv again\n", pInstrGC));
4254
4255 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4256 rc = PATMR3EnablePatch(pVM, pInstrGC);
4257 if (RT_SUCCESS(rc))
4258 return VWRN_PATCH_ENABLED;
4259
4260 return rc;
4261 }
4262 if ( pPatchRec->patch.uState == PATCH_ENABLED
4263 || pPatchRec->patch.uState == PATCH_DIRTY)
4264 {
4265 /*
4266 * The patch might have been overwritten.
4267 */
4268 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4269 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4270 {
4271 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4272 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4273 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4274 {
4275 if (flags & PATMFL_IDTHANDLER)
4276 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4277
4278 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4279 }
4280 }
4281 rc = PATMR3RemovePatch(pVM, pInstrGC);
4282 if (RT_FAILURE(rc))
4283 return VERR_PATCHING_REFUSED;
4284 }
4285 else
4286 {
4287 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4288 /* already tried it once! */
4289 return VERR_PATCHING_REFUSED;
4290 }
4291 }
4292
4293 RTGCPHYS GCPhys;
4294 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4295 if (rc != VINF_SUCCESS)
4296 {
4297 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4298 return rc;
4299 }
4300 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4301 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4302 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4303 {
4304 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4305 return VERR_PATCHING_REFUSED;
4306 }
4307
4308 /* Initialize cache record for guest address translations. */
4309 bool fInserted;
4310 PATMP2GLOOKUPREC cacheRec;
4311 RT_ZERO(cacheRec);
4312
4313 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4314 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4315
4316 /* Allocate patch record. */
4317 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4318 if (RT_FAILURE(rc))
4319 {
4320 Log(("Out of memory!!!!\n"));
4321 return VERR_NO_MEMORY;
4322 }
4323 pPatchRec->Core.Key = pInstrGC;
4324 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4325 /* Insert patch record into the lookup tree. */
4326 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4327 Assert(fInserted);
4328
4329 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4330 pPatchRec->patch.flags = flags;
4331 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4332 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4333
4334 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4335 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4336
4337 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4338 {
4339 /*
4340 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4341 */
4342 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4343 if (pPatchNear)
4344 {
4345 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4346 {
4347 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4348
4349 pPatchRec->patch.uState = PATCH_UNUSABLE;
4350 /*
4351 * Leave the new patch record in the tree (marked unusable) to prevent us from checking it over and over again
4352 */
4353 return VERR_PATCHING_REFUSED;
4354 }
4355 }
4356 }
4357
4358 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4359 if (pPatchRec->patch.pTempInfo == 0)
4360 {
4361 Log(("Out of memory!!!!\n"));
4362 return VERR_NO_MEMORY;
4363 }
4364
4365 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4366 if (disret == false)
4367 {
4368 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4369 return VERR_PATCHING_REFUSED;
4370 }
4371
4372 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4373 if (cbInstr > MAX_INSTR_SIZE)
4374 return VERR_PATCHING_REFUSED;
4375
4376 pPatchRec->patch.cbPrivInstr = cbInstr;
4377 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4378
4379 /* Restricted hinting for now. */
4380 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4381
4382 /* Initialize cache record patch pointer. */
4383 cacheRec.pPatch = &pPatchRec->patch;
4384
4385 /* Allocate statistics slot */
4386 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4387 {
4388 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4389 }
4390 else
4391 {
4392 Log(("WARNING: Patch index wrap around!!\n"));
4393 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4394 }
4395
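 /* Dispatch to the specific patch installer based on the patch type flags. */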
4396 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4397 {
4398 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4399 }
4400 else
4401 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4402 {
4403 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4404 }
4405 else
4406 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4407 {
4408 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4409 }
4410 else
4411 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4412 {
4413 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4414 }
4415 else
4416 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4417 {
4418 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4419 }
4420 else
4421 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4422 {
4423 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4424 }
4425 else
4426 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4427 {
4428 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4429 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4430
4431 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4432#ifdef VBOX_WITH_STATISTICS
4433 if ( rc == VINF_SUCCESS
4434 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4435 {
4436 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4437 }
4438#endif
4439 }
4440 else
4441 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4442 {
4443 switch (cpu.pCurInstr->uOpcode)
4444 {
4445 case OP_SYSENTER:
4446 case OP_PUSH:
4447 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4448 if (rc == VINF_SUCCESS)
4449 {
4450 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4452 return rc;
4453 }
4454 break;
4455
4456 default:
4457 rc = VERR_NOT_IMPLEMENTED;
4458 break;
4459 }
4460 }
4461 else
4462 {
4463 switch (cpu.pCurInstr->uOpcode)
4464 {
4465 case OP_SYSENTER:
4466 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4467 if (rc == VINF_SUCCESS)
4468 {
4469 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4470 return VINF_SUCCESS;
4471 }
4472 break;
4473
4474#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4475 case OP_JO:
4476 case OP_JNO:
4477 case OP_JC:
4478 case OP_JNC:
4479 case OP_JE:
4480 case OP_JNE:
4481 case OP_JBE:
4482 case OP_JNBE:
4483 case OP_JS:
4484 case OP_JNS:
4485 case OP_JP:
4486 case OP_JNP:
4487 case OP_JL:
4488 case OP_JNL:
4489 case OP_JLE:
4490 case OP_JNLE:
4491 case OP_JECXZ:
4492 case OP_LOOP:
4493 case OP_LOOPNE:
4494 case OP_LOOPE:
4495 case OP_JMP:
4496 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4497 {
4498 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4499 break;
4500 }
4501 return VERR_NOT_IMPLEMENTED;
4502#endif
4503
4504 case OP_PUSHF:
4505 case OP_CLI:
4506 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4507 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4508 break;
4509
4510#ifndef VBOX_WITH_SAFE_STR
4511 case OP_STR:
4512#endif
4513 case OP_SGDT:
4514 case OP_SLDT:
4515 case OP_SIDT:
4516 case OP_CPUID:
4517 case OP_LSL:
4518 case OP_LAR:
4519 case OP_SMSW:
4520 case OP_VERW:
4521 case OP_VERR:
4522 case OP_IRET:
4523#ifdef VBOX_WITH_RAW_RING1
4524 case OP_MOV:
4525#endif
4526 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4527 break;
4528
4529 default:
4530 return VERR_NOT_IMPLEMENTED;
4531 }
4532 }
4533
4534 if (rc != VINF_SUCCESS)
4535 {
4536 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4537 {
4538 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4539 pPatchRec->patch.nrPatch2GuestRecs = 0;
4540 }
4541 pVM->patm.s.uCurrentPatchIdx--;
4542 }
4543 else
4544 {
4545 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4546 AssertRCReturn(rc, rc);
4547
4548 /* Keep track of the upper and lower boundaries of patched instructions. */
4549 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4550 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4551 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4552 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4553
4554 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4555 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4556
4557 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4558 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4559
4560 rc = VINF_SUCCESS;
4561
4562 /* Patch hints are not enabled by default; only when they are actually encountered. */
4563 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4564 {
4565 rc = PATMR3DisablePatch(pVM, pInstrGC);
4566 AssertRCReturn(rc, rc);
4567 }
4568
4569#ifdef VBOX_WITH_STATISTICS
4570 /* Register statistics counter */
4571 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4572 {
4573 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4574 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4575#ifndef DEBUG_sandervl
4576 /* Full breakdown for the GUI. */
4577 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4578 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4579 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4580 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4581 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4582 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4583 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4584 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4585 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4586 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4587 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4588 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4589 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4590 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4591 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4592 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4593#endif
4594 }
4595#endif
4596
4597 /* Add debug symbol. */
4598 patmR3DbgAddPatch(pVM, pPatchRec);
4599 }
4600 /* Free leftover lock if any. */
4601 if (cacheRec.Lock.pvMap)
4602 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4603 return rc;
4604}
4605
4606/**
4607 * Query instruction size
4608 *
4609 * @returns VBox status code.
4610 * @param pVM Pointer to the VM.
4611 * @param pPatch Patch record
4612 * @param pInstrGC Instruction address
4613 */
4614static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4615{
4616 uint8_t *pInstrHC;
4617 PGMPAGEMAPLOCK Lock;
4618
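 /* Map the guest page read-only and disassemble the original (unpatched) bytes to
    determine the instruction size. */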
4619 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4620 if (rc == VINF_SUCCESS)
4621 {
4622 DISCPUSTATE cpu;
4623 bool disret;
4624 uint32_t cbInstr;
4625
4626 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4627 PGMPhysReleasePageMappingLock(pVM, &Lock);
4628 if (disret)
4629 return cbInstr;
4630 }
4631 return 0;
4632}
4633
4634/**
4635 * Add patch to page record
4636 *
4637 * @returns VBox status code.
4638 * @param pVM Pointer to the VM.
4639 * @param pPage Page address
4640 * @param pPatch Patch record
4641 */
4642int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4643{
4644 PPATMPATCHPAGE pPatchPage;
4645 int rc;
4646
4647 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4648
4649 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4650 if (pPatchPage)
4651 {
4652 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4653 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4654 {
4655 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4656 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4657
4658 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4659 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4660 (void **)&pPatchPage->papPatch);
4661 if (RT_FAILURE(rc))
4662 {
4663 Log(("Out of memory!!!!\n"));
4664 return VERR_NO_MEMORY;
4665 }
4666 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4667 MMHyperFree(pVM, papPatchOld);
4668 }
4669 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4670 pPatchPage->cCount++;
4671 }
4672 else
4673 {
4674 bool fInserted;
4675
4676 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4677 if (RT_FAILURE(rc))
4678 {
4679 Log(("Out of memory!!!!\n"));
4680 return VERR_NO_MEMORY;
4681 }
4682 pPatchPage->Core.Key = pPage;
4683 pPatchPage->cCount = 1;
4684 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4685
4686 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4687 (void **)&pPatchPage->papPatch);
4688 if (RT_FAILURE(rc))
4689 {
4690 Log(("Out of memory!!!!\n"));
4691 MMHyperFree(pVM, pPatchPage);
4692 return VERR_NO_MEMORY;
4693 }
4694 pPatchPage->papPatch[0] = pPatch;
4695
4696 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4697 Assert(fInserted);
4698 pVM->patm.s.cPageRecords++;
4699
4700 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4701 }
4702 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4703
4704 /* Find the lowest patched guest instruction at or above the start of this page. */
4705 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4706 Assert(pGuestToPatchRec);
4707 if (pGuestToPatchRec)
4708 {
4709 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4710 if ( pPatchPage->pLowestAddrGC == 0
4711 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4712 {
4713 RTRCUINTPTR offset;
4714
4715 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4716
4717 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4718 /* If we're too close to the page boundary, then make sure an
4719 instruction from the previous page doesn't cross the
4720 boundary itself. */
4721 if (offset && offset < MAX_INSTR_SIZE)
4722 {
4723 /* Find the closest patched guest instruction below the start of this page. */
4724 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4725
4726 if (pGuestToPatchRec)
4727 {
4728 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4729 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4730 {
4731 pPatchPage->pLowestAddrGC = pPage;
4732 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4733 }
4734 }
4735 }
4736 }
4737 }
4738
4739 /* Find the highest patched guest instruction at or below the end of this page. */
4740 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4741 Assert(pGuestToPatchRec);
4742 if (pGuestToPatchRec)
4743 {
4744 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4745 if ( pPatchPage->pHighestAddrGC == 0
4746 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4747 {
4748 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4749 /* Increase by instruction size. */
4750 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4751//// Assert(size);
4752 pPatchPage->pHighestAddrGC += size;
4753 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4754 }
4755 }
4756
4757 return VINF_SUCCESS;
4758}
4759
4760/**
4761 * Remove patch from page record
4762 *
4763 * @returns VBox status code.
4764 * @param pVM Pointer to the VM.
4765 * @param pPage Page address
4766 * @param pPatch Patch record
4767 */
4768int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4769{
4770 PPATMPATCHPAGE pPatchPage;
4771 int rc;
4772
4773 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4774 Assert(pPatchPage);
4775
4776 if (!pPatchPage)
4777 return VERR_INVALID_PARAMETER;
4778
4779 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4780
4781 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4782 if (pPatchPage->cCount > 1)
4783 {
4784 uint32_t i;
4785
4786 /* Used by multiple patches */
4787 for (i = 0; i < pPatchPage->cCount; i++)
4788 {
4789 if (pPatchPage->papPatch[i] == pPatch)
4790 {
4791 /* close the gap between the remaining pointers. */
4792 uint32_t cNew = --pPatchPage->cCount;
4793 if (i < cNew)
4794 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4795 pPatchPage->papPatch[cNew] = NULL;
4796 return VINF_SUCCESS;
4797 }
4798 }
4799 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4800 }
4801 else
4802 {
4803 PPATMPATCHPAGE pPatchNode;
4804
4805 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4806
4807 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4808 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4809 Assert(pPatchNode && pPatchNode == pPatchPage);
4810
4811 Assert(pPatchPage->papPatch);
4812 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4813 AssertRC(rc);
4814 rc = MMHyperFree(pVM, pPatchPage);
4815 AssertRC(rc);
4816 pVM->patm.s.cPageRecords--;
4817 }
4818 return VINF_SUCCESS;
4819}
4820
4821/**
4822 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4823 *
4824 * @returns VBox status code.
4825 * @param pVM Pointer to the VM.
4826 * @param pPatch Patch record
4827 */
4828int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4829{
4830 int rc;
4831 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4832
4833 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4834 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4835 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4836
4837 /** @todo optimize better (large gaps between current and next used page) */
4838 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4839 {
4840 /* Find the first patched guest instruction at or above the start of this page. */
4841 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4842 if ( pGuestToPatchRec
4843 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4844 )
4845 {
4846 /* Code in page really patched -> add record */
4847 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4848 AssertRC(rc);
4849 }
4850 }
4851 pPatch->flags |= PATMFL_CODE_MONITORED;
4852 return VINF_SUCCESS;
4853}
4854
4855/**
4856 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4857 *
4858 * @returns VBox status code.
4859 * @param pVM Pointer to the VM.
4860 * @param pPatch Patch record
4861 */
4862static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4863{
4864 int rc;
4865 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4866
4867 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4868 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4869 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4870
4871 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4872 {
4873 /* Find the first patched guest instruction at or above the start of this page. */
4874 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4875 if ( pGuestToPatchRec
4876 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4877 )
4878 {
4879 /* Code in page really patched -> remove record */
4880 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4881 AssertRC(rc);
4882 }
4883 }
4884 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4885 return VINF_SUCCESS;
4886}
4887
4888/**
4889 * Notifies PATM about a (potential) write to code that has been patched.
4890 *
4891 * @returns VBox status code.
4892 * @param pVM Pointer to the VM.
4893 * @param GCPtr GC pointer to the write address
4894 * @param cbWrite Number of bytes to write
4895 *
4896 */
4897VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4898{
4899 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4900
4901 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4902
4903 Assert(VM_IS_EMT(pVM));
4904 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4905
4906 /* Quick boundary check */
4907 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4908 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4909 )
4910 return VINF_SUCCESS;
4911
4912 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4913
4914 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4915 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4916
4917 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4918 {
4919loop_start:
4920 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4921 if (pPatchPage)
4922 {
4923 uint32_t i;
4924 bool fValidPatchWrite = false;
4925
4926 /* Quick check to see if the write is in the patched part of the page */
4927 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4928 || pPatchPage->pHighestAddrGC < GCPtr)
4929 {
4930 break;
4931 }
4932
4933 for (i=0;i<pPatchPage->cCount;i++)
4934 {
4935 if (pPatchPage->papPatch[i])
4936 {
4937 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4938 RTRCPTR pPatchInstrGC;
4939 //unused: bool fForceBreak = false;
4940
4941 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4942 /** @todo inefficient and includes redundant checks for multiple pages. */
4943 for (uint32_t j=0; j<cbWrite; j++)
4944 {
4945 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4946
4947 if ( pPatch->cbPatchJump
4948 && pGuestPtrGC >= pPatch->pPrivInstrGC
4949 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4950 {
4951 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4952 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4953 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4954 if (rc == VINF_SUCCESS)
4955 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4956 goto loop_start;
4957
4958 continue;
4959 }
4960
4961 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4962 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4963 if (!pPatchInstrGC)
4964 {
4965 RTRCPTR pClosestInstrGC;
4966 uint32_t size;
4967
4968 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4969 if (pPatchInstrGC)
4970 {
4971 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4972 Assert(pClosestInstrGC <= pGuestPtrGC);
4973 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4974 /* Check if this is not a write into a gap between two patches */
4975 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4976 pPatchInstrGC = 0;
4977 }
4978 }
4979 if (pPatchInstrGC)
4980 {
4981 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4982
4983 fValidPatchWrite = true;
4984
4985 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4986 Assert(pPatchToGuestRec);
4987 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4988 {
4989 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4990
4991 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4992 {
4993 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4994
4995 patmR3MarkDirtyPatch(pVM, pPatch);
4996
4997 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4998 goto loop_start;
4999 }
5000 else
5001 {
5002 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5003 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5004
5005 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5006 pPatchToGuestRec->fDirty = true;
5007
5008 *pInstrHC = 0xCC;
5009
5010 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5011 }
5012 }
5013 /* else already marked dirty */
5014 }
5015 }
5016 }
5017 } /* for each patch */
5018
5019 if (fValidPatchWrite == false)
5020 {
5021 /* Write to a part of the page that either:
5022 * - doesn't contain any code (shared code/data); rather unlikely, or
5023 * - belongs to an old code page that's no longer in active use.
5024 */
5025invalid_write_loop_start:
5026 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5027
5028 if (pPatchPage)
5029 {
5030 for (i=0;i<pPatchPage->cCount;i++)
5031 {
5032 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5033
5034 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5035 {
5036 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5037 if (pPatch->flags & PATMFL_IDTHANDLER)
5038 {
5039 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5040
5041 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5042 int rc = patmRemovePatchPages(pVM, pPatch);
5043 AssertRC(rc);
5044 }
5045 else
5046 {
5047 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5048 patmR3MarkDirtyPatch(pVM, pPatch);
5049 }
5050 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5051 goto invalid_write_loop_start;
5052 }
5053 } /* for */
5054 }
5055 }
5056 }
5057 }
5058 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5059 return VINF_SUCCESS;
5060
5061}
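
/*
 * Illustrative usage sketch (comment only, not part of the build): the code
 * that intercepts a guest write to monitored pages is expected to forward the
 * exact address and size of the write, e.g.
 *
 *     // GCPtrWrite and cbWrite are hypothetical names for the intercepted write.
 *     int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *     AssertRC(rc);   // VINF_SUCCESS even when no patch was affected
 *
 * The quick boundary check above keeps this cheap for writes outside all
 * patched code.
 */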
5062
5063/**
5064 * Disable all patches in a flushed page
5065 *
5066 * @returns VBox status code
5067 * @param pVM Pointer to the VM.
5068 * @param addr GC address of the page to flush
5069 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5070 * having to double check if the physical address has changed
5071 */
5072VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5073{
5074 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5075
5076 addr &= PAGE_BASE_GC_MASK;
5077
5078 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5079 if (pPatchPage)
5080 {
5081 int i;
5082
5083 /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5084 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5085 {
5086 if (pPatchPage->papPatch[i])
5087 {
5088 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5089
5090 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5091 patmR3MarkDirtyPatch(pVM, pPatch);
5092 }
5093 }
5094 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5095 }
5096 return VINF_SUCCESS;
5097}
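
/*
 * Illustrative usage sketch (comment only, not part of the build): when a
 * monitored page is flushed (currently from CSAMR3FlushPage), the patches in
 * it are merely marked dirty, not removed:
 *
 *     int rc = PATMR3FlushPage(pVM, GCPtrPage);  // GCPtrPage is hypothetical; rounded down to a page boundary internally
 *     AssertRC(rc);                              // always VINF_SUCCESS
 */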
5098
5099/**
5100 * Checks whether the instruction at the specified address has already been patched.
5101 *
5102 * @returns boolean, patched or not
5103 * @param pVM Pointer to the VM.
5104 * @param pInstrGC Guest context pointer to instruction
5105 */
5106VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5107{
5108 Assert(!HMIsEnabled(pVM));
5109 PPATMPATCHREC pPatchRec;
5110 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5111 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5112 return true;
5113 return false;
5114}
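
/*
 * Illustrative usage sketch (comment only, not part of the build): only the
 * guest context address of the instruction is needed, and only patches in the
 * PATCH_ENABLED state count:
 *
 *     if (PATMR3HasBeenPatched(pVM, GCPtrInstr))  // GCPtrInstr is hypothetical
 *         Log(("Instruction at %RRv is covered by an enabled patch\n", GCPtrInstr));
 */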
5115
5116/**
5117 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5118 *
5119 * @returns VBox status code.
5120 * @param pVM Pointer to the VM.
5121 * @param pInstrGC GC address of instr
5122 * @param pByte opcode byte pointer (OUT)
5123 *
5124 */
5125VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5126{
5127 PPATMPATCHREC pPatchRec;
5128
5129 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5130
5131 /* Shortcut. */
5132 if (!PATMIsEnabled(pVM))
5133 return VERR_PATCH_NOT_FOUND;
5134 Assert(!HMIsEnabled(pVM));
5135 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5136 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5137 return VERR_PATCH_NOT_FOUND;
5138
5139 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5140 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5141 if ( pPatchRec
5142 && pPatchRec->patch.uState == PATCH_ENABLED
5143 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5144 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5145 {
5146 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5147 *pByte = pPatchRec->patch.aPrivInstr[offset];
5148
5149 if (pPatchRec->patch.cbPatchJump == 1)
5150 {
5151 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5152 }
5153 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5154 return VINF_SUCCESS;
5155 }
5156 return VERR_PATCH_NOT_FOUND;
5157}
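
/*
 * Illustrative usage sketch (comment only, not part of the build): reading a
 * single original opcode byte that the patch jump overwrote:
 *
 *     uint8_t bOpcode;
 *     int rc = PATMR3QueryOpcode(pVM, GCPtrInstr, &bOpcode);  // GCPtrInstr is hypothetical
 *     if (RT_SUCCESS(rc))
 *         Log(("Original byte at %RRv: %02X\n", GCPtrInstr, bOpcode));
 *     // VERR_PATCH_NOT_FOUND: the address is not covered by an enabled patch jump.
 */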
5158
5159/**
5160 * Read instruction bytes of the original code that was overwritten by the
5161 * 5-byte patch jump.
5162 *
5163 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5164 * @param pVM Pointer to the VM.
5165 * @param GCPtrInstr GC address of instr
5166 * @param pbDst The output buffer.
5167 * @param cbToRead The maximum number of bytes to read.
5168 * @param pcbRead Where to return the actual number of bytes read.
5169 */
5170VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5171{
5172 /* Shortcut. */
5173 if (!PATMIsEnabled(pVM))
5174 return VERR_PATCH_NOT_FOUND;
5175 Assert(!HMIsEnabled(pVM));
5176 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5177 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5178 return VERR_PATCH_NOT_FOUND;
5179
5180 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5181
5182 /*
5183 * If the patch is enabled and the pointer lies within 5 bytes of this
5184 * priv instr ptr, then we've got a hit!
5185 */
5186 RTGCPTR32 off;
5187 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5188 GCPtrInstr, false /*fAbove*/);
5189 if ( pPatchRec
5190 && pPatchRec->patch.uState == PATCH_ENABLED
5191 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5192 {
5193 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5194 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5195 if (cbToRead > cbMax)
5196 cbToRead = cbMax;
5197 switch (cbToRead)
5198 {
5199 case 5: pbDst[4] = pbSrc[4];
5200 case 4: pbDst[3] = pbSrc[3];
5201 case 3: pbDst[2] = pbSrc[2];
5202 case 2: pbDst[1] = pbSrc[1];
5203 case 1: pbDst[0] = pbSrc[0];
5204 break;
5205 default:
5206 memcpy(pbDst, pbSrc, cbToRead);
5207 }
5208 *pcbRead = cbToRead;
5209
5210 if (pPatchRec->patch.cbPatchJump == 1)
5211 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5212 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5213 return VINF_SUCCESS;
5214 }
5215
5216 return VERR_PATCH_NOT_FOUND;
5217}
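
/*
 * Illustrative usage sketch (comment only, not part of the build): a reader
 * that wants the pre-patch bytes first asks PATM and falls back to guest
 * memory when no patch jump covers the address:
 *
 *     uint8_t abInstr[16];
 *     size_t  cbRead = 0;
 *     int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, abInstr, sizeof(abInstr), &cbRead);  // GCPtrInstr is hypothetical
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *         ... read guest memory directly instead ...
 *
 * At most cbPatchJump - off bytes are copied, so cbRead may be smaller than
 * the requested size.
 */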
5218
5219/**
5220 * Disable patch for privileged instruction at specified location
5221 *
5222 * @returns VBox status code.
5223 * @param pVM Pointer to the VM.
5224 * @param pInstrGC Guest context pointer to the privileged instruction
5225 *
5226 * @note returns failure if patching is not allowed or possible
5227 *
5228 */
5229VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5230{
5231 PPATMPATCHREC pPatchRec;
5232 PPATCHINFO pPatch;
5233
5234 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5235 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5236 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5237 if (pPatchRec)
5238 {
5239 int rc = VINF_SUCCESS;
5240
5241 pPatch = &pPatchRec->patch;
5242
5243 /* Already disabled? */
5244 if (pPatch->uState == PATCH_DISABLED)
5245 return VINF_SUCCESS;
5246
5247 /* Clear the IDT entries for the patch we're disabling. */
5248 /* Note: very important as we clear IF in the patch itself */
5249 /** @todo this needs to be changed */
5250 if (pPatch->flags & PATMFL_IDTHANDLER)
5251 {
5252 uint32_t iGate;
5253
5254 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5255 if (iGate != (uint32_t)~0)
5256 {
5257 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5258 if (++cIDTHandlersDisabled < 256)
5259 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5260 }
5261 }
5262
5263 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
5264 if ( pPatch->pPatchBlockOffset
5265 && pPatch->uState == PATCH_ENABLED)
5266 {
5267 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5268 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5269 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5270 }
5271
5272 /* IDT or function patches haven't changed any guest code. */
5273 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5274 {
5275 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5276 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5277
5278 if (pPatch->uState != PATCH_REFUSED)
5279 {
5280 uint8_t temp[16];
5281
5282 Assert(pPatch->cbPatchJump < sizeof(temp));
5283
5284 /* Let's first check if the guest code is still the same. */
5285 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5286 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5287 if (rc == VINF_SUCCESS)
5288 {
5289 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5290
5291 if ( temp[0] != 0xE9 /* jmp opcode */
5292 || *(RTRCINTPTR *)(&temp[1]) != displ
5293 )
5294 {
5295 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5296 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5297 /* Remove it completely */
5298 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5299 rc = PATMR3RemovePatch(pVM, pInstrGC);
5300 AssertRC(rc);
5301 return VWRN_PATCH_REMOVED;
5302 }
5303 patmRemoveJumpToPatch(pVM, pPatch);
5304 }
5305 else
5306 {
5307 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5308 pPatch->uState = PATCH_DISABLE_PENDING;
5309 }
5310 }
5311 else
5312 {
5313 AssertMsgFailed(("Patch was refused!\n"));
5314 return VERR_PATCH_ALREADY_DISABLED;
5315 }
5316 }
5317 else
5318 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5319 {
5320 uint8_t temp[16];
5321
5322 Assert(pPatch->cbPatchJump < sizeof(temp));
5323
5324 /* Let's first check if the guest code is still the same. */
5325 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5326 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5327 if (rc == VINF_SUCCESS)
5328 {
5329 if (temp[0] != 0xCC)
5330 {
5331 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5332 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5333 /* Remove it completely */
5334 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5335 rc = PATMR3RemovePatch(pVM, pInstrGC);
5336 AssertRC(rc);
5337 return VWRN_PATCH_REMOVED;
5338 }
5339 patmDeactivateInt3Patch(pVM, pPatch);
5340 }
5341 }
5342
5343 if (rc == VINF_SUCCESS)
5344 {
5345 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5346 if (pPatch->uState == PATCH_DISABLE_PENDING)
5347 {
5348 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5349 pPatch->uState = PATCH_UNUSABLE;
5350 }
5351 else
5352 if (pPatch->uState != PATCH_DIRTY)
5353 {
5354 pPatch->uOldState = pPatch->uState;
5355 pPatch->uState = PATCH_DISABLED;
5356 }
5357 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5358 }
5359
5360 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5361 return VINF_SUCCESS;
5362 }
5363 Log(("Patch not found!\n"));
5364 return VERR_PATCH_NOT_FOUND;
5365}
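
/*
 * Illustrative usage sketch (comment only, not part of the build): disabling
 * and re-enabling a patch is symmetric as long as the guest code underneath
 * has not changed in the meantime:
 *
 *     int rc = PATMR3DisablePatch(pVM, GCPtrInstr);   // GCPtrInstr is hypothetical
 *     if (rc == VWRN_PATCH_REMOVED)
 *         return VINF_SUCCESS;                        // patch is gone, nothing to re-enable
 *     ...
 *     rc = PATMR3EnablePatch(pVM, GCPtrInstr);        // restores uOldState
 *
 * If the guest overwrote the patch jump in between, either call removes the
 * patch instead (VWRN_PATCH_REMOVED resp. VERR_PATCH_NOT_FOUND).
 */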
5366
5367/**
5368 * Permanently disable patch for privileged instruction at specified location
5369 *
5370 * @returns VBox status code.
5371 * @param pVM Pointer to the VM.
5372 * @param pInstrGC Guest context instruction pointer
5373 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5374 * @param pConflictPatch Conflicting patch
5375 *
5376 */
5377static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5378{
5379 NOREF(pConflictAddr);
5380#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5381 PATCHINFO patch;
5382 DISCPUSTATE cpu;
5383 R3PTRTYPE(uint8_t *) pInstrHC;
5384 uint32_t cbInstr;
5385 bool disret;
5386 int rc;
5387
5388 RT_ZERO(patch);
5389 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5390 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5391 /*
5392 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5393 * with one that jumps right into the conflict patch.
5394 * Otherwise we must disable the conflicting patch to avoid serious problems.
5395 */
5396 if ( disret == true
5397 && (pConflictPatch->flags & PATMFL_CODE32)
5398 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5399 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5400 {
5401 /* Hint patches must be enabled first. */
5402 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5403 {
5404 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5405 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5406 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5407 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5408 /* Enabling might fail if the patched code has changed in the meantime. */
5409 if (rc != VINF_SUCCESS)
5410 return rc;
5411 }
5412
5413 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5414 if (RT_SUCCESS(rc))
5415 {
5416 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5417 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5418 return VINF_SUCCESS;
5419 }
5420 }
5421#endif
5422
5423 if (pConflictPatch->opcode == OP_CLI)
5424 {
5425 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5426 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5427 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5428 if (rc == VWRN_PATCH_REMOVED)
5429 return VINF_SUCCESS;
5430 if (RT_SUCCESS(rc))
5431 {
5432 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5433 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5434 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5435 if (rc == VERR_PATCH_NOT_FOUND)
5436 return VINF_SUCCESS; /* removed already */
5437
5438 AssertRC(rc);
5439 if (RT_SUCCESS(rc))
5440 {
5441 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5442 return VINF_SUCCESS;
5443 }
5444 }
5445 /* else turned into unusable patch (see below) */
5446 }
5447 else
5448 {
5449 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5450 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5451 if (rc == VWRN_PATCH_REMOVED)
5452 return VINF_SUCCESS;
5453 }
5454
5455 /* No need to monitor the code anymore. */
5456 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5457 {
5458 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5459 AssertRC(rc);
5460 }
5461 pConflictPatch->uState = PATCH_UNUSABLE;
5462 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5463 return VERR_PATCH_DISABLED;
5464}
5465
5466/**
5467 * Enable patch for privileged instruction at specified location
5468 *
5469 * @returns VBox status code.
5470 * @param pVM Pointer to the VM.
5471 * @param pInstrGC Guest context pointer to the privileged instruction
5472 *
5473 * @note returns failure if patching is not allowed or possible
5474 *
5475 */
5476VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5477{
5478 PPATMPATCHREC pPatchRec;
5479 PPATCHINFO pPatch;
5480
5481 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5482 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5483 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5484 if (pPatchRec)
5485 {
5486 int rc = VINF_SUCCESS;
5487
5488 pPatch = &pPatchRec->patch;
5489
5490 if (pPatch->uState == PATCH_DISABLED)
5491 {
5492 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5493 {
5494 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5495 uint8_t temp[16];
5496
5497 Assert(pPatch->cbPatchJump < sizeof(temp));
5498
5499 /* Let's first check if the guest code is still the same. */
5500 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5501 AssertRC(rc2);
5502 if (rc2 == VINF_SUCCESS)
5503 {
5504 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5505 {
5506 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5507 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5508 /* Remove it completely */
5509 rc = PATMR3RemovePatch(pVM, pInstrGC);
5510 AssertRC(rc);
5511 return VERR_PATCH_NOT_FOUND;
5512 }
5513
5514 PATMP2GLOOKUPREC cacheRec;
5515 RT_ZERO(cacheRec);
5516 cacheRec.pPatch = pPatch;
5517
5518 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5519 /* Free leftover lock if any. */
5520 if (cacheRec.Lock.pvMap)
5521 {
5522 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5523 cacheRec.Lock.pvMap = NULL;
5524 }
5525 AssertRC(rc2);
5526 if (RT_FAILURE(rc2))
5527 return rc2;
5528
5529#ifdef DEBUG
5530 {
5531 DISCPUSTATE cpu;
5532 char szOutput[256];
5533 uint32_t cbInstr;
5534 uint32_t i = 0;
5535 bool disret;
5536 while(i < pPatch->cbPatchJump)
5537 {
5538 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5539 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5540 Log(("Renewed patch instr: %s", szOutput));
5541 i += cbInstr;
5542 }
5543 }
5544#endif
5545 }
5546 }
5547 else
5548 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5549 {
5550 uint8_t temp[16];
5551
5552 Assert(pPatch->cbPatchJump < sizeof(temp));
5553
5554 /* Let's first check if the guest code is still the same. */
5555 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5556 AssertRC(rc2);
5557
5558 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5559 {
5560 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5561 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5562 rc = PATMR3RemovePatch(pVM, pInstrGC);
5563 AssertRC(rc);
5564 return VERR_PATCH_NOT_FOUND;
5565 }
5566
5567 rc2 = patmActivateInt3Patch(pVM, pPatch);
5568 if (RT_FAILURE(rc2))
5569 return rc2;
5570 }
5571
5572 pPatch->uState = pPatch->uOldState; //restore state
5573
5574 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5575 if (pPatch->pPatchBlockOffset)
5576 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5577
5578 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5579 }
5580 else
5581 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5582
5583 return rc;
5584 }
5585 return VERR_PATCH_NOT_FOUND;
5586}
5587
5588/**
5589 * Remove patch for privileged instruction at specified location
5590 *
5591 * @returns VBox status code.
5592 * @param pVM Pointer to the VM.
5593 * @param pPatchRec Patch record
5594 * @param fForceRemove Remove *all* patches
5595 */
5596int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5597{
5598 PPATCHINFO pPatch;
5599
5600 pPatch = &pPatchRec->patch;
5601
5602 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5603 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5604 {
5605 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5606 return VERR_ACCESS_DENIED;
5607 }
5608 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5609
5610 /* Note: NEVER EVER REUSE PATCH MEMORY */
5611 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5612
5613 if (pPatchRec->patch.pPatchBlockOffset)
5614 {
5615 PAVLOU32NODECORE pNode;
5616
5617 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5618 Assert(pNode);
5619 }
5620
5621 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5622 {
5623 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5624 AssertRC(rc);
5625 }
5626
5627#ifdef VBOX_WITH_STATISTICS
5628 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5629 {
5630 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5631 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5632 }
5633#endif
5634
5635 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5636 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5637 pPatch->nrPatch2GuestRecs = 0;
5638 Assert(pPatch->Patch2GuestAddrTree == 0);
5639
5640 patmEmptyTree(pVM, &pPatch->FixupTree);
5641 pPatch->nrFixups = 0;
5642 Assert(pPatch->FixupTree == 0);
5643
5644 if (pPatchRec->patch.pTempInfo)
5645 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5646
5647 /* Note: might fail, because it has already been removed (e.g. during reset). */
5648 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5649
5650 /* Free the patch record */
5651 MMHyperFree(pVM, pPatchRec);
5652 return VINF_SUCCESS;
5653}
5654
5655/**
5656 * RTAvlU32DoWithAll() worker.
5657 * Checks whether the current trampoline instruction is the jump to the target patch
5658 * and updates the displacement to jump to the new target.
5659 *
5660 * @returns VBox status code.
5661 * @retval VERR_ALREADY_EXISTS if the jump was found.
5662 * @param pNode The current patch to guest record to check.
5663 * @param pvUser The refresh state.
5664 */
5665static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5666{
5667 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5668 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5669 PVM pVM = pRefreshPatchState->pVM;
5670
5671 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5672
5673 /*
5674 * Check if the patch instruction starts with a jump.
5675 * ASSUMES that there is no other patch to guest record that starts
5676 * with a jump.
5677 */
5678 if (*pPatchInstr == 0xE9)
5679 {
5680 /* Jump found, update the displacement. */
5681 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5682 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5683 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5684
5685 LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5686 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5687
5688 *(uint32_t *)&pPatchInstr[1] = displ;
5689 return VERR_ALREADY_EXISTS; /** @todo better return code */
5690 }
5691
5692 return VINF_SUCCESS;
5693}
5694
5695/**
5696 * Attempt to refresh the patch by recompiling its entire code block
5697 *
5698 * @returns VBox status code.
5699 * @param pVM Pointer to the VM.
5700 * @param pPatchRec Patch record
5701 */
5702int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5703{
5704 PPATCHINFO pPatch;
5705 int rc;
5706 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5707 PTRAMPREC pTrampolinePatchesHead = NULL;
5708
5709 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5710
5711 pPatch = &pPatchRec->patch;
5712 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5713 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5714 {
5715 if (!pPatch->pTrampolinePatchesHead)
5716 {
5717 /*
5718 * It is sometimes possible that there are trampoline patches to this patch
5719 * but they are not recorded (after a saved state load for example).
5720 * Refuse to refresh those patches.
5721 * This can hurt performance in theory if the patched code is modified by the
5722 * guest and executed often. However, most of the time the state is saved after
5723 * the guest code was modified and it is not updated again afterwards, so this
5724 * shouldn't be a big problem.
5725 */
5726 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5727 return VERR_PATCHING_REFUSED;
5728 }
5729 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5730 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5731 }
5732
5733 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5734
5735 rc = PATMR3DisablePatch(pVM, pInstrGC);
5736 AssertRC(rc);
5737
5738 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5739 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5740#ifdef VBOX_WITH_STATISTICS
5741 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5742 {
5743 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5744 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5745 }
5746#endif
5747
5748 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5749
5750 /* Attempt to install a new patch. */
5751 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5752 if (RT_SUCCESS(rc))
5753 {
5754 RTRCPTR pPatchTargetGC;
5755 PPATMPATCHREC pNewPatchRec;
5756
5757 /* Determine target address in new patch */
5758 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5759 Assert(pPatchTargetGC);
5760 if (!pPatchTargetGC)
5761 {
5762 rc = VERR_PATCHING_REFUSED;
5763 goto failure;
5764 }
5765
5766 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5767 pPatch->uCurPatchOffset = 0;
5768
5769 /* insert jump to new patch in old patch block */
5770 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5771 if (RT_FAILURE(rc))
5772 goto failure;
5773
5774 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5775 Assert(pNewPatchRec); /* can't fail */
5776
5777 /* Remove old patch (only do that when everything is finished) */
5778 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5779 AssertRC(rc2);
5780
5781 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5782 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5783 Assert(fInserted); NOREF(fInserted);
5784
5785 Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5786 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5787
5788 /* Used by another patch, so don't remove it! */
5789 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5790
5791 if (pTrampolinePatchesHead)
5792 {
5793 /* Update all trampoline patches to jump to the new patch. */
5794 PTRAMPREC pTrampRec = NULL;
5795 PATMREFRESHPATCH RefreshPatch;
5796
5797 RefreshPatch.pVM = pVM;
5798 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5799
5800 pTrampRec = pTrampolinePatchesHead;
5801
5802 while (pTrampRec)
5803 {
5804 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5805
5806 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5807 /*
5808 * We have to find the right patch2guest record because there might be others
5809 * for statistics.
5810 */
5811 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5812 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5813 Assert(rc == VERR_ALREADY_EXISTS);
5814 rc = VINF_SUCCESS;
5815 pTrampRec = pTrampRec->pNext;
5816 }
5817 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5818 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5819 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5820 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5821 }
5822 }
5823
5824failure:
5825 if (RT_FAILURE(rc))
5826 {
5827 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5828
5829 /* Remove the new inactive patch */
5830 rc = PATMR3RemovePatch(pVM, pInstrGC);
5831 AssertRC(rc);
5832
5833 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5834 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5835 Assert(fInserted); NOREF(fInserted);
5836
5837 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5838 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5839 AssertRC(rc2);
5840
5841 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5842 }
5843 return rc;
5844}
5845
5846/**
5847 * Find patch for privileged instruction at specified location
5848 *
5849 * @returns Patch structure pointer if found; else NULL
5850 * @param pVM Pointer to the VM.
5851 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5852 * @param fIncludeHints Include hinted patches or not
5853 *
5854 */
5855PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5856{
5857 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5858 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5859 if (pPatchRec)
5860 {
5861 if ( pPatchRec->patch.uState == PATCH_ENABLED
5862 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5863 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5864 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5865 {
5866 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5867 return &pPatchRec->patch;
5868 }
5869 else
5870 if ( fIncludeHints
5871 && pPatchRec->patch.uState == PATCH_DISABLED
5872 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5873 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5874 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5875 {
5876 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5877 return &pPatchRec->patch;
5878 }
5879 }
5880 return NULL;
5881}
5882
5883/**
5884 * Checks whether the GC address is inside a generated patch jump
5885 *
5886 * @returns true -> yes, false -> no
5887 * @param pVM Pointer to the VM.
5888 * @param pAddr Guest context address.
5889 * @param pPatchAddr Guest context patch address (if true).
5890 */
5891VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5892{
5893 RTRCPTR addr;
5894 PPATCHINFO pPatch;
5895
5896 Assert(!HMIsEnabled(pVM));
5897 if (PATMIsEnabled(pVM) == false)
5898 return false;
5899
5900 if (pPatchAddr == NULL)
5901 pPatchAddr = &addr;
5902
5903 *pPatchAddr = 0;
5904
5905 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5906 if (pPatch)
5907 *pPatchAddr = pPatch->pPrivInstrGC;
5908
5909 return *pPatchAddr != 0;
5910}
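
/*
 * Illustrative usage sketch (comment only, not part of the build): a caller
 * that lands in the middle of a 5-byte patch jump can recover the guest
 * address of the patched privileged instruction:
 *
 *     RTGCPTR32 GCPtrPatch;
 *     if (PATMR3IsInsidePatchJump(pVM, GCPtrFault, &GCPtrPatch))  // GCPtrFault is hypothetical
 *         Log(("%RRv lies inside the patch jump at %RRv\n", GCPtrFault, GCPtrPatch));
 */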
5911
5912/**
5913 * Remove patch for privileged instruction at specified location
5914 *
5915 * @returns VBox status code.
5916 * @param pVM Pointer to the VM.
5917 * @param pInstrGC Guest context pointer to the privileged instruction
5918 *
5919 * @note returns failure if patching is not allowed or possible
5920 *
5921 */
5922VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5923{
5924 PPATMPATCHREC pPatchRec;
5925
5926 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5927 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5928 if (pPatchRec)
5929 {
5930 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5931 if (rc == VWRN_PATCH_REMOVED)
5932 return VINF_SUCCESS;
5933
5934 return patmR3RemovePatch(pVM, pPatchRec, false);
5935 }
5936 AssertFailed();
5937 return VERR_PATCH_NOT_FOUND;
5938}
5939
5940/**
5941 * Mark patch as dirty
5942 *
5943 * @returns VBox status code.
5944 * @param pVM Pointer to the VM.
5945 * @param pPatch Patch record
5946 *
5947 * @note returns failure if patching is not allowed or possible
5948 *
5949 */
5950static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5951{
5952 if (pPatch->pPatchBlockOffset)
5953 {
5954 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5955 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5956 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5957 }
5958
5959 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5960 /* Put back the replaced instruction. */
5961 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5962 if (rc == VWRN_PATCH_REMOVED)
5963 return VINF_SUCCESS;
5964
5965 /* Note: we don't restore patch pages for patches that are not enabled! */
5966 /* Note: be careful when changing this behaviour!! */
5967
5968 /* The patch pages are no longer marked for self-modifying code detection */
5969 if (pPatch->flags & PATMFL_CODE_MONITORED)
5970 {
5971 rc = patmRemovePatchPages(pVM, pPatch);
5972 AssertRCReturn(rc, rc);
5973 }
5974 pPatch->uState = PATCH_DIRTY;
5975
5976 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5977 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5978
5979 return VINF_SUCCESS;
5980}
5981
5982/**
5983 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5984 *
5985 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5986 * @param pVM Pointer to the VM.
5987 * @param pPatch Patch block structure pointer
5988 * @param pPatchGC GC address in patch block
5989 */
5990RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5991{
5992 Assert(pPatch->Patch2GuestAddrTree);
5993 /* Get the closest record from below. */
5994 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5995 if (pPatchToGuestRec)
5996 return pPatchToGuestRec->pOrgInstrGC;
5997
5998 return 0;
5999}
6000
6001/**
6002 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6003 *
6004 * @returns corresponding GC pointer in patch block
6005 * @param pVM Pointer to the VM.
6006 * @param pPatch Current patch block pointer
6007 * @param pInstrGC Guest context pointer to privileged instruction
6008 *
6009 */
6010RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6011{
6012 if (pPatch->Guest2PatchAddrTree)
6013 {
6014 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6015 if (pGuestToPatchRec)
6016 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6017 }
6018
6019 return 0;
6020}
6021
6022/**
6023 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6024 *
6025 * @returns corresponding GC pointer in patch block
6026 * @param pVM Pointer to the VM.
6027 * @param pInstrGC Guest context pointer to privileged instruction
6028 */
6029static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6030{
6031 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6032 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6033 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6034 return NIL_RTRCPTR;
6035}
6036
6037/**
6038 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6039 * identical match)
6040 *
6041 * @returns corresponding GC pointer in patch block
6042 * @param pVM Pointer to the VM.
6043 * @param pPatch Current patch block pointer
6044 * @param pInstrGC Guest context pointer to privileged instruction
6045 *
6046 */
6047RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6048{
6049 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6050 if (pGuestToPatchRec)
6051 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6052 return NIL_RTRCPTR;
6053}
6054
6055/**
6056 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6057 *
6058 * @returns original GC instruction pointer or 0 if not found
6059 * @param pVM Pointer to the VM.
6060 * @param pPatchGC GC address in patch block
6061 * @param pEnmState State of the translated address (out)
6062 *
6063 */
6064VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6065{
6066 PPATMPATCHREC pPatchRec;
6067 void *pvPatchCoreOffset;
6068 RTRCPTR pPrivInstrGC;
6069
6070 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6071 Assert(!HMIsEnabled(pVM));
6072 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6073 if (pvPatchCoreOffset == 0)
6074 {
6075 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6076 return 0;
6077 }
6078 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6079 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6080 if (pEnmState)
6081 {
6082 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6083 || pPatchRec->patch.uState == PATCH_DIRTY
6084 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6085 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6086 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6087
6088 if ( !pPrivInstrGC
6089 || pPatchRec->patch.uState == PATCH_UNUSABLE
6090 || pPatchRec->patch.uState == PATCH_REFUSED)
6091 {
6092 pPrivInstrGC = 0;
6093 *pEnmState = PATMTRANS_FAILED;
6094 }
6095 else
6096 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6097 {
6098 *pEnmState = PATMTRANS_INHIBITIRQ;
6099 }
6100 else
6101 if ( pPatchRec->patch.uState == PATCH_ENABLED
6102 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6103 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6104 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6105 {
6106 *pEnmState = PATMTRANS_OVERWRITTEN;
6107 }
6108 else
6109 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6110 {
6111 *pEnmState = PATMTRANS_OVERWRITTEN;
6112 }
6113 else
6114 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6115 {
6116 *pEnmState = PATMTRANS_PATCHSTART;
6117 }
6118 else
6119 *pEnmState = PATMTRANS_SAFE;
6120 }
6121 return pPrivInstrGC;
6122}
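
/*
 * Illustrative usage sketch (comment only, not part of the build): a caller
 * that sees EIP inside patch memory can translate it back to guest code and
 * act on the reported state:
 *
 *     PATMTRANSSTATE enmState;
 *     RTRCPTR GCPtrOrg = PATMR3PatchToGCPtr(pVM, GCPtrEip, &enmState);  // GCPtrEip is hypothetical and must be a patch address
 *     if (GCPtrOrg && enmState == PATMTRANS_SAFE)
 *         ... resume the guest at GCPtrOrg ...
 *     else if (enmState == PATMTRANS_OVERWRITTEN)
 *         ... the original instruction was overwritten by a patch jump ...
 */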
6123
6124/**
6125 * Returns the GC pointer of the patch for the specified GC address
6126 *
6127 * @returns GC pointer of the patch code, or NIL_RTRCPTR if not found.
6128 * @param pVM Pointer to the VM.
6129 * @param pAddrGC Guest context address
6130 */
6131VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6132{
6133 PPATMPATCHREC pPatchRec;
6134
6135 Assert(!HMIsEnabled(pVM));
6136
6137 /* Find the patch record. */
6138 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6139 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6140 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6141 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6142 return NIL_RTRCPTR;
6143}
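
/*
 * Illustrative usage sketch (comment only, not part of the build): looking up
 * the recompiled code block for a patched instruction:
 *
 *     RTRCPTR GCPtrPatchCode = PATMR3QueryPatchGCPtr(pVM, GCPtrInstr);  // GCPtrInstr is hypothetical
 *     if (GCPtrPatchCode != NIL_RTRCPTR)
 *         ... transfer control to GCPtrPatchCode ...
 *
 * Note that dirty patches are returned as well; see the @todo above.
 */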
6144
6145/**
6146 * Attempt to recover dirty instructions
6147 *
6148 * @returns VBox status code.
6149 * @param pVM Pointer to the VM.
6150 * @param pCtx Pointer to the guest CPU context.
6151 * @param pPatch Patch record.
6152 * @param pPatchToGuestRec Patch to guest address record.
6153 * @param pEip GC pointer of trapping instruction.
6154 */
6155static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6156{
6157 DISCPUSTATE CpuOld, CpuNew;
6158 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6159 int rc;
6160 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6161 uint32_t cbDirty;
6162 PRECPATCHTOGUEST pRec;
6163 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6164 PVMCPU pVCpu = VMMGetCpu0(pVM);
6165 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6166
6167 pRec = pPatchToGuestRec;
6168 pCurInstrGC = pOrgInstrGC;
6169 pCurPatchInstrGC = pEip;
6170 cbDirty = 0;
6171 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6172
6173 /* Find all adjacent dirty instructions */
6174 while (true)
6175 {
6176 if (pRec->fJumpTarget)
6177 {
6178 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6179 pRec->fDirty = false;
6180 return VERR_PATCHING_REFUSED;
6181 }
6182
6183 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6184 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6185 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6186
6187 /* Only harmless instructions are acceptable. */
6188 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6189 if ( RT_FAILURE(rc)
6190 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6191 {
6192 if (RT_SUCCESS(rc))
6193 cbDirty += CpuOld.cbInstr;
6194 else
6195 if (!cbDirty)
6196 cbDirty = 1;
6197 break;
6198 }
6199
6200#ifdef DEBUG
6201 char szBuf[256];
6202 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6203 szBuf, sizeof(szBuf), NULL);
6204 Log(("DIRTY: %s\n", szBuf));
6205#endif
6206 /* Mark as clean; if we fail we'll let it always fault. */
6207 pRec->fDirty = false;
6208
6209 /* Remove old lookup record. */
6210 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6211 pPatchToGuestRec = NULL;
6212
6213 pCurPatchInstrGC += CpuOld.cbInstr;
6214 cbDirty += CpuOld.cbInstr;
6215
6216 /* Let's see if there's another dirty instruction right after. */
6217 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6218 if (!pRec || !pRec->fDirty)
6219 break; /* no more dirty instructions */
6220
6221 /* In case of complex instructions the next guest instruction could be quite far off. */
6222 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6223 }
6224
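/* Pass 2: if the scan ended cleanly (last disassembled instruction harmless), copy the current guest instructions over the dirty patch code one instruction at a time. */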
6225 if ( RT_SUCCESS(rc)
6226 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6227 )
6228 {
6229 uint32_t cbLeft;
6230
6231 pCurPatchInstrHC = pPatchInstrHC;
6232 pCurPatchInstrGC = pEip;
6233 cbLeft = cbDirty;
6234
6235 while (cbLeft && RT_SUCCESS(rc))
6236 {
6237 bool fValidInstr;
6238
6239 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6240
6241 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6242 if ( !fValidInstr
6243 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6244 )
6245 {
6246 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6247
6248 if ( pTargetGC >= pOrgInstrGC
6249 && pTargetGC <= pOrgInstrGC + cbDirty
6250 )
6251 {
6252 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6253 fValidInstr = true;
6254 }
6255 }
6256
6257 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6258 if ( rc == VINF_SUCCESS
6259 && CpuNew.cbInstr <= cbLeft /* must still fit */
6260 && fValidInstr
6261 )
6262 {
6263#ifdef DEBUG
6264 char szBuf[256];
6265 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6266 szBuf, sizeof(szBuf), NULL);
6267 Log(("NEW: %s\n", szBuf));
6268#endif
6269
6270 /* Copy the new instruction. */
6271 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6272 AssertRC(rc);
6273
6274 /* Add a new lookup record for the duplicated instruction. */
6275 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6276 }
6277 else
6278 {
6279#ifdef DEBUG
6280 char szBuf[256];
6281 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6282 szBuf, sizeof(szBuf), NULL);
6283 Log(("NEW: %s (FAILED)\n", szBuf));
6284#endif
6285 /* Restore the old lookup record for the duplicated instruction. */
6286 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6287
6288 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6289 rc = VERR_PATCHING_REFUSED;
6290 break;
6291 }
6292 pCurInstrGC += CpuNew.cbInstr;
6293 pCurPatchInstrHC += CpuNew.cbInstr;
6294 pCurPatchInstrGC += CpuNew.cbInstr;
6295 cbLeft -= CpuNew.cbInstr;
6296
6297 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6298 if (!cbLeft)
6299 {
6300 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6301 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6302 {
6303 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6304 if (pRec)
6305 {
6306 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6307 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6308
6309 Assert(!pRec->fDirty);
6310
6311 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6312 if (cbFiller >= SIZEOF_NEARJUMP32)
6313 {
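/* Enough room for a 32-bit near jump (0xE9) over the filler bytes; the rel32 operand is measured from the end of the 5 byte jump itself. */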
6314 pPatchFillHC[0] = 0xE9;
6315 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6316#ifdef DEBUG
6317 char szBuf[256];
6318 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6319 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6320 Log(("FILL: %s\n", szBuf));
6321#endif
6322 }
6323 else
6324 {
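/* Not enough room for a near jump; pad the remaining bytes with NOPs instead. */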
6325 for (unsigned i = 0; i < cbFiller; i++)
6326 {
6327 pPatchFillHC[i] = 0x90; /* NOP */
6328#ifdef DEBUG
6329 char szBuf[256];
6330 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6331 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6332 Log(("FILL: %s\n", szBuf));
6333#endif
6334 }
6335 }
6336 }
6337 }
6338 }
6339 }
6340 }
6341 else
6342 rc = VERR_PATCHING_REFUSED;
6343
6344 if (RT_SUCCESS(rc))
6345 {
6346 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6347 }
6348 else
6349 {
6350 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6351 Assert(cbDirty);
6352
6353 /* Mark the whole instruction stream with breakpoints. */
6354 if (cbDirty)
6355 memset(pPatchInstrHC, 0xCC, cbDirty);
6356
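/* For duplicated functions and IDT/trap handler patches it is worth trying to regenerate the whole patch, provided patch memory has not run out. */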
6357 if ( pVM->patm.s.fOutOfMemory == false
6358 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6359 {
6360 rc = patmR3RefreshPatch(pVM, pPatch);
6361 if (RT_FAILURE(rc))
6362 {
6363 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6364 }
6365 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6366 rc = VERR_PATCHING_REFUSED;
6367 }
6368 }
6369 return rc;
6370}
6371
6372/**
6373 * Handle a trap inside patch code.
6374 *
6375 * @returns VBox status code.
6376 * @param pVM Pointer to the VM.
6377 * @param pCtx Pointer to the guest CPU context.
6378 * @param pEip GC pointer of the trapping instruction.
6379 * @param ppNewEip Where to store the GC pointer of the instruction to continue at.
6380 */
6381VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6382{
6383 PPATMPATCHREC pPatch = 0;
6384 void *pvPatchCoreOffset;
6385 RTRCUINTPTR offset;
6386 RTRCPTR pNewEip;
6387 int rc;
6388 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6389 PVMCPU pVCpu = VMMGetCpu0(pVM);
6390
6391 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6392 Assert(pVM->cCpus == 1);
6393
6394 pNewEip = 0;
6395 *ppNewEip = 0;
6396
6397 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6398
6399 /* Find the patch record. */
6400 /* Note: there might not be a patch to guest translation record (global function) */
6401 offset = pEip - pVM->patm.s.pPatchMemGC;
6402 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6403 if (pvPatchCoreOffset)
6404 {
6405 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6406
6407 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6408
6409 if (pPatch->patch.uState == PATCH_DIRTY)
6410 {
6411 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6412 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6413 {
6414 /* Function duplication patches set fPIF to 1 on entry */
6415 pVM->patm.s.pGCStateHC->fPIF = 1;
6416 }
6417 }
6418 else
6419 if (pPatch->patch.uState == PATCH_DISABLED)
6420 {
6421 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6422 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6423 {
6424 /* Function duplication patches set fPIF to 1 on entry */
6425 pVM->patm.s.pGCStateHC->fPIF = 1;
6426 }
6427 }
6428 else
6429 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6430 {
6431 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6432
6433 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6434 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6435 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6436 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6437 }
6438
6439 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6440 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6441
6442 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6443 pPatch->patch.cTraps++;
6444 PATM_STAT_FAULT_INC(&pPatch->patch);
6445 }
6446 else
6447 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6448
6449 /* Check if we were interrupted in PATM generated instruction code. */
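/* fPIF is cleared while PATM generated instruction sequences are executing; a fault there is only tolerated for the pushf/push/call cases and for calls into an invalidated patch handled below. */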
6450 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6451 {
6452 DISCPUSTATE Cpu;
6453 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6454 AssertRC(rc);
6455
6456 if ( rc == VINF_SUCCESS
6457 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6458 || Cpu.pCurInstr->uOpcode == OP_PUSH
6459 || Cpu.pCurInstr->uOpcode == OP_CALL)
6460 )
6461 {
6462 uint64_t fFlags;
6463
6464 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6465
6466 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6467 {
6468 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6469 if ( rc == VINF_SUCCESS
6470 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6471 {
6472 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6473
6474 /* Reset the PATM stack. */
6475 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6476
6477 pVM->patm.s.pGCStateHC->fPIF = 1;
6478
6479 Log(("Faulting push -> go back to the original instruction\n"));
6480
6481 /* continue at the original instruction */
6482 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6483 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6484 return VINF_SUCCESS;
6485 }
6486 }
6487
6488 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6489 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6490 AssertMsgRC(rc, ("PGMShwMakePageWritable -> rc=%Rrc\n", rc));
6491 if (rc == VINF_SUCCESS)
6492 {
6493 /* The guest page *must* be present. */
6494 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6495 if ( rc == VINF_SUCCESS
6496 && (fFlags & X86_PTE_P))
6497 {
6498 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6499 return VINF_PATCH_CONTINUE;
6500 }
6501 }
6502 }
6503 else
6504 if (pPatch->patch.pPrivInstrGC == pNewEip)
6505 {
6506 /* Invalidated patch or first instruction overwritten.
6507 * We can ignore the fPIF state in this case.
6508 */
6509 /* Reset the PATM stack. */
6510 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6511
6512 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6513
6514 pVM->patm.s.pGCStateHC->fPIF = 1;
6515
6516 /* continue at the original instruction */
6517 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6518 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6519 return VINF_SUCCESS;
6520 }
6521
6522 char szBuf[256];
6523 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6524
6525 /* Very bad. We crashed in emitted code. Probably stack? */
6526 if (pPatch)
6527 {
6528 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6529 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6530 }
6531 else
6532 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6533 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6534 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6535 }
6536
6537 /* From here on, we must have a valid patch to guest translation. */
6538 if (pvPatchCoreOffset == 0)
6539 {
6540 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6541 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6542 return VERR_PATCH_NOT_FOUND;
6543 }
6544
6545 /* Take care of dirty/changed instructions. */
6546 if (pPatchToGuestRec->fDirty)
6547 {
6548 Assert(pPatchToGuestRec->Core.Key == offset);
6549 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6550
6551 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6552 if (RT_SUCCESS(rc))
6553 {
6554 /* Retry the current instruction. */
6555 pNewEip = pEip;
6556 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6557 }
6558 else
6559 {
6560 /* Reset the PATM stack. */
6561 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6562
6563 rc = VINF_SUCCESS; /* Continue at original instruction. */
6564 }
6565
6566 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6567 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6568 return rc;
6569 }
6570
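/* Strict builds: if the fault was on the RETN of a duplicated function, log the return address found on the guest stack next to the one recorded on the PATM stack. */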
6571#ifdef VBOX_STRICT
6572 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6573 {
6574 DISCPUSTATE cpu;
6575 bool disret;
6576 uint32_t cbInstr;
6577 PATMP2GLOOKUPREC cacheRec;
6578 RT_ZERO(cacheRec);
6579 cacheRec.pPatch = &pPatch->patch;
6580
6581 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6582 &cpu, &cbInstr);
6583 if (cacheRec.Lock.pvMap)
6584 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6585
6586 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6587 {
6588 RTRCPTR retaddr;
6589 PCPUMCTX pCtx2;
6590
6591 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6592
6593 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6594 AssertRC(rc);
6595
6596 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6597 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6598 }
6599 }
6600#endif
6601
6602 /* Return original address, correct by subtracting the CS base address. */
6603 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6604
6605 /* Reset the PATM stack. */
6606 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6607
6608 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6609 {
6610 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6611 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6612#ifdef VBOX_STRICT
6613 DISCPUSTATE cpu;
6614 bool disret;
6615 uint32_t cbInstr;
6616 PATMP2GLOOKUPREC cacheRec;
6617 RT_ZERO(cacheRec);
6618 cacheRec.pPatch = &pPatch->patch;
6619
6620 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6621 &cpu, &cbInstr);
6622 if (cacheRec.Lock.pvMap)
6623 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6624
6625 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6626 {
6627 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6628 &cpu, &cbInstr);
6629 if (cacheRec.Lock.pvMap)
6630 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6631
6632 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6633 }
6634#endif
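/* Re-arm the interrupt inhibition for the original guest instruction and clear the PATM side tracking now that EM has taken it over. */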
6635 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6636 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6637 }
6638
6639 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6640 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6641 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6642 {
6643 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6644 Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6645 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6646 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6647 return VERR_PATCH_DISABLED;
6648 }
6649
6650#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6651 /** @todo Compare against the number of successful runs and add an aging algorithm to determine the best time to disable the patch. */
6652 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6653 {
6654 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6655 /* We are only wasting time; back out the patch. */
6656 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6658 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6659 return VERR_PATCH_DISABLED;
6660 }
6661#endif
6662
6663 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6664 return VINF_SUCCESS;
6665}
6666
6667
6668/**
6669 * Handle a page fault in a monitored page.
6670 *
6671 * @returns VBox status code.
6672 * @param pVM Pointer to the VM.
6673 */
6674VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6675{
6676 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6677
6678 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6679 addr &= PAGE_BASE_GC_MASK;
6680
6681 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6682 AssertRC(rc); NOREF(rc);
6683
6684 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6685 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6686 {
6687 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6688 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6689 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6690 if (rc == VWRN_PATCH_REMOVED)
6691 return VINF_SUCCESS;
6692
6693 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6694
6695 if (addr == pPatchRec->patch.pPrivInstrGC)
6696 addr++;
6697 }
6698
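/* Renew (disable + re-enable) every other enabled patch whose privileged instruction lives in the same guest page. */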
6699 for(;;)
6700 {
6701 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6702
6703 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6704 break;
6705
6706 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6707 {
6708 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6709 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6710 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6711 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6712 }
6713 addr = pPatchRec->patch.pPrivInstrGC + 1;
6714 }
6715
6716 pVM->patm.s.pvFaultMonitor = 0;
6717 return VINF_SUCCESS;
6718}
6719
6720
6721#ifdef VBOX_WITH_STATISTICS
6722
6723static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6724{
6725 if (pPatch->flags & PATMFL_SYSENTER)
6726 {
6727 return "SYSENT";
6728 }
6729 else
6730 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6731 {
6732 static char szTrap[16];
6733 uint32_t iGate;
6734
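/* Look up which IDT gate, if any, points at this patch so it can be labelled INT-xx or TRAP-xx. */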
6735 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6736 if (iGate < 256)
6737 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6738 else
6739 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6740 return szTrap;
6741 }
6742 else
6743 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6744 return "DUPFUNC";
6745 else
6746 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6747 return "FUNCCALL";
6748 else
6749 if (pPatch->flags & PATMFL_TRAMPOLINE)
6750 return "TRAMP";
6751 else
6752 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6753}
6754
6755static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6756{
6757 NOREF(pVM);
6758 switch(pPatch->uState)
6759 {
6760 case PATCH_ENABLED:
6761 return "ENA";
6762 case PATCH_DISABLED:
6763 return "DIS";
6764 case PATCH_DIRTY:
6765 return "DIR";
6766 case PATCH_UNUSABLE:
6767 return "UNU";
6768 case PATCH_REFUSED:
6769 return "REF";
6770 case PATCH_DISABLE_PENDING:
6771 return "DIP";
6772 default:
6773 AssertFailed();
6774 return " ";
6775 }
6776}
6777
6778/**
6779 * Resets the sample.
6780 * @param pVM Pointer to the VM.
6781 * @param pvSample The sample registered using STAMR3RegisterCallback.
6782 */
6783static void patmResetStat(PVM pVM, void *pvSample)
6784{
6785 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6786 Assert(pPatch);
6787
6788 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6789 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6790}
6791
6792/**
6793 * Prints the sample into the buffer.
6794 *
6795 * @param pVM Pointer to the VM.
6796 * @param pvSample The sample registered using STAMR3RegisterCallback.
6797 * @param pszBuf The buffer to print into.
6798 * @param cchBuf The size of the buffer.
6799 */
6800static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6801{
6802 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6803 Assert(pPatch);
6804
6805 Assert(pPatch->uState != PATCH_REFUSED);
6806 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6807
6808 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6809 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6810 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6811}
6812
6813/**
6814 * Returns the GC address of the corresponding patch statistics counter.
6815 *
6816 * @returns GC address of the statistics counter.
6817 * @param pVM Pointer to the VM.
6818 * @param pPatch Patch structure.
6819 */
6820RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6821{
6822 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
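/* Each patch owns one STAMRATIOU32 slot in the RC statistics array; return the RC address of its u32A member. */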
6823 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6824}
6825
6826#endif /* VBOX_WITH_STATISTICS */
6827#ifdef VBOX_WITH_DEBUGGER
6828
6829/**
6830 * The '.patmoff' command.
6831 *
6832 * @returns VBox status code.
6833 * @param pCmd Pointer to the command descriptor (as registered).
6834 * @param pCmdHlp Pointer to command helper functions.
6835 * @param pUVM The user mode VM handle.
6836 * @param paArgs Pointer to (readonly) array of arguments.
6837 * @param cArgs Number of arguments in the array.
6838 */
6839static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6840{
6841 /*
6842 * Validate input.
6843 */
6844 NOREF(cArgs); NOREF(paArgs);
6845 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6846 PVM pVM = pUVM->pVM;
6847 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6848
6849 if (HMIsEnabled(pVM))
6850 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6851
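/* Disable every existing patch first, then refuse any further patching. */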
6852 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6853 PATMR3AllowPatching(pVM->pUVM, false);
6854 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6855}
6856
6857/**
6858 * The '.patmon' command.
6859 *
6860 * @returns VBox status code.
6861 * @param pCmd Pointer to the command descriptor (as registered).
6862 * @param pCmdHlp Pointer to command helper functions.
6863 * @param pUVM The user mode VM handle.
6864 * @param paArgs Pointer to (readonly) array of arguments.
6865 * @param cArgs Number of arguments in the array.
6866 */
6867static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6868{
6869 /*
6870 * Validate input.
6871 */
6872 NOREF(cArgs); NOREF(paArgs);
6873 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6874 PVM pVM = pUVM->pVM;
6875 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6876
6877 if (HMIsEnabled(pVM))
6878 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6879
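/* Allow patching again, then re-enable all existing patches. */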
6880 PATMR3AllowPatching(pVM->pUVM, true);
6881 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6882 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6883}
6884
6885#endif /* VBOX_WITH_DEBUGGER */