VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@43394

Last change on this file since 43394 was 42781, checked in by vboxsync, 12 years ago

PATM: Log statement. Added some notes wrt to NT4SP1 boot issues.

Line 
1/* $Id: PATM.cpp 42781 2012-08-11 22:50:07Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2012 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/**
58 * Refresh trampoline patch state.
59 */
60typedef struct PATMREFRESHPATCH
61{
62 /** Pointer to the VM structure. */
63 PVM pVM;
64 /** The trampoline patch record. */
65 PPATCHINFO pPatchTrampoline;
66 /** The new patch we want to jump to. */
67 PPATCHINFO pPatchRec;
68} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71#define PATMREAD_RAWCODE 1 /* read code as-is */
72#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
73#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
74
75/*
76 * Private structure used during disassembly
77 */
78typedef struct
79{
80 PVM pVM;
81 PPATCHINFO pPatchInfo;
82 R3PTRTYPE(uint8_t *) pbInstrHC;
83 RTRCPTR pInstrGC;
84 uint32_t fReadFlags;
85} PATMDISASM, *PPATMDISASM;
86
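/* Note: an instance of this context is passed as the reader user argument to the
 * disassembler and fetched again in patmReadBytes() via pDis->pvUser (see the
 * patmR3DisInstr* wrappers below). fReadFlags takes the PATMREAD_* flags defined
 * above, e.g. PATMREAD_ORGCODE to fetch the original, unpatched opcode bytes. */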
87
88/*******************************************************************************
89* Internal Functions *
90*******************************************************************************/
91
92static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
93static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
94static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
95
96#ifdef LOG_ENABLED // keep gcc quiet
97static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
98#endif
99#ifdef VBOX_WITH_STATISTICS
100static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
101static void patmResetStat(PVM pVM, void *pvSample);
102static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
103#endif
104
105#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
106#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
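/* Both macros translate between the host (HC) and guest (GC) mappings of the patch memory
 * block set up in PATMR3Init(). Since the block is one contiguous allocation, conversion
 * amounts to carrying a pointer's offset within the block over to the other mapping. */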
107
108static int patmReinit(PVM pVM);
109static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
110
111#ifdef VBOX_WITH_DEBUGGER
112static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
113static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
114static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
115
116/** Command descriptors. */
117static const DBGCCMD g_aCmds[] =
118{
119 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ..., pszDescription */
120 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
121 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
122};
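/* These are ordinary DBGC console commands: typing "patmon" or "patmoff" at the debugger
 * prompt invokes patmr3CmdOn/patmr3CmdOff to enable or disable patching; the
 * EnableAllPatches/DisableAllPatches enumeration callbacks further down are the likely
 * workers, though their wiring is outside this excerpt. */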
123#endif
124
125/* Don't want to break saved states, so put it here as a global variable. */
126static unsigned int cIDTHandlersDisabled = 0;
127
128/**
129 * Initializes the PATM.
130 *
131 * @returns VBox status code.
132 * @param pVM Pointer to the VM.
133 */
134VMMR3DECL(int) PATMR3Init(PVM pVM)
135{
136 int rc;
137
138 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
139
140 /* These values can't change as they are hardcoded in patch code (old saved states!) */
141 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
142 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
143 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
144 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
145
146 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
147 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
148
149 /* Allocate patch memory and GC patch state memory. */
150 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
151 /* Add another page in case the generated code is much larger than expected. */
152 /** @todo bad safety precaution */
153 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
154 if (RT_FAILURE(rc))
155 {
156 Log(("MMHyperAlloc failed with %Rrc\n", rc));
157 return rc;
158 }
159 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
160
161 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
162 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
163 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
164
165 /*
166 * Hypervisor memory for GC status data (read/write)
167 *
168 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
169 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
170 *
171 */
172 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
173 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
174 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
175
176 /* Hypervisor memory for patch statistics */
177 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
178 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
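 /* Rough layout of the single hyper heap block allocated above, as implied by the pointer
  * arithmetic (sizes are the build-time constants used in the allocation):
  *    [PATCH_MEMORY_SIZE]      generated patch code
  *    [PAGE_SIZE]              spare page for oversized generated code
  *    [PATM_STACK_TOTAL_SIZE]  PATM stack (private stack + saved return addresses)
  *    [PAGE_SIZE]              GC state (PATMGCSTATE, asserted to fit in one page)
  *    [PATM_STAT_MEMSIZE]      patch statistics (pStatsHC/pStatsGC)
  */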
179
180 /* Memory for patch lookup trees. */
181 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
182 AssertRCReturn(rc, rc);
183 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
184
185#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
186 /* Check CFGM option. */
187 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
188 if (RT_FAILURE(rc))
189# ifdef PATM_DISABLE_ALL
190 pVM->fPATMEnabled = false;
191# else
192 pVM->fPATMEnabled = true;
193# endif
194#endif
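 /* On AMD64 hosts the enable flag is read from the CFGM key "PATMEnabled" here, falling
  * back to the PATM_DISABLE_ALL compile-time default when the key is absent; on 32-bit
  * hosts patmReinit() forces it instead (see the #ifndef RT_ARCH_AMD64 block there). */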
195
196 rc = patmReinit(pVM);
197 AssertRC(rc);
198 if (RT_FAILURE(rc))
199 return rc;
200
201 /*
202 * Register save and load state notifiers.
203 */
204 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
205 NULL, NULL, NULL,
206 NULL, patmR3Save, NULL,
207 NULL, patmR3Load, NULL);
208 AssertRCReturn(rc, rc);
209
210#ifdef VBOX_WITH_DEBUGGER
211 /*
212 * Debugger commands.
213 */
214 static bool s_fRegisteredCmds = false;
215 if (!s_fRegisteredCmds)
216 {
217 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
218 if (RT_SUCCESS(rc2))
219 s_fRegisteredCmds = true;
220 }
221#endif
222
223#ifdef VBOX_WITH_STATISTICS
224 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
225 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
226 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
227 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
228 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
229 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
230 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
231 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
232
233 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
234 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
235
236 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
237 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
238 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
239
240 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
241 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
242 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
243 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
244 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
247 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
248
249 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
250 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
251
252 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
253 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
254 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
255
256 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
257 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
258 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
259
260 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
261 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
262
263 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
264 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
265 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
266 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
269 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
270
271 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
272 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
273
274 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
275 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
276 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
277
278 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
279 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
280 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
281 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
282
283 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
284 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
285 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
286 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
287 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
290#endif /* VBOX_WITH_STATISTICS */
291
292 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
293 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
294 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
295 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
296 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
297 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
298 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
299 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
300
301 return rc;
302}
303
304/**
305 * Finalizes HMA page attributes.
306 *
307 * @returns VBox status code.
308 * @param pVM Pointer to the VM.
309 */
310VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
311{
312 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
313 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
314 if (RT_FAILURE(rc))
315 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
316
317 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
318 if (RT_FAILURE(rc))
319 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
320
321 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
322 if (RT_FAILURE(rc))
323 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
324
325 return rc;
326}
327
328/**
329 * (Re)initializes PATM
330 *
331 * @param pVM The VM.
332 */
333static int patmReinit(PVM pVM)
334{
335 int rc;
336
337 /*
338 * Assert alignment and sizes.
339 */
340 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
341 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
342
343 /*
344 * Setup any fixed pointers and offsets.
345 */
346 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
347
348#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
349#ifndef PATM_DISABLE_ALL
350 pVM->fPATMEnabled = true;
351#endif
352#endif
353
354 Assert(pVM->patm.s.pGCStateHC);
355 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
356 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
357
358 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
359 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
360
361 Assert(pVM->patm.s.pGCStackHC);
362 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
363 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
364 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
365 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
366
367 Assert(pVM->patm.s.pStatsHC);
368 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
369 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
370
371 Assert(pVM->patm.s.pPatchMemHC);
372 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
373 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
374 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
375
376 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
377 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
378
379 Assert(pVM->patm.s.PatchLookupTreeHC);
380 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
381
382 /*
383 * (Re)Initialize PATM structure
384 */
385 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
386 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
387 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
388 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
389 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
390 pVM->patm.s.pvFaultMonitor = 0;
391 pVM->patm.s.deltaReloc = 0;
392
393 /* Lowest and highest patched instruction */
394 pVM->patm.s.pPatchedInstrGCLowest = ~0;
395 pVM->patm.s.pPatchedInstrGCHighest = 0;
396
397 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
398 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
399 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
400
401 pVM->patm.s.pfnSysEnterPatchGC = 0;
402 pVM->patm.s.pfnSysEnterGC = 0;
403
404 pVM->patm.s.fOutOfMemory = false;
405
406 pVM->patm.s.pfnHelperCallGC = 0;
407
408 /* Generate all global functions to be used by future patches. */
409 /* We generate a fake patch in order to use the existing code for relocation. */
410 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
411 if (RT_FAILURE(rc))
412 {
413 Log(("Out of memory!!!!\n"));
414 return VERR_NO_MEMORY;
415 }
416 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
417 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
418 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
419
420 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
421 AssertRC(rc);
422
423 /* Update free pointer in patch memory. */
424 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
425 /* Round to next 8 byte boundary. */
426 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
427 return rc;
428}
429
430
431/**
432 * Applies relocations to data and code managed by this
433 * component. This function will be called at init and
434 * whenever the VMM needs to relocate itself inside the GC.
435 *
436 * The PATM will update the addresses used by the switcher.
437 *
438 * @param pVM The VM.
439 */
440VMMR3DECL(void) PATMR3Relocate(PVM pVM)
441{
442 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
443 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
444
445 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
446 if (delta)
447 {
448 PCPUMCTX pCtx;
449
450 /* Update CPUMCTX guest context pointer. */
451 pVM->patm.s.pCPUMCtxGC += delta;
452
453 pVM->patm.s.deltaReloc = delta;
454
455 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
456
457 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
458
459 /* If we are running patch code right now, then also adjust EIP. */
460 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
461 pCtx->eip += delta;
462
463 pVM->patm.s.pGCStateGC = GCPtrNew;
464 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
465
466 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
467
468 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
469
470 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
471
472 if (pVM->patm.s.pfnSysEnterPatchGC)
473 pVM->patm.s.pfnSysEnterPatchGC += delta;
474
475 /* Deal with the global patch functions. */
476 pVM->patm.s.pfnHelperCallGC += delta;
477 pVM->patm.s.pfnHelperRetGC += delta;
478 pVM->patm.s.pfnHelperIretGC += delta;
479 pVM->patm.s.pfnHelperJumpGC += delta;
480
481 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
482 }
483}
484
485
486/**
487 * Terminates the PATM.
488 *
489 * Termination means cleaning up and freeing all resources;
490 * the VM itself is at this point powered off or suspended.
491 *
492 * @returns VBox status code.
493 * @param pVM Pointer to the VM.
494 */
495VMMR3DECL(int) PATMR3Term(PVM pVM)
496{
497 /* Memory was all allocated from the two MM heaps and requires no freeing. */
498 NOREF(pVM);
499 return VINF_SUCCESS;
500}
501
502
503/**
504 * PATM reset callback.
505 *
506 * @returns VBox status code.
507 * @param pVM The VM which is reset.
508 */
509VMMR3DECL(int) PATMR3Reset(PVM pVM)
510{
511 Log(("PATMR3Reset\n"));
512
513 /* Free all patches. */
514 while (true)
515 {
516 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
517 if (pPatchRec)
518 {
519 PATMRemovePatch(pVM, pPatchRec, true);
520 }
521 else
522 break;
523 }
524 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
525 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
526 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
527 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
528
529 int rc = patmReinit(pVM);
530 if (RT_SUCCESS(rc))
531 rc = PATMR3InitFinalize(pVM); /* paranoia */
532
533 return rc;
534}
535
536/**
537 * @callback_method_impl{FNDISREADBYTES}
538 */
539static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
540{
541 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
542
543/** @todo change this to read more! */
544 /*
545 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
546 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
547 */
548 /** @todo could change in the future! */
549 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
550 {
551 size_t cbRead = cbMaxRead;
552 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
553 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
554 if (RT_SUCCESS(rc))
555 {
556 if (cbRead >= cbMinRead)
557 {
558 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
559 return VINF_SUCCESS;
560 }
561
562 cbMinRead -= (uint8_t)cbRead;
563 cbMaxRead -= (uint8_t)cbRead;
564 offInstr += (uint8_t)cbRead;
565 uSrcAddr += cbRead;
566 }
567
568#ifdef VBOX_STRICT
569 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
570 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
571 {
572 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
573 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
574 }
575#endif
576 }
577
578 int rc = VINF_SUCCESS;
579 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
580 if ( !pDisInfo->pbInstrHC
581 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
582 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
583 {
584 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
585 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
586 offInstr += cbMinRead;
587 }
588 else
589 {
590 /*
591 * pbInstrHC is the base address; adjust according to the GC pointer.
592 *
593 * Try to read the max number of bytes here. Since the disassembler only
594 * ever uses these bytes for the current instruction, it doesn't matter
595 * much if we accidentally read the start of the next instruction even
596 * if it happens to be a patch jump or int3.
597 */
598 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
599 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
600
601 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
602 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
603 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
604 if (cbToRead > cbMaxRead)
605 cbToRead = cbMaxRead;
606
607 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
608 offInstr += (uint8_t)cbToRead;
609 }
610
611 pDis->cbCachedInstr = offInstr;
612 return rc;
613}
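/* Reader summary: with PATMREAD_ORGCODE the callback first tries PATMR3ReadOrgInstr() so
 * that any patch jump bytes at the address are bypassed; whatever is still missing is then
 * read either through PGMPhysSimpleReadGCPtr() or, when a cached HC mapping of the page is
 * available in the PATMDISASM context, copied straight from pbInstrHC. */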
614
615
616DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
617 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
618{
619 PATMDISASM disinfo;
620 disinfo.pVM = pVM;
621 disinfo.pPatchInfo = pPatch;
622 disinfo.pbInstrHC = pbInstrHC;
623 disinfo.pInstrGC = InstrGCPtr32;
624 disinfo.fReadFlags = fReadFlags;
625 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
626 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
627 patmReadBytes, &disinfo,
628 pCpu, pcbInstr, pszOutput, cbOutput));
629}
630
631
632DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
633 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
634{
635 PATMDISASM disinfo;
636 disinfo.pVM = pVM;
637 disinfo.pPatchInfo = pPatch;
638 disinfo.pbInstrHC = pbInstrHC;
639 disinfo.pInstrGC = InstrGCPtr32;
640 disinfo.fReadFlags = fReadFlags;
641 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
642 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
643 patmReadBytes, &disinfo,
644 pCpu, pcbInstr));
645}
646
647
648DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
649 uint32_t fReadFlags,
650 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
651{
652 PATMDISASM disinfo;
653 disinfo.pVM = pVM;
654 disinfo.pPatchInfo = pPatch;
655 disinfo.pbInstrHC = pbInstrHC;
656 disinfo.pInstrGC = InstrGCPtr32;
657 disinfo.fReadFlags = fReadFlags;
658 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
659 pCpu, pcbInstr));
660}
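/* The three wrappers above differ only in output and operand-size handling:
 * patmR3DisInstrToStr() also formats the instruction into a string, patmR3DisInstr()
 * derives the CPU mode from PATMFL_CODE32, and patmR3DisInstrNoStrOpMode() uses the
 * operand mode recorded in pPatch->uOpMode. All of them hand patmReadBytes() the
 * PATMDISASM context, so the PATMREAD_* flags decide where the bytes come from. */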
661
662#ifdef LOG_ENABLED
663# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
664 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
665# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
666 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
667
668# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
669 do { \
670 if (LogIsEnabled()) \
671 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
672 } while (0)
673
674static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
675 const char *pszComment1, const char *pszComment2)
676{
677 DISCPUSTATE DisState;
678 char szOutput[128];
679 szOutput[0] = '\0';
680 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
681 &DisState, NULL, szOutput, sizeof(szOutput));
682 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
683}
684
685#else
686# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
687# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
688# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
689#endif
690
691
692/**
693 * Callback function for RTAvloU32DoWithAll
694 *
695 * Updates all fixups in the patches
696 *
697 * @returns VBox status code.
698 * @param pNode Current node
699 * @param pParam Pointer to the VM.
700 */
701static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
702{
703 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
704 PVM pVM = (PVM)pParam;
705 RTRCINTPTR delta;
706 int rc;
707
708 /* Nothing to do if the patch is not active. */
709 if (pPatch->patch.uState == PATCH_REFUSED)
710 return 0;
711
712 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
713 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
714
715 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
716 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
717
718 /*
719 * Apply fixups
720 */
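 /* Three fixup kinds are handled below: FIXUP_ABSOLUTE adjusts an absolute address in
  * place (re-reading and re-writing the guest instruction when the fixup points into
  * guest code), FIXUP_REL_JMPTOPATCH rewrites the relative jump the guest executes to
  * enter the patch, and FIXUP_REL_JMPTOGUEST adjusts a patch-to-guest relative branch. */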
721 PRELOCREC pRec = 0;
722 AVLPVKEY key = 0;
723
724 while (true)
725 {
726 /* Get the record that's closest from above */
727 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
728 if (pRec == 0)
729 break;
730
731 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
732
733 switch (pRec->uType)
734 {
735 case FIXUP_ABSOLUTE:
736 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
737 if ( !pRec->pSource
738 || PATMIsPatchGCAddr(pVM, pRec->pSource))
739 {
740 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
741 }
742 else
743 {
744 uint8_t curInstr[15];
745 uint8_t oldInstr[15];
746 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
747
748 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
749
750 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
751 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
752
753 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
754 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
755
756 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
757
758 if ( rc == VERR_PAGE_NOT_PRESENT
759 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
760 {
761 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
762
763 Log(("PATM: Patch page not present -> check later!\n"));
764 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
765 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
766 }
767 else
768 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
769 {
770 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
771 /*
772 * Disable patch; this is not a good solution
773 */
774 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
775 pPatch->patch.uState = PATCH_DISABLED;
776 }
777 else
778 if (RT_SUCCESS(rc))
779 {
780 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
781 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
782 AssertRC(rc);
783 }
784 }
785 break;
786
787 case FIXUP_REL_JMPTOPATCH:
788 {
789 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
790
791 if ( pPatch->patch.uState == PATCH_ENABLED
792 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
793 {
794 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
795 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
796 RTRCPTR pJumpOffGC;
797 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
798 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
799
800#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
801 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
802#else
803 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
804#endif
805
806 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
807#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
808 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
809 {
810 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
811
812 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
813 oldJump[0] = pPatch->patch.aPrivInstr[0];
814 oldJump[1] = pPatch->patch.aPrivInstr[1];
815 *(RTRCUINTPTR *)&oldJump[2] = displOld;
816 }
817 else
818#endif
819 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
820 {
821 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
822 oldJump[0] = 0xE9;
823 *(RTRCUINTPTR *)&oldJump[1] = displOld;
824 }
825 else
826 {
827 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
828 continue; //this should never happen!!
829 }
830 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
831
832 /*
833 * Read old patch jump and compare it to the one we previously installed
834 */
835 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
836 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
837
838 if ( rc == VERR_PAGE_NOT_PRESENT
839 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
840 {
841 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
842
843 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
844 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
845 }
846 else
847 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
848 {
849 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
850 /*
851 * Disable patch; this is not a good solution
852 */
853 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
854 pPatch->patch.uState = PATCH_DISABLED;
855 }
856 else
857 if (RT_SUCCESS(rc))
858 {
859 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
860 AssertRC(rc);
861 }
862 else
863 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
864 }
865 else
866 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
867
868 pRec->pDest = pTarget;
869 break;
870 }
871
872 case FIXUP_REL_JMPTOGUEST:
873 {
874 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
875 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
876
877 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
878 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
879 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
880 pRec->pSource = pSource;
881 break;
882 }
883
884 default:
885 AssertMsg(0, ("Invalid fixup type!!\n"));
886 return VERR_INVALID_PARAMETER;
887 }
888 }
889
890 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
891 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
892 return 0;
893}
894
895/**
896 * \#PF Handler callback for virtual access handler ranges.
897 *
898 * Important to realize that a physical page in a range can have aliases, and
899 * for ALL and WRITE handlers these will also trigger.
900 *
901 * @returns VINF_SUCCESS if the handler has carried out the operation.
902 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
903 * @param pVM Pointer to the VM.
904 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
905 * @param pvPtr The HC mapping of that address.
906 * @param pvBuf What the guest is reading/writing.
907 * @param cbBuf How much it's reading/writing.
908 * @param enmAccessType The access type.
909 * @param pvUser User argument.
910 */
911DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
912 PGMACCESSTYPE enmAccessType, void *pvUser)
913{
914 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
915 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
916
917 /** @todo could be the wrong virtual address (alias) */
918 pVM->patm.s.pvFaultMonitor = GCPtr;
919 PATMR3HandleMonitoredPage(pVM);
920 return VINF_PGM_HANDLER_DO_DEFAULT;
921}
922
923
924#ifdef VBOX_WITH_DEBUGGER
925/**
926 * Callback function for RTAvloU32DoWithAll
927 *
928 * Enables the patch that's being enumerated
929 *
930 * @returns 0 (continue enumeration).
931 * @param pNode Current node
932 * @param pVM Pointer to the VM.
933 */
934static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
935{
936 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
937
938 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
939 return 0;
940}
941#endif /* VBOX_WITH_DEBUGGER */
942
943
944#ifdef VBOX_WITH_DEBUGGER
945/**
946 * Callback function for RTAvloU32DoWithAll
947 *
948 * Disables the patch that's being enumerated
949 *
950 * @returns 0 (continue enumeration).
951 * @param pNode Current node
952 * @param pVM Pointer to the VM.
953 */
954static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
955{
956 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
957
958 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
959 return 0;
960}
961#endif
962
963/**
964 * Returns the host context pointer and size of the patch memory block
965 *
966 * @returns Host context pointer to the patch memory block.
967 * @param pVM Pointer to the VM.
968 * @param pcb Size of the patch memory block
969 */
970VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
971{
972 if (pcb)
973 *pcb = pVM->patm.s.cbPatchMem;
974
975 return pVM->patm.s.pPatchMemHC;
976}
977
978
979/**
980 * Returns the guest context pointer and size of the patch memory block
981 *
982 * @returns Guest context pointer to the patch memory block.
983 * @param pVM Pointer to the VM.
984 * @param pcb Size of the patch memory block
985 */
986VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
987{
988 if (pcb)
989 *pcb = pVM->patm.s.cbPatchMem;
990
991 return pVM->patm.s.pPatchMemGC;
992}
993
994
995/**
996 * Returns the host context pointer of the GC context structure
997 *
998 * @returns Host context pointer to the GC state structure.
999 * @param pVM Pointer to the VM.
1000 */
1001VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1002{
1003 return pVM->patm.s.pGCStateHC;
1004}
1005
1006
1007/**
1008 * Checks whether the HC address is part of our patch region
1009 *
1010 * @returns true if the address lies within the patch memory block, false otherwise.
1011 * @param pVM Pointer to the VM.
1012 * @param pAddrHC Host context address
1013 */
1014VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
1015{
1016 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
1017}
1018
1019
1020/**
1021 * Allows or disallows patching of privileged instructions executed by the guest OS.
1022 *
1023 * @returns VBox status code.
1024 * @param pVM Pointer to the VM.
1025 * @param fAllowPatching Allow/disallow patching
1026 */
1027VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
1028{
1029 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
1030 return VINF_SUCCESS;
1031}
1032
1033/**
1034 * Convert a GC patch block pointer to a HC patch pointer
1035 *
1036 * @returns HC pointer or NULL if it's not a GC patch pointer
1037 * @param pVM Pointer to the VM.
1038 * @param pAddrGC GC pointer
1039 */
1040VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1041{
1042 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1043 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1044 else
1045 return NULL;
1046}
1047
1048/**
1049 * Query PATM state (enabled/disabled)
1050 *
1051 * @returns 0 - disabled, 1 - enabled
1052 * @param pVM Pointer to the VM.
1053 */
1054VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
1055{
1056 return pVM->fPATMEnabled;
1057}
1058
1059
1060/**
1061 * Convert guest context address to host context pointer
1062 *
1063 * @returns Host context pointer or NULL in case of an error.
1064 * @param pVM Pointer to the VM.
1065 * @param pCacheRec Address conversion cache record
1066 * @param pGCPtr Guest context pointer
1067 *
1068 * (Failures from the PGM mapping call are not propagated; the function simply returns NULL.)
1069 *
1070 */
1071R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1072{
1073 int rc;
1074 R3PTRTYPE(uint8_t *) pHCPtr;
1075 uint32_t offset;
1076
1077 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1078 {
1079 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1080 Assert(pPatch);
1081 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1082 }
1083
1084 offset = pGCPtr & PAGE_OFFSET_MASK;
1085 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1086 return pCacheRec->pPageLocStartHC + offset;
1087
1088 /* Release previous lock if any. */
1089 if (pCacheRec->Lock.pvMap)
1090 {
1091 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1092 pCacheRec->Lock.pvMap = NULL;
1093 }
1094
1095 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1096 if (rc != VINF_SUCCESS)
1097 {
1098 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1099 return NULL;
1100 }
1101 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1102 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1103 return pHCPtr;
1104}
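/* PATMGCVirtToHCVirt caches the last mapped guest page in the supplied PATMP2GLOOKUPREC:
 * repeated lookups within the same page reuse pPageLocStartHC, and the previous PGM
 * mapping lock is released before a new page is mapped. The final lock is presumably
 * released by the caller once it is done with the cache record (not shown in this
 * excerpt). */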
1105
1106
1107/* Calculates and fills in all branch targets
1108 *
1109 * @returns VBox status code.
1110 * @param pVM Pointer to the VM.
1111 * @param pPatch Current patch block pointer
1112 *
1113 */
1114static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1115{
1116 int32_t displ;
1117
1118 PJUMPREC pRec = 0;
1119 unsigned nrJumpRecs = 0;
1120
1121 /*
1122 * Set all branch targets inside the patch block.
1123 * We remove all jump records as they are no longer needed afterwards.
1124 */
1125 while (true)
1126 {
1127 RCPTRTYPE(uint8_t *) pInstrGC;
1128 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1129
1130 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1131 if (pRec == 0)
1132 break;
1133
1134 nrJumpRecs++;
1135
1136 /* HC in patch block to GC in patch block. */
1137 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1138
1139 if (pRec->opcode == OP_CALL)
1140 {
1141 /* Special case: call function replacement patch from this patch block.
1142 */
1143 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1144 if (!pFunctionRec)
1145 {
1146 int rc;
1147
1148 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1149 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1150 else
1151 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1152
1153 if (RT_FAILURE(rc))
1154 {
1155 uint8_t *pPatchHC;
1156 RTRCPTR pPatchGC;
1157 RTRCPTR pOrgInstrGC;
1158
1159 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1160 Assert(pOrgInstrGC);
1161
1162 /* Failure for some reason -> mark exit point with int 3. */
1163 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1164
1165 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1166 Assert(pPatchGC);
1167
1168 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1169
1170 /* Set a breakpoint at the very beginning of the recompiled instruction */
1171 *pPatchHC = 0xCC;
1172
1173 continue;
1174 }
1175 }
1176 else
1177 {
1178 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1179 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1180 }
1181
1182 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1183 }
1184 else
1185 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1186
1187 if (pBranchTargetGC == 0)
1188 {
1189 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1190 return VERR_PATCHING_REFUSED;
1191 }
1192 /* Our jumps *always* have a dword displacement (to make things easier). */
1193 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
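 /* rel32 displacements are encoded relative to the first byte after the 4-byte
  * displacement field, hence the "+ offDispl + sizeof(RTRCPTR)" term below. */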
1194 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1195 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1196 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1197 }
1198 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1199 Assert(pPatch->JumpTree == 0);
1200 return VINF_SUCCESS;
1201}
1202
1203/* Add an illegal instruction record
1204 *
1205 * @param pVM Pointer to the VM.
1206 * @param pPatch Patch structure ptr
1207 * @param pInstrGC Guest context pointer to privileged instruction
1208 *
1209 */
1210static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1211{
1212 PAVLPVNODECORE pRec;
1213
1214 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1215 Assert(pRec);
1216 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1217
1218 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1219 Assert(ret); NOREF(ret);
1220 pPatch->pTempInfo->nrIllegalInstr++;
1221}
1222
1223static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1224{
1225 PAVLPVNODECORE pRec;
1226
1227 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1228 if (pRec)
1229 return true;
1230 else
1231 return false;
1232}
1233
1234/**
1235 * Add a patch to guest lookup record
1236 *
1237 * @param pVM Pointer to the VM.
1238 * @param pPatch Patch structure ptr
1239 * @param pPatchInstrHC Host context pointer into the patch block
1240 * @param pInstrGC Guest context pointer to privileged instruction
1241 * @param enmType Lookup type
1242 * @param fDirty Dirty flag
1243 *
1244 */
1245 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1246void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1247{
1248 bool ret;
1249 PRECPATCHTOGUEST pPatchToGuestRec;
1250 PRECGUESTTOPATCH pGuestToPatchRec;
1251 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1252
1253 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1254 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1255
1256 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1257 {
1258 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1259 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1260 return; /* already there */
1261
1262 Assert(!pPatchToGuestRec);
1263 }
1264#ifdef VBOX_STRICT
1265 else
1266 {
1267 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1268 Assert(!pPatchToGuestRec);
1269 }
1270#endif
1271
1272 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
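 /* Both lookup records are allocated as a single heap block: the guest-to-patch record
  * sits right behind the patch-to-guest one (addressed as pPatchToGuestRec + 1 below),
  * so one MMR3HeapFree() in patmr3RemoveP2GLookupRecord() releases them together. */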
1273 Assert(pPatchToGuestRec);
1274 pPatchToGuestRec->Core.Key = PatchOffset;
1275 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1276 pPatchToGuestRec->enmType = enmType;
1277 pPatchToGuestRec->fDirty = fDirty;
1278
1279 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1280 Assert(ret);
1281
1282 /* GC to patch address */
1283 if (enmType == PATM_LOOKUP_BOTHDIR)
1284 {
1285 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1286 if (!pGuestToPatchRec)
1287 {
1288 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1289 pGuestToPatchRec->Core.Key = pInstrGC;
1290 pGuestToPatchRec->PatchOffset = PatchOffset;
1291
1292 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1293 Assert(ret);
1294 }
1295 }
1296
1297 pPatch->nrPatch2GuestRecs++;
1298}
1299
1300
1301/**
1302 * Removes a patch to guest lookup record
1303 *
1304 * @param pVM Pointer to the VM.
1305 * @param pPatch Patch structure ptr
1306 * @param pPatchInstrGC Guest context pointer to patch block
1307 */
1308void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1309{
1310 PAVLU32NODECORE pNode;
1311 PAVLU32NODECORE pNode2;
1312 PRECPATCHTOGUEST pPatchToGuestRec;
1313 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1314
1315 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1316 Assert(pPatchToGuestRec);
1317 if (pPatchToGuestRec)
1318 {
1319 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1320 {
1321 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1322
1323 Assert(pGuestToPatchRec->Core.Key);
1324 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1325 Assert(pNode2);
1326 }
1327 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1328 Assert(pNode);
1329
1330 MMR3HeapFree(pPatchToGuestRec);
1331 pPatch->nrPatch2GuestRecs--;
1332 }
1333}
1334
1335
1336/**
1337 * RTAvlPVDestroy callback.
1338 */
1339static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1340{
1341 MMR3HeapFree(pNode);
1342 return 0;
1343}
1344
1345/**
1346 * Empty the specified tree (PV tree, MMR3 heap)
1347 *
1348 * @param pVM Pointer to the VM.
1349 * @param ppTree Tree to empty
1350 */
1351void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1352{
1353 NOREF(pVM);
1354 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1355}
1356
1357
1358/**
1359 * RTAvlU32Destroy callback.
1360 */
1361static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1362{
1363 MMR3HeapFree(pNode);
1364 return 0;
1365}
1366
1367/**
1368 * Empty the specified tree (U32 tree, MMR3 heap)
1369 *
1370 * @param pVM Pointer to the VM.
1371 * @param ppTree Tree to empty
1372 */
1373void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1374{
1375 NOREF(pVM);
1376 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1377}
1378
1379
1380/**
1381 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1382 *
1383 * @returns VBox status code.
1384 * @param pVM Pointer to the VM.
1385 * @param pCpu CPU disassembly state
1386 * @param pInstrGC Guest context pointer to privileged instruction
1387 * @param pCurInstrGC Guest context pointer to the current instruction
1388 * @param pCacheRec Cache record ptr
1389 *
1390 */
1391static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1392{
1393 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1394 bool fIllegalInstr = false;
1395
1396 /*
1397 * Preliminary heuristics:
1398 * - no call instructions without a fixed displacement between cli and sti/popf
1399 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1400 * - no nested pushf/cli
1401 * - sti/popf should be the (eventual) target of all branches
1402 * - no near or far returns; no int xx, no into
1403 *
1404 * Note: Later on we can impose less strict guidelines if the need arises
1405 */
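 /* Purely illustrative example (not taken from any guest): a block such as
  *      cli
  *      mov  eax, [ebx]          ; ordinary instructions are fine
  *      cmp  eax, 1
  *      jne  .done               ; branch target stays inside the block
  *      ...
  *   .done:
  *      sti                      ; exit point
  * passes these heuristics, while a far call/jump, retf, int xx/into, or (for pushf
  * patches) a nested pushf would be recorded as an illegal instruction below. */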
1406
1407 /* Bail out if the patch gets too big. */
1408 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1409 {
1410 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1411 fIllegalInstr = true;
1412 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1413 }
1414 else
1415 {
1416 /* No unconditional jumps or calls without fixed displacements. */
1417 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1418 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1419 )
1420 {
1421 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1422 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1423 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1424 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1425 )
1426 {
1427 fIllegalInstr = true;
1428 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1429 }
1430 }
1431
1432 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1433 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1434 {
1435 if ( pCurInstrGC > pPatch->pPrivInstrGC
1436 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1437 {
1438 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1439 /* We turn this one into an int 3 callable patch. */
1440 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1441 }
1442 }
1443 else
1444 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1445 if (pPatch->opcode == OP_PUSHF)
1446 {
1447 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1448 {
1449 fIllegalInstr = true;
1450 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1451 }
1452 }
1453
1454 /* no far returns */
1455 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1456 {
1457 pPatch->pTempInfo->nrRetInstr++;
1458 fIllegalInstr = true;
1459 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1460 }
1461 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1462 || pCpu->pCurInstr->uOpcode == OP_INT
1463 || pCpu->pCurInstr->uOpcode == OP_INTO)
1464 {
1465 /* No int xx or into either. */
1466 fIllegalInstr = true;
1467 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1468 }
1469 }
1470
1471 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1472
1473 /* Illegal instruction -> end of analysis phase for this code block */
1474 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1475 return VINF_SUCCESS;
1476
1477 /* Check for exit points. */
1478 switch (pCpu->pCurInstr->uOpcode)
1479 {
1480 case OP_SYSEXIT:
1481 return VINF_SUCCESS; /* duplicate it; it will either fault or be emulated in GC. */
1482
1483 case OP_SYSENTER:
1484 case OP_ILLUD2:
1485 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing any further. */
1486 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1487 return VINF_SUCCESS;
1488
1489 case OP_STI:
1490 case OP_POPF:
1491 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1492 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1493 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1494 {
1495 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1496 return VERR_PATCHING_REFUSED;
1497 }
1498 if (pPatch->opcode == OP_PUSHF)
1499 {
1500 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1501 {
1502 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1503 return VINF_SUCCESS;
1504
1505 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1506 Log(("WARNING: End of block reached, but we need to duplicate some extra instructions to avoid a conflict with the patch jump\n"));
1507 pPatch->flags |= PATMFL_CHECK_SIZE;
1508 }
1509 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1510 }
1511 /* else: fall through. */
1512 case OP_RETN: /* exit point for function replacement */
1513 return VINF_SUCCESS;
1514
1515 case OP_IRET:
1516 return VINF_SUCCESS; /* exitpoint */
1517
1518 case OP_CPUID:
1519 case OP_CALL:
1520 case OP_JMP:
1521 break;
1522
1523 default:
1524 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1525 {
1526 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1527 return VINF_SUCCESS; /* exit point */
1528 }
1529 break;
1530 }
1531
1532 /* Single instruction patch: stop once we've copied enough instructions *and* the current instruction is not a relative jump. */
1533 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1534 {
1535 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1536 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1537 return VINF_SUCCESS;
1538 }
1539
1540 return VWRN_CONTINUE_ANALYSIS;
1541}
1542
1543/**
1544 * Analyses the instructions inside a function for compliance
1545 *
1546 * @returns VBox status code.
1547 * @param pVM Pointer to the VM.
1548 * @param pCpu CPU disassembly state
1549 * @param pInstrGC Guest context pointer to privileged instruction
1550 * @param pCurInstrGC Guest context pointer to the current instruction
1551 * @param pCacheRec Cache record ptr
1552 *
1553 */
1554static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1555{
1556 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1557 bool fIllegalInstr = false;
1558 NOREF(pInstrGC);
1559
1560 // Preliminary heuristics:
1561 // - no call instructions
1562 // - ret ends a block
1563
1564 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1565
1566 // bail out if the patch gets too big
1567 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1568 {
1569 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1570 fIllegalInstr = true;
1571 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1572 }
1573 else
1574 {
1575 // no unconditional jumps or calls without fixed displacements
1576 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1577 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1578 )
1579 {
1580 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1581 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1582 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1583 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1584 )
1585 {
1586 fIllegalInstr = true;
1587 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1588 }
1589 }
1590 else /* no far returns */
1591 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1592 {
1593 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1594 fIllegalInstr = true;
1595 }
1596 else /* no int xx or into either */
1597 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1598 {
1599 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1600 fIllegalInstr = true;
1601 }
1602
1603 #if 0
1604 ///@todo we can handle certain in/out and privileged instructions in the guest context
1605 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1606 {
1607 Log(("Illegal instructions for function patch!!\n"));
1608 return VERR_PATCHING_REFUSED;
1609 }
1610 #endif
1611 }
1612
1613 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1614
1615 /* Illegal instruction -> end of analysis phase for this code block */
1616 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1617 {
1618 return VINF_SUCCESS;
1619 }
1620
1621 // Check for exit points
1622 switch (pCpu->pCurInstr->uOpcode)
1623 {
1624 case OP_ILLUD2:
1625 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing any further
1626 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1627 return VINF_SUCCESS;
1628
1629 case OP_IRET:
1630 case OP_SYSEXIT: /* will fault or emulated in GC */
1631 case OP_RETN:
1632 return VINF_SUCCESS;
1633
1634 case OP_POPF:
1635 case OP_STI:
1636 return VWRN_CONTINUE_ANALYSIS;
1637 default:
1638 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1639 {
1640 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1641 return VINF_SUCCESS; /* exit point */
1642 }
1643 return VWRN_CONTINUE_ANALYSIS;
1644 }
1645
1646 return VWRN_CONTINUE_ANALYSIS;
1647}
1648
1649/**
1650 * Recompiles the instructions in a code block
1651 *
1652 * @returns VBox status code.
1653 * @param pVM Pointer to the VM.
1654 * @param pCpu CPU disassembly state
1655 * @param pInstrGC Guest context pointer to privileged instruction
1656 * @param pCurInstrGC Guest context pointer to the current instruction
1657 * @param pCacheRec Cache record ptr
1658 *
1659 */
1660static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1661{
1662 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1663 int rc = VINF_SUCCESS;
1664 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1665
1666 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1667
1668 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1669 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1670 {
1671 /*
1672 * Been there, done that; so insert a jump (we don't want to duplicate code)
1673 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1674 */
1675 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1676 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1677 }
1678
1679 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1680 {
1681 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1682 }
1683 else
1684 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1685
1686 if (RT_FAILURE(rc))
1687 return rc;
1688
1689 /* Note: Never do a direct return unless a failure is encountered! */
1690
1691 /* Clear recompilation of next instruction flag; we are doing that right here. */
1692 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1693 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1694
1695 /* Add lookup record for patch to guest address translation */
1696 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1697
1698 /* Update lowest and highest instruction address for this patch */
1699 if (pCurInstrGC < pPatch->pInstrGCLowest)
1700 pPatch->pInstrGCLowest = pCurInstrGC;
1701 else
1702 if (pCurInstrGC > pPatch->pInstrGCHighest)
1703 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1704
1705 /* Illegal instruction -> end of recompile phase for this code block. */
1706 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1707 {
1708 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1709 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1710 goto end;
1711 }
1712
1713 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1714 * Indirect calls are handled below.
1715 */
1716 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1717 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1718 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1719 {
1720 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1721 if (pTargetGC == 0)
1722 {
1723 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1724 return VERR_PATCHING_REFUSED;
1725 }
1726
1727 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1728 {
1729 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1730 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1731 if (RT_FAILURE(rc))
1732 goto end;
1733 }
1734 else
1735 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1736
1737 if (RT_SUCCESS(rc))
1738 rc = VWRN_CONTINUE_RECOMPILE;
1739
1740 goto end;
1741 }
1742
1743 switch (pCpu->pCurInstr->uOpcode)
1744 {
1745 case OP_CLI:
1746 {
1747 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1748 * until we've found the proper exit point(s).
1749 */
1750 if ( pCurInstrGC != pInstrGC
1751 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1752 )
1753 {
1754 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1755 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1756 }
1757 /* Set by irq inhibition; no longer valid now. */
1758 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1759
1760 rc = patmPatchGenCli(pVM, pPatch);
1761 if (RT_SUCCESS(rc))
1762 rc = VWRN_CONTINUE_RECOMPILE;
1763 break;
1764 }
1765
1766 case OP_MOV:
1767 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1768 {
1769 /* mov ss, src? */
1770 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1771 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1772 {
1773 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1774 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1775 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1776 }
1777#if 0 /* necessary for Haiku */
1778 else
1779 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1780 && (pCpu->Param2.Base.idxSegReg == DISSELREG_SS)
1781 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1782 {
1783 /* mov GPR, ss */
1784 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1785 if (RT_SUCCESS(rc))
1786 rc = VWRN_CONTINUE_RECOMPILE;
1787 break;
1788 }
1789#endif
1790 }
1791 goto duplicate_instr;
1792
1793 case OP_POP:
1794 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1795 {
1796 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1797
1798 Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1799 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1800 }
1801 goto duplicate_instr;
1802
1803 case OP_STI:
1804 {
1805 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1806
1807 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1808 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1809 {
1810 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1811 fInhibitIRQInstr = true;
1812 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1813 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1814 }
1815 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1816
1817 if (RT_SUCCESS(rc))
1818 {
1819 DISCPUSTATE cpu = *pCpu;
1820 unsigned cbInstr;
1821 int disret;
1822 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1823
1824 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1825
1826 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1827 { /* Force pNextInstrHC out of scope after using it */
1828 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1829 if (pNextInstrHC == NULL)
1830 {
1831 AssertFailed();
1832 return VERR_PATCHING_REFUSED;
1833 }
1834
1835 // Disassemble the next instruction
1836 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1837 }
1838 if (disret == false)
1839 {
1840 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1841 return VERR_PATCHING_REFUSED;
1842 }
1843 pReturnInstrGC = pNextInstrGC + cbInstr;
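 /* Note: pReturnInstrGC is where the guest would continue after the 'sti; <instr>' pair. Unless this
 * is a function duplication patch, that point must lie before the patch start or at/past the end of
 * the 5-byte patch jump; otherwise we would return into the bytes we overwrite and the patch is
 * refused below ("sti occurred too soon"). */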
1844
1845 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1846 || pReturnInstrGC <= pInstrGC
1847 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1848 )
1849 {
1850 /* Not an exit point for function duplication patches */
1851 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1852 && RT_SUCCESS(rc))
1853 {
1854 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1855 rc = VWRN_CONTINUE_RECOMPILE;
1856 }
1857 else
1858 rc = VINF_SUCCESS; //exit point
1859 }
1860 else {
1861 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1862 rc = VERR_PATCHING_REFUSED; //not allowed!!
1863 }
1864 }
1865 break;
1866 }
1867
1868 case OP_POPF:
1869 {
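 /* A jump back to the guest is only safe if the instruction following the popf lies at or beyond
 * the end of the 5-byte patch jump; otherwise we would land in the bytes we overwrote. */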
1870 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1871
1872 /* Not an exit point for IDT handler or function replacement patches */
1873 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1874 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1875 fGenerateJmpBack = false;
1876
1877 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1878 if (RT_SUCCESS(rc))
1879 {
1880 if (fGenerateJmpBack == false)
1881 {
1882 /* Not an exit point for IDT handler or function replacement patches */
1883 rc = VWRN_CONTINUE_RECOMPILE;
1884 }
1885 else
1886 {
1887 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1888 rc = VINF_SUCCESS; /* exit point! */
1889 }
1890 }
1891 break;
1892 }
1893
1894 case OP_PUSHF:
1895 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1896 if (RT_SUCCESS(rc))
1897 rc = VWRN_CONTINUE_RECOMPILE;
1898 break;
1899
1900 case OP_PUSH:
1901 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1902 {
1903 rc = patmPatchGenPushCS(pVM, pPatch);
1904 if (RT_SUCCESS(rc))
1905 rc = VWRN_CONTINUE_RECOMPILE;
1906 break;
1907 }
1908 goto duplicate_instr;
1909
1910 case OP_IRET:
1911 Log(("IRET at %RRv\n", pCurInstrGC));
1912 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1913 if (RT_SUCCESS(rc))
1914 {
1915 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1916 rc = VINF_SUCCESS; /* exit point by definition */
1917 }
1918 break;
1919
1920 case OP_ILLUD2:
1921 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1922 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1923 if (RT_SUCCESS(rc))
1924 rc = VINF_SUCCESS; /* exit point by definition */
1925 Log(("Illegal opcode (0xf 0xb)\n"));
1926 break;
1927
1928 case OP_CPUID:
1929 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1930 if (RT_SUCCESS(rc))
1931 rc = VWRN_CONTINUE_RECOMPILE;
1932 break;
1933
1934 case OP_STR:
1935 case OP_SLDT:
1936 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1937 if (RT_SUCCESS(rc))
1938 rc = VWRN_CONTINUE_RECOMPILE;
1939 break;
1940
1941 case OP_SGDT:
1942 case OP_SIDT:
1943 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1944 if (RT_SUCCESS(rc))
1945 rc = VWRN_CONTINUE_RECOMPILE;
1946 break;
1947
1948 case OP_RETN:
1949 /* retn is an exit point for function patches */
1950 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1951 if (RT_SUCCESS(rc))
1952 rc = VINF_SUCCESS; /* exit point by definition */
1953 break;
1954
1955 case OP_SYSEXIT:
1956 /* Duplicate it, so it can be emulated in GC (or fault). */
1957 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1958 if (RT_SUCCESS(rc))
1959 rc = VINF_SUCCESS; /* exit point by definition */
1960 break;
1961
1962 case OP_CALL:
1963 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1964 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1965 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1966 */
1967 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
1968 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
1969 {
1970 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1971 if (RT_SUCCESS(rc))
1972 {
1973 rc = VWRN_CONTINUE_RECOMPILE;
1974 }
1975 break;
1976 }
1977 goto gen_illegal_instr;
1978
1979 case OP_JMP:
1980 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1981 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1982 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1983 */
1984 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
1985 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
1986 {
1987 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1988 if (RT_SUCCESS(rc))
1989 rc = VINF_SUCCESS; /* end of branch */
1990 break;
1991 }
1992 goto gen_illegal_instr;
1993
1994 case OP_INT3:
1995 case OP_INT:
1996 case OP_INTO:
1997 goto gen_illegal_instr;
1998
1999 case OP_MOV_DR:
2000 /* Note: currently we let DRx writes cause a trap 0d; our trap handler will decide whether to interpret it or not. */
2001 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2002 {
2003 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2004 if (RT_SUCCESS(rc))
2005 rc = VWRN_CONTINUE_RECOMPILE;
2006 break;
2007 }
2008 goto duplicate_instr;
2009
2010 case OP_MOV_CR:
2011 /* Note: currently we let CRx writes cause a trap 0d; our trap handler will decide whether to interpret it or not. */
2012 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2013 {
2014 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2015 if (RT_SUCCESS(rc))
2016 rc = VWRN_CONTINUE_RECOMPILE;
2017 break;
2018 }
2019 goto duplicate_instr;
2020
2021 default:
2022 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2023 {
2024gen_illegal_instr:
2025 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2026 if (RT_SUCCESS(rc))
2027 rc = VINF_SUCCESS; /* exit point by definition */
2028 }
2029 else
2030 {
2031duplicate_instr:
2032 Log(("patmPatchGenDuplicate\n"));
2033 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2034 if (RT_SUCCESS(rc))
2035 rc = VWRN_CONTINUE_RECOMPILE;
2036 }
2037 break;
2038 }
2039
2040end:
2041
2042 if ( !fInhibitIRQInstr
2043 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2044 {
2045 int rc2;
2046 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2047
2048 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2049 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2050 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2051 {
2052 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2053
2054 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2055 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2056 rc = VINF_SUCCESS; /* end of the line */
2057 }
2058 else
2059 {
2060 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2061 }
2062 if (RT_FAILURE(rc2))
2063 rc = rc2;
2064 }
2065
2066 if (RT_SUCCESS(rc))
2067 {
2068 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2069 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2070 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2071 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2072 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2073 )
2074 {
2075 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2076
2077 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2078 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2079
2080 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2081 AssertRC(rc);
2082 }
2083 }
2084 return rc;
2085}
2086
2087
2088#ifdef LOG_ENABLED
2089
2090/** Adds a disasm jump record (temporary, to prevent duplicate analysis of the same jump target).
2091 *
2092 * @param pVM Pointer to the VM.
2093 * @param pPatch Patch structure ptr
2094 * @param pInstrGC Guest context pointer to the jump target
2095 *
2096 */
2097static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2098{
2099 PAVLPVNODECORE pRec;
2100
2101 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2102 Assert(pRec);
2103 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2104
2105 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2106 Assert(ret);
2107}
2108
2109/**
2110 * Checks if jump target has been analysed before.
2111 *
2112 * @returns true if the jump target has been analysed before, false otherwise.
2113 * @param pPatch Patch struct
2114 * @param pInstrGC Jump target
2115 *
2116 */
2117static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2118{
2119 PAVLPVNODECORE pRec;
2120
2121 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2122 if (pRec)
2123 return true;
2124 return false;
2125}
2126
2127/**
2128 * For proper disassembly of the final patch block
2129 *
2130 * @returns VBox status code.
2131 * @param pVM Pointer to the VM.
2132 * @param pCpu CPU disassembly state
2133 * @param pInstrGC Guest context pointer to privileged instruction
2134 * @param pCurInstrGC Guest context pointer to the current instruction
2135 * @param pCacheRec Cache record ptr
2136 *
2137 */
2138int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2139{
2140 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2141 NOREF(pInstrGC);
2142
2143 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2144 {
2145 /* Could be an int3 inserted in a call patch. Check to be sure */
2146 DISCPUSTATE cpu;
2147 RTRCPTR pOrgJumpGC;
2148
2149 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2150
2151 { /* Force pOrgJumpHC out of scope after using it */
2152 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2153
2154 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2155 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2156 return VINF_SUCCESS;
2157 }
2158 return VWRN_CONTINUE_ANALYSIS;
2159 }
2160
2161 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2162 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2163 {
2164 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2165 return VWRN_CONTINUE_ANALYSIS;
2166 }
2167
2168 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2169 || pCpu->pCurInstr->uOpcode == OP_INT
2170 || pCpu->pCurInstr->uOpcode == OP_IRET
2171 || pCpu->pCurInstr->uOpcode == OP_RETN
2172 || pCpu->pCurInstr->uOpcode == OP_RETF
2173 )
2174 {
2175 return VINF_SUCCESS;
2176 }
2177
2178 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2179 return VINF_SUCCESS;
2180
2181 return VWRN_CONTINUE_ANALYSIS;
2182}
2183
2184
2185/**
2186 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2187 *
2188 * @returns VBox status code.
2189 * @param pVM Pointer to the VM.
2190 * @param pInstrGC Guest context pointer to the initial privileged instruction
2191 * @param pCurInstrGC Guest context pointer to the current instruction
2192 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2193 * @param pCacheRec Cache record ptr
2194 *
2195 */
2196int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2197{
2198 DISCPUSTATE cpu;
2199 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2200 int rc = VWRN_CONTINUE_ANALYSIS;
2201 uint32_t cbInstr, delta;
2202 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2203 bool disret;
2204 char szOutput[256];
2205
2206 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2207
2208 /* We need this to determine branch targets (and for disassembling). */
2209 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2210
2211 while (rc == VWRN_CONTINUE_ANALYSIS)
2212 {
2213 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2214 if (pCurInstrHC == NULL)
2215 {
2216 rc = VERR_PATCHING_REFUSED;
2217 goto end;
2218 }
2219
2220 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2221 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2222 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2223 {
2224 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2225
2226 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2227 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2228 else
2229 Log(("DIS %s", szOutput));
2230
2231 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2232 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2233 {
2234 rc = VINF_SUCCESS;
2235 goto end;
2236 }
2237 }
2238 else
2239 Log(("DIS: %s", szOutput));
2240
2241 if (disret == false)
2242 {
2243 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2244 rc = VINF_SUCCESS;
2245 goto end;
2246 }
2247
2248 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2249 if (rc != VWRN_CONTINUE_ANALYSIS) {
2250 break; //done!
2251 }
2252
2253 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2254 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2255 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2256 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2257 )
2258 {
2259 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2260 RTRCPTR pOrgTargetGC;
2261
2262 if (pTargetGC == 0)
2263 {
2264 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2265 rc = VERR_PATCHING_REFUSED;
2266 break;
2267 }
2268
2269 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2270 {
2271 //jump back to guest code
2272 rc = VINF_SUCCESS;
2273 goto end;
2274 }
2275 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2276
2277 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2278 {
2279 rc = VINF_SUCCESS;
2280 goto end;
2281 }
2282
2283 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2284 {
2285 /* New jump, let's check it. */
2286 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2287
2288 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2289 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2290 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2291
2292 if (rc != VINF_SUCCESS) {
2293 break; //done!
2294 }
2295 }
2296 if (cpu.pCurInstr->uOpcode == OP_JMP)
2297 {
2298 /* Unconditional jump; return to caller. */
2299 rc = VINF_SUCCESS;
2300 goto end;
2301 }
2302
2303 rc = VWRN_CONTINUE_ANALYSIS;
2304 }
2305 pCurInstrGC += cbInstr;
2306 }
2307end:
2308 return rc;
2309}
2310
2311/**
2312 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable; frees all temporary disasm jump records afterwards.
2313 *
2314 * @returns VBox status code.
2315 * @param pVM Pointer to the VM.
2316 * @param pInstrGC Guest context pointer to the initial privileged instruction
2317 * @param pCurInstrGC Guest context pointer to the current instruction
2318 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2319 * @param pCacheRec Cache record ptr
2320 *
2321 */
2322int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2323{
2324 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2325
2326 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2327 /* Free all disasm jump records. */
2328 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2329 return rc;
2330}
2331
2332#endif /* LOG_ENABLED */
2333
2334/**
2335 * Detects whether the specified address falls within the 5-byte jump generated for an active patch.
2336 * If so, this patch is permanently disabled.
2337 *
2338 * @param pVM Pointer to the VM.
2339 * @param pInstrGC Guest context pointer to instruction
2340 * @param pConflictGC Guest context pointer to check
2341 *
2342 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2343 *
2344 */
2345VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2346{
2347 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2348 if (pTargetPatch)
2349 {
2350 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2351 }
2352 return VERR_PATCH_NO_CONFLICT;
2353}
2354
2355/**
2356 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2357 *
2358 * @returns VBox status code.
2359 * @param pVM Pointer to the VM.
2360 * @param pInstrGC Guest context pointer to privileged instruction
2361 * @param pCurInstrGC Guest context pointer to the current instruction
2362 * @param pfnPATMR3Recompile Callback for recompiling the disassembled instruction
2363 * @param pCacheRec Cache record ptr
2364 *
2365 */
2366static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2367{
2368 DISCPUSTATE cpu;
2369 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2370 int rc = VWRN_CONTINUE_ANALYSIS;
2371 uint32_t cbInstr;
2372 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2373 bool disret;
2374#ifdef LOG_ENABLED
2375 char szOutput[256];
2376#endif
2377
2378 while (rc == VWRN_CONTINUE_RECOMPILE)
2379 {
2380 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2381 if (pCurInstrHC == NULL)
2382 {
2383 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2384 goto end;
2385 }
2386#ifdef LOG_ENABLED
2387 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2388 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2389 Log(("Recompile: %s", szOutput));
2390#else
2391 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2392#endif
2393 if (disret == false)
2394 {
2395 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2396
2397 /* Add lookup record for patch to guest address translation */
2398 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2399 patmPatchGenIllegalInstr(pVM, pPatch);
2400 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2401 goto end;
2402 }
2403
2404 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2405 if (rc != VWRN_CONTINUE_RECOMPILE)
2406 {
2407 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2408 if ( rc == VINF_SUCCESS
2409 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2410 {
2411 DISCPUSTATE cpunext;
2412 uint32_t opsizenext;
2413 uint8_t *pNextInstrHC;
2414 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2415
2416 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2417
2418 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2419 * Recompile the next instruction as well
2420 */
2421 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2422 if (pNextInstrHC == NULL)
2423 {
2424 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2425 goto end;
2426 }
2427 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2428 if (disret == false)
2429 {
2430 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2431 goto end;
2432 }
2433 switch(cpunext.pCurInstr->uOpcode)
2434 {
2435 case OP_IRET: /* inhibit cleared in generated code */
2436 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2437 case OP_HLT:
2438 break; /* recompile these */
2439
2440 default:
2441 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2442 {
2443 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2444
2445 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2446 AssertRC(rc);
2447 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2448 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2449 }
2450 break;
2451 }
2452
2453 /* Note: after a cli we must continue to a proper exit point */
2454 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2455 {
2456 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2457 if (RT_SUCCESS(rc))
2458 {
2459 rc = VINF_SUCCESS;
2460 goto end;
2461 }
2462 break;
2463 }
2464 else
2465 rc = VWRN_CONTINUE_RECOMPILE;
2466 }
2467 else
2468 break; /* done! */
2469 }
2470
2471 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2472
2473
2474 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2475 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2476 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2477 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2478 )
2479 {
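 /* Branch handling (sketch): for a conditional jump we first finish recompiling the fall-through
 * path up to its exit point, then recurse into the branch target unless it was recompiled already;
 * for an unconditional jump only the target side is processed. Afterwards we return to the caller. */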
2480 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2481 if (addr == 0)
2482 {
2483 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2484 rc = VERR_PATCHING_REFUSED;
2485 break;
2486 }
2487
2488 Log(("Jump encountered target %RRv\n", addr));
2489
2490 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2491 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2492 {
2493 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2494 /* First we need to finish this linear code stream until the next exit point. */
2495 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2496 if (RT_FAILURE(rc))
2497 {
2498 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2499 break; //fatal error
2500 }
2501 }
2502
2503 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2504 {
2505 /* New code; let's recompile it. */
2506 Log(("patmRecompileCodeStream continue with jump\n"));
2507
2508 /*
2509 * If we are jumping to an existing patch (or within 5 bytes of its entrypoint), then we must temporarily disable
2510 * that patch so we can continue our analysis.
2511 *
2512 * We rely on CSAM to detect and resolve conflicts
2513 */
2514 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2515 if(pTargetPatch)
2516 {
2517 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2518 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2519 }
2520
2521 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2522 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2523 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2524
2525 if(pTargetPatch)
2526 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2527
2528 if (RT_FAILURE(rc))
2529 {
2530 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2531 break; //done!
2532 }
2533 }
2534 /* Always return to caller here; we're done! */
2535 rc = VINF_SUCCESS;
2536 goto end;
2537 }
2538 else
2539 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2540 {
2541 rc = VINF_SUCCESS;
2542 goto end;
2543 }
2544 pCurInstrGC += cbInstr;
2545 }
2546end:
2547 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2548 return rc;
2549}
2550
2551
2552/**
2553 * Generate the jump from guest to patch code
2554 *
2555 * @returns VBox status code.
2556 * @param pVM Pointer to the VM.
2557 * @param pPatch Patch record
2558 * @param pCacheRec Guest translation lookup cache record
2559 */
2560static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2561{
2562 uint8_t temp[8];
2563 uint8_t *pPB;
2564 int rc;
2565
2566 Assert(pPatch->cbPatchJump <= sizeof(temp));
2567 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2568
2569 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2570 Assert(pPB);
2571
2572#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2573 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2574 {
2575 Assert(pPatch->pPatchJumpDestGC);
2576
2577 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2578 {
2579 // jmp [PatchCode]
2580 if (fAddFixup)
2581 {
2582 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2583 {
2584 Log(("Relocation failed for the jump in the guest code!!\n"));
2585 return VERR_PATCHING_REFUSED;
2586 }
2587 }
2588
2589 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2590 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // relative displacement to the jump target
2591 }
2592 else
2593 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2594 {
2595 // jmp [PatchCode]
2596 if (fAddFixup)
2597 {
2598 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2599 {
2600 Log(("Relocation failed for the jump in the guest code!!\n"));
2601 return VERR_PATCHING_REFUSED;
2602 }
2603 }
2604
2605 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2606 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2607 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // relative displacement to the jump target
2608 }
2609 else
2610 {
2611 Assert(0);
2612 return VERR_PATCHING_REFUSED;
2613 }
2614 }
2615 else
2616#endif
2617 {
2618 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
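 /* What we write over the guest code here is a plain 32-bit near jump:
 * E9 <rel32>, with rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32). */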
2619
2620 // jmp [PatchCode]
2621 if (fAddFixup)
2622 {
2623 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2624 {
2625 Log(("Relocation failed for the jump in the guest code!!\n"));
2626 return VERR_PATCHING_REFUSED;
2627 }
2628 }
2629 temp[0] = 0xE9; //jmp
2630 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // relative displacement to the patch code
2631 }
2632 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2633 AssertRC(rc);
2634
2635 if (rc == VINF_SUCCESS)
2636 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2637
2638 return rc;
2639}
2640
2641/**
2642 * Remove the jump from guest to patch code
2643 *
2644 * @returns VBox status code.
2645 * @param pVM Pointer to the VM.
2646 * @param pPatch Patch record
2647 */
2648static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2649{
2650#ifdef DEBUG
2651 DISCPUSTATE cpu;
2652 char szOutput[256];
2653 uint32_t cbInstr, i = 0;
2654 bool disret;
2655
2656 while (i < pPatch->cbPrivInstr)
2657 {
2658 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2659 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2660 if (disret == false)
2661 break;
2662
2663 Log(("Org patch jump: %s", szOutput));
2664 Assert(cbInstr);
2665 i += cbInstr;
2666 }
2667#endif
2668
2669 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2670 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2671#ifdef DEBUG
2672 if (rc == VINF_SUCCESS)
2673 {
2674 i = 0;
2675 while (i < pPatch->cbPrivInstr)
2676 {
2677 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2678 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2679 if (disret == false)
2680 break;
2681
2682 Log(("Org instr: %s", szOutput));
2683 Assert(cbInstr);
2684 i += cbInstr;
2685 }
2686 }
2687#endif
2688 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2689 return rc;
2690}
2691
2692/**
2693 * Generate the call from guest to patch code
2694 *
2695 * @returns VBox status code.
2696 * @param pVM Pointer to the VM.
2697 * @param pPatch Patch record
2698 * @param pTargetGC Guest context address of the patch code to call/jump to
2699 * @param pCacheRec Guest translation cache record
2700 */
2701static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2702{
2703 uint8_t temp[8];
2704 uint8_t *pPB;
2705 int rc;
2706
2707 Assert(pPatch->cbPatchJump <= sizeof(temp));
2708
2709 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2710 Assert(pPB);
2711
2712 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2713
2714 // call/jmp [PatchCode] (opcode taken from the original instruction)
2715 if (fAddFixup)
2716 {
2717 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2718 {
2719 Log(("Relocation failed for the jump in the guest code!!\n"));
2720 return VERR_PATCHING_REFUSED;
2721 }
2722 }
2723
2724 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2725 temp[0] = pPatch->aPrivInstr[0];
2726 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // relative displacement to the target
2727
2728 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2729 AssertRC(rc);
2730
2731 return rc;
2732}
2733
2734
2735/**
2736 * Patch cli/sti pushf/popf instruction block at specified location
2737 *
2738 * @returns VBox status code.
2739 * @param pVM Pointer to the VM.
2740 * @param pInstrGC Guest context pointer to privileged instruction
2741 * @param pInstrHC Host context pointer to privileged instruction
2742 * @param uOpcode Instruction opcode
2743 * @param uOpSize Size of starting instruction
2744 * @param pPatchRec Patch record
2745 *
2746 * @note returns failure if patching is not allowed or not possible
2747 *
2748 */
2749VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2750 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2751{
2752 PPATCHINFO pPatch = &pPatchRec->patch;
2753 int rc = VERR_PATCHING_REFUSED;
2754 uint32_t orgOffsetPatchMem = ~0;
2755 RTRCPTR pInstrStart;
2756 bool fInserted;
2757 NOREF(pInstrHC); NOREF(uOpSize);
2758
2759 /* Save original offset (in case of failures later on) */
2760 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2761 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2762
2763 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2764 switch (uOpcode)
2765 {
2766 case OP_MOV:
2767 break;
2768
2769 case OP_CLI:
2770 case OP_PUSHF:
2771 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2772 /* Note: special precautions are taken when disabling and enabling such patches. */
2773 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2774 break;
2775
2776 default:
2777 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2778 {
2779 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2780 return VERR_INVALID_PARAMETER;
2781 }
2782 }
2783
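 /* IDT handler, sysenter and int 3 replacement patches are entered through other means; every other
 * patch needs a 5-byte near jump written into the guest code at the privileged instruction, which is
 * installed further down (patmGenJumpToPatch). */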
2784 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2785 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2786
2787 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2788 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2789 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2790 )
2791 {
2792 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2793 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2794 rc = VERR_PATCHING_REFUSED;
2795 goto failure;
2796 }
2797
2798 pPatch->nrPatch2GuestRecs = 0;
2799 pInstrStart = pInstrGC;
2800
2801#ifdef PATM_ENABLE_CALL
2802 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2803#endif
2804
2805 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2806 pPatch->uCurPatchOffset = 0;
2807
2808 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2809 {
2810 Assert(pPatch->flags & PATMFL_INTHANDLER);
2811
2812 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2813 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2814 if (RT_FAILURE(rc))
2815 goto failure;
2816 }
2817
2818 /***************************************************************************************************************************/
2819 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2820 /***************************************************************************************************************************/
2821#ifdef VBOX_WITH_STATISTICS
2822 if (!(pPatch->flags & PATMFL_SYSENTER))
2823 {
2824 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2825 if (RT_FAILURE(rc))
2826 goto failure;
2827 }
2828#endif
2829
2830 PATMP2GLOOKUPREC cacheRec;
2831 RT_ZERO(cacheRec);
2832 cacheRec.pPatch = pPatch;
2833
2834 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2835 /* Free leftover lock if any. */
2836 if (cacheRec.Lock.pvMap)
2837 {
2838 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2839 cacheRec.Lock.pvMap = NULL;
2840 }
2841 if (rc != VINF_SUCCESS)
2842 {
2843 Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2844 goto failure;
2845 }
2846
2847 /* Calculated during analysis. */
2848 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2849 {
2850 /* Most likely cause: we encountered an illegal instruction very early on. */
2851 /** @todo could turn it into an int3 callable patch. */
2852 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2853 rc = VERR_PATCHING_REFUSED;
2854 goto failure;
2855 }
2856
2857 /* size of patch block */
2858 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2859
2860
2861 /* Update free pointer in patch memory. */
2862 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2863 /* Round to next 8 byte boundary. */
2864 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2865
2866 /*
2867 * Insert into patch to guest lookup tree
2868 */
2869 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2870 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2871 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2872 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2873 if (!fInserted)
2874 {
2875 rc = VERR_PATCHING_REFUSED;
2876 goto failure;
2877 }
2878
2879 /* Note that patmr3SetBranchTargets can install additional patches!! */
2880 rc = patmr3SetBranchTargets(pVM, pPatch);
2881 if (rc != VINF_SUCCESS)
2882 {
2883 Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2884 goto failure;
2885 }
2886
2887#ifdef LOG_ENABLED
2888 Log(("Patch code ----------------------------------------------------------\n"));
2889 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2890 /* Free leftover lock if any. */
2891 if (cacheRec.Lock.pvMap)
2892 {
2893 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2894 cacheRec.Lock.pvMap = NULL;
2895 }
2896 Log(("Patch code ends -----------------------------------------------------\n"));
2897#endif
2898
2899 /* make a copy of the guest code bytes that will be overwritten */
2900 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2901
2902 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2903 AssertRC(rc);
2904
2905 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2906 {
2907 /*uint8_t bASMInt3 = 0xCC; - unused */
2908
2909 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2910 /* Replace first opcode byte with 'int 3'. */
2911 rc = patmActivateInt3Patch(pVM, pPatch);
2912 if (RT_FAILURE(rc))
2913 goto failure;
2914
2915 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2916 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2917
2918 pPatch->flags &= ~PATMFL_INSTR_HINT;
2919 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2920 }
2921 else
2922 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2923 {
2924 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2925 /* now insert a jump in the guest code */
2926 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2927 AssertRC(rc);
2928 if (RT_FAILURE(rc))
2929 goto failure;
2930
2931 }
2932
2933 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2934
2935 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2936 pPatch->pTempInfo->nrIllegalInstr = 0;
2937
2938 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2939
2940 pPatch->uState = PATCH_ENABLED;
2941 return VINF_SUCCESS;
2942
2943failure:
2944 if (pPatchRec->CoreOffset.Key)
2945 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2946
2947 patmEmptyTree(pVM, &pPatch->FixupTree);
2948 pPatch->nrFixups = 0;
2949
2950 patmEmptyTree(pVM, &pPatch->JumpTree);
2951 pPatch->nrJumpRecs = 0;
2952
2953 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2954 pPatch->pTempInfo->nrIllegalInstr = 0;
2955
2956 /* Turn this patch into a dummy. */
2957 pPatch->uState = PATCH_REFUSED;
2958 pPatch->pPatchBlockOffset = 0;
2959
2960 // Give back the patch memory we no longer need
2961 Assert(orgOffsetPatchMem != (uint32_t)~0);
2962 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2963
2964 return rc;
2965}
2966
2967/**
2968 * Patch IDT handler
2969 *
2970 * @returns VBox status code.
2971 * @param pVM Pointer to the VM.
2972 * @param pInstrGC Guest context pointer to privileged instruction
2973 * @param uOpSize Size of starting instruction
2974 * @param pPatchRec Patch record
2975 * @param pCacheRec Cache record ptr
2976 *
2977 * @note returns failure if patching is not allowed or not possible
2978 *
2979 */
2980static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2981{
2982 PPATCHINFO pPatch = &pPatchRec->patch;
2983 bool disret;
2984 DISCPUSTATE cpuPush, cpuJmp;
2985 uint32_t cbInstr;
2986 RTRCPTR pCurInstrGC = pInstrGC;
2987 uint8_t *pCurInstrHC, *pInstrHC;
2988 uint32_t orgOffsetPatchMem = ~0;
2989
2990 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2991 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2992
2993 /*
2994 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2995 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2996 * condition here and only patch the common entypoint once.
2997 * condition here and only patch the common entrypoint once.
2998 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
2999 Assert(disret);
3000 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3001 {
3002 RTRCPTR pJmpInstrGC;
3003 int rc;
3004 pCurInstrGC += cbInstr;
3005
3006 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3007 if ( disret
3008 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3009 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3010 )
3011 {
3012 bool fInserted;
3013 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3014 if (pJmpPatch == 0)
3015 {
3016 /* Patch it first! */
3017 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3018 if (rc != VINF_SUCCESS)
3019 goto failure;
3020 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3021 Assert(pJmpPatch);
3022 }
3023 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3024 goto failure;
3025
3026 /* save original offset (in case of failures later on) */
3027 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3028
3029 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3030 pPatch->uCurPatchOffset = 0;
3031 pPatch->nrPatch2GuestRecs = 0;
3032
3033#ifdef VBOX_WITH_STATISTICS
3034 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3035 if (RT_FAILURE(rc))
3036 goto failure;
3037#endif
3038
3039 /* Install fake cli patch (to clear the virtual IF) */
3040 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3041 if (RT_FAILURE(rc))
3042 goto failure;
3043
3044 /* Add lookup record for patch to guest address translation (for the push) */
3045 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3046
3047 /* Duplicate push. */
3048 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3049 if (RT_FAILURE(rc))
3050 goto failure;
3051
3052 /* Generate jump to common entrypoint. */
3053 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3054 if (RT_FAILURE(rc))
3055 goto failure;
3056
3057 /* size of patch block */
3058 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3059
3060 /* Update free pointer in patch memory. */
3061 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3062 /* Round to next 8 byte boundary */
3063 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3064
3065 /* There's no jump from guest to patch code. */
3066 pPatch->cbPatchJump = 0;
3067
3068
3069#ifdef LOG_ENABLED
3070 Log(("Patch code ----------------------------------------------------------\n"));
3071 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3072 Log(("Patch code ends -----------------------------------------------------\n"));
3073#endif
3074 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3075
3076 /*
3077 * Insert into patch to guest lookup tree
3078 */
3079 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3080 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3081 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3082 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3083
3084 pPatch->uState = PATCH_ENABLED;
3085
3086 return VINF_SUCCESS;
3087 }
3088 }
3089failure:
3090 /* Give back the patch memory we no longer need */
3091 if (orgOffsetPatchMem != (uint32_t)~0)
3092 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3093
3094 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3095}
3096
3097/**
3098 * Install a trampoline to call a guest trap handler directly
3099 *
3100 * @returns VBox status code.
3101 * @param pVM Pointer to the VM.
3102 * @param pInstrGC Guest context pointer to the privileged instruction
3103 * @param pPatchRec Patch record
3104 * @param pCacheRec Cache record ptr
3105 *
3106 */
3107static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3108{
3109 PPATCHINFO pPatch = &pPatchRec->patch;
3110 int rc = VERR_PATCHING_REFUSED;
3111 uint32_t orgOffsetPatchMem = ~0;
3112 bool fInserted;
3113
3114 // save original offset (in case of failures later on)
3115 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3116
3117 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3118 pPatch->uCurPatchOffset = 0;
3119 pPatch->nrPatch2GuestRecs = 0;
3120
3121#ifdef VBOX_WITH_STATISTICS
3122 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3123 if (RT_FAILURE(rc))
3124 goto failure;
3125#endif
3126
3127 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3128 if (RT_FAILURE(rc))
3129 goto failure;
3130
3131 /* size of patch block */
3132 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3133
3134 /* Update free pointer in patch memory. */
3135 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3136 /* Round to next 8 byte boundary */
3137 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3138
3139 /* There's no jump from guest to patch code. */
3140 pPatch->cbPatchJump = 0;
3141
3142#ifdef LOG_ENABLED
3143 Log(("Patch code ----------------------------------------------------------\n"));
3144 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3145 Log(("Patch code ends -----------------------------------------------------\n"));
3146#endif
3147 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3148 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3149
3150 /*
3151 * Insert into patch to guest lookup tree
3152 */
3153 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3154 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3155 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3156 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3157
3158 pPatch->uState = PATCH_ENABLED;
3159 return VINF_SUCCESS;
3160
3161failure:
3162 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3163
3164 /* Turn this cli patch into a dummy. */
3165 pPatch->uState = PATCH_REFUSED;
3166 pPatch->pPatchBlockOffset = 0;
3167
3168 /* Give back the patch memory we no longer need */
3169 Assert(orgOffsetPatchMem != (uint32_t)~0);
3170 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3171
3172 return rc;
3173}
3174
3175
3176#ifdef LOG_ENABLED
3177/**
3178 * Check if the instruction is patched as a common idt handler
3179 *
3180 * @returns true or false
3181 * @param pVM Pointer to the VM.
3182 * @param pInstrGC Guest context pointer to the instruction
3183 *
3184 */
3185static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3186{
3187 PPATMPATCHREC pRec;
3188
3189 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3190 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3191 return true;
3192 return false;
3193}
3194#endif // LOG_ENABLED
3195
3196
3197/**
3198 * Duplicates a complete function
3199 *
3200 * @returns VBox status code.
3201 * @param pVM Pointer to the VM.
3202 * @param pInstrGC Guest context pointer to the privileged instruction
3203 * @param pPatchRec Patch record
3204 * @param pCacheRec Cache record ptr
3205 *
3206 */
3207static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3208{
3209 PPATCHINFO pPatch = &pPatchRec->patch;
3210 int rc = VERR_PATCHING_REFUSED;
3211 uint32_t orgOffsetPatchMem = ~0;
3212 bool fInserted;
3213
3214 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3215 /* Save original offset (in case of failures later on). */
3216 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3217
3218 /* We will not go on indefinitely with call instruction handling. */
3219 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3220 {
3221 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3222 return VERR_PATCHING_REFUSED;
3223 }
3224
3225 pVM->patm.s.ulCallDepth++;
3226
3227#ifdef PATM_ENABLE_CALL
3228 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3229#endif
3230
3231 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3232
3233 pPatch->nrPatch2GuestRecs = 0;
3234 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3235 pPatch->uCurPatchOffset = 0;
3236
3237 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3238 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3239 if (RT_FAILURE(rc))
3240 goto failure;
3241
3242#ifdef VBOX_WITH_STATISTICS
3243 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3244 if (RT_FAILURE(rc))
3245 goto failure;
3246#endif
3247
3248 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3249 if (rc != VINF_SUCCESS)
3250 {
3251 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3252 goto failure;
3253 }
3254
3255 //size of patch block
3256 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3257
3258 //update free pointer in patch memory
3259 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3260 /* Round to next 8 byte boundary. */
3261 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3262
3263 pPatch->uState = PATCH_ENABLED;
3264
3265 /*
3266 * Insert into patch to guest lookup tree
3267 */
3268 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3269 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3270 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3271 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3272 if (!fInserted)
3273 {
3274 rc = VERR_PATCHING_REFUSED;
3275 goto failure;
3276 }
3277
3278 /* Note that patmr3SetBranchTargets can install additional patches!! */
3279 rc = patmr3SetBranchTargets(pVM, pPatch);
3280 if (rc != VINF_SUCCESS)
3281 {
3282 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3283 goto failure;
3284 }
3285
3286#ifdef LOG_ENABLED
3287 Log(("Patch code ----------------------------------------------------------\n"));
3288 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3289 Log(("Patch code ends -----------------------------------------------------\n"));
3290#endif
3291
3292 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3293
3294 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3295 pPatch->pTempInfo->nrIllegalInstr = 0;
3296
3297 pVM->patm.s.ulCallDepth--;
3298 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3299 return VINF_SUCCESS;
3300
3301failure:
3302 if (pPatchRec->CoreOffset.Key)
3303 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3304
3305 patmEmptyTree(pVM, &pPatch->FixupTree);
3306 pPatch->nrFixups = 0;
3307
3308 patmEmptyTree(pVM, &pPatch->JumpTree);
3309 pPatch->nrJumpRecs = 0;
3310
3311 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3312 pPatch->pTempInfo->nrIllegalInstr = 0;
3313
3314 /* Turn this cli patch into a dummy. */
3315 pPatch->uState = PATCH_REFUSED;
3316 pPatch->pPatchBlockOffset = 0;
3317
3318 // Give back the patch memory we no longer need
3319 Assert(orgOffsetPatchMem != (uint32_t)~0);
3320 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3321
3322 pVM->patm.s.ulCallDepth--;
3323 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3324 return rc;
3325}
3326
3327/**
3328 * Creates trampoline code to jump inside an existing patch
3329 *
3330 * @returns VBox status code.
3331 * @param pVM Pointer to the VM.
3332 * @param pInstrGC Guest context pointer to the privileged instruction
3333 * @param pPatchRec Patch record
3334 *
3335 */
3336static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3337{
3338 PPATCHINFO pPatch = &pPatchRec->patch;
3339 RTRCPTR pPage, pPatchTargetGC = 0;
3340 uint32_t orgOffsetPatchMem = ~0;
3341 int rc = VERR_PATCHING_REFUSED;
3342 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3343 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3344 bool fInserted = false;
3345
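    /* Summary of the steps below: find an enabled function-duplicate patch that already covers
     * pInstrGC, translate the guest address to the corresponding location inside that patch
     * block, and emit a small stub (set PATM interrupt flag + jump) into patch memory that
     * branches there. */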
3346 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3347 /* Save original offset (in case of failures later on). */
3348 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3349
3350 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3351 /** @todo we already checked this before */
3352 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3353
3354 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3355 if (pPatchPage)
3356 {
3357 uint32_t i;
3358
3359 for (i=0;i<pPatchPage->cCount;i++)
3360 {
3361 if (pPatchPage->papPatch[i])
3362 {
3363 pPatchToJmp = pPatchPage->papPatch[i];
3364
3365 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3366 && pPatchToJmp->uState == PATCH_ENABLED)
3367 {
3368 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3369 if (pPatchTargetGC)
3370 {
3371 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3372 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3373 Assert(pPatchToGuestRec);
3374
3375 pPatchToGuestRec->fJumpTarget = true;
3376 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3377 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3378 break;
3379 }
3380 }
3381 }
3382 }
3383 }
3384 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3385
3386 /*
3387 * Only record the trampoline patch if this is the first patch to the target
3388 * or we recorded other patches already.
3389 * The goal is to refuse refreshing function duplicates if the guest
3390 * modifies code after a saved state was loaded because it is not possible
3391 * to save the relation between trampoline and target without changing the
3392 * saved state version.
3393 */
3394 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3395 || pPatchToJmp->pTrampolinePatchesHead)
3396 {
3397 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3398 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3399 if (!pTrampRec)
3400 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3401
3402 pTrampRec->pPatchTrampoline = pPatchRec;
3403 }
3404
3405 pPatch->nrPatch2GuestRecs = 0;
3406 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3407 pPatch->uCurPatchOffset = 0;
3408
3409 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3410 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3411 if (RT_FAILURE(rc))
3412 goto failure;
3413
3414#ifdef VBOX_WITH_STATISTICS
3415 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3416 if (RT_FAILURE(rc))
3417 goto failure;
3418#endif
3419
3420 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3421 if (RT_FAILURE(rc))
3422 goto failure;
3423
3424 /*
3425 * Insert into patch to guest lookup tree
3426 */
3427 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3428 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3429 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3430 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3431 if (!fInserted)
3432 {
3433 rc = VERR_PATCHING_REFUSED;
3434 goto failure;
3435 }
3436
3437 /* size of patch block */
3438 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3439
3440 /* Update free pointer in patch memory. */
3441 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3442 /* Round to next 8 byte boundary */
3443 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3444
3445 /* There's no jump from guest to patch code. */
3446 pPatch->cbPatchJump = 0;
3447
3448 /* Enable the patch. */
3449 pPatch->uState = PATCH_ENABLED;
3450 /* We allow this patch to be called as a function. */
3451 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3452
3453 if (pTrampRec)
3454 {
3455 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3456 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3457 }
3458 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3459 return VINF_SUCCESS;
3460
3461failure:
3462 if (pPatchRec->CoreOffset.Key)
3463 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3464
3465 patmEmptyTree(pVM, &pPatch->FixupTree);
3466 pPatch->nrFixups = 0;
3467
3468 patmEmptyTree(pVM, &pPatch->JumpTree);
3469 pPatch->nrJumpRecs = 0;
3470
3471 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3472 pPatch->pTempInfo->nrIllegalInstr = 0;
3473
3474 /* Turn this cli patch into a dummy. */
3475 pPatch->uState = PATCH_REFUSED;
3476 pPatch->pPatchBlockOffset = 0;
3477
3478 // Give back the patch memory we no longer need
3479 Assert(orgOffsetPatchMem != (uint32_t)~0);
3480 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3481
3482 if (pTrampRec)
3483 MMR3HeapFree(pTrampRec);
3484
3485 return rc;
3486}
3487
3488
3489/**
3490 * Patch branch target function for call/jump at specified location.
3491 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3492 *
3493 * @returns VBox status code.
3494 * @param pVM Pointer to the VM.
3495 * @param pCtx Pointer to the guest CPU context.
3496 *
3497 */
3498VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3499{
3500 RTRCPTR pBranchTarget, pPage;
3501 int rc;
3502 RTRCPTR pPatchTargetGC = 0;
3503
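    /* Register interface as used below: EDX holds the branch target, EDI points to the lookup
     * cache slot inside patch memory, and EAX receives the patch address of the duplicate
     * relative to the start of patch memory (0 if no patch could be installed). */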
3504 pBranchTarget = pCtx->edx;
3505 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3506
3507 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3508 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3509
3510 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3511 if (pPatchPage)
3512 {
3513 uint32_t i;
3514
3515 for (i=0;i<pPatchPage->cCount;i++)
3516 {
3517 if (pPatchPage->papPatch[i])
3518 {
3519 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3520
3521 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3522 && pPatch->uState == PATCH_ENABLED)
3523 {
3524 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3525 if (pPatchTargetGC)
3526 {
3527 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3528 break;
3529 }
3530 }
3531 }
3532 }
3533 }
3534
3535 if (pPatchTargetGC)
3536 {
3537 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3538 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3539 }
3540 else
3541 {
3542 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3543 }
3544
3545 if (rc == VINF_SUCCESS)
3546 {
3547 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3548 Assert(pPatchTargetGC);
3549 }
3550
3551 if (pPatchTargetGC)
3552 {
3553 pCtx->eax = pPatchTargetGC;
3554 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3555 }
3556 else
3557 {
3558 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3559 pCtx->eax = 0;
3560 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3561 }
3562 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3563 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3564 AssertRC(rc);
3565
3566 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3567 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3568 return VINF_SUCCESS;
3569}
3570
3571/**
3572 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3573 *
3574 * @returns VBox status code.
3575 * @param pVM Pointer to the VM.
3576 * @param pCpu Disassembly CPU structure ptr
3577 * @param pInstrGC Guest context pointer to the privileged instruction
3578 * @param pCacheRec Cache record ptr
3579 *
3580 */
3581static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3582{
3583 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3584 int rc = VERR_PATCHING_REFUSED;
3585 DISCPUSTATE cpu;
3586 RTRCPTR pTargetGC;
3587 PPATMPATCHREC pPatchFunction;
3588 uint32_t cbInstr;
3589 bool disret;
3590
3591 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3592 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3593
3594 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3595 {
3596 rc = VERR_PATCHING_REFUSED;
3597 goto failure;
3598 }
3599
3600 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3601 if (pTargetGC == 0)
3602 {
3603 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3604 rc = VERR_PATCHING_REFUSED;
3605 goto failure;
3606 }
3607
3608 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3609 if (pPatchFunction == NULL)
3610 {
3611 for(;;)
3612 {
3613 /* It could be an indirect call (call -> jmp dest).
3614 * Note that it's dangerous to assume the jump will never change...
3615 */
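            /* Illustrative layout being resolved here (hypothetical labels):
             *     call Stub
             *   Stub:
             *     jmp  RealFunction   ; follow this jump once to locate the duplicated function
             */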
3616 uint8_t *pTmpInstrHC;
3617
3618 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3619 Assert(pTmpInstrHC);
3620 if (pTmpInstrHC == 0)
3621 break;
3622
3623 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3624 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3625 break;
3626
3627 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3628 if (pTargetGC == 0)
3629 {
3630 break;
3631 }
3632
3633 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3634 break;
3635 }
3636 if (pPatchFunction == 0)
3637 {
3638 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3639 rc = VERR_PATCHING_REFUSED;
3640 goto failure;
3641 }
3642 }
3643
3644 // make a copy of the guest code bytes that will be overwritten
3645 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3646
3647 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3648 AssertRC(rc);
3649
3650 /* Now replace the original call in the guest code */
3651 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3652 AssertRC(rc);
3653 if (RT_FAILURE(rc))
3654 goto failure;
3655
3656 /* Lowest and highest address for write monitoring. */
3657 pPatch->pInstrGCLowest = pInstrGC;
3658 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3659 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3660
3661 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3662
3663 pPatch->uState = PATCH_ENABLED;
3664 return VINF_SUCCESS;
3665
3666failure:
3667 /* Turn this patch into a dummy. */
3668 pPatch->uState = PATCH_REFUSED;
3669
3670 return rc;
3671}
3672
3673/**
3674 * Replace the address in an MMIO instruction with the cached version.
3675 *
3676 * @returns VBox status code.
3677 * @param pVM Pointer to the VM.
3678 * @param pInstrGC Guest context pointer to the privileged instruction
3679 * @param pCpu Disassembly CPU structure ptr
3680 * @param pCacheRec Cache record ptr
3681 *
3682 * @note returns failure if patching is not allowed or possible
3683 *
3684 */
3685static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3686{
3687 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3688 uint8_t *pPB;
3689 int rc = VERR_PATCHING_REFUSED;
3690
3691 Assert(pVM->patm.s.mmio.pCachedData);
3692 if (!pVM->patm.s.mmio.pCachedData)
3693 goto failure;
3694
3695 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3696 goto failure;
3697
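    /* The MMIO address is encoded as a 32-bit displacement in the last 4 bytes of the
     * instruction; those bytes are rewritten below so the instruction references the cached
     * data instead of the MMIO location. */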
3698 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3699 if (pPB == 0)
3700 goto failure;
3701
3702 /* Add relocation record for cached data access. */
3703 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3704 {
3705 Log(("Relocation failed for cached mmio address!!\n"));
3706 return VERR_PATCHING_REFUSED;
3707 }
3708 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3709
3710 /* Save original instruction. */
3711 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3712 AssertRC(rc);
3713
3714 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3715
3716 /* Replace address with that of the cached item. */
3717 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3718 AssertRC(rc);
3719 if (RT_FAILURE(rc))
3720 {
3721 goto failure;
3722 }
3723
3724 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3725 pVM->patm.s.mmio.pCachedData = 0;
3726 pVM->patm.s.mmio.GCPhys = 0;
3727 pPatch->uState = PATCH_ENABLED;
3728 return VINF_SUCCESS;
3729
3730failure:
3731 /* Turn this patch into a dummy. */
3732 pPatch->uState = PATCH_REFUSED;
3733
3734 return rc;
3735}
3736
3737
3738/**
3739 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3740 *
3741 * @returns VBox status code.
3742 * @param pVM Pointer to the VM.
3743 * @param pInstrGC Guest context pointer to the privileged instruction
3744 * @param pPatch Patch record
3745 *
3746 * @note returns failure if patching is not allowed or possible
3747 *
3748 */
3749static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3750{
3751 DISCPUSTATE cpu;
3752 uint32_t cbInstr;
3753 bool disret;
3754 uint8_t *pInstrHC;
3755
3756 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3757
3758 /* Convert GC to HC address. */
3759 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3760 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3761
3762 /* Disassemble mmio instruction. */
3763 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3764 &cpu, &cbInstr);
3765 if (disret == false)
3766 {
3767 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3768 return VERR_PATCHING_REFUSED;
3769 }
3770
3771 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3772 if (cbInstr > MAX_INSTR_SIZE)
3773 return VERR_PATCHING_REFUSED;
3774 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3775 return VERR_PATCHING_REFUSED;
3776
3777 /* Add relocation record for cached data access. */
3778 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3779 {
3780 Log(("Relocation failed for cached mmio address!!\n"));
3781 return VERR_PATCHING_REFUSED;
3782 }
3783 /* Replace address with that of the cached item. */
3784 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3785
3786 /* Lowest and highest address for write monitoring. */
3787 pPatch->pInstrGCLowest = pInstrGC;
3788 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3789
3790 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3791 pVM->patm.s.mmio.pCachedData = 0;
3792 pVM->patm.s.mmio.GCPhys = 0;
3793 return VINF_SUCCESS;
3794}
3795
3796/**
3797 * Activates an int3 patch
3798 *
3799 * @returns VBox status code.
3800 * @param pVM Pointer to the VM.
3801 * @param pPatch Patch record
3802 */
3803static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3804{
3805 uint8_t bASMInt3 = 0xCC;
3806 int rc;
3807
3808 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3809 Assert(pPatch->uState != PATCH_ENABLED);
3810
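    /* int3 (0xCC) is a single-byte opcode, so only the first byte of the original instruction
     * is overwritten here; patmDeactivateInt3Patch restores that byte from pPatch->aPrivInstr. */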
3811 /* Replace first opcode byte with 'int 3'. */
3812 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3813 AssertRC(rc);
3814
3815 pPatch->cbPatchJump = sizeof(bASMInt3);
3816
3817 return rc;
3818}
3819
3820/**
3821 * Deactivates an int3 patch
3822 *
3823 * @returns VBox status code.
3824 * @param pVM Pointer to the VM.
3825 * @param pPatch Patch record
3826 */
3827static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3828{
3829 uint8_t ASMInt3 = 0xCC;
3830 int rc;
3831
3832 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3833 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3834
3835 /* Restore first opcode byte. */
3836 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3837 AssertRC(rc);
3838 return rc;
3839}
3840
3841/**
3842 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically
3843 * in the raw-mode context.
3844 *
3845 * @returns VBox status code.
3846 * @param pVM Pointer to the VM.
3847 * @param pInstrGC Guest context pointer to the privileged instruction
3848 * @param pInstrHC Host context point to privileged instruction
3849 * @param pCpu Disassembly CPU structure ptr
3850 * @param pPatch Patch record
3851 *
3852 * @note returns failure if patching is not allowed or possible
3853 *
3854 */
3855VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
3856 PPATCHINFO pPatch)
3857{
3858 uint8_t bASMInt3 = 0xCC;
3859 int rc;
3860
3861 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3862 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "PATMR3PatchInstrInt3:", "");
3863
3864 /* Save the original instruction. */
3865 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3866 AssertRC(rc);
3867 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3868
3869 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3870
3871 /* Replace first opcode byte with 'int 3'. */
3872 rc = patmActivateInt3Patch(pVM, pPatch);
3873 if (RT_FAILURE(rc))
3874 goto failure;
3875
3876 /* Lowest and highest address for write monitoring. */
3877 pPatch->pInstrGCLowest = pInstrGC;
3878 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3879
3880 pPatch->uState = PATCH_ENABLED;
3881 return VINF_SUCCESS;
3882
3883failure:
3884 /* Turn this patch into a dummy. */
3885 return VERR_PATCHING_REFUSED;
3886}
3887
3888#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3889/**
3890 * Patch a jump instruction at specified location
3891 *
3892 * @returns VBox status code.
3893 * @param pVM Pointer to the VM.
3894 * @param pInstrGC Guest context pointer to the privileged instruction
3895 * @param pInstrHC Host context point to privileged instruction
3896 * @param pCpu Disassembly CPU structure ptr
3897 * @param pPatchRec Patch record
3898 *
3899 * @note returns failure if patching is not allowed or possible
3900 *
3901 */
3902int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3903{
3904 PPATCHINFO pPatch = &pPatchRec->patch;
3905 int rc = VERR_PATCHING_REFUSED;
3906
3907 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3908 pPatch->uCurPatchOffset = 0;
3909 pPatch->cbPatchBlockSize = 0;
3910 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3911
3912 /*
3913 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3914 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3915 */
3916 switch (pCpu->pCurInstr->uOpcode)
3917 {
3918 case OP_JO:
3919 case OP_JNO:
3920 case OP_JC:
3921 case OP_JNC:
3922 case OP_JE:
3923 case OP_JNE:
3924 case OP_JBE:
3925 case OP_JNBE:
3926 case OP_JS:
3927 case OP_JNS:
3928 case OP_JP:
3929 case OP_JNP:
3930 case OP_JL:
3931 case OP_JNL:
3932 case OP_JLE:
3933 case OP_JNLE:
3934 case OP_JMP:
3935 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3936 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
3937 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
3938 goto failure;
3939
3940 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
3941 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
3942 goto failure;
3943
3944 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
3945 {
3946 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3947 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3948 rc = VERR_PATCHING_REFUSED;
3949 goto failure;
3950 }
3951
3952 break;
3953
3954 default:
3955 goto failure;
3956 }
3957
3958 // make a copy of the guest code bytes that will be overwritten
3959 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
3960 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
3961 pPatch->cbPatchJump = pCpu->cbInstr;
3962
3963 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3964 AssertRC(rc);
3965
3966 /* Now insert a jump in the guest code. */
3967 /*
3968 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3969 * references the target instruction in the conflict patch.
3970 */
3971 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
3972
3973 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
3974 pPatch->pPatchJumpDestGC = pJmpDest;
3975
3976 PATMP2GLOOKUPREC cacheRec;
3977 RT_ZERO(cacheRec);
3978 cacheRec.pPatch = pPatch;
3979
3980 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3981 /* Free leftover lock if any. */
3982 if (cacheRec.Lock.pvMap)
3983 {
3984 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3985 cacheRec.Lock.pvMap = NULL;
3986 }
3987 AssertRC(rc);
3988 if (RT_FAILURE(rc))
3989 goto failure;
3990
3991 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3992
3993 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3994 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3995
3996 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3997
3998 /* Lowest and highest address for write monitoring. */
3999 pPatch->pInstrGCLowest = pInstrGC;
4000 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4001
4002 pPatch->uState = PATCH_ENABLED;
4003 return VINF_SUCCESS;
4004
4005failure:
4006 /* Turn this cli patch into a dummy. */
4007 pPatch->uState = PATCH_REFUSED;
4008
4009 return rc;
4010}
4011#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4012
4013
4014/**
4015 * Gives hint to PATM about supervisor guest instructions
4016 *
4017 * @returns VBox status code.
4018 * @param pVM Pointer to the VM.
4019 * @param pInstrGC Guest context pointer to the privileged instruction
4020 * @param flags Patch flags
4021 */
4022VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4023{
4024 Assert(pInstrGC);
4025 Assert(flags == PATMFL_CODE32);
4026
4027 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4028 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4029}
4030
4031/**
4032 * Patch privileged instruction at specified location
4033 *
4034 * @returns VBox status code.
4035 * @param pVM Pointer to the VM.
4036 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
4037 * @param flags Patch flags
4038 *
4039 * @note returns failure if patching is not allowed or possible
4040 */
4041VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4042{
4043 DISCPUSTATE cpu;
4044 R3PTRTYPE(uint8_t *) pInstrHC;
4045 uint32_t cbInstr;
4046 PPATMPATCHREC pPatchRec;
4047 PCPUMCTX pCtx = 0;
4048 bool disret;
4049 int rc;
4050 PVMCPU pVCpu = VMMGetCpu0(pVM);
4051 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4052
4053 if ( !pVM
4054 || pInstrGC == 0
4055 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4056 {
4057 AssertFailed();
4058 return VERR_INVALID_PARAMETER;
4059 }
4060
4061 if (PATMIsEnabled(pVM) == false)
4062 return VERR_PATCHING_REFUSED;
4063
4064 /* Test for patch conflict only with patches that actually change guest code. */
4065 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4066 {
4067 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4068 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4069 if (pConflictPatch != 0)
4070 return VERR_PATCHING_REFUSED;
4071 }
4072
4073 if (!(flags & PATMFL_CODE32))
4074 {
4075 /** @todo Only 32-bit code right now */
4076 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at the moment!!\n"));
4077 return VERR_NOT_IMPLEMENTED;
4078 }
4079
4080 /* We ran out of patch memory; don't bother anymore. */
4081 if (pVM->patm.s.fOutOfMemory == true)
4082 return VERR_PATCHING_REFUSED;
4083
4084#if 0 /* DONT COMMIT ENABLED! */
4085 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4086 if ( 0
4087 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4088 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4089 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4090 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4091 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4092 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4093 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4094 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4095 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4096 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4097 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4098 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4099 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4100 || pInstrGC == 0x80014447 /* KfLowerIrql */
4101 || 0)
4102 {
4103 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4104 return VERR_PATCHING_REFUSED;
4105 }
4106#endif
4107
4108 /* Make sure the code selector is wide open; otherwise refuse. */
4109 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4110 if (CPUMGetGuestCPL(pVCpu) == 0)
4111 {
4112 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4113 if (pInstrGCFlat != pInstrGC)
4114 {
4115 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4116 return VERR_PATCHING_REFUSED;
4117 }
4118 }
4119
4120 /* Note: the OpenBSD-specific check will break if we allow additional patches to be installed (int 3). */
4121 if (!(flags & PATMFL_GUEST_SPECIFIC))
4122 {
4123 /* New code. Make sure CSAM has a go at it first. */
4124 CSAMR3CheckCode(pVM, pInstrGC);
4125 }
4126
4127 /* Note: obsolete */
4128 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4129 && (flags & PATMFL_MMIO_ACCESS))
4130 {
4131 RTRCUINTPTR offset;
4132 void *pvPatchCoreOffset;
4133
4134 /* Find the patch record. */
4135 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4136 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4137 if (pvPatchCoreOffset == NULL)
4138 {
4139 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4140 return VERR_PATCH_NOT_FOUND; //fatal error
4141 }
4142 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4143
4144 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4145 }
4146
4147 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4148
4149 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4150 if (pPatchRec)
4151 {
4152 Assert(!(flags & PATMFL_TRAMPOLINE));
4153
4154 /* Hints about existing patches are ignored. */
4155 if (flags & PATMFL_INSTR_HINT)
4156 return VERR_PATCHING_REFUSED;
4157
4158 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4159 {
4160 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4161 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4162 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4163 }
4164
4165 if (pPatchRec->patch.uState == PATCH_DISABLED)
4166 {
4167 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4168 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4169 {
4170 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4171 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4172 }
4173 else
4174 Log(("Enabling patch %RRv again\n", pInstrGC));
4175
4176 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4177 rc = PATMR3EnablePatch(pVM, pInstrGC);
4178 if (RT_SUCCESS(rc))
4179 return VWRN_PATCH_ENABLED;
4180
4181 return rc;
4182 }
4183 if ( pPatchRec->patch.uState == PATCH_ENABLED
4184 || pPatchRec->patch.uState == PATCH_DIRTY)
4185 {
4186 /*
4187 * The patch might have been overwritten.
4188 */
4189 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4190 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4191 {
4192 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4193 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4194 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4195 {
4196 if (flags & PATMFL_IDTHANDLER)
4197 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4198
4199 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4200 }
4201 }
4202 rc = PATMR3RemovePatch(pVM, pInstrGC);
4203 if (RT_FAILURE(rc))
4204 return VERR_PATCHING_REFUSED;
4205 }
4206 else
4207 {
4208 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4209 /* already tried it once! */
4210 return VERR_PATCHING_REFUSED;
4211 }
4212 }
4213
4214 RTGCPHYS GCPhys;
4215 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4216 if (rc != VINF_SUCCESS)
4217 {
4218 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4219 return rc;
4220 }
4221 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4222 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4223 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4224 {
4225 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4226 return VERR_PATCHING_REFUSED;
4227 }
4228
4229 /* Initialize cache record for guest address translations. */
4230 bool fInserted;
4231 PATMP2GLOOKUPREC cacheRec;
4232 RT_ZERO(cacheRec);
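    /* The cache record keeps the most recent guest-to-host translation together with its page
     * mapping lock; any lock still held is released at the end of this function. */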
4233
4234 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4235 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4236
4237 /* Allocate patch record. */
4238 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4239 if (RT_FAILURE(rc))
4240 {
4241 Log(("Out of memory!!!!\n"));
4242 return VERR_NO_MEMORY;
4243 }
4244 pPatchRec->Core.Key = pInstrGC;
4245 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4246 /* Insert patch record into the lookup tree. */
4247 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4248 Assert(fInserted);
4249
4250 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4251 pPatchRec->patch.flags = flags;
4252 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4253 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4254
4255 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4256 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4257
4258 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4259 {
4260 /*
4261 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4262 */
4263 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4264 if (pPatchNear)
4265 {
4266 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4267 {
4268 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4269
4270 pPatchRec->patch.uState = PATCH_UNUSABLE;
4271 /*
4272 * Leave the new patch active, as it's marked unusable, to prevent us from checking it over and over again.
4273 */
4274 return VERR_PATCHING_REFUSED;
4275 }
4276 }
4277 }
4278
4279 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4280 if (pPatchRec->patch.pTempInfo == 0)
4281 {
4282 Log(("Out of memory!!!!\n"));
4283 return VERR_NO_MEMORY;
4284 }
4285
4286 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4287 if (disret == false)
4288 {
4289 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4290 return VERR_PATCHING_REFUSED;
4291 }
4292
4293 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4294 if (cbInstr > MAX_INSTR_SIZE)
4295 return VERR_PATCHING_REFUSED;
4296
4297 pPatchRec->patch.cbPrivInstr = cbInstr;
4298 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4299
4300 /* Restricted hinting for now. */
4301 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4302
4303 /* Initialize cache record patch pointer. */
4304 cacheRec.pPatch = &pPatchRec->patch;
4305
4306 /* Allocate statistics slot */
4307 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4308 {
4309 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4310 }
4311 else
4312 {
4313 Log(("WARNING: Patch index wrap around!!\n"));
4314 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4315 }
4316
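    /* Dispatch on the patch type flags: each helper below either builds a patch block in patch
     * memory or modifies the guest instruction directly, and sets the final patch state. */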
4317 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4318 {
4319 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4320 }
4321 else
4322 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4323 {
4324 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4325 }
4326 else
4327 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4328 {
4329 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4330 }
4331 else
4332 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4333 {
4334 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4335 }
4336 else
4337 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4338 {
4339 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4340 }
4341 else
4342 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4343 {
4344 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4345 }
4346 else
4347 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4348 {
4349 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4350 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4351
4352 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4353#ifdef VBOX_WITH_STATISTICS
4354 if ( rc == VINF_SUCCESS
4355 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4356 {
4357 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4358 }
4359#endif
4360 }
4361 else
4362 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4363 {
4364 switch (cpu.pCurInstr->uOpcode)
4365 {
4366 case OP_SYSENTER:
4367 case OP_PUSH:
4368 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4369 if (rc == VINF_SUCCESS)
4370 {
4371 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4373 return rc;
4374 }
4375 break;
4376
4377 default:
4378 rc = VERR_NOT_IMPLEMENTED;
4379 break;
4380 }
4381 }
4382 else
4383 {
4384 switch (cpu.pCurInstr->uOpcode)
4385 {
4386 case OP_SYSENTER:
4387 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4388 if (rc == VINF_SUCCESS)
4389 {
4390 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4391 return VINF_SUCCESS;
4392 }
4393 break;
4394
4395#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4396 case OP_JO:
4397 case OP_JNO:
4398 case OP_JC:
4399 case OP_JNC:
4400 case OP_JE:
4401 case OP_JNE:
4402 case OP_JBE:
4403 case OP_JNBE:
4404 case OP_JS:
4405 case OP_JNS:
4406 case OP_JP:
4407 case OP_JNP:
4408 case OP_JL:
4409 case OP_JNL:
4410 case OP_JLE:
4411 case OP_JNLE:
4412 case OP_JECXZ:
4413 case OP_LOOP:
4414 case OP_LOOPNE:
4415 case OP_LOOPE:
4416 case OP_JMP:
4417 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4418 {
4419 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4420 break;
4421 }
4422 return VERR_NOT_IMPLEMENTED;
4423#endif
4424
4425 case OP_PUSHF:
4426 case OP_CLI:
4427 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4428 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4429 break;
4430
4431 case OP_STR:
4432 case OP_SGDT:
4433 case OP_SLDT:
4434 case OP_SIDT:
4435 case OP_CPUID:
4436 case OP_LSL:
4437 case OP_LAR:
4438 case OP_SMSW:
4439 case OP_VERW:
4440 case OP_VERR:
4441 case OP_IRET:
4442 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4443 break;
4444
4445 default:
4446 return VERR_NOT_IMPLEMENTED;
4447 }
4448 }
4449
4450 if (rc != VINF_SUCCESS)
4451 {
4452 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4453 {
4454 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4455 pPatchRec->patch.nrPatch2GuestRecs = 0;
4456 }
4457 pVM->patm.s.uCurrentPatchIdx--;
4458 }
4459 else
4460 {
4461 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4462 AssertRCReturn(rc, rc);
4463
4464 /* Keep track of the upper and lower boundaries of patched instructions. */
4465 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4466 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4467 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4468 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4469
4470 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4471 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4472
4473 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4474 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4475
4476 rc = VINF_SUCCESS;
4477
4478 /* Patch hints are not enabled by default; only when they are actually encountered. */
4479 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4480 {
4481 rc = PATMR3DisablePatch(pVM, pInstrGC);
4482 AssertRCReturn(rc, rc);
4483 }
4484
4485#ifdef VBOX_WITH_STATISTICS
4486 /* Register statistics counter */
4487 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4488 {
4489 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4490 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4491#ifndef DEBUG_sandervl
4492 /* Full breakdown for the GUI. */
4493 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4494 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4495 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4496 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4497 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4498 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4499 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4500 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4501 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4502 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4503 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4504 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4505 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4506 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4507 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4508 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4509#endif
4510 }
4511#endif
4512 }
4513 /* Free leftover lock if any. */
4514 if (cacheRec.Lock.pvMap)
4515 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4516 return rc;
4517}
4518
4519/**
4520 * Query instruction size
4521 *
4522 * @returns VBox status code.
4523 * @param pVM Pointer to the VM.
4524 * @param pPatch Patch record
4525 * @param pInstrGC Instruction address
4526 */
4527static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4528{
4529 uint8_t *pInstrHC;
4530 PGMPAGEMAPLOCK Lock;
4531
4532 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4533 if (rc == VINF_SUCCESS)
4534 {
4535 DISCPUSTATE cpu;
4536 bool disret;
4537 uint32_t cbInstr;
4538
4539 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4540 PGMPhysReleasePageMappingLock(pVM, &Lock);
4541 if (disret)
4542 return cbInstr;
4543 }
4544 return 0;
4545}
4546
4547/**
4548 * Add patch to page record
4549 *
4550 * @returns VBox status code.
4551 * @param pVM Pointer to the VM.
4552 * @param pPage Page address
4553 * @param pPatch Patch record
4554 */
4555int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4556{
4557 PPATMPATCHPAGE pPatchPage;
4558 int rc;
4559
4560 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4561
4562 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4563 if (pPatchPage)
4564 {
4565 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4566 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4567 {
4568 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4569 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4570
4571 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4572 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4573 (void **)&pPatchPage->papPatch);
4574 if (RT_FAILURE(rc))
4575 {
4576 Log(("Out of memory!!!!\n"));
4577 return VERR_NO_MEMORY;
4578 }
4579 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4580 MMHyperFree(pVM, papPatchOld);
4581 }
4582 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4583 pPatchPage->cCount++;
4584 }
4585 else
4586 {
4587 bool fInserted;
4588
4589 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4590 if (RT_FAILURE(rc))
4591 {
4592 Log(("Out of memory!!!!\n"));
4593 return VERR_NO_MEMORY;
4594 }
4595 pPatchPage->Core.Key = pPage;
4596 pPatchPage->cCount = 1;
4597 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4598
4599 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4600 (void **)&pPatchPage->papPatch);
4601 if (RT_FAILURE(rc))
4602 {
4603 Log(("Out of memory!!!!\n"));
4604 MMHyperFree(pVM, pPatchPage);
4605 return VERR_NO_MEMORY;
4606 }
4607 pPatchPage->papPatch[0] = pPatch;
4608
4609 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4610 Assert(fInserted);
4611 pVM->patm.s.cPageRecords++;
4612
4613 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4614 }
4615 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
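    /* CSAM monitoring catches guest writes to this code page; the lowest/highest guest addresses
     * tracked below bound the patched instructions within the page (see the write monitoring
     * notes elsewhere in this file). */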
4616
4617 /* Get the closest guest instruction (from below) */
4618 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4619 Assert(pGuestToPatchRec);
4620 if (pGuestToPatchRec)
4621 {
4622 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4623 if ( pPatchPage->pLowestAddrGC == 0
4624 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4625 {
4626 RTRCUINTPTR offset;
4627
4628 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4629
4630 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4631 /* If we're too close to the page boundary, then make sure an
4632 instruction from the previous page doesn't cross the
4633 boundary itself. */
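 /* Example (illustrative numbers only): if the closest instruction below starts
    2 bytes before this page and is 4 bytes long, it spills 2 bytes into this
    page, so pLowestAddrGC is clamped to the page start (pPage) below. */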
4634 if (offset && offset < MAX_INSTR_SIZE)
4635 {
4636 /* Get the closest guest instruction (from above) */
4637 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4638
4639 if (pGuestToPatchRec)
4640 {
4641 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4642 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4643 {
4644 pPatchPage->pLowestAddrGC = pPage;
4645 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4646 }
4647 }
4648 }
4649 }
4650 }
4651
4652 /* Get the closest guest instruction (from above) */
4653 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4654 Assert(pGuestToPatchRec);
4655 if (pGuestToPatchRec)
4656 {
4657 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4658 if ( pPatchPage->pHighestAddrGC == 0
4659 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4660 {
4661 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4662 /* Increase by instruction size. */
4663 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4664//// Assert(size);
4665 pPatchPage->pHighestAddrGC += size;
4666 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4667 }
4668 }
4669
4670 return VINF_SUCCESS;
4671}
4672
4673/**
4674 * Remove patch from page record
4675 *
4676 * @returns VBox status code.
4677 * @param pVM Pointer to the VM.
4678 * @param pPage Page address
4679 * @param pPatch Patch record
4680 */
4681int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4682{
4683 PPATMPATCHPAGE pPatchPage;
4684 int rc;
4685
4686 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4687 Assert(pPatchPage);
4688
4689 if (!pPatchPage)
4690 return VERR_INVALID_PARAMETER;
4691
4692 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4693
4694 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4695 if (pPatchPage->cCount > 1)
4696 {
4697 uint32_t i;
4698
4699 /* Used by multiple patches */
4700 for (i = 0; i < pPatchPage->cCount; i++)
4701 {
4702 if (pPatchPage->papPatch[i] == pPatch)
4703 {
4704 /* close the gap between the remaining pointers. */
4705 uint32_t cNew = --pPatchPage->cCount;
4706 if (i < cNew)
4707 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4708 pPatchPage->papPatch[cNew] = NULL;
4709 return VINF_SUCCESS;
4710 }
4711 }
4712 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4713 }
4714 else
4715 {
4716 PPATMPATCHPAGE pPatchNode;
4717
4718 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4719
4720 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4721 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4722 Assert(pPatchNode && pPatchNode == pPatchPage);
4723
4724 Assert(pPatchPage->papPatch);
4725 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4726 AssertRC(rc);
4727 rc = MMHyperFree(pVM, pPatchPage);
4728 AssertRC(rc);
4729 pVM->patm.s.cPageRecords--;
4730 }
4731 return VINF_SUCCESS;
4732}
4733
4734/**
4735 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4736 *
4737 * @returns VBox status code.
4738 * @param pVM Pointer to the VM.
4739 * @param pPatch Patch record
4740 */
4741int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4742{
4743 int rc;
4744 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4745
4746 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4747 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4748 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4749
4750 /** @todo optimize better (large gaps between current and next used page) */
4751 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4752 {
4753 /* Get the closest guest instruction (from above) */
4754 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4755 if ( pGuestToPatchRec
4756 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4757 )
4758 {
4759 /* Code in page really patched -> add record */
4760 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4761 AssertRC(rc);
4762 }
4763 }
4764 pPatch->flags |= PATMFL_CODE_MONITORED;
4765 return VINF_SUCCESS;
4766}
4767
4768/**
4769 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4770 *
4771 * @returns VBox status code.
4772 * @param pVM Pointer to the VM.
4773 * @param pPatch Patch record
4774 */
4775int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4776{
4777 int rc;
4778 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4779
4780 /* Remove the page records of the pages that contain patched instructions from the self-modifying code lookup tree. */
4781 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4782 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4783
4784 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4785 {
4786 /* Get the closest guest instruction (from above) */
4787 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4788 if ( pGuestToPatchRec
4789 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4790 )
4791 {
4792 /* Code in page really patched -> remove record */
4793 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4794 AssertRC(rc);
4795 }
4796 }
4797 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4798 return VINF_SUCCESS;
4799}
4800
4801/**
4802 * Notifies PATM about a (potential) write to code that has been patched.
4803 *
4804 * @returns VBox status code.
4805 * @param pVM Pointer to the VM.
4806 * @param GCPtr GC pointer to write address
4807 * @param cbWrite Number of bytes to write
4808 *
4809 */
4810VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4811{
4812 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4813
4814 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4815
4816 Assert(VM_IS_EMT(pVM));
4817
4818 /* Quick boundary check */
4819 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4820 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4821 )
4822 return VINF_SUCCESS;
4823
4824 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4825
4826 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4827 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4828
4829 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4830 {
4831loop_start:
4832 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4833 if (pPatchPage)
4834 {
4835 uint32_t i;
4836 bool fValidPatchWrite = false;
4837
4838 /* Quick check to see if the write is in the patched part of the page */
4839 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4840 || pPatchPage->pHighestAddrGC < GCPtr)
4841 {
4842 break;
4843 }
4844
4845 for (i=0;i<pPatchPage->cCount;i++)
4846 {
4847 if (pPatchPage->papPatch[i])
4848 {
4849 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4850 RTRCPTR pPatchInstrGC;
4851 //unused: bool fForceBreak = false;
4852
4853 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4854 /** @todo inefficient and includes redundant checks for multiple pages. */
4855 for (uint32_t j=0; j<cbWrite; j++)
4856 {
4857 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4858
4859 if ( pPatch->cbPatchJump
4860 && pGuestPtrGC >= pPatch->pPrivInstrGC
4861 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4862 {
4863 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4864 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4865 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4866 if (rc == VINF_SUCCESS)
4867 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4868 goto loop_start;
4869
4870 continue;
4871 }
4872
4873 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4874 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4875 if (!pPatchInstrGC)
4876 {
4877 RTRCPTR pClosestInstrGC;
4878 uint32_t size;
4879
4880 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4881 if (pPatchInstrGC)
4882 {
4883 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4884 Assert(pClosestInstrGC <= pGuestPtrGC);
4885 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4886 /* Check if this is not a write into a gap between two patches */
4887 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4888 pPatchInstrGC = 0;
4889 }
4890 }
4891 if (pPatchInstrGC)
4892 {
4893 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4894
4895 fValidPatchWrite = true;
4896
4897 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4898 Assert(pPatchToGuestRec);
4899 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4900 {
4901 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4902
4903 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4904 {
4905 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4906
4907 PATMR3MarkDirtyPatch(pVM, pPatch);
4908
4909 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4910 goto loop_start;
4911 }
4912 else
4913 {
4914 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4915 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4916
4917 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4918 pPatchToGuestRec->fDirty = true;
4919
4920 *pInstrHC = 0xCC;
4921
4922 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4923 }
4924 }
4925 /* else already marked dirty */
4926 }
4927 }
4928 }
4929 } /* for each patch */
4930
4931 if (fValidPatchWrite == false)
4932 {
4933 /* Write to a part of the page that either:
4934 * - doesn't contain any code (shared code/data); rather unlikely
4935 * - lies in an old code page that's no longer in active use.
4936 */
4937invalid_write_loop_start:
4938 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4939
4940 if (pPatchPage)
4941 {
4942 for (i=0;i<pPatchPage->cCount;i++)
4943 {
4944 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4945
4946 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4947 {
4948 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4949 if (pPatch->flags & PATMFL_IDTHANDLER)
4950 {
4951 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4952
4953 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4954 int rc = patmRemovePatchPages(pVM, pPatch);
4955 AssertRC(rc);
4956 }
4957 else
4958 {
4959 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4960 PATMR3MarkDirtyPatch(pVM, pPatch);
4961 }
4962 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4963 goto invalid_write_loop_start;
4964 }
4965 } /* for */
4966 }
4967 }
4968 }
4969 }
4970 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4971 return VINF_SUCCESS;
4972
4973}
4974
4975/**
4976 * Disable all patches in a flushed page
4977 *
4978 * @returns VBox status code
4979 * @param pVM Pointer to the VM.
4980 * @param addr GC address of the page to flush
4981 */
4982/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4983 */
4984VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4985{
4986 addr &= PAGE_BASE_GC_MASK;
4987
4988 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4989 if (pPatchPage)
4990 {
4991 int i;
4992
4993 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4994 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4995 {
4996 if (pPatchPage->papPatch[i])
4997 {
4998 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4999
5000 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5001 PATMR3MarkDirtyPatch(pVM, pPatch);
5002 }
5003 }
5004 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5005 }
5006 return VINF_SUCCESS;
5007}
5008
5009/**
5010 * Checks if the instruction at the specified address has been patched already.
5011 *
5012 * @returns boolean, patched or not
5013 * @param pVM Pointer to the VM.
5014 * @param pInstrGC Guest context pointer to instruction
5015 */
5016VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5017{
5018 PPATMPATCHREC pPatchRec;
5019 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5020 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5021 return true;
5022 return false;
5023}
5024
5025/**
5026 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5027 *
5028 * @returns VBox status code.
5029 * @param pVM Pointer to the VM.
5030 * @param pInstrGC GC address of instr
5031 * @param pByte opcode byte pointer (OUT)
5032 *
5033 */
5034VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5035{
5036 PPATMPATCHREC pPatchRec;
5037
5038 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5039
5040 /* Shortcut. */
5041 if ( !PATMIsEnabled(pVM)
5042 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5043 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5044 {
5045 return VERR_PATCH_NOT_FOUND;
5046 }
5047
5048 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5049 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5050 if ( pPatchRec
5051 && pPatchRec->patch.uState == PATCH_ENABLED
5052 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5053 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5054 {
5055 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5056 *pByte = pPatchRec->patch.aPrivInstr[offset];
5057
5058 if (pPatchRec->patch.cbPatchJump == 1)
5059 {
5060 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5061 }
5062 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5063 return VINF_SUCCESS;
5064 }
5065 return VERR_PATCH_NOT_FOUND;
5066}
5067
5068/**
5069 * Read instruction bytes of the original code that was overwritten by the 5-byte
5070 * patch jump.
5071 *
5072 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5073 * @param pVM Pointer to the VM.
5074 * @param GCPtrInstr GC address of instr
5075 * @param pbDst The output buffer.
5076 * @param cbToRead The maximum number bytes to read.
5077 * @param pcbRead Where to return the actual number of bytes read.
5078 */
5079VMMR3DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5080{
5081 /* Shortcut. */
5082 if ( !PATMIsEnabled(pVM)
5083 || GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5084 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5085 return VERR_PATCH_NOT_FOUND;
5086
5087 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5088
5089 /*
5090 * If the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5091 * (The subtraction below is unsigned, so addresses below pPrivInstrGC wrap around and fail the check as intended.)
5092 */
5093 RTGCPTR32 off;
5094 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5095 GCPtrInstr, false /*fAbove*/);
5096 if ( pPatchRec
5097 && pPatchRec->patch.uState == PATCH_ENABLED
5098 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5099 {
5100 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5101 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5102 if (cbToRead > cbMax)
5103 cbToRead = cbMax;
5104 switch (cbToRead)
5105 {
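 /* Note: the cases below fall through intentionally, copying the remaining
    bytes one at a time for the common short reads. */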
5106 case 5: pbDst[4] = pbSrc[4];
5107 case 4: pbDst[3] = pbSrc[3];
5108 case 3: pbDst[2] = pbSrc[2];
5109 case 2: pbDst[1] = pbSrc[1];
5110 case 1: pbDst[0] = pbSrc[0];
5111 break;
5112 default:
5113 memcpy(pbDst, pbSrc, cbToRead);
5114 }
5115 *pcbRead = cbToRead;
5116
5117 if (pPatchRec->patch.cbPatchJump == 1)
5118 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5119 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5120 return VINF_SUCCESS;
5121 }
5122
5123 return VERR_PATCH_NOT_FOUND;
5124}
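
/* A minimal usage sketch for PATMR3ReadOrgInstr (illustrative only; the buffer
 * size and calling context are assumptions, not taken from this file):
 *
 *     uint8_t abOrg[16];
 *     size_t  cbRead = 0;
 *     int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, abOrg, sizeof(abOrg), &cbRead);
 *     if (RT_SUCCESS(rc))
 *         Log(("Original bytes: %.*Rhxs\n", cbRead, abOrg));
 *     else // VERR_PATCH_NOT_FOUND: address not covered by a patch jump,
 *         Log(("Not patched; read the guest memory directly instead.\n"));
 */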
5125
5126/**
5127 * Disable patch for privileged instruction at specified location
5128 *
5129 * @returns VBox status code.
5130 * @param pVM Pointer to the VM.
5131 * @param pInstrGC Guest context pointer to the privileged instruction
5132 *
5133 * @note returns failure if patching is not allowed or possible
5134 *
5135 */
5136VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5137{
5138 PPATMPATCHREC pPatchRec;
5139 PPATCHINFO pPatch;
5140
5141 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5142 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5143 if (pPatchRec)
5144 {
5145 int rc = VINF_SUCCESS;
5146
5147 pPatch = &pPatchRec->patch;
5148
5149 /* Already disabled? */
5150 if (pPatch->uState == PATCH_DISABLED)
5151 return VINF_SUCCESS;
5152
5153 /* Clear the IDT entries for the patch we're disabling. */
5154 /* Note: very important as we clear IF in the patch itself */
5155 /** @todo this needs to be changed */
5156 if (pPatch->flags & PATMFL_IDTHANDLER)
5157 {
5158 uint32_t iGate;
5159
5160 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5161 if (iGate != (uint32_t)~0)
5162 {
5163 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5164 if (++cIDTHandlersDisabled < 256)
5165 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5166 }
5167 }
5168
5169 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or idt patch) */
5170 if ( pPatch->pPatchBlockOffset
5171 && pPatch->uState == PATCH_ENABLED)
5172 {
5173 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5174 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5175 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5176 }
5177
5178 /* IDT or function patches haven't changed any guest code. */
5179 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5180 {
5181 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5182 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5183
5184 if (pPatch->uState != PATCH_REFUSED)
5185 {
5186 uint8_t temp[16];
5187
5188 Assert(pPatch->cbPatchJump < sizeof(temp));
5189
5190 /* Let's first check if the guest code is still the same. */
5191 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5192 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5193 if (rc == VINF_SUCCESS)
5194 {
5195 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5196
5197 if ( temp[0] != 0xE9 /* jmp opcode */
5198 || *(RTRCINTPTR *)(&temp[1]) != displ
5199 )
5200 {
5201 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5202 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5203 /* Remove it completely */
5204 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5205 rc = PATMR3RemovePatch(pVM, pInstrGC);
5206 AssertRC(rc);
5207 return VWRN_PATCH_REMOVED;
5208 }
5209 patmRemoveJumpToPatch(pVM, pPatch);
5210 }
5211 else
5212 {
5213 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5214 pPatch->uState = PATCH_DISABLE_PENDING;
5215 }
5216 }
5217 else
5218 {
5219 AssertMsgFailed(("Patch was refused!\n"));
5220 return VERR_PATCH_ALREADY_DISABLED;
5221 }
5222 }
5223 else
5224 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5225 {
5226 uint8_t temp[16];
5227
5228 Assert(pPatch->cbPatchJump < sizeof(temp));
5229
5230 /* Let's first check if the guest code is still the same. */
5231 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5232 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5233 if (rc == VINF_SUCCESS)
5234 {
5235 if (temp[0] != 0xCC)
5236 {
5237 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5238 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5239 /* Remove it completely */
5240 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5241 rc = PATMR3RemovePatch(pVM, pInstrGC);
5242 AssertRC(rc);
5243 return VWRN_PATCH_REMOVED;
5244 }
5245 patmDeactivateInt3Patch(pVM, pPatch);
5246 }
5247 }
5248
5249 if (rc == VINF_SUCCESS)
5250 {
5251 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5252 if (pPatch->uState == PATCH_DISABLE_PENDING)
5253 {
5254 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5255 pPatch->uState = PATCH_UNUSABLE;
5256 }
5257 else
5258 if (pPatch->uState != PATCH_DIRTY)
5259 {
5260 pPatch->uOldState = pPatch->uState;
5261 pPatch->uState = PATCH_DISABLED;
5262 }
5263 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5264 }
5265
5266 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5267 return VINF_SUCCESS;
5268 }
5269 Log(("Patch not found!\n"));
5270 return VERR_PATCH_NOT_FOUND;
5271}
5272
5273/**
5274 * Permanently disable patch for privileged instruction at specified location
5275 *
5276 * @returns VBox status code.
5277 * @param pVM Pointer to the VM.
5278 * @param pInstrGC Guest context instruction pointer
5279 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5280 * @param pConflictPatch Conflicting patch
5281 *
5282 */
5283static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5284{
5285 NOREF(pConflictAddr);
5286#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5287 PATCHINFO patch;
5288 DISCPUSTATE cpu;
5289 R3PTRTYPE(uint8_t *) pInstrHC;
5290 uint32_t cbInstr;
5291 bool disret;
5292 int rc;
5293
5294 RT_ZERO(patch);
5295 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5296 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5297 /*
5298 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5299 * with one that jumps right into the conflict patch.
5300 * Otherwise we must disable the conflicting patch to avoid serious problems.
5301 */
5302 if ( disret == true
5303 && (pConflictPatch->flags & PATMFL_CODE32)
5304 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5305 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5306 {
5307 /* Hint patches must be enabled first. */
5308 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5309 {
5310 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5311 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5312 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5313 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5314 /* Enabling might fail if the patched code has changed in the meantime. */
5315 if (rc != VINF_SUCCESS)
5316 return rc;
5317 }
5318
5319 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5320 if (RT_SUCCESS(rc))
5321 {
5322 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5323 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5324 return VINF_SUCCESS;
5325 }
5326 }
5327#endif
5328
5329 if (pConflictPatch->opcode == OP_CLI)
5330 {
5331 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5332 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5333 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5334 if (rc == VWRN_PATCH_REMOVED)
5335 return VINF_SUCCESS;
5336 if (RT_SUCCESS(rc))
5337 {
5338 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5339 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5340 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5341 if (rc == VERR_PATCH_NOT_FOUND)
5342 return VINF_SUCCESS; /* removed already */
5343
5344 AssertRC(rc);
5345 if (RT_SUCCESS(rc))
5346 {
5347 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5348 return VINF_SUCCESS;
5349 }
5350 }
5351 /* else turned into unusable patch (see below) */
5352 }
5353 else
5354 {
5355 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5356 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5357 if (rc == VWRN_PATCH_REMOVED)
5358 return VINF_SUCCESS;
5359 }
5360
5361 /* No need to monitor the code anymore. */
5362 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5363 {
5364 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5365 AssertRC(rc);
5366 }
5367 pConflictPatch->uState = PATCH_UNUSABLE;
5368 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5369 return VERR_PATCH_DISABLED;
5370}
5371
5372/**
5373 * Enable patch for privileged instruction at specified location
5374 *
5375 * @returns VBox status code.
5376 * @param pVM Pointer to the VM.
5377 * @param pInstrGC Guest context pointer to the privileged instruction
5378 *
5379 * @note returns failure if patching is not allowed or possible
5380 *
5381 */
5382VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5383{
5384 PPATMPATCHREC pPatchRec;
5385 PPATCHINFO pPatch;
5386
5387 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5388 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5389 if (pPatchRec)
5390 {
5391 int rc = VINF_SUCCESS;
5392
5393 pPatch = &pPatchRec->patch;
5394
5395 if (pPatch->uState == PATCH_DISABLED)
5396 {
5397 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5398 {
5399 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5400 uint8_t temp[16];
5401
5402 Assert(pPatch->cbPatchJump < sizeof(temp));
5403
5404 /* Let's first check if the guest code is still the same. */
5405 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5406 AssertRC(rc2);
5407 if (rc2 == VINF_SUCCESS)
5408 {
5409 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5410 {
5411 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5412 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5413 /* Remove it completely */
5414 rc = PATMR3RemovePatch(pVM, pInstrGC);
5415 AssertRC(rc);
5416 return VERR_PATCH_NOT_FOUND;
5417 }
5418
5419 PATMP2GLOOKUPREC cacheRec;
5420 RT_ZERO(cacheRec);
5421 cacheRec.pPatch = pPatch;
5422
5423 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5424 /* Free leftover lock if any. */
5425 if (cacheRec.Lock.pvMap)
5426 {
5427 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5428 cacheRec.Lock.pvMap = NULL;
5429 }
5430 AssertRC(rc2);
5431 if (RT_FAILURE(rc2))
5432 return rc2;
5433
5434#ifdef DEBUG
5435 {
5436 DISCPUSTATE cpu;
5437 char szOutput[256];
5438 uint32_t cbInstr;
5439 uint32_t i = 0;
5440 bool disret;
5441 while(i < pPatch->cbPatchJump)
5442 {
5443 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5444 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5445 Log(("Renewed patch instr: %s", szOutput));
5446 i += cbInstr;
5447 }
5448 }
5449#endif
5450 }
5451 }
5452 else
5453 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5454 {
5455 uint8_t temp[16];
5456
5457 Assert(pPatch->cbPatchJump < sizeof(temp));
5458
5459 /* Let's first check if the guest code is still the same. */
5460 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5461 AssertRC(rc2);
5462
5463 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5464 {
5465 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5466 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5467 rc = PATMR3RemovePatch(pVM, pInstrGC);
5468 AssertRC(rc);
5469 return VERR_PATCH_NOT_FOUND;
5470 }
5471
5472 rc2 = patmActivateInt3Patch(pVM, pPatch);
5473 if (RT_FAILURE(rc2))
5474 return rc2;
5475 }
5476
5477 pPatch->uState = pPatch->uOldState; //restore state
5478
5479 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5480 if (pPatch->pPatchBlockOffset)
5481 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5482
5483 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5484 }
5485 else
5486 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5487
5488 return rc;
5489 }
5490 return VERR_PATCH_NOT_FOUND;
5491}
5492
5493/**
5494 * Remove patch for privileged instruction at specified location
5495 *
5496 * @returns VBox status code.
5497 * @param pVM Pointer to the VM.
5498 * @param pPatchRec Patch record
5499 * @param fForceRemove Force removal even of protected (referenced) patches
5500 */
5501int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5502{
5503 PPATCHINFO pPatch;
5504
5505 pPatch = &pPatchRec->patch;
5506
5507 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5508 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5509 {
5510 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5511 return VERR_ACCESS_DENIED;
5512 }
5513 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5514
5515 /* Note: NEVER EVER REUSE PATCH MEMORY */
5516 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5517
5518 if (pPatchRec->patch.pPatchBlockOffset)
5519 {
5520 PAVLOU32NODECORE pNode;
5521
5522 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5523 Assert(pNode);
5524 }
5525
5526 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5527 {
5528 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5529 AssertRC(rc);
5530 }
5531
5532#ifdef VBOX_WITH_STATISTICS
5533 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5534 {
5535 STAMR3Deregister(pVM, &pPatchRec->patch);
5536#ifndef DEBUG_sandervl
5537 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5538 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5539 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5540 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5541 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5542 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5543 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5544 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5545 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5546 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5547 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5548 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5549 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5550 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5551#endif
5552 }
5553#endif
5554
5555 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5556 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5557 pPatch->nrPatch2GuestRecs = 0;
5558 Assert(pPatch->Patch2GuestAddrTree == 0);
5559
5560 patmEmptyTree(pVM, &pPatch->FixupTree);
5561 pPatch->nrFixups = 0;
5562 Assert(pPatch->FixupTree == 0);
5563
5564 if (pPatchRec->patch.pTempInfo)
5565 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5566
5567 /* Note: might fail, because it has already been removed (e.g. during reset). */
5568 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5569
5570 /* Free the patch record */
5571 MMHyperFree(pVM, pPatchRec);
5572 return VINF_SUCCESS;
5573}
5574
5575/**
5576 * RTAvlU32DoWithAll() worker.
5577 * Checks whether the current trampoline instruction is the jump to the target patch
5578 * and updates the displacement to jump to the new target.
5579 *
5580 * @returns VBox status code.
5581 * @retval VERR_ALREADY_EXISTS if the jump was found.
5582 * @param pNode The current patch to guest record to check.
5583 * @param pvUser The refresh state.
5584 */
5585static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5586{
5587 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5588 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5589 PVM pVM = pRefreshPatchState->pVM;
5590
5591 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5592
5593 /*
5594 * Check if the patch instruction starts with a jump.
5595 * ASSUMES that there is no other patch to guest record that starts
5596 * with a jump.
5597 */
5598 if (*pPatchInstr == 0xE9)
5599 {
5600 /* Jump found, update the displacement. */
5601 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5602 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
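 /* The rel32 of an E9 near jump is relative to the first byte after the 5-byte
    instruction, hence the + SIZEOF_NEARJUMP32 in the displacement below. */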
5603 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5604
5605 LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5606 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5607
5608 *(uint32_t *)&pPatchInstr[1] = displ;
5609 return VERR_ALREADY_EXISTS; /** @todo better return code */
5610 }
5611
5612 return VINF_SUCCESS;
5613}
5614
5615/**
5616 * Attempt to refresh the patch by recompiling its entire code block
5617 *
5618 * @returns VBox status code.
5619 * @param pVM Pointer to the VM.
5620 * @param pPatchRec Patch record
5621 */
5622int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5623{
5624 PPATCHINFO pPatch;
5625 int rc;
5626 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5627 PTRAMPREC pTrampolinePatchesHead = NULL;
5628
5629 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5630
5631 pPatch = &pPatchRec->patch;
5632 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5633 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5634 {
5635 if (!pPatch->pTrampolinePatchesHead)
5636 {
5637 /*
5638 * It is sometimes possible that there are trampoline patches to this patch
5639 * but they are not recorded (after a saved state load for example).
5640 * Refuse to refresh those patches.
5641 * Can hurt performance in theory if the patched code is modified by the guest
5642 * and is executed often. However most of the time states are saved after the guest
5643 * code was modified and is not updated anymore afterwards so this shouldn't be a
5644 * big problem.
5645 */
5646 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5647 return VERR_PATCHING_REFUSED;
5648 }
5649 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5650 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5651 }
5652
5653 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5654
5655 rc = PATMR3DisablePatch(pVM, pInstrGC);
5656 AssertRC(rc);
5657
5658 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5659 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5660#ifdef VBOX_WITH_STATISTICS
5661 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5662 {
5663 STAMR3Deregister(pVM, &pPatchRec->patch);
5664#ifndef DEBUG_sandervl
5665 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5666 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5667 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5668 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5669 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5670 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5671 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5672 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5673 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5674 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5675 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5676 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5677 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5678 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5679#endif
5680 }
5681#endif
5682
5683 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5684
5685 /* Attempt to install a new patch. */
5686 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5687 if (RT_SUCCESS(rc))
5688 {
5689 RTRCPTR pPatchTargetGC;
5690 PPATMPATCHREC pNewPatchRec;
5691
5692 /* Determine target address in new patch */
5693 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5694 Assert(pPatchTargetGC);
5695 if (!pPatchTargetGC)
5696 {
5697 rc = VERR_PATCHING_REFUSED;
5698 goto failure;
5699 }
5700
5701 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5702 pPatch->uCurPatchOffset = 0;
5703
5704 /* insert jump to new patch in old patch block */
5705 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5706 if (RT_FAILURE(rc))
5707 goto failure;
5708
5709 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5710 Assert(pNewPatchRec); /* can't fail */
5711
5712 /* Remove old patch (only do that when everything is finished) */
5713 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5714 AssertRC(rc2);
5715
5716 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5717 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5718 Assert(fInserted); NOREF(fInserted);
5719
5720 Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5721 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5722
5723 /* Used by another patch, so don't remove it! */
5724 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5725
5726 if (pTrampolinePatchesHead)
5727 {
5728 /* Update all trampoline patches to jump to the new patch. */
5729 PTRAMPREC pTrampRec = NULL;
5730 PATMREFRESHPATCH RefreshPatch;
5731
5732 RefreshPatch.pVM = pVM;
5733 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5734
5735 pTrampRec = pTrampolinePatchesHead;
5736
5737 while (pTrampRec)
5738 {
5739 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5740
5741 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5742 /*
5743 * We have to find the right patch2guest record because there might be others
5744 * for statistics.
5745 */
5746 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5747 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5748 Assert(rc == VERR_ALREADY_EXISTS);
5749 rc = VINF_SUCCESS;
5750 pTrampRec = pTrampRec->pNext;
5751 }
5752 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5753 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5754 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5755 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5756 }
5757 }
5758
5759failure:
5760 if (RT_FAILURE(rc))
5761 {
5762 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5763
5764 /* Remove the new inactive patch */
5765 rc = PATMR3RemovePatch(pVM, pInstrGC);
5766 AssertRC(rc);
5767
5768 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5769 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5770 Assert(fInserted); NOREF(fInserted);
5771
5772 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5773 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5774 AssertRC(rc2);
5775
5776 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5777 }
5778 return rc;
5779}
5780
5781/**
5782 * Find patch for privileged instruction at specified location
5783 *
5784 * @returns Patch structure pointer if found; else NULL
5785 * @param pVM Pointer to the VM.
5786 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5787 * @param fIncludeHints Include hinted patches or not
5788 *
5789 */
5790PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5791{
5792 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5793 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5794 if (pPatchRec)
5795 {
5796 if ( pPatchRec->patch.uState == PATCH_ENABLED
5797 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5798 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5799 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5800 {
5801 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5802 return &pPatchRec->patch;
5803 }
5804 else
5805 if ( fIncludeHints
5806 && pPatchRec->patch.uState == PATCH_DISABLED
5807 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5808 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5809 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5810 {
5811 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5812 return &pPatchRec->patch;
5813 }
5814 }
5815 return NULL;
5816}
5817
5818/**
5819 * Checks whether the GC address is inside a generated patch jump
5820 *
5821 * @returns true -> yes, false -> no
5822 * @param pVM Pointer to the VM.
5823 * @param pAddr Guest context address
5824 * @param pPatchAddr Where to store the patch's guest context address (when true is returned)
5825 */
5826VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5827{
5828 RTRCPTR addr;
5829 PPATCHINFO pPatch;
5830
5831 if (PATMIsEnabled(pVM) == false)
5832 return false;
5833
5834 if (pPatchAddr == NULL)
5835 pPatchAddr = &addr;
5836
5837 *pPatchAddr = 0;
5838
5839 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5840 if (pPatch)
5841 *pPatchAddr = pPatch->pPrivInstrGC;
5842
5843 return *pPatchAddr == 0 ? false : true;
5844}
5845
5846/**
5847 * Remove patch for privileged instruction at specified location
5848 *
5849 * @returns VBox status code.
5850 * @param pVM Pointer to the VM.
5851 * @param pInstrGC Guest context pointer to the privileged instruction
5852 *
5853 * @note returns failure if patching is not allowed or possible
5854 *
5855 */
5856VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5857{
5858 PPATMPATCHREC pPatchRec;
5859
5860 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5861 if (pPatchRec)
5862 {
5863 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5864 if (rc == VWRN_PATCH_REMOVED)
5865 return VINF_SUCCESS;
5866
5867 return PATMRemovePatch(pVM, pPatchRec, false);
5868 }
5869 AssertFailed();
5870 return VERR_PATCH_NOT_FOUND;
5871}
5872
5873/**
5874 * Mark patch as dirty
5875 *
5876 * @returns VBox status code.
5877 * @param pVM Pointer to the VM.
5878 * @param pPatch Patch record
5879 *
5880 * @note returns failure if patching is not allowed or possible
5881 *
5882 */
5883VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5884{
5885 if (pPatch->pPatchBlockOffset)
5886 {
5887 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5888 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5889 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5890 }
5891
5892 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5893 /* Put back the replaced instruction. */
5894 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5895 if (rc == VWRN_PATCH_REMOVED)
5896 return VINF_SUCCESS;
5897
5898 /* Note: we don't restore patch pages for patches that are not enabled! */
5899 /* Note: be careful when changing this behaviour!! */
5900
5901 /* The patch pages are no longer marked for self-modifying code detection */
5902 if (pPatch->flags & PATMFL_CODE_MONITORED)
5903 {
5904 rc = patmRemovePatchPages(pVM, pPatch);
5905 AssertRCReturn(rc, rc);
5906 }
5907 pPatch->uState = PATCH_DIRTY;
5908
5909 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5910 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5911
5912 return VINF_SUCCESS;
5913}
5914
5915/**
5916 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5917 *
5918 * @returns Original GC instruction pointer or 0 if not found.
5919 * @param pVM Pointer to the VM.
5920 * @param pPatch Patch block structure pointer
5921 * @param pPatchGC GC address in patch block
5922 */
5923RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5924{
5925 Assert(pPatch->Patch2GuestAddrTree);
5926 /* Get the closest record from below. */
5927 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5928 if (pPatchToGuestRec)
5929 return pPatchToGuestRec->pOrgInstrGC;
5930
5931 return 0;
5932}
5933
5934/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5935 *
5936 * @returns corresponding GC pointer in patch block
5937 * @param pVM Pointer to the VM.
5938 * @param pPatch Current patch block pointer
5939 * @param pInstrGC Guest context pointer to privileged instruction
5940 *
5941 */
5942RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5943{
5944 if (pPatch->Guest2PatchAddrTree)
5945 {
5946 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5947 if (pGuestToPatchRec)
5948 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5949 }
5950
5951 return 0;
5952}
5953
5954/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5955 *
5956 * @returns corresponding GC pointer in patch block
5957 * @param pVM Pointer to the VM.
5958 * @param pPatch Current patch block pointer
5959 * @param pInstrGC Guest context pointer to privileged instruction
5960 *
5961 */
5962RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5963{
5964 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5965 if (pGuestToPatchRec)
5966 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5967
5968 return 0;
5969}
5970
5971/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5972 *
5973 * @returns corresponding GC pointer in patch block
5974 * @param pVM Pointer to the VM.
5975 * @param pInstrGC Guest context pointer to privileged instruction
5976 *
5977 */
5978VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5979{
5980 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5981 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5982 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5983 else
5984 return 0;
5985}
5986
5987/**
5988 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5989 *
5990 * @returns original GC instruction pointer or 0 if not found
5991 * @param pVM Pointer to the VM.
5992 * @param pPatchGC GC address in patch block
5993 * @param pEnmState State of the translated address (out)
5994 *
5995 */
5996VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5997{
5998 PPATMPATCHREC pPatchRec;
5999 void *pvPatchCoreOffset;
6000 RTRCPTR pPrivInstrGC;
6001
6002 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6003 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6004 if (pvPatchCoreOffset == 0)
6005 {
6006 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6007 return 0;
6008 }
6009 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6010 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6011 if (pEnmState)
6012 {
6013 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6014 || pPatchRec->patch.uState == PATCH_DIRTY
6015 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6016 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6017 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6018
6019 if ( !pPrivInstrGC
6020 || pPatchRec->patch.uState == PATCH_UNUSABLE
6021 || pPatchRec->patch.uState == PATCH_REFUSED)
6022 {
6023 pPrivInstrGC = 0;
6024 *pEnmState = PATMTRANS_FAILED;
6025 }
6026 else
6027 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6028 {
6029 *pEnmState = PATMTRANS_INHIBITIRQ;
6030 }
6031 else
6032 if ( pPatchRec->patch.uState == PATCH_ENABLED
6033 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6034 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6035 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6036 {
6037 *pEnmState = PATMTRANS_OVERWRITTEN;
6038 }
6039 else
6040 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6041 {
6042 *pEnmState = PATMTRANS_OVERWRITTEN;
6043 }
6044 else
6045 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6046 {
6047 *pEnmState = PATMTRANS_PATCHSTART;
6048 }
6049 else
6050 *pEnmState = PATMTRANS_SAFE;
6051 }
6052 return pPrivInstrGC;
6053}
6054
6055/**
6056 * Returns the GC pointer of the patch for the specified GC address
6057 *
6058 * @returns GC pointer of the patch code, or 0 if no enabled or dirty patch exists for the address.
6059 * @param pVM Pointer to the VM.
6060 * @param pAddrGC Guest context address
6061 */
6062VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6063{
6064 PPATMPATCHREC pPatchRec;
6065
6066 /* Find the patch record. */
6067 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6068 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6069 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6070 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6071 else
6072 return 0;
6073}
6074
6075/**
6076 * Attempt to recover dirty instructions
6077 *
6078 * @returns VBox status code.
6079 * @param pVM Pointer to the VM.
6080 * @param pCtx Pointer to the guest CPU context.
6081 * @param pPatch Patch record.
6082 * @param pPatchToGuestRec Patch to guest address record.
6083 * @param pEip GC pointer of trapping instruction.
6084 */
6085static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6086{
6087 DISCPUSTATE CpuOld, CpuNew;
6088 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6089 int rc;
6090 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6091 uint32_t cbDirty;
6092 PRECPATCHTOGUEST pRec;
6093 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6094 PVMCPU pVCpu = VMMGetCpu0(pVM);
6095 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6096
6097 pRec = pPatchToGuestRec;
6098 pCurInstrGC = pOrgInstrGC;
6099 pCurPatchInstrGC = pEip;
6100 cbDirty = 0;
6101 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6102
6103 /* Find all adjacent dirty instructions */
6104 while (true)
6105 {
6106 if (pRec->fJumpTarget)
6107 {
6108 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6109 pRec->fDirty = false;
6110 return VERR_PATCHING_REFUSED;
6111 }
6112
6113 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6114 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6115 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6116
6117 /* Only harmless instructions are acceptable. */
6118 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6119 if ( RT_FAILURE(rc)
6120 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6121 {
6122 if (RT_SUCCESS(rc))
6123 cbDirty += CpuOld.cbInstr;
6124 else
6125 if (!cbDirty)
6126 cbDirty = 1;
6127 break;
6128 }
6129
6130#ifdef DEBUG
6131 char szBuf[256];
6132 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6133 szBuf, sizeof(szBuf), NULL);
6134 Log(("DIRTY: %s\n", szBuf));
6135#endif
6136 /* Mark as clean; if we fail we'll let it always fault. */
6137 pRec->fDirty = false;
6138
6139 /* Remove old lookup record. */
6140 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6141 pPatchToGuestRec = NULL;
6142
6143 pCurPatchInstrGC += CpuOld.cbInstr;
6144 cbDirty += CpuOld.cbInstr;
6145
6146 /* Let's see if there's another dirty instruction right after. */
6147 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6148 if (!pRec || !pRec->fDirty)
6149 break; /* no more dirty instructions */
6150
6151 /* In case of complex instructions the next guest instruction could be quite far off. */
6152 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6153 }
6154
6155 if ( RT_SUCCESS(rc)
6156 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6157 )
6158 {
6159 uint32_t cbLeft;
6160
6161 pCurPatchInstrHC = pPatchInstrHC;
6162 pCurPatchInstrGC = pEip;
6163 cbLeft = cbDirty;
6164
6165 while (cbLeft && RT_SUCCESS(rc))
6166 {
6167 bool fValidInstr;
6168
6169 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6170
6171 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6172 if ( !fValidInstr
6173 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6174 )
6175 {
6176 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6177
6178 if ( pTargetGC >= pOrgInstrGC
6179 && pTargetGC <= pOrgInstrGC + cbDirty
6180 )
6181 {
6182 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6183 fValidInstr = true;
6184 }
6185 }
6186
6187 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6188 if ( rc == VINF_SUCCESS
6189 && CpuNew.cbInstr <= cbLeft /* must still fit */
6190 && fValidInstr
6191 )
6192 {
6193#ifdef DEBUG
6194 char szBuf[256];
6195 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6196 szBuf, sizeof(szBuf), NULL);
6197 Log(("NEW: %s\n", szBuf));
6198#endif
6199
6200 /* Copy the new instruction. */
6201 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6202 AssertRC(rc);
6203
6204 /* Add a new lookup record for the duplicated instruction. */
6205 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6206 }
6207 else
6208 {
6209#ifdef DEBUG
6210 char szBuf[256];
6211 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6212 szBuf, sizeof(szBuf), NULL);
6213 Log(("NEW: %s (FAILED)\n", szBuf));
6214#endif
6215 /* Restore the old lookup record for the duplicated instruction. */
6216 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6217
6218 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6219 rc = VERR_PATCHING_REFUSED;
6220 break;
6221 }
6222 pCurInstrGC += CpuNew.cbInstr;
6223 pCurPatchInstrHC += CpuNew.cbInstr;
6224 pCurPatchInstrGC += CpuNew.cbInstr;
6225 cbLeft -= CpuNew.cbInstr;
6226
6227 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6228 if (!cbLeft)
6229 {
6230 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6231 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6232 {
6233 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6234 if (pRec)
6235 {
6236 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6237 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6238
6239 Assert(!pRec->fDirty);
6240
6241 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6242 if (cbFiller >= SIZEOF_NEARJUMP32)
6243 {
6244 pPatchFillHC[0] = 0xE9; /* near jmp (rel32) over the unused filler bytes */
6245 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32; /* rel32 is relative to the end of the 5 byte jump */
6246#ifdef DEBUG
6247 char szBuf[256];
6248 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6249 szBuf, sizeof(szBuf), NULL);
6250 Log(("FILL: %s\n", szBuf));
6251#endif
6252 }
6253 else
6254 {
6255 for (unsigned i = 0; i < cbFiller; i++)
6256 {
6257 pPatchFillHC[i] = 0x90; /* NOP */
6258#ifdef DEBUG
6259 char szBuf[256];
6260 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6261 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6262 Log(("FILL: %s\n", szBuf));
6263#endif
6264 }
6265 }
6266 }
6267 }
6268 }
6269 }
6270 }
6271 else
6272 rc = VERR_PATCHING_REFUSED;
6273
6274 if (RT_SUCCESS(rc))
6275 {
6276 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6277 }
6278 else
6279 {
6280 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6281 Assert(cbDirty);
6282
6283 /* Mark the whole instruction stream with breakpoints. */
6284 if (cbDirty)
6285 memset(pPatchInstrHC, 0xCC, cbDirty);
6286
6287 if ( pVM->patm.s.fOutOfMemory == false
6288 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6289 {
6290 rc = patmR3RefreshPatch(pVM, pPatch);
6291 if (RT_FAILURE(rc))
6292 {
6293 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6294 }
6295 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6296 rc = VERR_PATCHING_REFUSED;
6297 }
6298 }
6299 return rc;
6300}
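
/*
 * Illustrative sketch, not part of the build: the gap-filling logic above restated as a
 * hypothetical standalone helper (the name patmSketchFillGap is made up). When the re-emitted
 * instructions leave unused room before the next mapped patch instruction, a 5 byte near jump
 * (0xE9 + rel32) is written if the gap is big enough, otherwise the gap is padded with NOPs.
 * The rel32 operand is counted from the end of the jump, hence the SIZEOF_NEARJUMP32 adjustment.
 *
 *     static void patmSketchFillGap(uint8_t *pbFill, unsigned cbFiller)
 *     {
 *         if (cbFiller >= SIZEOF_NEARJUMP32)
 *         {
 *             pbFill[0] = 0xE9;                                        // jmp rel32
 *             *(uint32_t *)&pbFill[1] = cbFiller - SIZEOF_NEARJUMP32;  // skip the rest of the gap
 *         }
 *         else
 *             for (unsigned i = 0; i < cbFiller; i++)
 *                 pbFill[i] = 0x90;                                    // nop
 *     }
 */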
6301
6302/**
6303 * Handle a trap inside patch code.
6304 *
6305 * @returns VBox status code.
6306 * @param pVM Pointer to the VM.
6307 * @param pCtx Pointer to the guest CPU context.
6308 * @param pEip GC pointer of trapping instruction.
6309 * @param ppNewEip Where to store the GC pointer of the instruction to resume guest execution at.
6310 */
6311VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6312{
6313 PPATMPATCHREC pPatch = 0;
6314 void *pvPatchCoreOffset;
6315 RTRCUINTPTR offset;
6316 RTRCPTR pNewEip;
6317 int rc;
6318 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6319 PVMCPU pVCpu = VMMGetCpu0(pVM);
6320
6321 Assert(pVM->cCpus == 1);
6322
6323 pNewEip = 0;
6324 *ppNewEip = 0;
6325
6326 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6327
6328 /* Find the patch record. */
6329 /* Note: there might not be a patch-to-guest translation record (global function). */
6330 offset = pEip - pVM->patm.s.pPatchMemGC;
6331 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6332 if (pvPatchCoreOffset)
6333 {
6334 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6335
6336 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6337
6338 if (pPatch->patch.uState == PATCH_DIRTY)
6339 {
6340 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6341 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6342 {
6343 /* Function duplication patches set fPIF to 1 on entry */
6344 pVM->patm.s.pGCStateHC->fPIF = 1;
6345 }
6346 }
6347 else
6348 if (pPatch->patch.uState == PATCH_DISABLED)
6349 {
6350 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6351 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6352 {
6353 /* Function duplication patches set fPIF to 1 on entry */
6354 pVM->patm.s.pGCStateHC->fPIF = 1;
6355 }
6356 }
6357 else
6358 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6359 {
6360 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6361
6362 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6363 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6364 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6365 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6366 }
6367
6368 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6369 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6370
6371 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6372 pPatch->patch.cTraps++;
6373 PATM_STAT_FAULT_INC(&pPatch->patch);
6374 }
6375 else
6376 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6377
6378 /* Check if we were interrupted in PATM generated instruction code. */
6379 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6380 {
6381 DISCPUSTATE Cpu;
6382 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6383 AssertRC(rc);
6384
6385 if ( rc == VINF_SUCCESS
6386 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6387 || Cpu.pCurInstr->uOpcode == OP_PUSH
6388 || Cpu.pCurInstr->uOpcode == OP_CALL)
6389 )
6390 {
6391 uint64_t fFlags;
6392
6393 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6394
6395 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6396 {
6397 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6398 if ( rc == VINF_SUCCESS
6399 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6400 {
6401 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6402
6403 /* Reset the PATM stack. */
6404 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6405
6406 pVM->patm.s.pGCStateHC->fPIF = 1;
6407
6408 Log(("Faulting push -> go back to the original instruction\n"));
6409
6410 /* continue at the original instruction */
6411 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6412 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6413 return VINF_SUCCESS;
6414 }
6415 }
6416
6417 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6418 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6419 AssertMsgRC(rc, ("PGMShwMakePageWritable -> rc=%Rrc\n", rc));
6420 if (rc == VINF_SUCCESS)
6421 {
6422 /* The guest page *must* be present. */
6423 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6424 if ( rc == VINF_SUCCESS
6425 && (fFlags & X86_PTE_P))
6426 {
6427 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6428 return VINF_PATCH_CONTINUE;
6429 }
6430 }
6431 }
6432 else
6433 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6434 {
6435 /* Invalidated patch or first instruction overwritten.
6436 * We can ignore the fPIF state in this case.
6437 */
6438 /* Reset the PATM stack. */
6439 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6440
6441 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6442
6443 pVM->patm.s.pGCStateHC->fPIF = 1;
6444
6445 /* continue at the original instruction */
6446 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6447 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6448 return VINF_SUCCESS;
6449 }
6450
6451 char szBuf[256];
6452 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6453
6454 /* Very bad. We crashed in emitted code. Probably stack? */
6455 if (pPatch)
6456 {
6457 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6458 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6459 }
6460 else
6461 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6462 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6463 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6464 }
6465
6466 /* From here on, we must have a valid patch to guest translation. */
6467 if (pvPatchCoreOffset == 0)
6468 {
6469 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6470 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6471 return VERR_PATCH_NOT_FOUND;
6472 }
6473
6474 /* Take care of dirty/changed instructions. */
6475 if (pPatchToGuestRec->fDirty)
6476 {
6477 Assert(pPatchToGuestRec->Core.Key == offset);
6478 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6479
6480 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6481 if (RT_SUCCESS(rc))
6482 {
6483 /* Retry the current instruction. */
6484 pNewEip = pEip;
6485 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6486 }
6487 else
6488 {
6489 /* Reset the PATM stack. */
6490 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6491
6492 rc = VINF_SUCCESS; /* Continue at original instruction. */
6493 }
6494
6495 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6496 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6497 return rc;
6498 }
6499
6500#ifdef VBOX_STRICT
6501 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6502 {
6503 DISCPUSTATE cpu;
6504 bool disret;
6505 uint32_t cbInstr;
6506 PATMP2GLOOKUPREC cacheRec;
6507 RT_ZERO(cacheRec);
6508 cacheRec.pPatch = &pPatch->patch;
6509
6510 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6511 &cpu, &cbInstr);
6512 if (cacheRec.Lock.pvMap)
6513 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6514
6515 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6516 {
6517 RTRCPTR retaddr;
6518 PCPUMCTX pCtx2;
6519
6520 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6521
6522 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6523 AssertRC(rc);
6524
6525 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6526 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6527 }
6528 }
6529#endif
6530
6531 /* Return the original address, corrected by subtracting the CS base address. */
6532 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6533
6534 /* Reset the PATM stack. */
6535 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6536
6537 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6538 {
6539 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6540 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6541#ifdef VBOX_STRICT
6542 DISCPUSTATE cpu;
6543 bool disret;
6544 uint32_t cbInstr;
6545 PATMP2GLOOKUPREC cacheRec;
6546 RT_ZERO(cacheRec);
6547 cacheRec.pPatch = &pPatch->patch;
6548
6549 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6550 &cpu, &cbInstr);
6551 if (cacheRec.Lock.pvMap)
6552 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6553
6554 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6555 {
6556 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6557 &cpu, &cbInstr);
6558 if (cacheRec.Lock.pvMap)
6559 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6560
6561 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6562 }
6563#endif
6564 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6565 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6566 }
6567
6568 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6569 DBGFR3DisasInstrLog(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6570 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6571 {
6572 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6573 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6574 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6575 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6576 return VERR_PATCH_DISABLED;
6577 }
6578
6579#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6580 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6581 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6582 {
6583 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6584 // We are only wasting time; back out the patch.
6585 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6586 pTrapRec->pNextPatchInstr = 0;
6587 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6588 return VERR_PATCH_DISABLED;
6589 }
6590#endif
6591
6592 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6593 return VINF_SUCCESS;
6594}
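
/*
 * Minimal caller-side sketch; this is an assumption about the caller and not taken from this
 * file. The returned *ppNewEip is already relative to the guest CS base, so the trap handler
 * that invoked PATMR3HandleTrap only has to store it into the guest context before resuming,
 * both for VINF_SUCCESS (continue at the original guest instruction) and VINF_PATCH_CONTINUE
 * (retry inside the patch).
 *
 *     RTGCPTR uResumeEip;
 *     int rcTrap = PATMR3HandleTrap(pVM, pCtx, pEip, &uResumeEip);
 *     if (rcTrap == VINF_SUCCESS || rcTrap == VINF_PATCH_CONTINUE)
 *         pCtx->eip = (uint32_t)uResumeEip;   // resume at the translated address
 */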
6595
6596
6597/**
6598 * Handle a page fault in a monitored page.
6599 *
6600 * @returns VBox status code.
6601 * @param pVM Pointer to the VM.
6602 */
6603VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6604{
6605 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6606
6607 addr &= PAGE_BASE_GC_MASK;
6608
6609 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6610 AssertRC(rc); NOREF(rc);
6611
6612 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6613 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6614 {
6615 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6616 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6617 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6618 if (rc == VWRN_PATCH_REMOVED)
6619 return VINF_SUCCESS;
6620
6621 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6622
6623 if (addr == pPatchRec->patch.pPrivInstrGC)
6624 addr++;
6625 }
6626
6627 for (;;)
6628 {
6629 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6630
6631 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6632 break;
6633
6634 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6635 {
6636 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6637 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6638 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6639 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6640 }
6641 addr = pPatchRec->patch.pPrivInstrGC + 1;
6642 }
6643
6644 pVM->patm.s.pvFaultMonitor = 0;
6645 return VINF_SUCCESS;
6646}
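
/*
 * Illustrative restatement, not part of the build: the renewal loop above walks every patch
 * record whose privileged instruction lives in the faulting page and refreshes it with a
 * disable/enable cycle, which re-installs the patch jump over the (possibly rewritten) guest
 * code. The helper name below is hypothetical.
 *
 *     while ((pPatchRec = patmSketchNextPatchInPage(pVM, addr)) != NULL)
 *     {
 *         if (pPatchRec->patch.uState == PATCH_ENABLED)
 *         {
 *             PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
 *             PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
 *         }
 *         addr = pPatchRec->patch.pPrivInstrGC + 1;
 *     }
 */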
6647
6648
6649#ifdef VBOX_WITH_STATISTICS
6650
6651static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6652{
6653 if (pPatch->flags & PATMFL_SYSENTER)
6654 {
6655 return "SYSENT";
6656 }
6657 else
6658 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6659 {
6660 static char szTrap[16];
6661 uint32_t iGate;
6662
6663 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6664 if (iGate < 256)
6665 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6666 else
6667 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6668 return szTrap;
6669 }
6670 else
6671 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6672 return "DUPFUNC";
6673 else
6674 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6675 return "FUNCCALL";
6676 else
6677 if (pPatch->flags & PATMFL_TRAMPOLINE)
6678 return "TRAMP";
6679 else
6680 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6681}
6682
6683static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6684{
6685 NOREF(pVM);
6686 switch (pPatch->uState)
6687 {
6688 case PATCH_ENABLED:
6689 return "ENA";
6690 case PATCH_DISABLED:
6691 return "DIS";
6692 case PATCH_DIRTY:
6693 return "DIR";
6694 case PATCH_UNUSABLE:
6695 return "UNU";
6696 case PATCH_REFUSED:
6697 return "REF";
6698 case PATCH_DISABLE_PENDING:
6699 return "DIP";
6700 default:
6701 AssertFailed();
6702 return " ";
6703 }
6704}
6705
6706/**
6707 * Resets the sample.
6708 * @param pVM Pointer to the VM.
6709 * @param pvSample The sample registered using STAMR3RegisterCallback.
6710 */
6711static void patmResetStat(PVM pVM, void *pvSample)
6712{
6713 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6714 Assert(pPatch);
6715
6716 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6717 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6718}
6719
6720/**
6721 * Prints the sample into the buffer.
6722 *
6723 * @param pVM Pointer to the VM.
6724 * @param pvSample The sample registered using STAMR3RegisterCallback.
6725 * @param pszBuf The buffer to print into.
6726 * @param cchBuf The size of the buffer.
6727 */
6728static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6729{
6730 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6731 Assert(pPatch);
6732
6733 Assert(pPatch->uState != PATCH_REFUSED);
6734 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6735
6736 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6737 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6738 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6739}
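
/*
 * Example of the resulting line with purely illustrative values: an enabled duplicated
 * function patch of 0x1c0 bytes would print roughly as
 *
 *     size 01c0 ->ENA  DUPFUNC - 00000042 - 00000007
 *
 * where the last two fields are the per-patch STAMRATIOU32 counters u32A and u32B.
 */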
6740
6741/**
6742 * Returns the GC address of the corresponding patch statistics counter
6743 *
6744 * @returns Stat address
6745 * @param pVM Pointer to the VM.
6746 * @param pPatch Patch structure
6747 */
6748RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6749{
6750 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6751 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6752}
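
/*
 * Layout sketch: the per-patch counters form a flat array of STAMRATIOU32 entries in guest
 * mappable memory, so the GC address of the u32A member of patch index i is simply
 *
 *     pVM->patm.s.pStatsGC + i * sizeof(STAMRATIOU32) + RT_OFFSETOF(STAMRATIOU32, u32A)
 *
 * The generated patch code can thus bump its own counter with a single memory-operand
 * instruction at that address; that last point is an assumption, not something shown in
 * this function.
 */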
6753
6754#endif /* VBOX_WITH_STATISTICS */
6755
6756#ifdef VBOX_WITH_DEBUGGER
6757/**
6758 * The '.patmoff' command.
6759 *
6760 * @returns VBox status code.
6761 * @param pCmd Pointer to the command descriptor (as registered).
6762 * @param pCmdHlp Pointer to command helper functions.
6763 * @param pVM Pointer to the current VM (if any).
6764 * @param paArgs Pointer to (readonly) array of arguments.
6765 * @param cArgs Number of arguments in the array.
6766 */
6767static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6768{
6769 /*
6770 * Validate input.
6771 */
6772 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6773 if (!pVM)
6774 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6775
6776 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6777 PATMR3AllowPatching(pVM, false);
6778 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6779}
6780
6781/**
6782 * The '.patmon' command.
6783 *
6784 * @returns VBox status code.
6785 * @param pCmd Pointer to the command descriptor (as registered).
6786 * @param pCmdHlp Pointer to command helper functions.
6787 * @param pVM Pointer to the current VM (if any).
6788 * @param paArgs Pointer to (readonly) array of arguments.
6789 * @param cArgs Number of arguments in the array.
6790 */
6791static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6792{
6793 /*
6794 * Validate input.
6795 */
6796 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6797 if (!pVM)
6798 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6799
6800 PATMR3AllowPatching(pVM, true);
6801 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6802 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6803}
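
/*
 * Debugger console usage sketch; the prompt and output shown are illustrative:
 *
 *     VBoxDbg> .patmoff
 *     Patching disabled
 *     VBoxDbg> .patmon
 *     Patching enabled
 *
 * '.patmoff' first disables every existing patch and then blocks new patching via
 * PATMR3AllowPatching(pVM, false); '.patmon' re-allows patching and re-enables the existing
 * patches.
 */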
6804#endif